source: string (length 3–92)
c: string (length 26–2.25M)
ILS3D.h
/* ################################################################################### # # BCMTools # # Copyright (c) 2011-2014 Institute of Industrial Science, The University of Tokyo. # All rights reserved. # # Copyright (c) 2012-2016 Advanced Institute for Computational Science (AICS), RIKEN. # All rights reserved. # # Copyright (c) 2017 Research Institute for Information Technology (RIIT), Kyushu University. # All rights reserved. # ################################################################################### */ #ifndef ILS3D_H #define ILS3D_H #include <cmath> #include <cfloat> #include "BlockManager.h" #include "LocalScalar3D.h" #include "real.h" #include "blas.h" #include "comm.h" #include "PM.h" class ILS3D { public: ILS3D() { } ~ILS3D() { } private: bool IsConverged( BlockManager& blockManager, real& residual, real& rr, real& bb, real& epsilon, int count, int countMax) { residual = (fabs(bb)>FLT_MIN) ? fabs(rr)/fabs(bb) : fabs(rr); if( residual <= epsilon*epsilon ) { return true; } if( isnan(residual) ) { std::cout << bb << " " << rr << std::endl; Exit(0); } if( isinf(residual) ) { Exit(0); } if( count == countMax ) { } return false; } void Jacobi_Smoother( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsx0, real omega) { int vc = plsx->GetVC(); PM_Start(tm_JacobiSmoother, 0, 0, true); int NB = blockManager.getNumBlock(); BlockBase* block0 = blockManager.getBlock(0); ::Vec3i size = block0->getSize(); int NX = size.x; int NY = size.y; int NZ = size.z; int sz[3] = {NX, NY, NZ}; PM_Start(tm_JacobiSmoother_Calc, 0, 0, true); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<NB; ++n) { BlockBase* block = blockManager.getBlock(n); real* x = plsx ->GetBlockData(block); real* Ap = plsAp->GetBlockData(block); real* Aw = plsAw->GetBlockData(block); real* Ae = plsAe->GetBlockData(block); real* As = plsAs->GetBlockData(block); real* An = plsAn->GetBlockData(block); real* Ab = plsAb->GetBlockData(block); real* At = plsAt->GetBlockData(block); real* b = plsb ->GetBlockData(block); real* x0 = plsx0->GetBlockData(block); real pomega = omega; jacobi_smoother_( x0, x, Ap, Aw, Ae, As, An, Ab, At, b, &pomega, sz, &vc); } PM_Stop(tm_JacobiSmoother_Calc, 0, 0, 16.0*NX*NY*NZ, NB); PM_Start(tm_JacobiSmoother_Swap, 0, 0, true); LSSwap(*plsx, *plsx0); PM_Stop(tm_JacobiSmoother_Swap); PM_Start(tm_JacobiSmoother_Comm, 0, 0, true); plsx->ImposeBoundaryCondition(blockManager); PM_Stop(tm_JacobiSmoother_Comm); PM_Stop(tm_JacobiSmoother); } void CalcAx( BlockManager& blockManager, LocalScalar3D<real>* plsAx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsx) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* Ax = plsAx->GetBlockData(block); real* Ap = plsAp->GetBlockData(block); real* Aw = plsAw->GetBlockData(block); real* Ae = plsAe->GetBlockData(block); real* As = plsAs->GetBlockData(block); 
real* An = plsAn->GetBlockData(block); real* Ab = plsAb->GetBlockData(block); real* At = plsAt->GetBlockData(block); real* x = plsx ->GetBlockData(block); calc_ax_( Ax, Ap, Aw, Ae, As, An, Ab, At, x, sz, &vc); } } void CalcR( BlockManager& blockManager, LocalScalar3D<real>* plsr, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsb) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* r = plsr ->GetBlockData(block); real* Ap = plsAp->GetBlockData(block); real* Aw = plsAw->GetBlockData(block); real* Ae = plsAe->GetBlockData(block); real* As = plsAs->GetBlockData(block); real* An = plsAn->GetBlockData(block); real* Ab = plsAb->GetBlockData(block); real* At = plsAt->GetBlockData(block); real* x = plsx ->GetBlockData(block); real* b = plsb ->GetBlockData(block); calc_r_( r, Ap, Aw, Ae, As, An, Ab, At, x, b, sz, &vc); } } void CalcR2( BlockManager& blockManager, real& rr, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsb) { int vc = plsx->GetVC(); double rr_local = 0.0; #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for reduction(+: rr_local) #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* Ap = plsAp->GetBlockData(block); real* Aw = plsAw->GetBlockData(block); real* Ae = plsAe->GetBlockData(block); real* As = plsAs->GetBlockData(block); real* An = plsAn->GetBlockData(block); real* Ab = plsAb->GetBlockData(block); real* At = plsAt->GetBlockData(block); real* x = plsx ->GetBlockData(block); real* b = plsb ->GetBlockData(block); real rr_block = 0.0; calc_r2_( &rr_block, Ap, Aw, Ae, As, An, Ab, At, x, b, sz, &vc); rr_local += rr_block; // std::cout << rr_block << std::endl; } const MPI::Intracomm& comm = blockManager.getCommunicator(); double rr_global = 0.0; allreduce_(&rr_global, &rr_local); /* #ifdef _REAL_IS_DOUBLE_ comm.Allreduce(&rr_local, &rr_global, 1, MPI_DOUBLE_PRECISION, MPI_SUM); #else comm.Allreduce(&rr_local, &rr_global, 1, MPI_FLOAT, MPI_SUM); #endif */ rr = rr_global; } void DOT( BlockManager& blockManager, real& xy, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsy) { int vc = plsx->GetVC(); double xy_local = 0.0; #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for reduction(+: xy_local) #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* x = plsx->GetBlockData(block); real* y = plsy->GetBlockData(block); real xy_block = 0.0; dot_(&xy_block, x, y, sz, &vc); xy_local += xy_block; } const MPI::Intracomm& comm = blockManager.getCommunicator(); double xy_global = 0.0; allreduce_(&xy_global, &xy_local); /* #ifdef _REAL_IS_DOUBLE_ 
comm.Allreduce(&xy_local, &xy_global, 1, MPI_DOUBLE_PRECISION, MPI_SUM); #else comm.Allreduce(&xy_local, &xy_global, 1, MPI_FLOAT, MPI_SUM); #endif */ xy = xy_global; } void AXPY( BlockManager& blockManager, LocalScalar3D<real>* plsy, LocalScalar3D<real>* plsx, real a) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* y = plsy->GetBlockData(block); real* x = plsx->GetBlockData(block); axpy_(y, x, &a, sz, &vc); } } void XPAY( BlockManager& blockManager, LocalScalar3D<real>* plsy, LocalScalar3D<real>* plsx, real a) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* y = plsy->GetBlockData(block); real* x = plsx->GetBlockData(block); xpay_(y, x, &a, sz, &vc); } } void AXPYZ( BlockManager& blockManager, LocalScalar3D<real>* plsz, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsy, real a) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* z = plsz->GetBlockData(block); real* x = plsx->GetBlockData(block); real* y = plsy->GetBlockData(block); axpyz_(z, x, y, &a, sz, &vc); } } void AXPBYPZ( BlockManager& blockManager, LocalScalar3D<real>* plsz, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsy, real a, real b) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* z = plsz->GetBlockData(block); real* x = plsx->GetBlockData(block); real* y = plsy->GetBlockData(block); axpbypz_(z, x, y, &a, &b, sz, &vc); } } public: void Fill( BlockManager& blockManager, LocalScalar3D<real>* plsx, real value) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* x = plsx->GetBlockData(block); real v = value; fill_(x, &v, sz, &vc); } } void Copy( BlockManager& blockManager, LocalScalar3D<real>* plsy, LocalScalar3D<real>* plsx) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* x = plsx->GetBlockData(block); real* y = plsy->GetBlockData(block); copy_(y, x, sz, &vc); } } void Jacobi_PreConditioner( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, 
LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsx0, real omega, int countPreConditioner ) { for(int count=0; count<countPreConditioner; count++) { Jacobi_Smoother( blockManager, plsx, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsb, plsx0, omega); } } void Jacobi( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsx0, real omega, int countMax, real epsilon, int& count, real& residual) { real bb = 0.0; DOT(blockManager, bb, plsb, plsb); for(count=1; count<=countMax; ++count) { Jacobi_Smoother( blockManager, plsx, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsb, plsx0, omega); real rr = 0.0; CalcR2( blockManager, rr, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsx, plsb); bool bResult = IsConverged( blockManager, residual, rr, bb, epsilon, count, countMax); if( bResult == true ) { break; } } plsx->ImposeBoundaryCondition(blockManager); } void CG( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsr, LocalScalar3D<real>* plsp, LocalScalar3D<real>* plsq, LocalScalar3D<real>* plsz, LocalScalar3D<real>* plsx0, real omega, int countPreConditioner, int countMax, real epsilon, int& count, real& residual) { real bb = 0.0; DOT(blockManager, bb, plsb, plsb); if( fabs(bb) < FLT_MIN ) { residual = 0.0; count = 0; return; } CalcR( blockManager, plsr, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsx, plsb); plsr->ImposeBoundaryCondition(blockManager); real rr0 = 1.0; real rr1 = 0.0; for(count=1; count<=countMax; ++count) { Fill(blockManager, plsz, 0.0); Jacobi_PreConditioner( blockManager, plsz, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsr, plsx0, omega, countPreConditioner); rr1 = 0.0; DOT( blockManager, rr1, plsr, plsz); if( fabs(rr1) < FLT_MIN ) { residual = rr1; count = 0; break; } real beta = rr1/rr0; if( count==1 ) { Copy( blockManager, plsp, plsz); } else { XPAY( blockManager, plsp, plsz, beta); } plsp->ImposeBoundaryCondition(blockManager); CalcAx( blockManager, plsq, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsp); real qp = 0.0; DOT( blockManager, qp, plsq, plsp); real alpha = rr1/qp; AXPY( blockManager, plsx, plsp, alpha); AXPY( blockManager, plsr, plsq, -alpha); plsr->ImposeBoundaryCondition(blockManager); real rr = 0.0; DOT( blockManager, rr, plsr, plsr); bool bResult = IsConverged( blockManager, residual, rr, bb, epsilon, count, countMax); if( bResult == true ) { break; } rr0 = rr1; } plsx->ImposeBoundaryCondition(blockManager); } void BiCGSTAB( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsr, LocalScalar3D<real>* plsr0, LocalScalar3D<real>* plsp, LocalScalar3D<real>* plsp_, LocalScalar3D<real>* plsq_, LocalScalar3D<real>* plss, LocalScalar3D<real>* plss_, 
LocalScalar3D<real>* plst_, LocalScalar3D<real>* plsx0, real omega, int countPreConditioner, int countMax, real epsilon, int& count, real& residual) { real bb = 0.0; DOT(blockManager, bb, plsb, plsb); if( fabs(bb) < FLT_MIN ) { residual = 0.0; count = 0; return; } CalcR( blockManager, plsr, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsx, plsb); Copy( blockManager, plsr0, plsr); real rr0 = 1.0; real alpha = 0.0; real gamma = 1.0; for(count=1; count<=countMax; ++count) { real rr1 = 0.0; DOT(blockManager, rr1, plsr, plsr0); if( fabs(rr1) < FLT_MIN ) { residual = rr1; count = 0; break; } if( count == 1 ) { Copy( blockManager, plsp, plsr); } else { real beta = rr1/rr0*alpha/gamma; AXPY( blockManager, plsp, plsq_, -gamma); XPAY( blockManager, plsp, plsr, beta); } plsp->ImposeBoundaryCondition(blockManager); Fill(blockManager, plsp_, 0.0); Jacobi_PreConditioner( blockManager, plsp_, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsp, plsx0, omega, countPreConditioner); CalcAx( blockManager, plsq_, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsp_); real q_r0 = 0.0; DOT( blockManager, q_r0, plsq_, plsr0); alpha = rr1/q_r0; AXPYZ( blockManager, plss, plsq_, plsr, -alpha); plss->ImposeBoundaryCondition(blockManager); Fill(blockManager, plss_, 0.0); Jacobi_PreConditioner( blockManager, plss_, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plss, plsx0, omega, countPreConditioner); CalcAx( blockManager, plst_, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plss_); real t_s = 0.0; DOT( blockManager, t_s, plst_, plss); real t_t_ = 0.0; DOT( blockManager, t_t_, plst_, plst_); gamma = t_s/t_t_; AXPBYPZ( blockManager, plsx, plsp_, plss_, alpha, gamma); AXPYZ( blockManager, plsr, plst_, plss, -gamma); real rr = 0.0; DOT( blockManager, rr, plsr, plsr); bool bResult = IsConverged( blockManager, residual, rr, bb, epsilon, count, countMax); if( bResult == true ) { break; } rr0 = rr1; } plsx->ImposeBoundaryCondition(blockManager); } void Jacobi_Mask( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsx0, LocalScalar3D<int>* plsMaskId, real omega, int countMax, real epsilon, int& count, real& residual) { real bb = 0.0; DOT(blockManager, bb, plsb, plsb); for(count=1; count<=countMax; ++count) { Jacobi_Smoother_Mask( blockManager, plsx, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsb, plsx0, plsMaskId, omega); real rr = 0.0; CalcR2( blockManager, rr, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsx, plsb); bool bResult = IsConverged( blockManager, residual, rr, bb, epsilon, count, countMax); if( bResult == true ) { break; } } plsx->ImposeBoundaryCondition(blockManager); } void BiCGSTAB_Mask( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsr, LocalScalar3D<real>* plsr0, LocalScalar3D<real>* plsp, LocalScalar3D<real>* plsp_, LocalScalar3D<real>* plsq_, LocalScalar3D<real>* plss, LocalScalar3D<real>* plss_, LocalScalar3D<real>* plst_, LocalScalar3D<real>* plsx0, LocalScalar3D<int>* plsMaskId, real omega, int countPreConditioner, int countMax, real epsilon, int& count, real& residual) { real bb 
= 0.0; DOT_Mask(blockManager, bb, plsb, plsb, plsMaskId); if( fabs(bb) < FLT_MIN ) { residual = 0.0; count = 0; return; } CalcR( blockManager, plsr, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsx, plsb); Copy( blockManager, plsr0, plsr); real rr0 = 1.0; real alpha = 0.0; real gamma = 1.0; for(count=1; count<=countMax; ++count) { real rr1 = 0.0; DOT_Mask(blockManager, rr1, plsr, plsr0, plsMaskId); if( fabs(rr1) < FLT_MIN ) { residual = rr1; count = 0; break; } if( count == 1 ) { Copy( blockManager, plsp, plsr); } else { real beta = rr1/rr0*alpha/gamma; AXPY( blockManager, plsp, plsq_, -gamma); XPAY( blockManager, plsp, plsr, beta); } plsp->ImposeBoundaryCondition(blockManager); Fill(blockManager, plsp_, 0.0); Jacobi_PreConditioner_Mask( blockManager, plsp_, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsp, plsx0, plsMaskId, omega, countPreConditioner); CalcAx_Mask( blockManager, plsq_, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsp_, plsMaskId); real q_r0 = 0.0; DOT_Mask( blockManager, q_r0, plsq_, plsr0, plsMaskId); alpha = rr1/q_r0; AXPYZ( blockManager, plss, plsq_, plsr, -alpha); plss->ImposeBoundaryCondition(blockManager); Fill(blockManager, plss_, 0.0); Jacobi_PreConditioner_Mask( blockManager, plss_, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plss, plsx0, plsMaskId, omega, countPreConditioner); CalcAx_Mask( blockManager, plst_, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plss_, plsMaskId); real t_s = 0.0; DOT_Mask( blockManager, t_s, plst_, plss, plsMaskId); real t_t_ = 0.0; DOT_Mask( blockManager, t_t_, plst_, plst_, plsMaskId); gamma = t_s/t_t_; AXPBYPZ( blockManager, plsx, plsp_, plss_, alpha, gamma); AXPYZ( blockManager, plsr, plst_, plss, -gamma); real rr = 0.0; DOT_Mask( blockManager, rr, plsr, plsr, plsMaskId); bool bResult = IsConverged( blockManager, residual, rr, bb, epsilon, count, countMax); if( bResult == true ) { break; } rr0 = rr1; } plsx->ImposeBoundaryCondition(blockManager); } void DOT_Mask( BlockManager& blockManager, real& xy, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsy, LocalScalar3D<int>* plsMaskId) { PM_Start(tm_DOT, 0, 0, true); int vc = plsx->GetVC(); double xy_local = 0.0; int NB = blockManager.getNumBlock(); BlockBase* block0 = blockManager.getBlock(0); ::Vec3i size = block0->getSize(); int NX = size.x; int NY = size.y; int NZ = size.z; int sz[3] = {size.x, size.y, size.z}; PM_Start(tm_DOT_Calc, 0, 0, true); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for reduction(+: xy_local) #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); real* x = plsx->GetBlockData(block); real* y = plsy->GetBlockData(block); int* mask = plsMaskId->GetBlockData(block); real xy_block = 0.0; dot_mask_(&xy_block, x, y, mask, sz, &vc); xy_local += xy_block; } PM_Stop(tm_DOT_Calc, 0, 0, 2.0*NX*NY*NZ, NB); const MPI::Intracomm& comm = blockManager.getCommunicator(); double xy_global = 0.0; PM_Start(tm_DOT_Comm, 0, 0, true); allreduce_(&xy_global, &xy_local); PM_Stop(tm_DOT_Comm); xy = xy_global; PM_Stop(tm_DOT); } void Jacobi_PreConditioner_Mask( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsx0, LocalScalar3D<int>* plsMaskId, real omega, int countPreConditioner ) { for(int count=0; count<countPreConditioner; count++) { Jacobi_Smoother_Mask( 
blockManager, plsx, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsb, plsx0, plsMaskId, omega); } } void Jacobi_Smoother_Mask( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsx0, LocalScalar3D<int>* plsMaskId, real omega) { int vc = plsx->GetVC(); PM_Start(tm_JacobiSmoother, 0, 0, true); int NB = blockManager.getNumBlock(); BlockBase* block0 = blockManager.getBlock(0); ::Vec3i size = block0->getSize(); int NX = size.x; int NY = size.y; int NZ = size.z; int sz[3] = {size.x, size.y, size.z}; PM_Start(tm_JacobiSmoother_Calc, 0, 0, true); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); real* x = plsx ->GetBlockData(block); real* Ap = plsAp->GetBlockData(block); real* Aw = plsAw->GetBlockData(block); real* Ae = plsAe->GetBlockData(block); real* As = plsAs->GetBlockData(block); real* An = plsAn->GetBlockData(block); real* Ab = plsAb->GetBlockData(block); real* At = plsAt->GetBlockData(block); real* b = plsb ->GetBlockData(block); real* x0 = plsx0->GetBlockData(block); int* mask = plsMaskId->GetBlockData(block); real pomega = omega; jacobi_smoother_mask_( x0, x, Ap, Aw, Ae, As, An, Ab, At, b, mask, &pomega, sz, &vc); } PM_Stop(tm_JacobiSmoother_Calc, 0, 0, 16.0*NX*NY*NZ, NB); PM_Start(tm_JacobiSmoother_Swap, 0, 0, true); LSSwap(*plsx, *plsx0); PM_Stop(tm_JacobiSmoother_Swap); PM_Start(tm_JacobiSmoother_Comm, 0, 0, true); plsx->ImposeBoundaryCondition(blockManager); PM_Stop(tm_JacobiSmoother_Comm); PM_Stop(tm_JacobiSmoother); } void Jacobi_Smoother_Mask_2( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsx0, LocalScalar3D<int>* plsMaskId, real omega) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* x = plsx ->GetBlockData(block); real* Ap = plsAp->GetBlockData(block); real* Aw = plsAw->GetBlockData(block); real* Ae = plsAe->GetBlockData(block); real* As = plsAs->GetBlockData(block); real* An = plsAn->GetBlockData(block); real* Ab = plsAb->GetBlockData(block); real* At = plsAt->GetBlockData(block); real* b = plsb ->GetBlockData(block); real* x0 = plsx0->GetBlockData(block); int* mask = plsMaskId->GetBlockData(block); real pomega = omega; jacobi_smoother_mask_( x0, x, Ap, Aw, Ae, As, An, Ab, At, b, mask, &pomega, sz, &vc); } plsx0->UpdateVirtualCells(blockManager); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* x = plsx ->GetBlockData(block); real* Ap = plsAp->GetBlockData(block); real* Aw = plsAw->GetBlockData(block); real* Ae = plsAe->GetBlockData(block); real* As = 
plsAs->GetBlockData(block); real* An = plsAn->GetBlockData(block); real* Ab = plsAb->GetBlockData(block); real* At = plsAt->GetBlockData(block); real* b = plsb ->GetBlockData(block); real* x0 = plsx0->GetBlockData(block); int* mask = plsMaskId->GetBlockData(block); real pomega = omega; jacobi_smoother_mask_( x, x0, Ap, Aw, Ae, As, An, Ab, At, b, mask, &pomega, sz, &vc); } plsx->ImposeBoundaryCondition(blockManager); } void CalcAx_Mask( BlockManager& blockManager, LocalScalar3D<real>* plsAx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsx, LocalScalar3D<int>* plsMaskId) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* Ax = plsAx->GetBlockData(block); real* Ap = plsAp->GetBlockData(block); real* Aw = plsAw->GetBlockData(block); real* Ae = plsAe->GetBlockData(block); real* As = plsAs->GetBlockData(block); real* An = plsAn->GetBlockData(block); real* Ab = plsAb->GetBlockData(block); real* At = plsAt->GetBlockData(block); real* x = plsx ->GetBlockData(block); int* mask = plsMaskId->GetBlockData(block); calc_ax_mask_( Ax, Ap, Aw, Ae, As, An, Ab, At, x, mask, sz, &vc); } } }; #endif
openmp_task2.c
///TAFFO_TEST_ARGS -fopenmp
#include <stdio.h>

#define MAX_N (100)

void nested_task_invocation(int index)
{
  if (index > 0)
    nested_task_invocation(index - 1);
  else
#pragma omp task
  {
    printf("result: %d\n", index);
  }
}

void compute_result(int index)
{
  nested_task_invocation(index);
}

int main(int argc, char *argv[])
{
  float array[MAX_N] __attribute__((annotate("target('array') scalar(range(0,1000) final)")));
  float result __attribute__((annotate("target('result') scalar(range(0,5000) final)"))) = 0;
  int i;

#pragma omp parallel
  {
#pragma omp single
    compute_result(10);
  }

#pragma omp parallel for
  for (i = 0; i < MAX_N; i++) {
    array[i] = i * 1.0;
  }

  for (i = 0; i < MAX_N; i++) {
    result += array[i];
  }

  printf("result: %f\n", result);
}
GB_unaryop__minv_uint64_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint64_uint8
// op(A') function:  GB_tran__minv_uint64_uint8

// C type:   uint64_t
// A type:   uint8_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 64)

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 64) ;

// casting
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_uint64_uint8
(
    uint64_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_uint64_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_1x1_pack8_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

static void conv1x1s1_sgemm_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    const int size = w * h;

    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = size;
    bottom_im2col.h = 1;

    im2col_sgemm_pack8_fp16sa_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}

static void conv1x1s2_sgemm_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int tailstep = (w - 2 * outw + w) * 8;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const __fp16* r0 = bottom_blob.channel(p);
        __fp16* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            for (; j + 3 < outw; j += 4)
            {
                float16x8_t _v0 = vld1q_f16(r0);
                float16x8_t _v1 = vld1q_f16(r0 + 16);
                float16x8_t _v2 = vld1q_f16(r0 + 32);
                float16x8_t _v3 = vld1q_f16(r0 + 48);
                vst1q_f16(outptr, _v0);
                vst1q_f16(outptr + 8, _v1);
                vst1q_f16(outptr + 16, _v2);
                vst1q_f16(outptr + 24, _v3);

                r0 += 64;
                outptr += 32;
            }
            for (; j + 1 < outw; j += 2)
            {
                float16x8_t _v0 = vld1q_f16(r0);
                float16x8_t _v1 = vld1q_f16(r0 + 16);
                vst1q_f16(outptr, _v0);
                vst1q_f16(outptr + 8, _v1);

                r0 += 32;
                outptr += 16;
            }
            for (; j < outw; j++)
            {
                float16x8_t _v = vld1q_f16(r0);
                vst1q_f16(outptr, _v);

                r0 += 16;
                outptr += 8;
            }

            r0 += tailstep;
        }
    }

    conv1x1s1_sgemm_pack8_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
for_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify=expected,omp50 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 -verify %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify=expected,omp50 -verify %s -Wuninitialized void xxx(int argc) { int x; // expected-note {{initialize the variable 'x' to silence this warning}} #pragma omp for simd for (int i = 0; i < 10; ++i) argc = x; // expected-warning {{variable 'x' is uninitialized when used here}} } // expected-error@+1 {{unexpected OpenMP directive '#pragma omp for simd'}} #pragma omp for simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp for simd'}} #pragma omp for simd foo void test_no_clause(void) { int i; #pragma omp for simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp for simd' must be a for loop}} #pragma omp for simd ++i; } void test_branch_protected_scope(void) { int i = 0; L1: ++i; int x[24]; #pragma omp parallel #pragma omp for simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause(void) { int i; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}} #pragma omp for simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers(void) { int i, x; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}} #pragma omp for simd; for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}} #pragma omp for simd linear(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}} #pragma omp for simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}} #pragma omp for simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(void); void test_safelen(void) { int i; // expected-error@+1 {{expected '('}} #pragma omp for simd safelen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd safelen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp for simd safelen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd safelen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd safelen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp for simd safelen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd safelen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} 
// expected-note@+1 {{to match this '('}} #pragma omp for simd safelen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd safelen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp for simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd safelen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp for simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd safelen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{integer constant expression}} #pragma omp for simd safelen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{integer constant expression}} #pragma omp for simd safelen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp for simd safelen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp for simd safelen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp for simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_simdlen(void) { int i; // expected-error@+1 {{expected '('}} #pragma omp for simd simdlen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd simdlen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp for simd simdlen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd simdlen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd simdlen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp for simd simdlen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd simdlen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd simdlen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd simdlen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp for simd simdlen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd simdlen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd simdlen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp for simd simdlen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd simdlen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{integer constant expression}} #pragma omp for simd simdlen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{integer constant 
expression}} #pragma omp for simd simdlen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp for simd simdlen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp for simd simdlen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp for simd simdlen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_safelen_simdlen(void) { int i; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp for simd simdlen(6) safelen(5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp for simd safelen(5) simdlen(6) for (i = 0; i < 16; ++i) ; } void test_collapse(void) { int i; #pragma omp parallel // expected-error@+1 {{expected '('}} #pragma omp for simd collapse for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp for simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd collapse(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp for simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp for simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp for simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp for simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}} #pragma omp parallel // expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp for simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp for simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}} #pragma omp parallel // 
expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp for simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}} #pragma omp parallel #pragma omp for simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp for simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}} #pragma omp parallel // expected-error@+1 {{integer constant expression}} #pragma omp for simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{integer constant expression}} #pragma omp for simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp for simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp for simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp for simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp for simd collapse(2) for (i = 0; i < 16; ++i) // expected-note {{defined as lastprivate}} // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for simd' directive into a parallel or another task region?}} for (int j = 0; j < 16; ++j) // expected-error@+2 2 {{reduction variable must be shared}} // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp for simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear(void) { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd linear( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd linear(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp for simd linear(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp for simd linear() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp for simd linear(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp for simd linear(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp for simd linear(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp for simd linear(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp for simd linear(x, y, 
z) for (i = 0; i < 16; ++i) ; int x, y; // expected-error@+1 {{expected expression}} #pragma omp for simd linear(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd linear(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp for simd linear(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp for simd linear(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd linear(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd linear(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be linear}} #pragma omp for simd linear(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as private}} // expected-error@+1 {{private variable cannot be linear}} #pragma omp for simd private(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be private}} #pragma omp for simd linear(x) private(x) for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}} #pragma omp for simd linear(x, y : 0) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be lastprivate}} #pragma omp for simd linear(x) lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-note@+2 {{defined as lastprivate}} // expected-error@+1 {{lastprivate variable cannot be linear}} #pragma omp for simd lastprivate(x) linear(x) for (i = 0; i < 16; ++i) ; } void test_aligned(void) { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd aligned( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd aligned(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp for simd aligned(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp for simd aligned() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp for simd aligned(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp for simd aligned(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp for simd aligned(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp for simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp for simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp for simd aligned(z) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp for simd aligned(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected 
expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp for simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp for simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp for simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp for simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp for simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private(void) { int i; #pragma omp parallel // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd private( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp for simd private(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp for simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp for simd private() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp for simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp for simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp for simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp for simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp for simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate(void) { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp for simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp for simd lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp for simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp for simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp for simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp for simd lastprivate(0) for (i = 
0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp for simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp for simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp for simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_firstprivate(void) { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp for simd firstprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp for simd firstprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp for simd firstprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp for simd firstprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp for simd firstprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp for simd firstprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp for simd lastprivate(x) firstprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp for simd lastprivate(x, y) firstprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp for simd lastprivate(x, y, z) firstprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_loop_messages(void) { float a[100], b[100], c[100]; #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp for simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp for simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } } void test_nontemporal(void) { int i; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd nontemporal( for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd nontemporal(, for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 2 {{expected expression}} #pragma omp for simd nontemporal(, ) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected expression}} #pragma omp for simd nontemporal() for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected expression}} #pragma omp for simd nontemporal(int) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} omp50-error@+1 {{expected variable name}} #pragma omp for simd nontemporal(0) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} 
expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp for simd nontemporal(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp for simd nontemporal(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp for simd nontemporal(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd nontemporal(x :) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} #pragma omp for simd nontemporal(x :, ) for (i = 0; i < 16; ++i) ; // omp50-note@+2 {{defined as nontemporal}} // omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}} #pragma omp for simd nontemporal(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} #pragma omp for simd private(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} #pragma omp for simd nontemporal(x) private(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} #pragma omp for simd nontemporal(x, y : 0) for (i = 0; i < 16; ++i) ; #pragma omp parallel // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} #pragma omp for simd nontemporal(x) lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} #pragma omp for simd lastprivate(x) nontemporal(x) for (i = 0; i < 16; ++i) ; #pragma omp for simd order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for simd'}} expected-error {{expected '(' after 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp for simd order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp for simd order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp for simd order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp 
for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} for (int i = 0; i < 10; ++i) ; #pragma omp for simd order(concurrent) // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for simd'}} for (int i = 0; i < 10; ++i) ; }
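The diagnostics above all exercise malformed clause lists on '#pragma omp for simd'. For contrast, a minimal well-formed use of the same clauses is sketched below; this is an illustration, not part of the test file: the function and variable names are invented here, and aligned(buf : 64) is a promise to the compiler, so a real caller would have to pass 64-byte-aligned memory.

#include <omp.h>

/* Hypothetical helper, not taken from the test above. */
void for_simd_ok(float *buf, int n, float scale) {
  int i;
  float last = 0.0f;
  #pragma omp parallel
  /* 'buf' is a pointer, so it is a legal 'aligned' argument; the optional
     alignment value must be a constant positive integer expression. */
  #pragma omp for simd aligned(buf : 64) firstprivate(scale) lastprivate(last)
  for (i = 0; i < n; ++i) {
    buf[i] = buf[i] * scale;   /* scale is firstprivate: copied in per thread */
    last = buf[i];             /* lastprivate: value of the last iteration survives */
  }
  (void)last;
}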
pzlansy.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> s d c * **/ #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_types.h" #include "plasma_workspace.h" #include <plasma_core_blas.h> #define A(m, n) (plasma_complex64_t*)plasma_tile_addr(A, m, n) /***************************************************************************//** * Parallel tile calculation of max, one, infinity or Frobenius matrix norm * for a symmetric matrix. ******************************************************************************/ void plasma_pzlansy(plasma_enum_t norm, plasma_enum_t uplo, plasma_desc_t A, double *work, double *value, plasma_sequence_t *sequence, plasma_request_t *request) { // Return if failed sequence. if (sequence->status != PlasmaSuccess) return; switch (norm) { double stub; double *workspace; double *scale; double *sumsq; //================ // PlasmaMaxNorm //================ case PlasmaMaxNorm: for (int m = 0; m < A.mt; m++) { int mvam = plasma_tile_mview(A, m); int ldam = plasma_tile_mmain(A, m); if (uplo == PlasmaLower) { for (int n = 0; n < m; n++) { int nvan = plasma_tile_nview(A, n); plasma_core_omp_zlange(PlasmaMaxNorm, mvam, nvan, A(m, n), ldam, &stub, &work[A.mt*n+m], sequence, request); } } else { // PlasmaUpper for (int n = m+1; n < A.nt; n++) { int nvan = plasma_tile_nview(A, n); plasma_core_omp_zlange(PlasmaMaxNorm, mvam, nvan, A(m, n), ldam, &stub, &work[A.mt*n+m], sequence, request); } } plasma_core_omp_zlansy(PlasmaMaxNorm, uplo, mvam, A(m, m), ldam, &stub, &work[A.mt*m+m], sequence, request); } #pragma omp taskwait plasma_core_omp_dlansy(PlasmaMaxNorm, uplo, A.nt, work, A.mt, &stub, value, sequence, request); break; //================ // PlasmaOneNorm //================ case PlasmaOneNorm: case PlasmaInfNorm: for (int m = 0; m < A.mt; m++) { int mvam = plasma_tile_mview(A, m); int ldam = plasma_tile_mmain(A, m); if (uplo == PlasmaLower) { for (int n = 0; n < m; n++) { int nvan = plasma_tile_nview(A, n); plasma_core_omp_zlange_aux(PlasmaOneNorm, mvam, nvan, A(m, n), ldam, &work[A.n*m+n*A.nb], sequence, request); plasma_core_omp_zlange_aux(PlasmaInfNorm, mvam, nvan, A(m, n), ldam, &work[A.n*n+m*A.nb], sequence, request); } } else { // PlasmaUpper for (int n = m+1; n < A.nt; n++) { int nvan = plasma_tile_nview(A, n); plasma_core_omp_zlange_aux(PlasmaOneNorm, mvam, nvan, A(m, n), ldam, &work[A.n*m+n*A.nb], sequence, request); plasma_core_omp_zlange_aux(PlasmaInfNorm, mvam, nvan, A(m, n), ldam, &work[A.n*n+m*A.nb], sequence, request); } } plasma_core_omp_zlansy_aux(PlasmaOneNorm, uplo, mvam, A(m, m), ldam, &work[A.n*m+m*A.nb], sequence, request); } #pragma omp taskwait workspace = work + A.mt*A.n; plasma_core_omp_dlange(PlasmaInfNorm, A.n, A.mt, work, A.n, workspace, value, sequence, request); break; //====================== // PlasmaFrobeniusNorm //====================== case PlasmaFrobeniusNorm: scale = work; sumsq = work + A.mt*A.nt; for (int m = 0; m < A.mt; m++) { int mvam = plasma_tile_mview(A, m); int ldam = plasma_tile_mmain(A, m); if (uplo == PlasmaLower) { for (int n = 0; n < m; n++) { int nvan = plasma_tile_nview(A, n); plasma_core_omp_zgessq(mvam, nvan, A(m, n), ldam, &scale[A.mt*n+m], &sumsq[A.mt*n+m], sequence, request); } } else { // PlasmaUpper for (int n = m+1; n < A.nt; n++) { int nvan = plasma_tile_nview(A, n); plasma_core_omp_zgessq(mvam, nvan, A(m, n), ldam, &scale[A.mt*m+n], 
&sumsq[A.mt*m+n], sequence, request); } } plasma_core_omp_zsyssq(uplo, mvam, A(m, m), ldam, &scale[A.mt*m+m], &sumsq[A.mt*m+m], sequence, request); } #pragma omp taskwait plasma_core_omp_dsyssq_aux(A.mt, A.nt, scale, sumsq, value, sequence, request); break; } }
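plasma_pzlansy above accumulates the max, one/infinity and Frobenius norms of a symmetric matrix tile by tile, then reduces the per-tile results after the taskwait. A plain, non-tiled reference for the same quantities is sketched below, assuming the full symmetric matrix is stored column-major with both triangles filled (which is not how PLASMA stores it); the function name and the 'M'/'O'/'I'/'F' codes are illustrative. For a symmetric matrix the one-norm and the infinity-norm coincide, which is why the tiled code fills both work buffers in the same loop; the tiled Frobenius path also uses the safer scale/sumsq accumulation (zgessq/zsyssq) rather than the naive sum of squares shown here.

#include <complex.h>
#include <math.h>
#include <stddef.h>

/* Non-tiled reference, for illustration only (not part of PLASMA). */
static double lansy_reference(char norm, int n,
                              const double complex *A, int lda)
{
    double value = 0.0;
    if (norm == 'M') {                          /* max norm */
        for (int j = 0; j < n; ++j)
            for (int i = 0; i < n; ++i)
                value = fmax(value, cabs(A[(size_t)lda*j + i]));
    }
    else if (norm == 'O' || norm == 'I') {      /* one == infinity for symmetric A */
        for (int j = 0; j < n; ++j) {
            double colsum = 0.0;
            for (int i = 0; i < n; ++i)
                colsum += cabs(A[(size_t)lda*j + i]);
            value = fmax(value, colsum);
        }
    }
    else {                                      /* 'F': naive Frobenius norm */
        double sumsq = 0.0;
        for (int j = 0; j < n; ++j)
            for (int i = 0; i < n; ++i) {
                double a = cabs(A[(size_t)lda*j + i]);
                sumsq += a * a;
            }
        value = sqrt(sumsq);
    }
    return value;
}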
ctl_scroll.c
/********************************************************************[libaroma]* * Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *______________________________________________________________________________ * * Filename : ctl_scroll.c * Description : scroll control * * + This is part of libaroma, an embedded ui toolkit. * + 12/02/15 - Author(s): Ahmad Amarullah * */ #ifndef __libaroma_ctl_scroll_c__ #define __libaroma_ctl_scroll_c__ #include <aroma_internal.h> #include "../ui/ui_internal.h" /* HANDLER */ dword _libaroma_ctl_scroll_msg(LIBAROMA_CONTROLP, LIBAROMA_MSGP); void _libaroma_ctl_scroll_draw (LIBAROMA_CONTROLP, LIBAROMA_CANVASP); void _libaroma_ctl_scroll_destroy(LIBAROMA_CONTROLP); byte _libaroma_ctl_scroll_thread(LIBAROMA_CONTROLP); static LIBAROMA_CONTROL_HANDLER _libaroma_ctl_scroll_handler={ message:_libaroma_ctl_scroll_msg, draw:_libaroma_ctl_scroll_draw, focus:NULL, destroy:_libaroma_ctl_scroll_destroy, thread:_libaroma_ctl_scroll_thread }; /* * SCROLL CONTROL BEHAVIOUR CONFIGURATIONS * */ /* max cache height size */ #define _LIBAROMA_CTL_SCROLL_MAX_CACHE (libaroma_fb()->h * 10) /* size of touch handle */ #define _LIBAROMA_CTL_SCROLL_HANDLE_DP 36 /* wait ms before it send down event to client */ #define _LIBAROMA_CTL_SCROLL_TOUCH_CLIENT_WAIT 120 /* minimal touch y-move in dp if client request touch message */ #define _LIBAROMA_CTL_SCROLL_MIN_ALOWSCROLL_DP 24 /* minimal touch y-move in dp if client doesn't request touch message */ #define _LIBAROMA_CTL_SCROLL_MIN_ALOWSCROLL_DP_NOITEM 5 /* #define LIBAROMA_CTL_SCROLL_WITH_MAX_CACHE 1 #define LIBAROMA_CTL_SCROLL_WITH_CACHE_THREAD 1 */ /* * Structure : __LIBAROMA_CTL_SCROLL * Typedef : _LIBAROMA_CTL_SCROLL, * _LIBAROMA_CTL_SCROLLP * Descriptions: button control internal structure */ typedef struct __LIBAROMA_CTL_SCROLL _LIBAROMA_CTL_SCROLL; typedef struct __LIBAROMA_CTL_SCROLL * _LIBAROMA_CTL_SCROLLP; struct __LIBAROMA_CTL_SCROLL{ /* drawing & canvas */ LIBAROMA_CANVASP client_canvas; word color_bg; byte flags; /* threads */ byte active; #ifdef LIBAROMA_CTL_SCROLL_WITH_CACHE_THREAD LIBAROMA_THREAD cache_thread; #endif LIBAROMA_THREAD calc_thread; /* scrolling values */ int request_new_height; int scroll_y; int client_h; int max_scroll_y; int request_scroll_y; long scroll_tick; int scroll_state; /* cache values */ byte cache_state; byte move_state; int cache_y; int draw_y; int synced_y; long scroll_handle_time; /* touch event */ byte touched; byte handle_touched; byte allow_scroll; int touch_x; int touch_y; int touch_scroll_y; /* client touch event */ long client_touch_start; byte client_touched; /* overshoot */ byte ovs_bounce; long ovs_start; float ovs_state; float ovs_ustate; long ovs_ustart; int ovs_x; int ovs_y; LIBAROMA_CTL_SCROLL_OVERSHOOT_EFFECT ovs_custom_cb; /* fling items */ int bounce_velocity; int velocity; LIBAROMA_FLING fling; /* client data */ LIBAROMA_CTL_SCROLL_CLIENT client; LIBAROMA_MUTEX mutex; LIBAROMA_MUTEX fmutex; LIBAROMA_MUTEX blitmutex; 
LIBAROMA_COND_MUTEX cmutex; LIBAROMA_COND ccond; /* minscroll handler */ LIBAROMA_CTL_SCROLL_MINSCROLL_HANDLER minscroll_cb; int minscroll_y; }; /* * Function : _libaroma_ctl_scroll_client_msg * Return Value: dword * Descriptions: send client message */ dword _libaroma_ctl_scroll_client_msg( LIBAROMA_CONTROLP ctl, byte message, int x, int y ){ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); if (me->client.handler->message){ LIBAROMA_MSG msgc; libaroma_wm_compose( &msgc, LIBAROMA_CTL_SCROLL_MSG, NULL, message, 0 ); return me->client.handler->message( ctl, &me->client, &msgc, x, y ); } return 0; } /* End of _libaroma_ctl_scroll_client_msg */ /* * Function : _libaroma_ctl_scroll_updatecache * Return Value: byte * Descriptions: update cache drawing */ byte _libaroma_ctl_scroll_updatecache(LIBAROMA_CONTROLP ctl, int move_sz){ /* internal check */ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); if (me->client_canvas==NULL){ return 0; } libaroma_mutex_lock(me->fmutex); int move_value=0; int cvhsz = (me->client_canvas->h / 2); if (move_sz<0){ /* draw top */ move_value = 0-cvhsz; if (move_value>move_sz){ move_value=move_sz; } if (me->draw_y+move_value<0){ move_value=0-me->draw_y; } } else if (move_sz>0){ /* draw bottom */ move_value = cvhsz; if (move_value<move_sz){ move_value=move_sz; } if (me->draw_y+move_value>me->max_scroll_y){ move_value=me->max_scroll_y-me->draw_y; } if (move_value<0){ move_value=0; } } if ((me->cache_state==10)||(me->cache_state==11)){ me->cache_state=0; int client_y=me->draw_y; /* force redraw all */ if (me->client.handler->draw!=NULL){ me->client.handler->draw( ctl, &me->client, me->client_canvas, 0,client_y,me->client_canvas->w,me->client_canvas->h ); } else{ libaroma_canvas_setcolor(me->client_canvas,me->color_bg,0xff); } me->cache_y=0; me->synced_y=-1; libaroma_mutex_unlock(me->fmutex); return 1; } me->cache_state=0; if (move_value!=0){ byte is_top = (move_value<0)?1:0; int cache_h = abs(move_value); int cache_y = me->cache_y+move_value; int client_y= me->draw_y+(is_top?move_value:me->client_canvas->h); if (cache_y<0){ cache_y = me->client_canvas->h + cache_y; } else if (cache_y>=me->client_canvas->h){ cache_y = cache_y-me->client_canvas->h; } /* redrawing client */ LIBAROMA_CANVASP redraw_canvas; int top_y=is_top?cache_y:cache_y-cache_h; int top_h=cache_h; int bottom_h=0; if (top_y<0){ top_h = abs(top_y); bottom_h = cache_h - top_h; top_y = me->client_canvas->h-top_h; } else if (top_y+top_h>me->client_canvas->h){ top_h = me->client_canvas->h - top_y; bottom_h = cache_h - top_h; } /* top section */ if (top_h>0){ redraw_canvas = libaroma_canvas_area( me->client_canvas, 0, top_y, me->client_canvas->w, top_h ); if (me->client.handler->draw){ me->client.handler->draw( ctl, &me->client, redraw_canvas, 0, client_y, redraw_canvas->w, redraw_canvas->h ); } else{ libaroma_canvas_setcolor(redraw_canvas,me->color_bg,0xff); } libaroma_canvas_free(redraw_canvas); } /* bottom section */ if (bottom_h>0){ redraw_canvas = libaroma_canvas_area( me->client_canvas, 0, 0, me->client_canvas->w, bottom_h ); if (me->client.handler->draw){ me->client.handler->draw( ctl, &me->client, redraw_canvas, 0, client_y+top_h, redraw_canvas->w, redraw_canvas->h ); } else{ libaroma_canvas_setcolor(redraw_canvas,me->color_bg,0xff); } libaroma_canvas_free(redraw_canvas); } /* update info */ me->cache_y=cache_y; me->draw_y+=move_value; me->synced_y=-1; libaroma_mutex_unlock(me->fmutex); return 1; } libaroma_mutex_unlock(me->fmutex); return 0; } /* 
End of _libaroma_ctl_scroll_updatecache */ /* * Function : _libaroma_ctl_scroll_check_update * Return Value: byte * Descriptions: check for cache update */ #ifdef LIBAROMA_CTL_SCROLL_WITH_MAX_CACHE byte _libaroma_ctl_scroll_check_update(LIBAROMA_CONTROLP ctl){ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); if ((me->client.handler)&&(me->client_canvas!=NULL)){ if ((me->cache_state)&&(me->cache_state!=10)){ int cvhsz = (me->client_canvas->h / 4); int draw_top = me->draw_y; int draw_bottom = draw_top+me->client_canvas->h; if (me->scroll_y<draw_top+me->cache_y){ _libaroma_ctl_scroll_updatecache(ctl,-cvhsz); return 1; } else if (me->scroll_y>draw_bottom+me->cache_y){ _libaroma_ctl_scroll_updatecache(ctl,cvhsz); return 1; } else if (me->move_state==1){ if ((me->scroll_y<draw_top+cvhsz)&&(draw_top>0)){ _libaroma_ctl_scroll_updatecache(ctl,-cvhsz); return 1; } } else if (me->move_state==2){ if ((me->scroll_y>draw_bottom-cvhsz)&&(draw_bottom<me->client_h)){ _libaroma_ctl_scroll_updatecache(ctl,cvhsz); return 1; } } } } return 0; } /* End of _libaroma_ctl_scroll_check_update */ #endif /* * Function : _libaroma_ctl_scroll_cache_thread * Return Value: static void * * Descriptions: background cache updater */ #ifdef LIBAROMA_CTL_SCROLL_WITH_CACHE_THREAD static void * _libaroma_ctl_scroll_cache_thread(void * cookie){ LIBAROMA_CONTROLP ctl = (LIBAROMA_CONTROLP) cookie; /* internal check */ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); ALOGV("Start scroll updater thread"); while (me->active){ /* update new height */ if (me->client.handler){ if (me->request_new_height!=-1){ libaroma_ctl_scroll_set_height(ctl,me->request_new_height); libaroma_mutex_lock(me->fmutex); me->request_new_height=-1; libaroma_mutex_unlock(me->fmutex); } if (me->cache_state==10){ _libaroma_ctl_scroll_updatecache(ctl, 0); } #ifdef LIBAROMA_CTL_SCROLL_WITH_MAX_CACHE else if (me->client_canvas!=NULL){ if ((me->client_h>me->client_canvas->h)&&(me->request_new_height==-1)){ _libaroma_ctl_scroll_check_update(ctl); } } #endif } libaroma_sleep(1); } ALOGV("End scroll updater thread"); return NULL; } /* End of _libaroma_ctl_scroll_cache_thread */ #endif /* * Function : _libaroma_ctl_scroll_calc_thread * Return Value: static void * * Descriptions: background calculation updater */ static void * _libaroma_ctl_scroll_calc_thread(void * cookie){ LIBAROMA_CONTROLP ctl = (LIBAROMA_CONTROLP) cookie; /* internal check */ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); ALOGI("Start scroll calculation thread"); byte need_drawing = 0; while (me->active){ libaroma_cond_lock(&me->cmutex); libaroma_cond_wait(&me->ccond, &me->cmutex); libaroma_cond_unlock(&me->cmutex); if (!me->active){ break; } if (me->client.handler){ /* run client thread */ libaroma_mutex_lock(me->mutex); if ((me->client_touch_start!=0)&& (libaroma_tick()-me->client_touch_start> _LIBAROMA_CTL_SCROLL_TOUCH_CLIENT_WAIT)){ me->client_touch_start=0; /* send touch down message to client */ if (me->client.handler->message){ int client_x = me->touch_x; int client_y = me->touch_y + me->scroll_y; if (_libaroma_ctl_scroll_client_msg( ctl,LIBAROMA_CTL_SCROLL_MSG_TOUCH_DOWN, client_x, client_y )==LIBAROMA_CTL_SCROLL_MSG_NEED_DRAW){ need_drawing=1; } me->client_touched=1; } } libaroma_mutex_unlock(me->mutex); /* client thread */ if (me->client.handler->thread!=NULL){ if (me->client.handler->thread(ctl,&me->client)){ need_drawing=1; } } /* drawing handler */ if (need_drawing){ me->synced_y=-1; 
need_drawing=0; } } } ALOGI("End scroll calculation thread"); return NULL; } /* End of _libaroma_ctl_scroll_calc_thread */ /* * Function : _libaroma_ctl_scroll_thread * Return Value: byte * Descriptions: control thread callback */ byte _libaroma_ctl_scroll_thread(LIBAROMA_CONTROLP ctl) { /* internal check */ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); byte need_drawing=0; if (me->client.handler){ libaroma_cond_lock(&me->cmutex); libaroma_cond_signal(&me->ccond); libaroma_cond_unlock(&me->cmutex); if (!me->active){ return 0; } #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel sections { #pragma omp section { #endif /* overshoot */ if (me->ovs_ustart>1){ float nowstate= libaroma_control_state(me->ovs_ustart, me->ovs_bounce==2?1000:400); if (nowstate<1){ if (nowstate!=me->ovs_ustate){ me->ovs_ustate=nowstate; need_drawing=1; } } if ((nowstate>=1)&&(me->ovs_ustate<1)){ me->ovs_state=0; me->ovs_start=0; me->ovs_ustart=0; me->ovs_ustate=0; need_drawing=1; } } else if ((me->ovs_start>0)||(me->ovs_state)){ float nowstate= libaroma_control_state(me->ovs_start,(me->ovs_bounce==1)?800:1600); if (nowstate<1){ if (nowstate!=me->ovs_state){ me->ovs_state=nowstate; need_drawing=1; } } if ((me->ovs_state<1)&&((nowstate>=1)|| ((nowstate>=0.2)&&(me->ovs_ustart==1)&&(me->ovs_state<1))) ){ me->ovs_state=0.5; me->ovs_ustart=libaroma_tick(); me->ovs_ustate=0; if (!me->ovs_bounce){ me->ovs_bounce=2; } need_drawing=1; } } #ifdef LIBAROMA_CONFIG_OPENMP } #pragma omp section { #endif /* fling handler */ if ((me->velocity!=0)&&(!me->touched)){ /* onfling */ me->velocity=(me->velocity*246)>>8; if ((abs(me->velocity)<256)||(me->touched)) { /* ended */ me->velocity = 0; need_drawing=1; } else{ /* still on fling */ int scroll_y = (me->velocity>>8) + me->scroll_y; if (scroll_y>=me->max_scroll_y){ scroll_y=me->max_scroll_y; if (me->scroll_y!=scroll_y){ me->bounce_velocity=MAX(-libaroma_dp(3840), MIN(libaroma_dp(3840),(me->velocity*153)>>8)); me->ovs_bounce=1; me->ovs_state=0; me->ovs_y=0; me->ovs_y=MIN(ctl->w*0.4,me->bounce_velocity>>4); me->ovs_ustate=0; me->ovs_ustart=1; me->ovs_start=libaroma_tick()-16; } me->velocity = 0; need_drawing=1; } if (scroll_y<=0){ scroll_y=0; if (me->scroll_y!=scroll_y){ me->bounce_velocity=MAX(-libaroma_dp(3840), MIN(libaroma_dp(3840),(me->velocity*153)>>8)); me->ovs_bounce=1; me->ovs_state=0; me->ovs_y=0; me->ovs_y=MAX(0-ctl->w*0.4,me->bounce_velocity>>4); me->ovs_ustate=0; me->ovs_ustart=1; me->ovs_start=libaroma_tick()-16; } me->velocity = 0; need_drawing=1; } if (scroll_y!=me->scroll_y){ libaroma_ctl_scroll_set_pos(ctl, scroll_y); } } } else if (me->request_scroll_y!=-1){ /* direct request */ if (me->request_scroll_y!=me->scroll_y){ int move_sz = ((me->request_scroll_y-me->scroll_y)*64)>>8; if (abs(move_sz)<2){ if (move_sz<0){ move_sz=-1; } else{ move_sz=1; } } int target_sz = me->scroll_y+move_sz; if (target_sz==me->request_scroll_y){ target_sz=me->request_scroll_y; me->request_scroll_y=-1; } libaroma_ctl_scroll_set_pos(ctl,target_sz); } } #ifdef LIBAROMA_CONFIG_OPENMP } #pragma omp section { #endif /* bounce handler */ if (me->bounce_velocity!=0){ /* bounce */ me->bounce_velocity=(me->bounce_velocity*153)>>8; if (abs(me->bounce_velocity)<256){ me->bounce_velocity=0; } need_drawing=1; } #ifdef LIBAROMA_CONFIG_OPENMP } #pragma omp section { #endif /* scroll indicator handler */ if (me->scroll_tick!=0){ /* scroll indicator */ if (!(me->flags&LIBAROMA_CTL_SCROLL_NO_INDICATOR)){ long diff= libaroma_tick()-me->scroll_tick; if 
((diff>1000)&&(me->scroll_state>0)){ int nowstate=round(256.0*(1.0-libaroma_control_state( me->scroll_tick+1000,400))); if (nowstate!=me->scroll_state){ me->scroll_state=nowstate; need_drawing=1; } if (me->scroll_state<=0){ me->scroll_state=0; me->scroll_tick=0; me->scroll_handle_time=0; } } else if ((diff<500)&&(me->scroll_state<256)){ if (!me->scroll_handle_time){ me->scroll_handle_time=me->scroll_tick; } int nowstate=round(256.0* libaroma_control_state(me->scroll_handle_time,400)); if (nowstate!=me->scroll_state){ me->scroll_state=nowstate; need_drawing=1; } if (me->scroll_state>=256){ me->scroll_state=256; } } } else{ me->scroll_tick=0; } } #ifdef LIBAROMA_CONFIG_OPENMP } } #endif if (need_drawing){ me->synced_y=-1; } if (me->request_new_height!=-1){ #ifndef LIBAROMA_CTL_SCROLL_WITH_CACHE_THREAD libaroma_ctl_scroll_set_height(ctl,me->request_new_height); libaroma_mutex_lock(me->fmutex); me->request_new_height=-1; libaroma_mutex_unlock(me->fmutex); #else return 0; #endif } #ifndef LIBAROMA_CTL_SCROLL_WITH_CACHE_THREAD if (me->cache_state==10){ _libaroma_ctl_scroll_updatecache(ctl, 0); } #endif if (me->synced_y!=me->scroll_y){ return 1; } } return 0; } /* End of _libaroma_ctl_scroll_thread */ /* * Function : _libaroma_ctl_scroll_draw * Return Value: void * Descriptions: draw callback */ void _libaroma_ctl_scroll_draw( LIBAROMA_CONTROLP ctl, LIBAROMA_CANVASP c){ /* internal check */ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, ); if (me->client.handler){ if (!me->active){ libaroma_mutex_lock(me->mutex); if (me->request_new_height!=-1){ int nrq=me->request_new_height; libaroma_mutex_unlock(me->mutex); libaroma_ctl_scroll_set_height(ctl,nrq); libaroma_mutex_lock(me->mutex); me->request_new_height=-1; libaroma_mutex_unlock(me->mutex); } else{ libaroma_mutex_unlock(me->mutex); } if (me->cache_state==10){ _libaroma_ctl_scroll_updatecache(ctl, 0); } } if (me->client_canvas!=NULL){ libaroma_mutex_lock(me->mutex); int scroll_y = me->scroll_y; int draw_y = (scroll_y-me->draw_y+me->cache_y)%me->client_canvas->h; int draw_h = ctl->h; if (me->client_canvas->h<=ctl->h){ /* no scroll */ if ((me->minscroll_cb)&&(me->minscroll_y)){ LIBAROMA_CANVASP mscv=libaroma_canvas(c->w,me->minscroll_y); if (mscv){ libaroma_draw(mscv,me->client_canvas,0,0,0); me->minscroll_cb(ctl, mscv, me->scroll_y); libaroma_canvas_free(mscv); } } libaroma_canvas_setcolor(c,me->color_bg,0xff); libaroma_draw_ex( c, me->client_canvas, 0,0, 0,me->minscroll_y, me->client_canvas->w, me->client_canvas->h-me->minscroll_y, 0,0xff); me->synced_y=me->scroll_y; } else{ if ((me->minscroll_cb)&&(me->minscroll_y)){ int draw_yv = ((scroll_y-me->minscroll_y)-me->draw_y+me->cache_y) %me->client_canvas->h; LIBAROMA_CANVASP mscv=libaroma_canvas(c->w,me->minscroll_y); if (mscv){ libaroma_draw_ex( mscv,me->client_canvas, 0,0, 0,draw_yv, mscv->w,mscv->h, 0,0xff ); me->minscroll_cb(ctl, mscv, me->scroll_y); libaroma_canvas_free(mscv); } } LIBAROMA_CANVASP tc=c; int bvel=me->bounce_velocity; if (bvel!=0){ libaroma_canvas_setcolor(tc,me->color_bg,0xff); c=libaroma_canvas(tc->w,tc->h); } if (draw_y<0){ draw_y=me->client_canvas->h+draw_y; } if (draw_y+draw_h>me->client_canvas->h){ int top_h = (me->client_canvas->h - draw_y); int bottom_h = draw_h - top_h; int bottom_y = 0; if (top_h<1){ bottom_h+=top_h; bottom_y=abs(top_h); } #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel sections { #pragma omp section { #endif if (top_h>0){ if (!libaroma_draw_ex( c,me->client_canvas, 0,0, 0,draw_y, c->w,top_h, 0,0xff )){ ALOGV("Error top_h: 
%i,%i",draw_y,draw_h); } } #ifdef LIBAROMA_CONFIG_OPENMP } #pragma omp section { #endif if (bottom_h>0){ if (!libaroma_draw_ex( c,me->client_canvas, 0,top_h, 0,bottom_y, c->w,bottom_h, 0,0xff )){ ALOGV("Error bottom_h: %i,%i - %i", bottom_y,bottom_h,c->h); } } #ifdef LIBAROMA_CONFIG_OPENMP } } #endif me->synced_y=me->scroll_y; } else if ((draw_y<me->client_canvas->h)&&(draw_y>=0)){ if (!libaroma_draw_ex( c,me->client_canvas, 0,0, 0,draw_y, c->w,draw_h, 0,0xff )){ ALOGV("Error draw_h: %i,%i",draw_y,draw_h); } me->synced_y=me->scroll_y; } if (bvel!=0){ int y_i = (int) bvel>>8; libaroma_draw_ex(tc,c,0,0,0,y_i,tc->w,tc->h,0,0xff); libaroma_canvas_free(c); c=tc; } } libaroma_mutex_unlock(me->mutex); #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel sections { #pragma omp section { #endif if (me->active){ if ((!(me->flags&LIBAROMA_CTL_SCROLL_NO_INDICATOR))&& (me->max_scroll_y>me->minscroll_y)){ if ((me->scroll_state>0)||(me->handle_touched)){ int hdl_w,hdl_r,ctl_y,ctl_h; byte handle_opa=180; byte is_dark = libaroma_color_isdark(me->color_bg); word indicator_color = is_dark?RGB(cccccc):RGB(666666); int vss=(me->handle_touched)?256:me->scroll_state; if (me->flags&LIBAROMA_CTL_SCROLL_WITH_HANDLE){ hdl_w = libaroma_dp(5); hdl_r = hdl_w*2; if (!me->handle_touched){ hdl_r=(hdl_r * me->scroll_state) >> 8; } /* track */ ctl_y = libaroma_dp(18); ctl_h = ctl->h - (ctl_y*2); libaroma_draw_rect( c, ctl->w-(hdl_r+libaroma_dp(3)), ctl_y, libaroma_dp(1), ctl_h, libaroma_alpha(me->color_bg,indicator_color,(80*vss)>>8), 0xff ); if (me->handle_touched){ handle_opa=0xff; indicator_color=libaroma_colorget(ctl,NULL)->primary; } else{ handle_opa=220; } } else{ ctl_y = libaroma_dp(2); hdl_w = ctl_y*2; hdl_r = libaroma_dp(5); ctl_h = ctl->h - hdl_w; handle_opa = 120; } int hdl_ch= (ctl->h * ctl_h)/me->client_h; int hdl_h = MAX(hdl_ch,libaroma_dp(36)); hdl_ch = hdl_h-hdl_ch; int hdl_y = ((scroll_y * (ctl_h-hdl_ch))/me->client_h)+ctl_y; libaroma_draw_rect( c, ctl->w-(hdl_r+hdl_w), hdl_y, hdl_w, hdl_h, libaroma_alpha(me->color_bg,indicator_color, (handle_opa*vss)>>8), 0xff ); } } } #ifdef LIBAROMA_CONFIG_OPENMP } #pragma omp section { #endif /* vertical border */ if (me->flags&LIBAROMA_CTL_SCROLL_WITH_VBORDER){ if (me->max_scroll_y>me->minscroll_y){ word divcolor = libaroma_color_isdark(me->color_bg)?RGB(cccccc):RGB(666666); divcolor=libaroma_alpha(me->color_bg,divcolor,50); if (scroll_y>me->minscroll_y){ libaroma_draw_rect( c, 0, 0, c->w, libaroma_dp(1), divcolor, 0xff ); } if (scroll_y<me->max_scroll_y){ libaroma_draw_rect( c, 0, c->h-libaroma_dp(1), c->w, libaroma_dp(1), divcolor, 0xff ); } } } #ifdef LIBAROMA_CONFIG_OPENMP } #pragma omp section { #endif /* shadow */ if (me->flags&LIBAROMA_CTL_SCROLL_WITH_SHADOW){ libaroma_gradient_ex1(c, 0, 0, ctl->w, libaroma_dp(5),0,0,0,0,80,0,2); } #ifdef LIBAROMA_CONFIG_OPENMP } } #endif /* overshoot draw */ if ((me->max_scroll_y>me->minscroll_y)&&(me->ovs_state>0)&&(me->ovs_state<1)){ int max_ovsz = MIN(c->h/4,libaroma_dp(100)); int overshoot_sz = MIN(abs(me->ovs_y)/3,max_ovsz); if (overshoot_sz>0){ float opa = 0; if (me->ovs_state<0.25){ opa = libaroma_cubic_bezier_easein(me->ovs_state*4); } else{ opa = 1; } if (me->ovs_ustate>0){ opa*=1-libaroma_cubic_bezier_swiftout(me->ovs_ustate); } opa = MAX(0,MIN(1,opa)); if (me->ovs_ustate>0){ overshoot_sz = overshoot_sz * opa; } else{ overshoot_sz = overshoot_sz * MIN(1,opa*2); } float opacity=((float) overshoot_sz) / max_ovsz; overshoot_sz = MIN(MIN(overshoot_sz,c->h/5),libaroma_dp(80)); if (overshoot_sz>1){ LIBAROMA_CANVASP ovshot = 
libaroma_canvas_ex( c->w, overshoot_sz, 1); int vw = c->w>>2; if (me->ovs_x<0){ me->ovs_x=0; } else if (me->ovs_x>ctl->w){ me->ovs_x=ctl->w; } int vx = me->ovs_x>>2; int ovw= overshoot_sz>>1; int x1 = 0-(vw-vx); int x2 = x1+c->w+vw; if (me->ovs_custom_cb!=NULL){//use ovs effect callback me->ovs_custom_cb(ctl, ovshot, (me->ovs_y<0)); } else { //default ovs effect //fill with primary color (without this it will be black) libaroma_canvas_setcolor(ovshot, libaroma_colorget(ctl,NULL)->primary,0); if (me->ovs_y<0){ LIBAROMA_PATHP path=libaroma_path(x1,0); libaroma_path_curve( path, overshoot_sz, x1+ovw, overshoot_sz, x2-ovw, overshoot_sz, x2, 0 ); libaroma_path_draw(ovshot, path, 0, 0x60*opacity, 1, 0.33); libaroma_path_free(path); } else{ LIBAROMA_PATHP path=libaroma_path(x1,overshoot_sz-1); libaroma_path_curve( path, overshoot_sz, x1+ovw, 0, x2-ovw, 0, x2,overshoot_sz-1 ); libaroma_path_draw(ovshot, path, 0, 0x60*opacity, 1, 0.33); libaroma_path_free(path); } } /* draw overshoot effect canvas on top of control */ libaroma_draw(c,ovshot,0,(me->ovs_y<0)?0:c->h-overshoot_sz,1); libaroma_canvas_free(ovshot); } } } } else{ if ((me->minscroll_cb)&&(me->minscroll_y)){ LIBAROMA_CANVASP mscv=libaroma_canvas(c->w,me->minscroll_y); if (mscv){ libaroma_canvas_setcolor(mscv,me->color_bg,0xff); me->minscroll_cb(ctl, mscv, me->scroll_y); libaroma_canvas_free(mscv); } } libaroma_canvas_setcolor(c,me->color_bg,0xff); } } else{ if ((me->minscroll_cb)&&(me->minscroll_y)){ LIBAROMA_CANVASP mscv=libaroma_canvas(c->w,me->minscroll_y); if (mscv){ libaroma_canvas_setcolor(mscv,me->color_bg,0xff); me->minscroll_cb(ctl, mscv, me->scroll_y); libaroma_canvas_free(mscv); } } libaroma_canvas_setcolor(c,me->color_bg,0xff); } } /* End of _libaroma_ctl_scroll_draw */ /* * Function : _libaroma_ctl_scroll_touch_handler * Return Value: dword * Descriptions: touch message handler */ dword _libaroma_ctl_scroll_touch_handler( LIBAROMA_CONTROLP ctl, LIBAROMA_MSGP msg,int x, int y, byte state){ /* internal check */ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); switch(state){ case LIBAROMA_HID_EV_STATE_DOWN:{ ALOGT("scroll_message - touch down: %i, %i",x, y); byte is_have_velocity=( (abs(me->velocity)> libaroma_dp(2)*255 )|| me->bounce_velocity)?1:0; byte is_direct_handle = 0; if (me->flags&LIBAROMA_CTL_SCROLL_WITH_HANDLE){ me->handle_touched= (x>ctl->w-libaroma_dp(_LIBAROMA_CTL_SCROLL_HANDLE_DP))?1:0; is_direct_handle = ((me->handle_touched)&&(me->scroll_state)&&(me->max_scroll_y>0))?1:0; } else{ me->handle_touched=0; } /* set fling value */ me->bounce_velocity=0; me->velocity=0; me->allow_scroll=1; me->touched=1; /* check client message */ libaroma_mutex_lock(me->mutex); me->client_touch_start=0; me->client_touched=0; if ((!is_have_velocity)&&(!is_direct_handle)&& (me->client.handler->message)){ int client_x = x; int client_y = y + me->scroll_y; if (_libaroma_ctl_scroll_client_msg( ctl,LIBAROMA_CTL_SCROLL_MSG_ISNEED_TOUCH, client_x, client_y )==LIBAROMA_CTL_SCROLL_MSG_HANDLED){ me->client_touch_start=msg->sent; /*libaroma_tick();*/ me->allow_scroll=2; } } libaroma_mutex_unlock(me->mutex); if (is_direct_handle){ me->allow_scroll=1; int ctl_h = ctl->h-libaroma_dp(36); int sarea = ctl_h - ((ctl->h * ctl_h) / me->client_h); int scr_y = y-(ctl->h/2)+(sarea/2); int req_y = (scr_y * me->max_scroll_y) / sarea; libaroma_ctl_scroll_request_pos(ctl,req_y); } else if (me->flags&LIBAROMA_CTL_SCROLL_WITH_HANDLE){ me->request_scroll_y=-1; } libaroma_fling_down(&me->fling, y); /* save touch value */ me->touch_x=x; 
me->touch_y=y; me->touch_scroll_y = me->scroll_y; me->ovs_x=x; } break; case LIBAROMA_HID_EV_STATE_MOVE: case LIBAROMA_HID_EV_STATE_UP:{ ALOGT("scroll_message - touch move: %i, %i",x, y); me->ovs_x=x; me->bounce_velocity=0; byte is_first_allowed = 0; if (me->allow_scroll==2){ libaroma_mutex_lock(me->mutex); int move_sz = me->touch_y - y; int client_message_param = LIBAROMA_CTL_SCROLL_MSG_TOUCH_MOVE; int scrdp=libaroma_dp( me->client_touched? _LIBAROMA_CTL_SCROLL_MIN_ALOWSCROLL_DP: _LIBAROMA_CTL_SCROLL_MIN_ALOWSCROLL_DP_NOITEM ); if (abs(move_sz)>=scrdp){ is_first_allowed = 1; me->allow_scroll=1; me->client_touch_start=0; client_message_param = LIBAROMA_CTL_SCROLL_MSG_TOUCH_CANCEL; } /* send client message */ if ((me->client_touched)&&(me->client.handler->message)){ int client_x = x; int client_y = y + me->scroll_y; if (_libaroma_ctl_scroll_client_msg( ctl,client_message_param, client_x, client_y )==LIBAROMA_CTL_SCROLL_MSG_NEED_DRAW){ me->synced_y=-1; } if (client_message_param==LIBAROMA_CTL_SCROLL_MSG_TOUCH_CANCEL){ me->client_touched=0; } } libaroma_mutex_unlock(me->mutex); } /* scrolling move handler */ if ((me->allow_scroll==1)&&(me->touch_y!=y)){ int move_sz = me->touch_y - y; if (!me->handle_touched){ if (me->scroll_y+move_sz<me->minscroll_y){ if (!me->ovs_start){ me->ovs_start=msg->sent; /*libaroma_tick();*/ me->ovs_bounce=0; me->ovs_state=0; me->ovs_ustate=0; me->ovs_ustart=0; me->ovs_y=0; } me->ovs_y+=move_sz; } else if (me->scroll_y+move_sz>me->max_scroll_y){ if (!me->ovs_start){ me->ovs_start=msg->sent; /*libaroma_tick();*/ me->ovs_bounce=0; me->ovs_state=0; me->ovs_ustate=0; me->ovs_ustart=0; me->ovs_y=0; } me->ovs_y+=move_sz; } else if (!me->ovs_ustart){ me->ovs_ustate=0; me->ovs_ustart=1; me->ovs_bounce=3; } /* normal scroll */ if (is_first_allowed){ libaroma_ctl_scroll_request_pos(ctl, me->touch_scroll_y+move_sz); } else{ me->request_scroll_y=-1; libaroma_ctl_scroll_set_pos(ctl, me->touch_scroll_y+move_sz); } me->touch_scroll_y = me->scroll_y; /* set history */ libaroma_fling_move(&me->fling, y); } else if (me->max_scroll_y>0){ int ctl_h = ctl->h-libaroma_dp(36); int sarea = ctl_h - ((ctl->h * ctl_h) / me->client_h); int scr_y = y-(ctl->h/2)+(sarea/2); int req_y = (scr_y * me->max_scroll_y) / sarea; libaroma_ctl_scroll_request_pos(ctl,req_y); } me->touch_y=y; } if (state==LIBAROMA_HID_EV_STATE_UP){ ALOGT("scroll_message - touch up: %i, %i",x, y); me->bounce_velocity=0; if (!me->handle_touched){ if (me->allow_scroll){ me->velocity=(libaroma_fling_up(&me->fling, y)* libaroma_px(18))/libaroma_dp(4); if (me->velocity){ me->touched=0; } } } else if (me->allow_scroll==1){ if (!(me->flags&LIBAROMA_CTL_SCROLL_NO_INDICATOR)){ me->scroll_tick = msg->sent; /*libaroma_tick();*/ me->scroll_state=256; me->synced_y=-1; } } /* clear item touch if initialized */ libaroma_mutex_lock(me->mutex); if ((me->client_touch_start||me->client_touched)&& (me->client.handler->message)){ int client_x = x; int client_y = y + me->scroll_y; if (me->client_touch_start){ if (_libaroma_ctl_scroll_client_msg( ctl,LIBAROMA_CTL_SCROLL_MSG_TOUCH_DOWN, client_x, client_y )==LIBAROMA_CTL_SCROLL_MSG_NEED_DRAW){ me->synced_y=-1; } } if (_libaroma_ctl_scroll_client_msg( ctl,LIBAROMA_CTL_SCROLL_MSG_TOUCH_UP, client_x, client_y )==LIBAROMA_CTL_SCROLL_MSG_NEED_DRAW){ me->synced_y=-1; } } me->client_touch_start=0; me->client_touched=0; libaroma_mutex_unlock(me->mutex); /* reset */ me->handle_touched=0; me->allow_scroll=0; me->touched=0; me->touch_x=0; me->touch_y=0; me->ovs_x=x; if (!me->ovs_ustart){ me->ovs_ustate=0; 
me->ovs_ustart=1; me->ovs_bounce=3; } } } break; } return 0; } /* End of _libaroma_ctl_scroll_touch_handler */ /* * Function : libaroma_ctl_scroll_isactive * Return Value: byte * Descriptions: check if control is active */ byte libaroma_ctl_scroll_isactive(LIBAROMA_CONTROLP ctl){ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); return me->active; } /* End of libaroma_ctl_scroll_isactive */ /* * Function : _libaroma_ctl_scroll_msg * Return Value: byte * Descriptions: message callback */ dword _libaroma_ctl_scroll_msg( LIBAROMA_CONTROLP ctl, LIBAROMA_MSGP msg){ /* internal check */ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); switch(msg->msg){ case LIBAROMA_MSG_TOUCH: { /* touch handler */ int x = msg->x; int y = msg->y; libaroma_window_calculate_pos(NULL,ctl,&x,&y); return _libaroma_ctl_scroll_touch_handler( ctl,msg,x,y,msg->state ); } break; case LIBAROMA_MSG_WIN_ACTIVE: { /* start updater thread*/ me->active=1; me->client_touch_start=0; me->client_touched=0; me->synced_y = -1; /* start cache thread */ #ifdef LIBAROMA_CTL_SCROLL_WITH_CACHE_THREAD libaroma_thread_create( &me->cache_thread, _libaroma_ctl_scroll_cache_thread, (voidp) ctl); #endif libaroma_thread_create( &me->calc_thread, _libaroma_ctl_scroll_calc_thread, (voidp) ctl); } break; case LIBAROMA_MSG_WIN_INACTIVE: { /* stop updater thread */ me->active=0; libaroma_sleep(30); libaroma_cond_lock(&me->cmutex); libaroma_cond_signal(&me->ccond); libaroma_cond_unlock(&me->cmutex); libaroma_mutex_lock(me->mutex); libaroma_thread_join(me->calc_thread); #ifdef LIBAROMA_CTL_SCROLL_WITH_CACHE_THREAD libaroma_thread_join(me->cache_thread); me->cache_thread=0; #endif me->calc_thread=0; me->client_touch_start=0; me->client_touched=0; me->synced_y = -1; libaroma_mutex_unlock(me->mutex); } break; } return 0; } /* End of _libaroma_ctl_scroll_msg */ /* * Function : _libaroma_ctl_scroll_destroy * Return Value: void * Descriptions: destroy callback */ void _libaroma_ctl_scroll_destroy( LIBAROMA_CONTROLP ctl){ /* internal check */ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, ); /* destroy client */ if (me->client.handler->destroy!=NULL){ me->client.handler->destroy(ctl,&me->client); } if (me->client_canvas!=NULL){ libaroma_canvas_free(me->client_canvas); me->client_canvas=NULL; } libaroma_cond_free(&me->ccond, &me->cmutex); libaroma_mutex_free(me->blitmutex); libaroma_mutex_free(me->fmutex); libaroma_mutex_free(me->mutex); free(me); } /* End of _libaroma_ctl_scroll_destroy */ /* * Function : libaroma_ctl_scroll * Return Value: LIBAROMA_CONTROLP * Descriptions: create scroll control */ LIBAROMA_CONTROLP libaroma_ctl_scroll( LIBAROMA_WINDOWP win, word id, int x, int y, int w, int h, word bg_color, byte flags ){ /* init internal data */ _LIBAROMA_CTL_SCROLLP me = (_LIBAROMA_CTL_SCROLLP) calloc(sizeof(_LIBAROMA_CTL_SCROLL),1); if (!me){ ALOGW("libaroma_ctl_scroll alloc scroll memory failed"); return NULL; } libaroma_mutex_init(me->blitmutex); /* blit drawing mutex */ libaroma_mutex_init(me->fmutex); /* cache drawing mutex */ libaroma_mutex_init(me->mutex); /* control drawing mutex */ libaroma_cond_init(&me->ccond, &me->cmutex); /* set internal data */ me->flags = flags; me->color_bg = bg_color; me->request_new_height=-1; me->request_scroll_y=-1; me->synced_y = -1; /* init control */ LIBAROMA_CONTROLP ctl = libaroma_control_new( id, x, y, w, h, libaroma_dp(32),libaroma_dp(32), /* min size */ me, &_libaroma_ctl_scroll_handler, win ); if (!ctl){ free(me); } return 
ctl; } /* End of libaroma_ctl_scroll */ /* * Function : libaroma_ctl_scroll_request_height * Return Value: byte * Descriptions: request height */ byte libaroma_ctl_scroll_request_height(LIBAROMA_CONTROLP ctl, int h){ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); libaroma_mutex_lock(me->fmutex); me->request_new_height=h; libaroma_mutex_unlock(me->fmutex); return 1; } /* End of libaroma_ctl_scroll_request_height */ /* * Function : libaroma_ctl_scroll_get_scroll * Return Value: int * Descriptions: get scroll position */ int libaroma_ctl_scroll_get_scroll(LIBAROMA_CONTROLP ctl, int * scroll_h){ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); if (scroll_h!=NULL){ *scroll_h=me->max_scroll_y; } return me->scroll_y; } /* End of libaroma_ctl_scroll_get_scroll */ /* * Function : libaroma_ctl_scroll_get_height * Return Value: int * Descriptions: get scroll height */ int libaroma_ctl_scroll_get_height(LIBAROMA_CONTROLP ctl){ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); int ret=me->client_h; libaroma_mutex_lock(me->fmutex); if (me->request_new_height!=-1){ ret=me->request_new_height; } libaroma_mutex_unlock(me->fmutex); return ret; } /* End of libaroma_ctl_scroll_get_height */ /* * Function : libaroma_ctl_scroll_set_height * Return Value: byte * Descriptions: set scroll height */ byte libaroma_ctl_scroll_set_height(LIBAROMA_CONTROLP ctl, int h){ /* internal check */ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); if (me->client_h==h){ return 0; } libaroma_mutex_lock(me->blitmutex); libaroma_mutex_lock(me->fmutex); me->max_scroll_y = h-ctl->h; if (me->max_scroll_y<me->minscroll_y){ me->max_scroll_y=me->minscroll_y; } if (h<1){ if (me->client_canvas!=NULL){ me->client_h = h; libaroma_mutex_lock(me->mutex); libaroma_canvas_free(me->client_canvas); me->client_canvas=NULL; libaroma_mutex_unlock(me->mutex); } } else{ /* max 3x control height */ int valid_height = h; #ifdef LIBAROMA_CTL_SCROLL_WITH_MAX_CACHE if (valid_height>_LIBAROMA_CTL_SCROLL_MAX_CACHE){ valid_height=_LIBAROMA_CTL_SCROLL_MAX_CACHE; } #endif LIBAROMA_CANVASP c=me->client_canvas; if (me->client_canvas){ if (valid_height!=c->h){ int ns = c->l * valid_height; if (ns>c->s){ libaroma_mutex_lock(me->mutex); c->data=realloc(c->data,ns*2); libaroma_mutex_unlock(me->mutex); c->s=ns; c->h=valid_height; me->client_h = h; } else{ me->client_h = h; c->s=ns; c->h=valid_height; libaroma_mutex_lock(me->mutex); c->data=realloc(c->data,ns*2); libaroma_mutex_unlock(me->mutex); } } else{ me->client_h = h; } } else{ libaroma_mutex_lock(me->mutex); c = libaroma_canvas(ctl->w,valid_height); libaroma_canvas_setcolor(c,me->color_bg,0xff); me->client_canvas = c; libaroma_mutex_unlock(me->mutex); me->client_h = h; } } me->synced_y=-1; libaroma_mutex_unlock(me->fmutex); libaroma_mutex_unlock(me->blitmutex); libaroma_ctl_scroll_set_pos(ctl,me->scroll_y); me->cache_state = 10; /* force recalculate */ return 1; } /* End of libaroma_ctl_scroll_set_height */ /* * Function : libaroma_ctl_scroll_set_pos * Return Value: byte * Descriptions: set scroll position - directly */ byte libaroma_ctl_scroll_set_pos(LIBAROMA_CONTROLP ctl, int scroll_y){ /* internal check */ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); int req_scroll_y = scroll_y; if (req_scroll_y>me->max_scroll_y){ req_scroll_y = me->max_scroll_y; } if (req_scroll_y<me->minscroll_y){ req_scroll_y=me->minscroll_y; } if (me->scroll_y!=req_scroll_y){ 
me->move_state=(req_scroll_y<me->scroll_y)?1:2; me->scroll_y=req_scroll_y; if (!me->cache_state){ me->cache_state=1; } if (!(me->flags&LIBAROMA_CTL_SCROLL_NO_INDICATOR)){ me->scroll_tick = libaroma_tick(); } } return 1; } /* End of libaroma_ctl_scroll_set_pos */ /* * Function : libaroma_ctl_scroll_request_pos * Return Value: byte * Descriptions: request to change scroll position - nicely */ byte libaroma_ctl_scroll_request_pos(LIBAROMA_CONTROLP ctl, int req_y){ /* internal check */ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); if (req_y>me->max_scroll_y){ me->request_scroll_y=me->max_scroll_y; } else if (req_y<me->minscroll_y){ me->request_scroll_y=me->minscroll_y; } else{ me->request_scroll_y=req_y; } return 1; } /* End of libaroma_ctl_scroll_request_pos */ /* * Function : libaroma_ctl_scroll_get_bg_color * Return Value: byte * Descriptions: request to change scroll position - nicely */ word libaroma_ctl_scroll_get_bg_color(LIBAROMA_CONTROLP ctl){ /* internal check */ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); return me->color_bg; } /* End of libaroma_ctl_scroll_get_bg_color */ /* * Function : libaroma_ctl_scroll_set_client * Return Value: byte * Descriptions: set client handler */ byte libaroma_ctl_scroll_set_client( LIBAROMA_CONTROLP ctl, voidp internal, LIBAROMA_CTL_SCROLL_CLIENT_HANDLERP handler ){ if (handler==NULL){ return 0; } /* internal check */ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); if (me->client.handler){ if (me->client.handler->destroy!=NULL){ me->client.handler->destroy(ctl,&me->client); } } me->client.handler=handler; me->client.internal=internal; me->synced_y=-1; me->cache_state = 10; /* force recalculate */ return 1; } /* End of libaroma_ctl_scroll_set_client */ /* * Function : libaroma_ctl_scroll_get_client * Return Value: LIBAROMA_CTL_SCROLL_CLIENTP * Descriptions: get scroll client data */ LIBAROMA_CTL_SCROLL_CLIENTP libaroma_ctl_scroll_get_client( LIBAROMA_CONTROLP ctl){ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, NULL ); if (!me->client.handler){ return NULL; } return &me->client; } /* End of libaroma_ctl_scroll_get_client */ /* * Function : libaroma_ctl_scroll_is_visible * Return Value: byte * Descriptions: is this area visible? 
*/ byte libaroma_ctl_scroll_is_visible( LIBAROMA_CONTROLP ctl, int y, int h ){ /* internal check */ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); if (me->client_canvas==NULL){ return 0; } if (!me->active){ return 0; } int draw_t=me->draw_y; int draw_b=draw_t+me->client_canvas->h; int bottom = y+h; if ((bottom>draw_t)&&(y<draw_b)){ return 1; } return 0; } /* End of libaroma_ctl_scroll_is_visible */ /* * Function : libaroma_ctl_scroll_blit * Return Value: byte * Descriptions: blit canvas into client canvas */ byte libaroma_ctl_scroll_blit( LIBAROMA_CONTROLP ctl, LIBAROMA_CANVASP canvas, int x, int y, int w, int h, byte erase ){ /* internal check */ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); if (me->client_canvas==NULL){ return 0; } if (!me->active){ return 0; } if (x<0){ x=0; } if ((w<1)||(x+w>me->client_canvas->w)){ w=me->client_canvas->w-x; } int bottom = y+h; int draw_t = me->draw_y; int draw_b = draw_t+me->client_canvas->h; if ((bottom>draw_t)&&(y<draw_b)){ int dy = (y-draw_t+me->cache_y)%me->client_canvas->h; int split_h = (dy+h)-me->client_canvas->h; byte is_split=((dy+h>me->client_canvas->h)&&(me->cache_y)&&(split_h>0)); libaroma_mutex_lock(me->blitmutex); if (erase){ libaroma_draw_rect( me->client_canvas, x, dy, w, h, me->color_bg, 0xff ); if (is_split){ libaroma_draw_rect( me->client_canvas, x, 0, w, split_h, me->color_bg, 0xff ); } } libaroma_draw_ex( me->client_canvas, canvas, x, dy, 0, 0, w, h, 1, 0xff ); if (is_split){ libaroma_draw_ex( me->client_canvas, canvas, x, 0, 0, h-split_h, w, split_h, 1, 0xff ); } libaroma_mutex_unlock(me->blitmutex); return 1; } return 0; } /* End of libaroma_ctl_scroll_blit */ /* * Function : libaroma_ctl_scroll_set_min_scroll * Return Value: byte * Descriptions: set minimal scroll y */ byte libaroma_ctl_scroll_set_min_scroll( LIBAROMA_CONTROLP ctl, LIBAROMA_CTL_SCROLL_MINSCROLL_HANDLER cb, int y ){ /* internal check */ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); if (y<0){ return 0; } libaroma_mutex_lock(me->fmutex); me->minscroll_cb=cb; me->minscroll_y=y; me->synced_y=-1; libaroma_mutex_unlock(me->fmutex); return 1; } /* * Function : libaroma_ctl_scroll_set_ovs_callback * Return Value: byte * Descriptions: set overscroll effect callback */ byte libaroma_ctl_scroll_set_ovs_callback( LIBAROMA_CONTROLP ctl, LIBAROMA_CTL_SCROLL_OVERSHOOT_EFFECT cb ){ /* internal check */ _LIBAROMA_CTL_CHECK( _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0 ); libaroma_mutex_lock(me->fmutex); me->ovs_custom_cb=cb; libaroma_mutex_unlock(me->fmutex); return 1; } #endif /* __libaroma_ctl_scroll_c__ */
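The public entry points defined above (libaroma_ctl_scroll, libaroma_ctl_scroll_set_height, libaroma_ctl_scroll_blit, libaroma_ctl_scroll_set_pos / libaroma_ctl_scroll_request_pos) suggest the usage sketch below. It is an assumption-laden illustration, not taken from the libaroma documentation: the <aroma.h> include, the control id, the geometry values and units, the flag choice and the colors are all guesses, and per the code above a blit only reaches the client canvas once a height has been set and the control is active (its window has received LIBAROMA_MSG_WIN_ACTIVE).

#include <aroma.h>   /* assumed public header name */

/* Illustrative only: id 0x20, the 480x640 geometry and the dp-based sizes
   are arbitrary choices, not values prescribed by the library. */
void scroll_demo(LIBAROMA_WINDOWP win) {
  LIBAROMA_CONTROLP ctl = libaroma_ctl_scroll(
      win, 0x20, 0, 0, 480, 640,
      RGB(eeeeee), LIBAROMA_CTL_SCROLL_WITH_HANDLE);
  if (!ctl) {
    return;
  }

  /* client area taller than the control, so scrolling becomes possible */
  libaroma_ctl_scroll_set_height(ctl, libaroma_dp(2000));

  /* draw one strip into the scrollable client canvas; this only has an
     effect while the control is active, i.e. its window is shown */
  LIBAROMA_CANVASP strip = libaroma_canvas(ctl->w, libaroma_dp(48));
  if (strip) {
    libaroma_canvas_setcolor(strip, RGB(4488cc), 0xff);
    libaroma_ctl_scroll_blit(ctl, strip, 0, libaroma_dp(100),
                             strip->w, strip->h, 1 /* erase background first */);
    libaroma_canvas_free(strip);
  }

  /* jump straight to the top, then ask for an animated move towards 200dp */
  libaroma_ctl_scroll_set_pos(ctl, 0);
  libaroma_ctl_scroll_request_pos(ctl, libaroma_dp(200));
}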
enhance.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE N N H H AAA N N CCCC EEEEE % % E NN N H H A A NN N C E % % EEE N N N HHHHH AAAAA N N N C EEE % % E N NN H H A A N NN C E % % EEEEE N N H H A A N N CCCC EEEEE % % % % % % MagickCore Image Enhancement Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resource_.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/token.h" #include "magick/xml-tree.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoGammaImage() extract the 'mean' from the image and adjust the image % to try make set its gamma appropriatally. % % The format of the AutoGammaImage method is: % % MagickBooleanType AutoGammaImage(Image *image) % MagickBooleanType AutoGammaImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: The image to auto-level % % o channel: The channels to auto-level. If the special 'SyncChannels' % flag is set all given channels is adjusted in the same way using the % mean average of those channels. 
% */ MagickExport MagickBooleanType AutoGammaImage(Image *image) { return(AutoGammaImageChannel(image,DefaultChannels)); } MagickExport MagickBooleanType AutoGammaImageChannel(Image *image, const ChannelType channel) { double gamma, mean, logmean, sans; MagickStatusType status; logmean=log(0.5); if ((channel & SyncChannels) != 0) { /* Apply gamma correction equally accross all given channels */ (void) GetImageChannelMean(image,channel,&mean,&sans,&image->exception); gamma=log(mean*QuantumScale)/logmean; return(LevelImageChannel(image,channel,0.0,(double) QuantumRange,gamma)); } /* Auto-gamma each channel separateally */ status = MagickTrue; if ((channel & RedChannel) != 0) { (void) GetImageChannelMean(image,RedChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status&=LevelImageChannel(image,RedChannel,0.0,(double) QuantumRange, gamma); } if ((channel & GreenChannel) != 0) { (void) GetImageChannelMean(image,GreenChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status&=LevelImageChannel(image,GreenChannel,0.0,(double) QuantumRange, gamma); } if ((channel & BlueChannel) != 0) { (void) GetImageChannelMean(image,BlueChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status&=LevelImageChannel(image,BlueChannel,0.0,(double) QuantumRange, gamma); } if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue)) { (void) GetImageChannelMean(image,OpacityChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status&=LevelImageChannel(image,OpacityChannel,0.0,(double) QuantumRange, gamma); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { (void) GetImageChannelMean(image,IndexChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status&=LevelImageChannel(image,IndexChannel,0.0,(double) QuantumRange, gamma); } return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoLevelImage() adjusts the levels of a particular image channel by % scaling the minimum and maximum values to the full quantum range. % % The format of the LevelImage method is: % % MagickBooleanType AutoLevelImage(Image *image) % MagickBooleanType AutoLevelImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: The image to auto-level % % o channel: The channels to auto-level. If the special 'SyncChannels' % flag is set the min/max/mean value of all given channels is used for % all given channels, to all channels in the same way. % */ MagickExport MagickBooleanType AutoLevelImage(Image *image) { return(AutoLevelImageChannel(image,DefaultChannels)); } MagickExport MagickBooleanType AutoLevelImageChannel(Image *image, const ChannelType channel) { /* Convenience method for a min/max histogram stretch. */ return(MinMaxStretchImage(image,channel,0.0,0.0)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B r i g h t n e s s C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BrightnessContrastImage() changes the brightness and/or contrast of an % image. 
It converts the brightness and contrast parameters into slope and % intercept and calls a polynomical function to apply to the image. % % The format of the BrightnessContrastImage method is: % % MagickBooleanType BrightnessContrastImage(Image *image, % const double brightness,const double contrast) % MagickBooleanType BrightnessContrastImageChannel(Image *image, % const ChannelType channel,const double brightness, % const double contrast) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o brightness: the brightness percent (-100 .. 100). % % o contrast: the contrast percent (-100 .. 100). % */ MagickExport MagickBooleanType BrightnessContrastImage(Image *image, const double brightness,const double contrast) { MagickBooleanType status; status=BrightnessContrastImageChannel(image,DefaultChannels,brightness, contrast); return(status); } MagickExport MagickBooleanType BrightnessContrastImageChannel(Image *image, const ChannelType channel,const double brightness,const double contrast) { #define BrightnessContastImageTag "BrightnessContast/Image" double alpha, intercept, coefficients[2], slope; MagickBooleanType status; /* Compute slope and intercept. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); alpha=contrast; slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0)); if (slope < 0.0) slope=0.0; intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope); coefficients[0]=slope; coefficients[1]=intercept; status=FunctionImageChannel(image,channel,PolynomialFunction,2,coefficients, &image->exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r D e c i s i o n L i s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorDecisionListImage() accepts a lightweight Color Correction Collection % (CCC) file which solely contains one or more color corrections and applies % the correction to the image. Here is a sample CCC file: % % <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2"> % <ColorCorrection id="cc03345"> % <SOPNode> % <Slope> 0.9 1.2 0.5 </Slope> % <Offset> 0.4 -0.5 0.6 </Offset> % <Power> 1.0 0.8 1.5 </Power> % </SOPNode> % <SATNode> % <Saturation> 0.85 </Saturation> % </SATNode> % </ColorCorrection> % </ColorCorrectionCollection> % % which includes the slop, offset, and power for each of the RGB channels % as well as the saturation. % % The format of the ColorDecisionListImage method is: % % MagickBooleanType ColorDecisionListImage(Image *image, % const char *color_correction_collection) % % A description of each parameter follows: % % o image: the image. % % o color_correction_collection: the color correction collection in XML. 
% */ MagickExport MagickBooleanType ColorDecisionListImage(Image *image, const char *color_correction_collection) { #define ColorDecisionListCorrectImageTag "ColorDecisionList/Image" typedef struct _Correction { double slope, offset, power; } Correction; typedef struct _ColorCorrection { Correction red, green, blue; double saturation; } ColorCorrection; CacheView *image_view; char token[MaxTextExtent]; ColorCorrection color_correction; const char *content, *p; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; PixelPacket *cdl_map; register ssize_t i; ssize_t y; XMLTreeInfo *cc, *ccc, *sat, *sop; /* Allocate and initialize cdl maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (color_correction_collection == (const char *) NULL) return(MagickFalse); ccc=NewXMLTree((const char *) color_correction_collection,&image->exception); if (ccc == (XMLTreeInfo *) NULL) return(MagickFalse); cc=GetXMLTreeChild(ccc,"ColorCorrection"); if (cc == (XMLTreeInfo *) NULL) { ccc=DestroyXMLTree(ccc); return(MagickFalse); } color_correction.red.slope=1.0; color_correction.red.offset=0.0; color_correction.red.power=1.0; color_correction.green.slope=1.0; color_correction.green.offset=0.0; color_correction.green.power=1.0; color_correction.blue.slope=1.0; color_correction.blue.offset=0.0; color_correction.blue.power=1.0; color_correction.saturation=0.0; sop=GetXMLTreeChild(cc,"SOPNode"); if (sop != (XMLTreeInfo *) NULL) { XMLTreeInfo *offset, *power, *slope; slope=GetXMLTreeChild(sop,"Slope"); if (slope != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(slope); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetMagickToken(p,&p,token); if (*token == ',') GetMagickToken(p,&p,token); switch (i) { case 0: { color_correction.red.slope=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.slope=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.slope=StringToDouble(token, (char **) NULL); break; } } } } offset=GetXMLTreeChild(sop,"Offset"); if (offset != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(offset); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetMagickToken(p,&p,token); if (*token == ',') GetMagickToken(p,&p,token); switch (i) { case 0: { color_correction.red.offset=StringToDouble(token, (char **) NULL); break; } case 1: { color_correction.green.offset=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.offset=StringToDouble(token, (char **) NULL); break; } } } } power=GetXMLTreeChild(sop,"Power"); if (power != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(power); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetMagickToken(p,&p,token); if (*token == ',') GetMagickToken(p,&p,token); switch (i) { case 0: { color_correction.red.power=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.power=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.power=StringToDouble(token, (char **) NULL); break; } } } } } sat=GetXMLTreeChild(cc,"SATNode"); if (sat != (XMLTreeInfo *) NULL) { XMLTreeInfo *saturation; saturation=GetXMLTreeChild(sat,"Saturation"); if (saturation != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(saturation); p=(const char *) content; GetMagickToken(p,&p,token); color_correction.saturation=StringToDouble(token,(char **) 
NULL); } } ccc=DestroyXMLTree(ccc); if (image->debug != MagickFalse) { (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Color Correction Collection:"); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.slope: %g",color_correction.red.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.offset: %g",color_correction.red.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.power: %g",color_correction.red.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.slope: %g",color_correction.green.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.offset: %g",color_correction.green.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.power: %g",color_correction.green.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.slope: %g",color_correction.blue.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.offset: %g",color_correction.blue.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.power: %g",color_correction.blue.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.saturation: %g",color_correction.saturation); } cdl_map=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map)); if (cdl_map == (PixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); for (i=0; i <= (ssize_t) MaxMap; i++) { cdl_map[i].red=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+ color_correction.red.offset,color_correction.red.power))))); cdl_map[i].green=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+ color_correction.green.offset,color_correction.green.power))))); cdl_map[i].blue=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+ color_correction.blue.offset,color_correction.blue.power))))); } if (image->storage_class == PseudoClass) { /* Apply transfer function to colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { double luma; luma=0.212656*image->colormap[i].red+0.715158*image->colormap[i].green+ 0.072186*image->colormap[i].blue; image->colormap[i].red=ClampToQuantum(luma+color_correction.saturation* cdl_map[ScaleQuantumToMap(image->colormap[i].red)].red-luma); image->colormap[i].green=ClampToQuantum(luma+ color_correction.saturation*cdl_map[ScaleQuantumToMap( image->colormap[i].green)].green-luma); image->colormap[i].blue=ClampToQuantum(luma+color_correction.saturation* cdl_map[ScaleQuantumToMap(image->colormap[i].blue)].blue-luma); } } /* Apply transfer function to image. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double luma; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { luma=0.212656*GetPixelRed(q)+0.715158*GetPixelGreen(q)+ 0.072186*GetPixelBlue(q); SetPixelRed(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelRed(q))].red-luma))); SetPixelGreen(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelGreen(q))].green-luma))); SetPixelBlue(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelBlue(q))].blue-luma))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ColorDecisionListImageChannel) #endif proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag, progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); cdl_map=(PixelPacket *) RelinquishMagickMemory(cdl_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClutImage() replaces each color value in the given image, by using it as an % index to lookup a replacement color value in a Color Look UP Table in the % form of an image. The values are extracted along a diagonal of the CLUT % image so either a horizontal or vertial gradient image can be used. % % Typically this is used to either re-color a gray-scale image according to a % color gradient in the CLUT image, or to perform a freeform histogram % (level) adjustment according to the (typically gray-scale) gradient in the % CLUT image. % % When the 'channel' mask includes the matte/alpha transparency channel but % one image has no such channel it is assumed that that image is a simple % gray-scale image that will effect the alpha channel values, either for % gray-scale coloring (with transparent or semi-transparent colors), or % a histogram adjustment of existing alpha channel values. If both images % have matte channels, direct and normal indexing is applied, which is rarely % used. % % The format of the ClutImage method is: % % MagickBooleanType ClutImage(Image *image,Image *clut_image) % MagickBooleanType ClutImageChannel(Image *image, % const ChannelType channel,Image *clut_image) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o clut_image: the color lookup table image for replacement color values. % % o channel: the channel. 
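%
%  A brief usage sketch (the 'clut_info' ImageInfo, which might describe a
%  gradient pseudo-image, and the variable names are illustrative only):
%
%    Image *clut_image = ReadImage(clut_info,exception);
%    if (clut_image != (Image *) NULL)
%      {
%        (void) ClutImageChannel(image,DefaultChannels,clut_image);
%        clut_image=DestroyImage(clut_image);
%      }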
% */ MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image) { return(ClutImageChannel(image,DefaultChannels,clut_image)); } MagickExport MagickBooleanType ClutImageChannel(Image *image, const ChannelType channel,const Image *clut_image) { #define ClutImageTag "Clut/Image" CacheView *clut_view, *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket *clut_map; register ssize_t i; ssize_t adjust, y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(clut_image != (Image *) NULL); assert(clut_image->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && (IsGrayColorspace(clut_image->colorspace) == MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace); clut_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*clut_map)); if (clut_map == (MagickPixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Clut image. */ status=MagickTrue; progress=0; adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1); exception=(&image->exception); clut_view=AcquireAuthenticCacheView(clut_image,exception); for (i=0; i <= (ssize_t) MaxMap; i++) { GetMagickPixelPacket(clut_image,clut_map+i); (void) InterpolateMagickPixelPacket(clut_image,clut_view, UndefinedInterpolatePixel,QuantumScale*i*(clut_image->columns-adjust), QuantumScale*i*(clut_image->rows-adjust),clut_map+i,exception); } clut_view=DestroyCacheView(clut_view); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); GetMagickPixelPacket(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampPixelRed(clut_map+ ScaleQuantumToMap(GetPixelRed(q)))); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampPixelGreen(clut_map+ ScaleQuantumToMap(GetPixelGreen(q)))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampPixelBlue(clut_map+ ScaleQuantumToMap(GetPixelBlue(q)))); if ((channel & OpacityChannel) != 0) { if (clut_image->matte == MagickFalse) SetPixelAlpha(q,MagickPixelIntensityToQuantum(clut_map+ ScaleQuantumToMap((Quantum) GetPixelAlpha(q)))); else if (image->matte == MagickFalse) SetPixelOpacity(q,ClampPixelOpacity(clut_map+ ScaleQuantumToMap((Quantum) MagickPixelIntensity(&pixel)))); else SetPixelOpacity(q,ClampPixelOpacity( clut_map+ScaleQuantumToMap(GetPixelOpacity(q)))); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum((clut_map+(ssize_t) GetPixelIndex(indexes+x))->index)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != 
(MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ClutImageChannel) #endif proceed=SetImageProgress(image,ClutImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); clut_map=(MagickPixelPacket *) RelinquishMagickMemory(clut_map); if ((clut_image->matte != MagickFalse) && ((channel & OpacityChannel) != 0)) (void) SetImageAlphaChannel(image,ActivateAlphaChannel); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastImage() enhances the intensity differences between the lighter and % darker elements of the image. Set sharpen to a MagickTrue to increase the % image contrast otherwise the contrast is reduced. % % The format of the ContrastImage method is: % % MagickBooleanType ContrastImage(Image *image, % const MagickBooleanType sharpen) % % A description of each parameter follows: % % o image: the image. % % o sharpen: Increase or decrease image contrast. % */ static void Contrast(const int sign,Quantum *red,Quantum *green,Quantum *blue) { double brightness, hue, saturation; /* Enhance contrast: dark color become darker, light color become lighter. */ assert(red != (Quantum *) NULL); assert(green != (Quantum *) NULL); assert(blue != (Quantum *) NULL); hue=0.0; saturation=0.0; brightness=0.0; ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)- brightness); if (brightness > 1.0) brightness=1.0; else if (brightness < 0.0) brightness=0.0; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } MagickExport MagickBooleanType ContrastImage(Image *image, const MagickBooleanType sharpen) { #define ContrastImageTag "Contrast/Image" CacheView *image_view; ExceptionInfo *exception; int sign; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); sign=sharpen != MagickFalse ? 1 : -1; if (image->storage_class == PseudoClass) { /* Contrast enhance colormap. */ for (i=0; i < (ssize_t) image->colors; i++) Contrast(sign,&image->colormap[i].red,&image->colormap[i].green, &image->colormap[i].blue); } /* Contrast enhance image. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum blue, green, red; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=GetPixelRed(q); green=GetPixelGreen(q); blue=GetPixelBlue(q); Contrast(sign,&red,&green,&blue); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ContrastImage) #endif proceed=SetImageProgress(image,ContrastImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastStretchImage() is a simple image enhancement technique that attempts % to improve the contrast in an image by `stretching' the range of intensity % values it contains to span a desired range of values. It differs from the % more sophisticated histogram equalization in that it can only apply a % linear scaling function to the image pixel values. As a result the % `enhancement' is less harsh. % % The format of the ContrastStretchImage method is: % % MagickBooleanType ContrastStretchImage(Image *image, % const char *levels) % MagickBooleanType ContrastStretchImageChannel(Image *image, % const size_t channel,const double black_point, % const double white_point) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_point: the black point. % % o white_point: the white point. % % o levels: Specify the levels where the black and white points have the % range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.). % */ MagickExport MagickBooleanType ContrastStretchImage(Image *image, const char *levels) { double black_point, white_point; GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; /* Parse levels. 
*/ if (levels == (char *) NULL) return(MagickFalse); flags=ParseGeometry(levels,&geometry_info); black_point=geometry_info.rho; white_point=(double) image->columns*image->rows; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; if ((flags & PercentValue) != 0) { black_point*=(double) image->columns*image->rows/100.0; white_point*=(double) image->columns*image->rows/100.0; } if ((flags & SigmaValue) == 0) white_point=(double) image->columns*image->rows-black_point; status=ContrastStretchImageChannel(image,DefaultChannels,black_point, white_point); return(status); } MagickExport MagickBooleanType ContrastStretchImageChannel(Image *image, const ChannelType channel,const double black_point,const double white_point) { #define MaxRange(color) ((MagickRealType) ScaleQuantumToMap((Quantum) (color))) #define ContrastStretchImageTag "ContrastStretch/Image" CacheView *image_view; double intensity; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket black, *histogram, white; QuantumPixelPacket *stretch_map; register ssize_t i; ssize_t y; /* Allocate histogram and stretch map. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); stretch_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*stretch_map)); if ((histogram == (MagickPixelPacket *) NULL) || (stretch_map == (QuantumPixelPacket *) NULL)) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Form histogram. */ exception=(&image->exception); if (IsGrayImage(image,exception) != MagickFalse) (void) SetImageColorspace(image,GRAYColorspace); status=MagickTrue; (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram)); image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict indexes; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); if ((channel & SyncChannels) != 0) for (x=0; x < (ssize_t) image->columns; x++) { Quantum intensity; intensity=ClampToQuantum(GetPixelIntensity(image,p)); histogram[ScaleQuantumToMap(intensity)].red++; histogram[ScaleQuantumToMap(intensity)].green++; histogram[ScaleQuantumToMap(intensity)].blue++; histogram[ScaleQuantumToMap(intensity)].index++; p++; } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) histogram[ScaleQuantumToMap(GetPixelRed(p))].red++; if ((channel & GreenChannel) != 0) histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++; if ((channel & BlueChannel) != 0) histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++; if ((channel & OpacityChannel) != 0) histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++; p++; } } /* Find the histogram boundaries by locating the black/white levels.
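For each selected channel the black level is the first map index at which the cumulative histogram count exceeds black_point, and the white level is found by accumulating from the top of the map until the count exceeds (columns*rows - white_point); both thresholds are pixel counts, not quantum values.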
*/ black.red=0.0; white.red=MaxRange(QuantumRange); if ((channel & RedChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].red; if (intensity > black_point) break; } black.red=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].red; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.red=(MagickRealType) i; } black.green=0.0; white.green=MaxRange(QuantumRange); if ((channel & GreenChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].green; if (intensity > black_point) break; } black.green=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].green; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.green=(MagickRealType) i; } black.blue=0.0; white.blue=MaxRange(QuantumRange); if ((channel & BlueChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].blue; if (intensity > black_point) break; } black.blue=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].blue; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.blue=(MagickRealType) i; } black.opacity=0.0; white.opacity=MaxRange(QuantumRange); if ((channel & OpacityChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].opacity; if (intensity > black_point) break; } black.opacity=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].opacity; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.opacity=(MagickRealType) i; } black.index=0.0; white.index=MaxRange(QuantumRange); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].index; if (intensity > black_point) break; } black.index=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].index; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.index=(MagickRealType) i; } histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); /* Stretch the histogram to create the stretched image mapping. 
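Map indices at or below the black level map to 0, indices at or above the white level map to QuantumRange, and indices in between are mapped linearly, roughly stretch_map[i] = ScaleMapToQuantum(MaxMap*(i-black)/(white-black)) per channel.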
*/ (void) ResetMagickMemory(stretch_map,0,(MaxMap+1)*sizeof(*stretch_map)); for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & RedChannel) != 0) { if (i < (ssize_t) black.red) stretch_map[i].red=(Quantum) 0; else if (i > (ssize_t) white.red) stretch_map[i].red=QuantumRange; else if (black.red != white.red) stretch_map[i].red=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.red)/(white.red-black.red))); } if ((channel & GreenChannel) != 0) { if (i < (ssize_t) black.green) stretch_map[i].green=0; else if (i > (ssize_t) white.green) stretch_map[i].green=QuantumRange; else if (black.green != white.green) stretch_map[i].green=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.green)/(white.green-black.green))); } if ((channel & BlueChannel) != 0) { if (i < (ssize_t) black.blue) stretch_map[i].blue=0; else if (i > (ssize_t) white.blue) stretch_map[i].blue= QuantumRange; else if (black.blue != white.blue) stretch_map[i].blue=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.blue)/(white.blue-black.blue))); } if ((channel & OpacityChannel) != 0) { if (i < (ssize_t) black.opacity) stretch_map[i].opacity=0; else if (i > (ssize_t) white.opacity) stretch_map[i].opacity=QuantumRange; else if (black.opacity != white.opacity) stretch_map[i].opacity=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.opacity)/(white.opacity-black.opacity))); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if (i < (ssize_t) black.index) stretch_map[i].index=0; else if (i > (ssize_t) white.index) stretch_map[i].index=QuantumRange; else if (black.index != white.index) stretch_map[i].index=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.index)/(white.index-black.index))); } } /* Stretch the image. */ if (((channel & OpacityChannel) != 0) || (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))) image->storage_class=DirectClass; if (image->storage_class == PseudoClass) { /* Stretch colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) { if (black.red != white.red) image->colormap[i].red=stretch_map[ ScaleQuantumToMap(image->colormap[i].red)].red; } if ((channel & GreenChannel) != 0) { if (black.green != white.green) image->colormap[i].green=stretch_map[ ScaleQuantumToMap(image->colormap[i].green)].green; } if ((channel & BlueChannel) != 0) { if (black.blue != white.blue) image->colormap[i].blue=stretch_map[ ScaleQuantumToMap(image->colormap[i].blue)].blue; } if ((channel & OpacityChannel) != 0) { if (black.opacity != white.opacity) image->colormap[i].opacity=stretch_map[ ScaleQuantumToMap(image->colormap[i].opacity)].opacity; } } } /* Stretch image. 
*/ status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) { if (black.red != white.red) SetPixelRed(q,stretch_map[ ScaleQuantumToMap(GetPixelRed(q))].red); } if ((channel & GreenChannel) != 0) { if (black.green != white.green) SetPixelGreen(q,stretch_map[ ScaleQuantumToMap(GetPixelGreen(q))].green); } if ((channel & BlueChannel) != 0) { if (black.blue != white.blue) SetPixelBlue(q,stretch_map[ ScaleQuantumToMap(GetPixelBlue(q))].blue); } if ((channel & OpacityChannel) != 0) { if (black.opacity != white.opacity) SetPixelOpacity(q,stretch_map[ ScaleQuantumToMap(GetPixelOpacity(q))].opacity); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if (black.index != white.index) SetPixelIndex(indexes+x,stretch_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].index); } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ContrastStretchImageChannel) #endif proceed=SetImageProgress(image,ContrastStretchImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); stretch_map=(QuantumPixelPacket *) RelinquishMagickMemory(stretch_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E n h a n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EnhanceImage() applies a digital filter that improves the quality of a % noisy image. % % The format of the EnhanceImage method is: % % Image *EnhanceImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
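%
%  A minimal usage sketch (the variable names are illustrative only); the
%  noisy image is replaced by its enhanced copy:
%
%    Image *enhance_image = EnhanceImage(image,&image->exception);
%    if (enhance_image != (Image *) NULL)
%      {
%        image=DestroyImage(image);
%        image=enhance_image;
%      }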
% */ MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception) { #define Enhance(weight) \ mean=((MagickRealType) GetPixelRed(r)+pixel.red)/2; \ distance=(MagickRealType) GetPixelRed(r)-(MagickRealType) pixel.red; \ distance_squared=QuantumScale*(2.0*((MagickRealType) QuantumRange+1.0)+ \ mean)*distance*distance; \ mean=((MagickRealType) GetPixelGreen(r)+pixel.green)/2; \ distance=(MagickRealType) GetPixelGreen(r)-(MagickRealType) pixel.green; \ distance_squared+=4.0*distance*distance; \ mean=((MagickRealType) GetPixelBlue(r)+pixel.blue)/2; \ distance=(MagickRealType) GetPixelBlue(r)-(MagickRealType) pixel.blue; \ distance_squared+=QuantumScale*(3.0*((MagickRealType) QuantumRange+1.0)-1.0- \ mean)*distance*distance; \ mean=((MagickRealType) r->opacity+pixel.opacity)/2; \ distance=(MagickRealType) r->opacity-(MagickRealType) pixel.opacity; \ distance_squared+=QuantumScale*(3.0*((MagickRealType) QuantumRange+1.0)-1.0- \ mean)*distance*distance; \ if (distance_squared < ((MagickRealType) QuantumRange*(MagickRealType) \ QuantumRange/25.0f)) \ { \ aggregate.red+=(weight)*GetPixelRed(r); \ aggregate.green+=(weight)*GetPixelGreen(r); \ aggregate.blue+=(weight)*GetPixelBlue(r); \ aggregate.opacity+=(weight)*GetPixelOpacity(r); \ total_weight+=(weight); \ } \ r++; #define EnhanceImageTag "Enhance/Image" CacheView *enhance_view, *image_view; Image *enhance_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; ssize_t y; /* Initialize enhanced image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((image->columns < 5) || (image->rows < 5)) return((Image *) NULL); enhance_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (enhance_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(enhance_image,DirectClass) == MagickFalse) { InheritException(exception,&enhance_image->exception); enhance_image=DestroyImage(enhance_image); return((Image *) NULL); } /* Enhance image. */ status=MagickTrue; progress=0; (void) ResetMagickMemory(&zero,0,sizeof(zero)); image_view=AcquireAuthenticCacheView(image,exception); enhance_view=AcquireAuthenticCacheView(enhance_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,enhance_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *restrict p; register PixelPacket *restrict q; register ssize_t x; /* Read another scan line. */ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception); q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket aggregate; MagickRealType distance, distance_squared, mean, total_weight; PixelPacket pixel; register const PixelPacket *restrict r; /* Compute weighted average of target pixel color components. 
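A 5x5 neighborhood around the target pixel is scanned with fixed weights (5, 8, 10 on the outer rows, rising to 80 at the center); a neighbor contributes its color and its weight only when its perceptual distance from the center pixel is below a threshold, so strong edges are preserved while noise is averaged away.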
*/ aggregate=zero; total_weight=0.0; r=p+2*(image->columns+4)+2; pixel=(*r); r=p; Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0); r=p+(image->columns+4); Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0); r=p+2*(image->columns+4); Enhance(10.0); Enhance(40.0); Enhance(80.0); Enhance(40.0); Enhance(10.0); r=p+3*(image->columns+4); Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0); r=p+4*(image->columns+4); Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0); SetPixelRed(q,(aggregate.red+(total_weight/2)-1)/total_weight); SetPixelGreen(q,(aggregate.green+(total_weight/2)-1)/ total_weight); SetPixelBlue(q,(aggregate.blue+(total_weight/2)-1)/total_weight); SetPixelOpacity(q,(aggregate.opacity+(total_weight/2)-1)/ total_weight); p++; q++; } if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EnhanceImage) #endif proceed=SetImageProgress(image,EnhanceImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } enhance_view=DestroyCacheView(enhance_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) enhance_image=DestroyImage(enhance_image); return(enhance_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E q u a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EqualizeImage() applies a histogram equalization to the image. % % The format of the EqualizeImage method is: % % MagickBooleanType EqualizeImage(Image *image) % MagickBooleanType EqualizeImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % */ MagickExport MagickBooleanType EqualizeImage(Image *image) { return(EqualizeImageChannel(image,DefaultChannels)); } MagickExport MagickBooleanType EqualizeImageChannel(Image *image, const ChannelType channel) { #define EqualizeImageTag "Equalize/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket black, *histogram, intensity, *map, white; QuantumPixelPacket *equalize_map; register ssize_t i; ssize_t y; /* Allocate and initialize histogram arrays. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); equalize_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*equalize_map)); histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*map)); if ((equalize_map == (QuantumPixelPacket *) NULL) || (histogram == (MagickPixelPacket *) NULL) || (map == (MagickPixelPacket *) NULL)) { if (map != (MagickPixelPacket *) NULL) map=(MagickPixelPacket *) RelinquishMagickMemory(map); if (histogram != (MagickPixelPacket *) NULL) histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); if (equalize_map != (QuantumPixelPacket *) NULL) equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory( equalize_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Form histogram. 
*/ (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram)); exception=(&image->exception); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); if ((channel & SyncChannels) != 0) for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType intensity=GetPixelIntensity(image,p); histogram[ScaleQuantumToMap(ClampToQuantum(intensity))].red++; p++; } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) histogram[ScaleQuantumToMap(GetPixelRed(p))].red++; if ((channel & GreenChannel) != 0) histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++; if ((channel & BlueChannel) != 0) histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++; if ((channel & OpacityChannel) != 0) histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++; p++; } } image_view=DestroyCacheView(image_view); /* Integrate the histogram to get the equalization map. */ (void) ResetMagickMemory(&intensity,0,sizeof(intensity)); for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & SyncChannels) != 0) { intensity.red+=histogram[i].red; map[i]=intensity; continue; } if ((channel & RedChannel) != 0) intensity.red+=histogram[i].red; if ((channel & GreenChannel) != 0) intensity.green+=histogram[i].green; if ((channel & BlueChannel) != 0) intensity.blue+=histogram[i].blue; if ((channel & OpacityChannel) != 0) intensity.opacity+=histogram[i].opacity; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) intensity.index+=histogram[i].index; map[i]=intensity; } black=map[0]; white=map[(int) MaxMap]; (void) ResetMagickMemory(equalize_map,0,(MaxMap+1)*sizeof(*equalize_map)); for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & SyncChannels) != 0) { if (white.red != black.red) equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].red-black.red))/(white.red-black.red))); continue; } if (((channel & RedChannel) != 0) && (white.red != black.red)) equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].red-black.red))/(white.red-black.red))); if (((channel & GreenChannel) != 0) && (white.green != black.green)) equalize_map[i].green=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].green-black.green))/(white.green-black.green))); if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) equalize_map[i].blue=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].blue-black.blue))/(white.blue-black.blue))); if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) equalize_map[i].opacity=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].opacity-black.opacity))/(white.opacity-black.opacity))); if ((((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) && (white.index != black.index)) equalize_map[i].index=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].index-black.index))/(white.index-black.index))); } histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); map=(MagickPixelPacket *) RelinquishMagickMemory(map); if (image->storage_class == PseudoClass) { /* Equalize colormap. 
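The colormap entries are remapped through equalize_map, which holds the normalized cumulative histogram: equalize_map[i] is roughly ScaleMapToQuantum(MaxMap*(cdf[i]-cdf_min)/(cdf_max-cdf_min)) per channel, so heavily populated intensity ranges are spread out over the full quantum range.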
*/ for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & SyncChannels) != 0) { if (white.red != black.red) { image->colormap[i].red=equalize_map[ ScaleQuantumToMap(image->colormap[i].red)].red; image->colormap[i].green=equalize_map[ ScaleQuantumToMap(image->colormap[i].green)].red; image->colormap[i].blue=equalize_map[ ScaleQuantumToMap(image->colormap[i].blue)].red; image->colormap[i].opacity=equalize_map[ ScaleQuantumToMap(image->colormap[i].opacity)].red; } continue; } if (((channel & RedChannel) != 0) && (white.red != black.red)) image->colormap[i].red=equalize_map[ ScaleQuantumToMap(image->colormap[i].red)].red; if (((channel & GreenChannel) != 0) && (white.green != black.green)) image->colormap[i].green=equalize_map[ ScaleQuantumToMap(image->colormap[i].green)].green; if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) image->colormap[i].blue=equalize_map[ ScaleQuantumToMap(image->colormap[i].blue)].blue; if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) image->colormap[i].opacity=equalize_map[ ScaleQuantumToMap(image->colormap[i].opacity)].opacity; } } /* Equalize image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & SyncChannels) != 0) { if (white.red != black.red) { SetPixelRed(q,equalize_map[ ScaleQuantumToMap(GetPixelRed(q))].red); SetPixelGreen(q,equalize_map[ ScaleQuantumToMap(GetPixelGreen(q))].red); SetPixelBlue(q,equalize_map[ ScaleQuantumToMap(GetPixelBlue(q))].red); SetPixelOpacity(q,equalize_map[ ScaleQuantumToMap(GetPixelOpacity(q))].red); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,equalize_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].red); } q++; continue; } if (((channel & RedChannel) != 0) && (white.red != black.red)) SetPixelRed(q,equalize_map[ ScaleQuantumToMap(GetPixelRed(q))].red); if (((channel & GreenChannel) != 0) && (white.green != black.green)) SetPixelGreen(q,equalize_map[ ScaleQuantumToMap(GetPixelGreen(q))].green); if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) SetPixelBlue(q,equalize_map[ ScaleQuantumToMap(GetPixelBlue(q))].blue); if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) SetPixelOpacity(q,equalize_map[ ScaleQuantumToMap(GetPixelOpacity(q))].opacity); if ((((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) && (white.index != black.index)) SetPixelIndex(indexes+x,equalize_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].index); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EqualizeImageChannel) #endif proceed=SetImageProgress(image,EqualizeImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); 
equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory(equalize_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GammaImage() gamma-corrects a particular image channel. The same % image viewed on different devices will have perceptual differences in the % way the image's intensities are represented on the screen. Specify % individual gamma levels for the red, green, and blue channels, or adjust % all three with the gamma parameter. Values typically range from 0.8 to 2.3. % % You can also reduce the influence of a particular channel with a gamma % value of 0. % % The format of the GammaImage method is: % % MagickBooleanType GammaImage(Image *image,const char *level) % MagickBooleanType GammaImageChannel(Image *image, % const ChannelType channel,const double gamma) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o level: the image gamma as a string (e.g. 1.6,1.2,1.0). % % o gamma: the image gamma. % */ static inline double gamma_pow(const double value,const double gamma) { return(value < 0.0 ? value : pow(value,gamma)); } MagickExport MagickBooleanType GammaImage(Image *image,const char *level) { GeometryInfo geometry_info; MagickPixelPacket gamma; MagickStatusType flags, status; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (level == (char *) NULL) return(MagickFalse); flags=ParseGeometry(level,&geometry_info); gamma.red=geometry_info.rho; gamma.green=geometry_info.sigma; if ((flags & SigmaValue) == 0) gamma.green=gamma.red; gamma.blue=geometry_info.xi; if ((flags & XiValue) == 0) gamma.blue=gamma.red; if ((gamma.red == 1.0) && (gamma.green == 1.0) && (gamma.blue == 1.0)) return(MagickTrue); if ((gamma.red == gamma.green) && (gamma.green == gamma.blue)) status=GammaImageChannel(image,(ChannelType) (RedChannel | GreenChannel | BlueChannel),(double) gamma.red); else { status=GammaImageChannel(image,RedChannel,(double) gamma.red); status&=GammaImageChannel(image,GreenChannel,(double) gamma.green); status&=GammaImageChannel(image,BlueChannel,(double) gamma.blue); } return(status != 0 ? MagickTrue : MagickFalse); } MagickExport MagickBooleanType GammaImageChannel(Image *image, const ChannelType channel,const double gamma) { #define GammaCorrectImageTag "GammaCorrect/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; Quantum *gamma_map; register ssize_t i; ssize_t y; /* Allocate and initialize gamma maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (gamma == 1.0) return(MagickTrue); gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map)); if (gamma_map == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) ResetMagickMemory(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map)); if (gamma != 0.0) for (i=0; i <= (ssize_t) MaxMap; i++) gamma_map[i]=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*pow((double) i/MaxMap,1.0/gamma)))); if (image->storage_class == PseudoClass) { /* Gamma-correct colormap. 
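Each colormap entry is passed through the same transfer curve used for the image pixels: in the non-HDRI build the precomputed gamma_map[i], which encodes QuantumRange*(i/MaxMap)^(1.0/gamma), and under HDRI the gamma_pow() form computed directly; opacity is corrected as alpha when the image has a matte channel.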
*/ for (i=0; i < (ssize_t) image->colors; i++) { #if !defined(MAGICKCORE_HDRI_SUPPORT) if ((channel & RedChannel) != 0) image->colormap[i].red=gamma_map[ScaleQuantumToMap( image->colormap[i].red)]; if ((channel & GreenChannel) != 0) image->colormap[i].green=gamma_map[ScaleQuantumToMap( image->colormap[i].green)]; if ((channel & BlueChannel) != 0) image->colormap[i].blue=gamma_map[ScaleQuantumToMap( image->colormap[i].blue)]; if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) image->colormap[i].opacity=gamma_map[ScaleQuantumToMap( image->colormap[i].opacity)]; else image->colormap[i].opacity=QuantumRange-gamma_map[ ScaleQuantumToMap((Quantum) (QuantumRange- image->colormap[i].opacity))]; } #else if ((channel & RedChannel) != 0) image->colormap[i].red=QuantumRange*gamma_pow(QuantumScale* image->colormap[i].red,1.0/gamma); if ((channel & GreenChannel) != 0) image->colormap[i].green=QuantumRange*gamma_pow(QuantumScale* image->colormap[i].green,1.0/gamma); if ((channel & BlueChannel) != 0) image->colormap[i].blue=QuantumRange*gamma_pow(QuantumScale* image->colormap[i].blue,1.0/gamma); if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) image->colormap[i].opacity=QuantumRange*gamma_pow(QuantumScale* image->colormap[i].opacity,1.0/gamma); else image->colormap[i].opacity=QuantumRange-QuantumRange*gamma_pow( QuantumScale*(QuantumRange-image->colormap[i].opacity),1.0/ gamma); } #endif } } /* Gamma-correct image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { #if !defined(MAGICKCORE_HDRI_SUPPORT) if ((channel & SyncChannels) != 0) { SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]); SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]); SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]); } else { if ((channel & RedChannel) != 0) SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]); if ((channel & GreenChannel) != 0) SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]); if ((channel & BlueChannel) != 0) SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]); if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) SetPixelOpacity(q,gamma_map[ScaleQuantumToMap( GetPixelOpacity(q))]); else SetPixelAlpha(q,gamma_map[ScaleQuantumToMap((Quantum) GetPixelAlpha(q))]); } } #else if ((channel & SyncChannels) != 0) { SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q), 1.0/gamma)); SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*GetPixelGreen(q), 1.0/gamma)); SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q), 1.0/gamma)); } else { if ((channel & RedChannel) != 0) SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q), 1.0/gamma)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale* GetPixelGreen(q),1.0/gamma)); if ((channel & BlueChannel) != 0) 
SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q), 1.0/gamma)); if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) SetPixelOpacity(q,QuantumRange*gamma_pow(QuantumScale* GetPixelOpacity(q),1.0/gamma)); else SetPixelAlpha(q,QuantumRange*gamma_pow(QuantumScale* GetPixelAlpha(q),1.0/gamma)); } } #endif q++; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,gamma_map[ScaleQuantumToMap( GetPixelIndex(indexes+x))]); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GammaImageChannel) #endif proceed=SetImageProgress(image,GammaCorrectImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map); if (image->gamma != 0.0) image->gamma*=gamma; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G r a y s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GrayscaleImage() converts the colors in the reference image to gray. % % The format of the GrayscaleImageChannel method is: % % MagickBooleanType GrayscaleImage(Image *image, % const PixelIntensityMethod method) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % */ static inline MagickRealType MagickMax(const MagickRealType x, const MagickRealType y) { if (x > y) return(x); return(y); } static inline MagickRealType MagickMin(const MagickRealType x, const MagickRealType y) { if (x < y) return(x); return(y); } MagickExport MagickBooleanType GrayscaleImage(Image *image, const PixelIntensityMethod method) { #define GrayscaleImageTag "Grayscale/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } switch (image->intensity) { case Rec601LuminancePixelIntensityMethod: case Rec709LuminancePixelIntensityMethod: { (void) SetImageColorspace(image,RGBColorspace); break; } case Rec601LumaPixelIntensityMethod: case Rec709LumaPixelIntensityMethod: case UndefinedPixelIntensityMethod: { (void) SetImageColorspace(image,sRGBColorspace); break; } default: break; } /* Grayscale image. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType blue, green, intensity, red; red=(MagickRealType) q->red; green=(MagickRealType) q->green; blue=(MagickRealType) q->blue; intensity=0.0; switch (method) { case AveragePixelIntensityMethod: { intensity=(red+green+blue)/3.0; break; } case BrightnessPixelIntensityMethod: { intensity=MagickMax(MagickMax(red,green),blue); break; } case LightnessPixelIntensityMethod: { intensity=(MagickMin(MagickMin(red,green),blue)+ MagickMax(MagickMax(red,green),blue))/2.0; break; } case MSPixelIntensityMethod: { intensity=(MagickRealType) (((double) red*red+green*green+ blue*blue)/(3.0*QuantumRange)); break; } case Rec601LumaPixelIntensityMethod: { if (image->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec601LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec709LumaPixelIntensityMethod: default: { if (image->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case Rec709LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case RMSPixelIntensityMethod: { intensity=(MagickRealType) (sqrt((double) red*red+green*green+ blue*blue)/sqrt(3.0)); break; } } SetPixelGray(q,ClampToQuantum(intensity)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GrayscaleImageChannel) #endif proceed=SetImageProgress(image,GrayscaleImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); image->intensity=method; image->type=GrayscaleType; return(SetImageColorspace(image,GRAYColorspace)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H a l d C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % HaldClutImage() applies a Hald color lookup table to the image. A Hald % color lookup table is a 3-dimensional color cube mapped to 2 dimensions. % Create it with the HALD coder. You can apply any color transformation to % the Hald image and then use this method to apply the transform to the % image. 
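%
%  A short usage sketch (the 'hald_info' ImageInfo, which might name an
%  identity Hald image such as hald:8 that was subsequently color-edited,
%  and the variable names are illustrative only):
%
%    Image *hald_image = ReadImage(hald_info,exception);
%    if (hald_image != (Image *) NULL)
%      {
%        (void) HaldClutImage(image,hald_image);
%        hald_image=DestroyImage(hald_image);
%      }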
% % The format of the HaldClutImage method is: % % MagickBooleanType HaldClutImage(Image *image,Image *hald_image) % MagickBooleanType HaldClutImageChannel(Image *image, % const ChannelType channel,Image *hald_image) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o hald_image: the color lookup table image for replacement color values. % % o channel: the channel. % */ MagickExport MagickBooleanType HaldClutImage(Image *image, const Image *hald_image) { return(HaldClutImageChannel(image,DefaultChannels,hald_image)); } MagickExport MagickBooleanType HaldClutImageChannel(Image *image, const ChannelType channel,const Image *hald_image) { #define HaldClutImageTag "Clut/Image" typedef struct _HaldInfo { MagickRealType x, y, z; } HaldInfo; CacheView *hald_view, *image_view; double width; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; size_t cube_size, length, level; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(hald_image != (Image *) NULL); assert(hald_image->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Hald clut image. */ status=MagickTrue; progress=0; length=(size_t) MagickMin((MagickRealType) hald_image->columns, (MagickRealType) hald_image->rows); for (level=2; (level*level*level) < length; level++) ; level*=level; cube_size=level*level; width=(double) hald_image->columns; GetMagickPixelPacket(hald_image,&zero); exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); hald_view=AcquireAuthenticCacheView(hald_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,hald_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double offset; HaldInfo point; MagickPixelPacket pixel, pixel1, pixel2, pixel3, pixel4; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; pixel1=zero; pixel2=zero; pixel3=zero; pixel4=zero; for (x=0; x < (ssize_t) image->columns; x++) { point.x=QuantumScale*(level-1.0)*GetPixelRed(q); point.y=QuantumScale*(level-1.0)*GetPixelGreen(q); point.z=QuantumScale*(level-1.0)*GetPixelBlue(q); offset=(double) (point.x+level*floor(point.y)+cube_size*floor(point.z)); point.x-=floor(point.x); point.y-=floor(point.y); point.z-=floor(point.z); (void) InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width), &pixel1,exception); (void) InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/ width),&pixel2,exception); MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2, pixel2.opacity,point.y,&pixel3); offset+=cube_size; (void) InterpolateMagickPixelPacket(image,hald_view,
UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width), &pixel1,exception); (void) InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/ width),&pixel2,exception); MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2, pixel2.opacity,point.y,&pixel4); MagickPixelCompositeAreaBlend(&pixel3,pixel3.opacity,&pixel4, pixel4.opacity,point.z,&pixel); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(pixel.blue)); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(pixel.index)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_HaldClutImageChannel) #endif proceed=SetImageProgress(image,HaldClutImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } hald_view=DestroyCacheView(hald_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImage() adjusts the levels of a particular image channel by % scaling the colors falling between specified white and black points to % the full available quantum range. % % The parameters provided represent the black, and white points. The black % point specifies the darkest color in the image. Colors darker than the % black point are set to zero. White point specifies the lightest color in % the image. Colors brighter than the white point are set to the maximum % quantum value. % % If a '!' flag is given, map black and white colors to the given levels % rather than mapping those levels to black and white. See % LevelizeImageChannel() and LevelizeImageChannel(), below. % % Gamma specifies a gamma correction to apply to the image. % % The format of the LevelImage method is: % % MagickBooleanType LevelImage(Image *image,const char *levels) % % A description of each parameter follows: % % o image: the image. % % o levels: Specify the levels where the black and white points have the % range of 0-QuantumRange, and gamma has the range 0-10 (e.g. 10x90%+2). % A '!' flag inverts the re-mapping. % */ MagickExport MagickBooleanType LevelImage(Image *image,const char *levels) { double black_point, gamma, white_point; GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; /* Parse levels. 
*/ if (levels == (char *) NULL) return(MagickFalse); flags=ParseGeometry(levels,&geometry_info); black_point=geometry_info.rho; white_point=(double) QuantumRange; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; gamma=1.0; if ((flags & XiValue) != 0) gamma=geometry_info.xi; if ((flags & PercentValue) != 0) { black_point*=(double) image->columns*image->rows/100.0; white_point*=(double) image->columns*image->rows/100.0; } if ((flags & SigmaValue) == 0) white_point=(double) QuantumRange-black_point; if ((flags & AspectValue ) == 0) status=LevelImageChannel(image,DefaultChannels,black_point,white_point, gamma); else status=LevelizeImage(image,black_point,white_point,gamma); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImage() applies the normal level operation to the image, spreading % out the values between the black and white points over the entire range of % values. Gamma correction is also applied after the values has been mapped. % % It is typically used to improve image contrast, or to provide a controlled % linear threshold for the image. If the black and white points are set to % the minimum and maximum values found in the image, the image can be % normalized. or by swapping black and white values, negate the image. % % The format of the LevelImage method is: % % MagickBooleanType LevelImage(Image *image,const double black_point, % const double white_point,const double gamma) % MagickBooleanType LevelImageChannel(Image *image, % const ChannelType channel,const double black_point, % const double white_point,const double gamma) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_point: The level which is to be mapped to zero (black) % % o white_point: The level which is to be mapped to QuantiumRange (white) % % o gamma: adjust gamma by this factor before mapping values. % use 1.0 for purely linear stretching of image color values % */ static inline double LevelPixel(const double black_point, const double white_point,const double gamma,const MagickRealType pixel) { double level_pixel, scale; scale=(white_point != black_point) ? 1.0/(white_point-black_point) : 1.0; level_pixel=QuantumRange*gamma_pow(scale*((double) pixel-black_point),1.0/ gamma); return(level_pixel); } MagickExport MagickBooleanType LevelImageChannel(Image *image, const ChannelType channel,const double black_point,const double white_point, const double gamma) { #define LevelImageTag "Level/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Level colormap. 
*/ if ((channel & RedChannel) != 0) image->colormap[i].red=(Quantum) ClampToQuantum(LevelPixel(black_point, white_point,gamma,(MagickRealType) image->colormap[i].red)); if ((channel & GreenChannel) != 0) image->colormap[i].green=(Quantum) ClampToQuantum(LevelPixel( black_point,white_point,gamma,(MagickRealType) image->colormap[i].green)); if ((channel & BlueChannel) != 0) image->colormap[i].blue=(Quantum) ClampToQuantum(LevelPixel(black_point, white_point,gamma,(MagickRealType) image->colormap[i].blue)); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=(Quantum) (QuantumRange-(Quantum) ClampToQuantum(LevelPixel(black_point,white_point,gamma, (MagickRealType) (QuantumRange-image->colormap[i].opacity)))); } /* Level image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma, (MagickRealType) GetPixelRed(q)))); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma, (MagickRealType) GetPixelGreen(q)))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma, (MagickRealType) GetPixelBlue(q)))); if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue)) SetPixelAlpha(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma, (MagickRealType) GetPixelAlpha(q)))); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(LevelPixel(black_point, white_point,gamma,(MagickRealType) GetPixelIndex(indexes+x)))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_LevelImageChannel) #endif proceed=SetImageProgress(image,LevelImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); (void) ClampImage(image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l i z e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelizeImageChannel() applies the reversed LevelImage() operation to just % the specific channels specified. It compresses the full range of color % values, so that they lie between the given black and white points. Gamma is % applied before the values are mapped. % % LevelizeImageChannel() can be called with by using a +level command line % API option, or using a '!' on a -level or LevelImage() geometry string. % % It can be used for example de-contrast a greyscale image to the exact % levels specified. 
Or by using specific levels for each channel of an image % you can convert a gray-scale image to any linear color gradient, according % to those levels. % % The format of the LevelizeImageChannel method is: % % MagickBooleanType LevelizeImageChannel(Image *image, % const ChannelType channel,const char *levels) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_point: The level to map zero (black) to. % % o white_point: The level to map QuantiumRange (white) to. % % o gamma: adjust gamma by this factor before mapping values. % */ MagickExport MagickBooleanType LevelizeImage(Image *image, const double black_point,const double white_point,const double gamma) { MagickBooleanType status; status=LevelizeImageChannel(image,DefaultChannels,black_point,white_point, gamma); return(status); } MagickExport MagickBooleanType LevelizeImageChannel(Image *image, const ChannelType channel,const double black_point,const double white_point, const double gamma) { #define LevelizeImageTag "Levelize/Image" #define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \ (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point) CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Level colormap. */ if ((channel & RedChannel) != 0) image->colormap[i].red=LevelizeValue(image->colormap[i].red); if ((channel & GreenChannel) != 0) image->colormap[i].green=LevelizeValue(image->colormap[i].green); if ((channel & BlueChannel) != 0) image->colormap[i].blue=LevelizeValue(image->colormap[i].blue); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=(Quantum) (QuantumRange-LevelizeValue( QuantumRange-image->colormap[i].opacity)); } /* Level image. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,LevelizeValue(GetPixelRed(q))); if ((channel & GreenChannel) != 0) SetPixelGreen(q,LevelizeValue(GetPixelGreen(q))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,LevelizeValue(GetPixelBlue(q))); if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue)) SetPixelAlpha(q,LevelizeValue(GetPixelAlpha(q))); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,LevelizeValue(GetPixelIndex(indexes+x))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_LevelizeImageChannel) #endif proceed=SetImageProgress(image,LevelizeImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImageColor() maps the given color to "black" and "white" values, % linearly spreading out the colors, and level values on a channel by channel % bases, as per LevelImage(). The given colors allows you to specify % different level ranges for each of the color channels separately. % % If the boolean 'invert' is set true the image values will modifyed in the % reverse direction. That is any existing "black" and "white" colors in the % image will become the color values given, with all other values compressed % appropriatally. This effectivally maps a greyscale gradient into the given % color gradient. % % The format of the LevelColorsImageChannel method is: % % MagickBooleanType LevelColorsImage(Image *image, % const MagickPixelPacket *black_color, % const MagickPixelPacket *white_color,const MagickBooleanType invert) % MagickBooleanType LevelColorsImageChannel(Image *image, % const ChannelType channel,const MagickPixelPacket *black_color, % const MagickPixelPacket *white_color,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. 
% % o black_color: The color to map black to/from % % o white_point: The color to map white to/from % % o invert: if true map the colors (levelize), rather than from (level) % */ MagickExport MagickBooleanType LevelColorsImage(Image *image, const MagickPixelPacket *black_color,const MagickPixelPacket *white_color, const MagickBooleanType invert) { MagickBooleanType status; status=LevelColorsImageChannel(image,DefaultChannels,black_color,white_color, invert); return(status); } MagickExport MagickBooleanType LevelColorsImageChannel(Image *image, const ChannelType channel,const MagickPixelPacket *black_color, const MagickPixelPacket *white_color,const MagickBooleanType invert) { MagickStatusType status; /* Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsGrayColorspace(black_color->colorspace) == MagickFalse) || (IsGrayColorspace(white_color->colorspace) == MagickFalse))) (void) SetImageColorspace(image,sRGBColorspace); status=MagickFalse; if (invert == MagickFalse) { if ((channel & RedChannel) != 0) status&=LevelImageChannel(image,RedChannel,black_color->red, white_color->red,(double) 1.0); if ((channel & GreenChannel) != 0) status&=LevelImageChannel(image,GreenChannel,black_color->green, white_color->green,(double) 1.0); if ((channel & BlueChannel) != 0) status&=LevelImageChannel(image,BlueChannel,black_color->blue, white_color->blue,(double) 1.0); if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue)) status&=LevelImageChannel(image,OpacityChannel,black_color->opacity, white_color->opacity,(double) 1.0); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) status&=LevelImageChannel(image,IndexChannel,black_color->index, white_color->index,(double) 1.0); } else { if ((channel & RedChannel) != 0) status&=LevelizeImageChannel(image,RedChannel,black_color->red, white_color->red,(double) 1.0); if ((channel & GreenChannel) != 0) status&=LevelizeImageChannel(image,GreenChannel,black_color->green, white_color->green,(double) 1.0); if ((channel & BlueChannel) != 0) status&=LevelizeImageChannel(image,BlueChannel,black_color->blue, white_color->blue,(double) 1.0); if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue)) status&=LevelizeImageChannel(image,OpacityChannel,black_color->opacity, white_color->opacity,(double) 1.0); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) status&=LevelizeImageChannel(image,IndexChannel,black_color->index, white_color->index,(double) 1.0); } return(status == 0 ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i n e a r S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LinearStretchImage() discards any pixels below the black point and above % the white point and levels the remaining pixels. % % The format of the LinearStretchImage method is: % % MagickBooleanType LinearStretchImage(Image *image, % const double black_point,const double white_point) % % A description of each parameter follows: % % o image: the image. % % o black_point: the black point. % % o white_point: the white point. 
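%
% Note that, as implemented below, both points are cumulative pixel counts
% taken from the intensity histogram, not quantum values. A hypothetical call
% that clips roughly the darkest 2% and brightest 1% of pixels might look like:
%
%   (void) LinearStretchImage(image,0.02*image->columns*image->rows,
%     0.01*image->columns*image->rows);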
% */ MagickExport MagickBooleanType LinearStretchImage(Image *image, const double black_point,const double white_point) { #define LinearStretchImageTag "LinearStretch/Image" ExceptionInfo *exception; MagickBooleanType status; MagickRealType *histogram, intensity; ssize_t black, white, y; /* Allocate histogram and linear map. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); histogram=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); if (histogram == (MagickRealType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Form histogram. */ (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram)); exception=(&image->exception); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *restrict p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=(ssize_t) image->columns-1; x >= 0; x--) { histogram[ScaleQuantumToMap(ClampToQuantum(GetPixelIntensity(image,p)))]++; p++; } } /* Find the histogram boundaries by locating the black and white point levels. */ intensity=0.0; for (black=0; black < (ssize_t) MaxMap; black++) { intensity+=histogram[black]; if (intensity >= black_point) break; } intensity=0.0; for (white=(ssize_t) MaxMap; white != 0; white--) { intensity+=histogram[white]; if (intensity >= white_point) break; } histogram=(MagickRealType *) RelinquishMagickMemory(histogram); status=LevelImageChannel(image,DefaultChannels,(double) black,(double) white, 1.0); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o d u l a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModulateImage() lets you control the brightness, saturation, and hue % of an image. Modulate represents the brightness, saturation, and hue % as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the % modulation is lightness, saturation, and hue. For HWB, use blackness, % whiteness, and hue. And for HCL, use chrome, luma, and hue. % % The format of the ModulateImage method is: % % MagickBooleanType ModulateImage(Image *image,const char *modulate) % % A description of each parameter follows: % % o image: the image. % % o modulate: Define the percent change in brightness, saturation, and % hue. % */ static inline void ModulateHCL(const double percent_hue, const double percent_chroma,const double percent_luma,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma); hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue > 1.0) hue-=1.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHCLp(const double percent_hue, const double percent_chroma,const double percent_luma,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. 
*/ ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma); hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue > 1.0) hue-=1.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLpToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHSB(const double percent_hue, const double percent_saturation,const double percent_brightness, Quantum *red,Quantum *green,Quantum *blue) { double brightness, hue, saturation; /* Increase or decrease color brightness, saturation, or hue. */ ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue > 1.0) hue-=1.0; saturation*=0.01*percent_saturation; brightness*=0.01*percent_brightness; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } static inline void ModulateHSI(const double percent_hue, const double percent_saturation,const double percent_intensity, Quantum *red,Quantum *green,Quantum *blue) { double intensity, hue, saturation; /* Increase or decrease color intensity, saturation, or hue. */ ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity); hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue > 1.0) hue-=1.0; saturation*=0.01*percent_saturation; intensity*=0.01*percent_intensity; ConvertHSIToRGB(hue,saturation,intensity,red,green,blue); } static inline void ModulateHSL(const double percent_hue, const double percent_saturation,const double percent_lightness, Quantum *red,Quantum *green,Quantum *blue) { double hue, lightness, saturation; /* Increase or decrease color lightness, saturation, or hue. */ ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness); hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue >= 1.0) hue-=1.0; saturation*=0.01*percent_saturation; lightness*=0.01*percent_lightness; ConvertHSLToRGB(hue,saturation,lightness,red,green,blue); } static inline void ModulateHSV(const double percent_hue, const double percent_saturation,const double percent_value,Quantum *red, Quantum *green,Quantum *blue) { double hue, saturation, value; /* Increase or decrease color value, saturation, or hue. */ ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value); hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue >= 1.0) hue-=1.0; saturation*=0.01*percent_saturation; value*=0.01*percent_value; ConvertHSVToRGB(hue,saturation,value,red,green,blue); } static inline void ModulateHWB(const double percent_hue, const double percent_whiteness,const double percent_blackness,Quantum *red, Quantum *green,Quantum *blue) { double blackness, hue, whiteness; /* Increase or decrease color blackness, whiteness, or hue. */ ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness); hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue >= 1.0) hue-=1.0; blackness*=0.01*percent_blackness; whiteness*=0.01*percent_whiteness; ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue); } static inline void ModulateLCHab(const double percent_luma, const double percent_chroma,const double percent_hue,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. 
*/ ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue >= 1.0) hue-=1.0; ConvertLCHabToRGB(luma,chroma,hue,red,green,blue); } static inline void ModulateLCHuv(const double percent_luma, const double percent_chroma,const double percent_hue,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue >= 1.0) hue-=1.0; ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue); } MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate) { #define ModulateImageTag "Modulate/Image" CacheView *image_view; ColorspaceType colorspace; const char *artifact; double percent_brightness, percent_hue, percent_saturation; ExceptionInfo *exception; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; register ssize_t i; ssize_t y; /* Initialize modulate table. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (modulate == (char *) NULL) return(MagickFalse); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); flags=ParseGeometry(modulate,&geometry_info); percent_brightness=geometry_info.rho; percent_saturation=geometry_info.sigma; if ((flags & SigmaValue) == 0) percent_saturation=100.0; percent_hue=geometry_info.xi; if ((flags & XiValue) == 0) percent_hue=100.0; colorspace=UndefinedColorspace; artifact=GetImageArtifact(image,"modulate:colorspace"); if (artifact != (const char *) NULL) colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions, MagickFalse,artifact); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { Quantum blue, green, red; /* Modulate image colormap. */ red=image->colormap[i].red; green=image->colormap[i].green; blue=image->colormap[i].blue; switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSIColorspace: { ModulateHSI(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: case LCHColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } image->colormap[i].red=red; image->colormap[i].green=green; image->colormap[i].blue=blue; } /* Modulate image. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum blue, green, red; red=GetPixelRed(q); green=GetPixelGreen(q); blue=GetPixelBlue(q); switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHColorspace: case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ModulateImage) #endif proceed=SetImageProgress(image,ModulateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e g a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NegateImage() negates the colors in the reference image. The grayscale % option means that only grayscale values within the image are negated. % % The format of the NegateImageChannel method is: % % MagickBooleanType NegateImage(Image *image, % const MagickBooleanType grayscale) % MagickBooleanType NegateImageChannel(Image *image, % const ChannelType channel,const MagickBooleanType grayscale) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o grayscale: If MagickTrue, only negate grayscale pixels within the image. 
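%
% For example, a hypothetical call that inverts only the red channel of an
% image would be:
%
%   (void) NegateImageChannel(image,RedChannel,MagickFalse);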
% */ MagickExport MagickBooleanType NegateImage(Image *image, const MagickBooleanType grayscale) { MagickBooleanType status; status=NegateImageChannel(image,DefaultChannels,grayscale); return(status); } MagickExport MagickBooleanType NegateImageChannel(Image *image, const ChannelType channel,const MagickBooleanType grayscale) { #define NegateImageTag "Negate/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { /* Negate colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { if (grayscale != MagickFalse) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((channel & RedChannel) != 0) image->colormap[i].red=QuantumRange-image->colormap[i].red; if ((channel & GreenChannel) != 0) image->colormap[i].green=QuantumRange-image->colormap[i].green; if ((channel & BlueChannel) != 0) image->colormap[i].blue=QuantumRange-image->colormap[i].blue; } } /* Negate image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); if (grayscale != MagickFalse) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRed(q) != GetPixelGreen(q)) || (GetPixelGreen(q) != GetPixelBlue(q))) { q++; continue; } if ((channel & RedChannel) != 0) SetPixelRed(q,QuantumRange-GetPixelRed(q)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,QuantumRange-GetPixelGreen(q)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,QuantumRange-GetPixelBlue(q)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,QuantumRange-GetPixelOpacity(q)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_NegateImageChannel) #endif proceed=SetImageProgress(image,NegateImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(MagickTrue); } /* Negate image. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); if (channel == DefaultChannels) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x)); SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x)); SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x)); } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x)); if ((channel & GreenChannel) != 0) SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x)); if ((channel & BlueChannel) != 0) SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q+x,QuantumRange-GetPixelOpacity(q+x)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x)); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_NegateImageChannel) #endif proceed=SetImageProgress(image,NegateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N o r m a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The NormalizeImage() method enhances the contrast of a color image by % mapping the darkest 2 percent of all pixel to black and the brightest % 1 percent to white. % % The format of the NormalizeImage method is: % % MagickBooleanType NormalizeImage(Image *image) % MagickBooleanType NormalizeImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % */ MagickExport MagickBooleanType NormalizeImage(Image *image) { MagickBooleanType status; status=NormalizeImageChannel(image,DefaultChannels); return(status); } MagickExport MagickBooleanType NormalizeImageChannel(Image *image, const ChannelType channel) { double black_point, white_point; black_point=(double) image->columns*image->rows*0.0015; white_point=(double) image->columns*image->rows*0.9995; return(ContrastStretchImageChannel(image,channel,black_point,white_point)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S i g m o i d a l C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SigmoidalContrastImage() adjusts the contrast of an image with a non-linear % sigmoidal contrast algorithm. Increase the contrast of the image using a % sigmoidal transfer function without saturating highlights or shadows. 
% Contrast indicates how much to increase the contrast (0 is none; 3 is % typical; 20 is pushing it); mid-point indicates where midtones fall in the % resultant image (0 is white; 50% is middle-gray; 100% is black). Set % sharpen to MagickTrue to increase the image contrast otherwise the contrast % is reduced. % % The format of the SigmoidalContrastImage method is: % % MagickBooleanType SigmoidalContrastImage(Image *image, % const MagickBooleanType sharpen,const char *levels) % MagickBooleanType SigmoidalContrastImageChannel(Image *image, % const ChannelType channel,const MagickBooleanType sharpen, % const double contrast,const double midpoint) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o sharpen: Increase or decrease image contrast. % % o contrast: strength of the contrast, the larger the number the more % 'threshold-like' it becomes. % % o midpoint: midpoint of the function as a color value 0 to QuantumRange. % */ /* ImageMagick 7 has a version of this function which does not use LUTs. */ /* Sigmoidal function Sigmoidal with inflexion point moved to b and "slope constant" set to a. The first version, based on the hyperbolic tangent tanh, when combined with the scaling step, is an exact arithmetic clone of the the sigmoid function based on the logistic curve. The equivalence is based on the identity 1/(1+exp(-t)) = (1+tanh(t/2))/2 (http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the scaled sigmoidal derivation is invariant under affine transformations of the ordinate. The tanh version is almost certainly more accurate and cheaper. The 0.5 factor in the argument is to clone the legacy ImageMagick behavior. The reason for making the define depend on atanh even though it only uses tanh has to do with the construction of the inverse of the scaled sigmoidal. */ #if defined(MAGICKCORE_HAVE_ATANH) #define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) ) #else #define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) ) #endif /* Scaled sigmoidal function: ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) / ( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) ) See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by zero. This is fixed below by exiting immediately when contrast is small, leaving the image (or colormap) unmodified. This appears to be safe because the series expansion of the logistic sigmoidal function around x=b is 1/2-a*(b-x)/4+... so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh). */ #define ScaledSigmoidal(a,b,x) ( \ (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \ (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) ) /* Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even when creating a LUT from in gamut values, hence the branching. In addition, HDRI may have out of gamut values. InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal: It is only a right inverse. This is unavoidable. */ static inline double InverseScaledSigmoidal(const double a,const double b, const double x) { const double sig0=Sigmoidal(a,b,0.0); const double sig1=Sigmoidal(a,b,1.0); const double argument=(sig1-sig0)*x+sig0; const double clamped= ( #if defined(MAGICKCORE_HAVE_ATANH) argument < -1+MagickEpsilon ? 
-1+MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b+(2.0/a)*atanh(clamped)); #else argument < MagickEpsilon ? MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b-log(1.0/clamped-1.0)/a); #endif } MagickExport MagickBooleanType SigmoidalContrastImage(Image *image, const MagickBooleanType sharpen,const char *levels) { GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; flags=ParseGeometry(levels,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0*QuantumRange/2.0; if ((flags & PercentValue) != 0) geometry_info.sigma=1.0*QuantumRange*geometry_info.sigma/100.0; status=SigmoidalContrastImageChannel(image,DefaultChannels,sharpen, geometry_info.rho,geometry_info.sigma); return(status); } MagickExport MagickBooleanType SigmoidalContrastImageChannel(Image *image, const ChannelType channel,const MagickBooleanType sharpen, const double contrast,const double midpoint) { #define SigmoidalContrastImageTag "SigmoidalContrast/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickRealType *sigmoidal_map; register ssize_t i; ssize_t y; /* Side effect: clamps values unless contrast<MagickEpsilon, in which case nothing is done. */ if (contrast < MagickEpsilon) return(MagickTrue); /* Allocate and initialize sigmoidal maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); sigmoidal_map=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*sigmoidal_map)); if (sigmoidal_map == (MagickRealType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) ResetMagickMemory(sigmoidal_map,0,(MaxMap+1)*sizeof(*sigmoidal_map)); if (sharpen != MagickFalse) for (i=0; i <= (ssize_t) MaxMap; i++) sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) (MaxMap*ScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/ MaxMap))); else for (i=0; i <= (ssize_t) MaxMap; i++) sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) ( MaxMap*InverseScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/ MaxMap))); /* Sigmoidal-contrast enhance colormap. */ if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) image->colormap[i].red=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].red)]); if ((channel & GreenChannel) != 0) image->colormap[i].green=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].green)]); if ((channel & BlueChannel) != 0) image->colormap[i].blue=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].blue)]); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].opacity)]); } /* Sigmoidal-contrast enhance image. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelRed(q))])); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelGreen(q))])); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelBlue(q))])); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelOpacity(q))])); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelIndex(indexes+x))])); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SigmoidalContrastImageChannel) #endif proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); sigmoidal_map=(MagickRealType *) RelinquishMagickMemory(sigmoidal_map); return(status); }
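The enhancement routines above (LevelImageChannel, SigmoidalContrastImageChannel, ModulateImage, and friends) are plain MagickCore entry points, so they can be driven directly from a small C program. The following is a minimal, hypothetical sketch, not part of the source above; it assumes the ImageMagick 6 MagickCore API and header layout, and the input/output file names are placeholders supplied on the command line.

#include <stdio.h>
#include <magick/MagickCore.h>

int main(int argc, char **argv)
{
  ExceptionInfo *exception;
  Image *image;
  ImageInfo *image_info;

  if (argc != 3)
    {
      (void) fprintf(stderr, "usage: %s input output\n", argv[0]);
      return 1;
    }
  MagickCoreGenesis(*argv, MagickFalse);
  exception = AcquireExceptionInfo();
  image_info = AcquireImageInfo();
  (void) CopyMagickString(image_info->filename, argv[1], MaxTextExtent);
  image = ReadImage(image_info, exception);
  if (image == (Image *) NULL)
    {
      CatchException(exception);
      return 1;
    }
  /* Stretch levels with a mild gamma, boost midtone contrast, then
     increase saturation by 20%; all calls are defined above. */
  (void) LevelImageChannel(image, DefaultChannels, 0.0,
    (double) QuantumRange, 1.2);
  (void) SigmoidalContrastImageChannel(image, DefaultChannels, MagickTrue,
    3.0, 0.5 * QuantumRange);
  (void) ModulateImage(image, "100,120,100");
  (void) CopyMagickString(image->filename, argv[2], MaxTextExtent);
  (void) WriteImage(image_info, image);
  image = DestroyImage(image);
  image_info = DestroyImageInfo(image_info);
  exception = DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return 0;
}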
fibonacci_cutoff.c
#include "timing.h" #include <stdio.h> #include <stdlib.h> #include <omp.h> long int fibonacci_seq(int n) { long int x, y; if (n < 2) { return n; } else { x = fibonacci_seq(n-1); y = fibonacci_seq(n-2); return (x+y); } } long int fibonacci(int n, int level, int cutoff) { long int x, y; if (n < 2) { return n; } else if (level < cutoff) { #pragma omp task shared(x) x = fibonacci(n-1, level+1, cutoff); #pragma omp task shared(y) y = fibonacci(n-2, level+1, cutoff); #pragma omp taskwait return (x+y); } else { x = fibonacci_seq(n - 1); y = fibonacci_seq(n - 2); return (x+y); } } int main() { int n = 42; int cutoff = 10; double t1, t2; long int fib = 0; t1 = second(); #pragma omp parallel { #pragma omp single nowait { fib = fibonacci(n, 0, cutoff); } } t2 = second(); printf("Fib(%d) = %ld (in %g [s])\n", n, fib, (t2-t1)); return 0; }
GB_unop__sinh_fc64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__sinh_fc64_fc64 // op(A') function: GB_unop_tran__sinh_fc64_fc64 // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = csinh (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = csinh (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = csinh (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SINH || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__sinh_fc64_fc64 ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = csinh (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__sinh_fc64_fc64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
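Stripped of the GraphBLAS macros and type names, the apply kernel above is an element-wise csinh over a C99 double-complex array. The following is a small, self-contained sketch of that core pattern (hypothetical, not part of the generated file), with GxB_FC64_t replaced by plain double complex.

#include <complex.h>
#include <stdio.h>

int main(void)
{
    enum { N = 4 };
    double complex Ax[N] = { 0.0, 1.0 + 1.0*I, -2.0, 0.5*I };
    double complex Cx[N];
    int p;

    /* Same shape as GB_unop_apply__sinh_fc64_fc64: one independent
     * csinh per entry, parallelized with a static schedule. */
    #pragma omp parallel for schedule(static)
    for (p = 0; p < N; p++)
        Cx[p] = csinh(Ax[p]);

    for (p = 0; p < N; p++)
        printf("csinh(%g%+gi) = %g%+gi\n",
               creal(Ax[p]), cimag(Ax[p]), creal(Cx[p]), cimag(Cx[p]));
    return 0;
}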
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 24; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=2*Nt-2;t1++) { lbp=ceild(t1+2,2); ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1-8,12),ceild(4*t2-Nz-11,24));t3<=min(min(floord(4*Nt+Ny-9,24),floord(2*t1+Ny-3,24)),floord(4*t2+Ny-9,24));t3++) { for (t4=max(max(ceild(t1-124,128),ceild(4*t2-Nz-243,256)),ceild(24*t3-Ny-243,256));t4<=min(min(min(floord(4*Nt+Nx-9,256),floord(2*t1+Nx-3,256)),floord(4*t2+Nx-9,256)),floord(24*t3+Nx+11,256));t4++) { for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(256*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) { for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) { lbv=max(256*t4,4*t5+4); ubv=min(256*t4+255,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ 
(-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
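For reference, the time-skewed, tiled loop nest emitted by CLooG above computes the same update as the following untiled sketch (hypothetical, not part of the benchmark): a Jacobi-style sweep that double-buffers between A[t%2] and A[(t+1)%2], with the axis-symmetric coefficients laid out as coef[3*(r-1)+1+axis] for radius r = 1..4 along the z, y, and x axes.

/* Untiled reference sweep over the interior points 4..N-5 in each
 * dimension, matching the index guards in the tiled code above. */
void stencil_25pt_reference(int Nt, int Nz, int Ny, int Nx,
                            double ****A, double ****coef)
{
  int t, i, j, k, r;
  for (t = 0; t < Nt; t++) {
#pragma omp parallel for private(j, k, r)
    for (i = 4; i < Nz - 4; i++) {
      for (j = 4; j < Ny - 4; j++) {
        for (k = 4; k < Nx - 4; k++) {
          double v = coef[0][i][j][k] * A[t % 2][i][j][k];
          for (r = 1; r <= 4; r++) {
            v += coef[3*(r-1)+1][i][j][k]
                 * (A[t % 2][i - r][j][k] + A[t % 2][i + r][j][k]);
            v += coef[3*(r-1)+2][i][j][k]
                 * (A[t % 2][i][j - r][k] + A[t % 2][i][j + r][k]);
            v += coef[3*(r-1)+3][i][j][k]
                 * (A[t % 2][i][j][k - r] + A[t % 2][i][j][k + r]);
          }
          A[(t + 1) % 2][i][j][k] = v;
        }
      }
    }
  }
}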
test-libmvec-alias-mod.c
/* Part of test to build shared library to ensure link against *_finite aliases from libmvec. Copyright (C) 2016-2017 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #define N 4000 FLOAT log_arg[N]; FLOAT exp_arg[N]; FLOAT log_res[N]; FLOAT exp_res[N]; FLOAT pow_res[N]; int arch_check = 1; static void init_arg (void) { int i; CHECK_ARCH_EXT; arch_check = 0; for (i = 0; i < N; i += 1) { log_arg[i] = 1.0; exp_arg[i] = 0.0; } } int test_finite_alias (void) { int i; init_arg (); if (arch_check) return 77; #pragma omp simd for (i = 0; i < N; i += 1) { log_res[i] = FUNC (log) (log_arg[i]); exp_res[i] = FUNC (exp) (exp_arg[i]); pow_res[i] = FUNC (pow) (log_arg[i], log_arg[i]); } if (log_res[0] != 0.0) return 1; if (exp_res[0] != 1.0) return 1; if (pow_res[0] != 1.0) return 1; return 0; }
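The test above deliberately leaves N's companions FLOAT, FUNC and CHECK_ARCH_EXT undefined; they are expected from the including test wrapper. The following is a hypothetical, self-contained wrapper sketch (not glibc's actual build wiring) showing one plausible way those macros could be supplied for the double case; the empty CHECK_ARCH_EXT simply assumes the required vector ISA extension is present so that arch_check gets cleared and the test runs.

/* Hypothetical wrapper for illustration only; macro definitions are assumptions. */
#include <math.h>

#define FLOAT double
#define FUNC(name) name                  /* for the float variant this would pick name##f */
#define CHECK_ARCH_EXT do { } while (0)  /* assume the needed ISA extension is available */

#include "test-libmvec-alias-mod.c"

int main (void)
{
  /* 77 is the conventional "test skipped" status returned when arch_check stays set. */
  return test_finite_alias ();
}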
cones.c
#include "cones.h" #include "linalg.h" #include "scs.h" #include "scs_blas.h" /* contains BLAS(X) macros and type info */ #include "util.h" #define CONE_RATE (2) #define CONE_TOL (1e-8) #define CONE_THRESH (1e-6) #define EXP_CONE_MAX_ITERS (100) #define POW_CONE_MAX_ITERS (20) #ifdef USE_LAPACK void BLAS(syevr)(const char *jobz, const char *range, const char *uplo, blas_int *n, scs_float *a, blas_int *lda, scs_float *vl, scs_float *vu, blas_int *il, blas_int *iu, scs_float *abstol, blas_int *m, scs_float *w, scs_float *z, blas_int *ldz, blas_int *isuppz, scs_float *work, blas_int *lwork, blas_int *iwork, blas_int *liwork, blas_int *info); void BLAS(syr)(const char *uplo, const blas_int *n, const scs_float *alpha, const scs_float *x, const blas_int *incx, scs_float *a, const blas_int *lda); void BLAS(scal)(const blas_int *n, const scs_float *sa, scs_float *sx, const blas_int *incx); scs_float BLAS(nrm2)(const blas_int *n, scs_float *x, const blas_int *incx); #endif static scs_int get_sd_cone_size(scs_int s) { RETURN(s * (s + 1)) / 2; } /* * boundaries will contain array of indices of rows of A corresponding to * cone boundaries, boundaries[0] is starting index for cones of size strictly * larger than 1 * RETURNs length of boundaries array, boundaries malloc-ed here so should be * freed */ scs_int SCS(get_cone_boundaries)(const ScsCone *k, scs_int **boundaries) { scs_int i, count = 0; scs_int len = 1 + k->qsize + k->ssize + k->ed + k->ep + k->psize; scs_int *b = (scs_int *)scs_calloc(len, sizeof(scs_int)); b[count] = k->f + k->l; count += 1; if (k->qsize > 0) { memcpy(&b[count], k->q, k->qsize * sizeof(scs_int)); } count += k->qsize; for (i = 0; i < k->ssize; ++i) { b[count + i] = get_sd_cone_size(k->s[i]); } count += k->ssize; for (i = 0; i < k->ep + k->ed; ++i) { b[count + i] = 3; } count += k->ep + k->ed; for (i = 0; i < k->psize; ++i) { b[count + i] = 3; } count += k->psize; *boundaries = b; RETURN len; } static scs_int get_full_cone_dims(const ScsCone *k) { scs_int i, c = 0; if (k->f) { c += k->f; } if (k->l) { c += k->l; } if (k->qsize && k->q) { for (i = 0; i < k->qsize; ++i) { c += k->q[i]; } } if (k->ssize && k->s) { for (i = 0; i < k->ssize; ++i) { c += get_sd_cone_size(k->s[i]); } } if (k->ed) { c += 3 * k->ed; } if (k->ep) { c += 3 * k->ep; } if (k->p) { c += 3 * k->psize; } RETURN c; } scs_int SCS(validate_cones)(const ScsData *d, const ScsCone *k) { scs_int i; if (get_full_cone_dims(k) != d->m) { scs_printf("cone dimensions %li not equal to num rows in A = m = %li\n", (long)get_full_cone_dims(k), (long)d->m); RETURN - 1; } if (k->f && k->f < 0) { scs_printf("free cone error\n"); RETURN - 1; } if (k->l && k->l < 0) { scs_printf("lp cone error\n"); RETURN - 1; } if (k->qsize && k->q) { if (k->qsize < 0) { scs_printf("soc cone error\n"); RETURN - 1; } for (i = 0; i < k->qsize; ++i) { if (k->q[i] < 0) { scs_printf("soc cone error\n"); RETURN - 1; } } } if (k->ssize && k->s) { if (k->ssize < 0) { scs_printf("sd cone error\n"); RETURN - 1; } for (i = 0; i < k->ssize; ++i) { if (k->s[i] < 0) { scs_printf("sd cone error\n"); RETURN - 1; } } } if (k->ed && k->ed < 0) { scs_printf("ep cone error\n"); RETURN - 1; } if (k->ep && k->ep < 0) { scs_printf("ed cone error\n"); RETURN - 1; } if (k->psize && k->p) { if (k->psize < 0) { scs_printf("power cone error\n"); RETURN - 1; } for (i = 0; i < k->psize; ++i) { if (k->p[i] < -1 || k->p[i] > 1) { scs_printf("power cone error, values must be in [-1,1]\n"); RETURN - 1; } } } RETURN 0; } char *SCS(get_cone_summary)(const ScsInfo *info, 
ScsConeWork *c) { char *str = (char *)scs_malloc(sizeof(char) * 64); sprintf(str, "\tCones: avg projection time: %1.2es\n", c->total_cone_time / (info->iter + 1) / 1e3); c->total_cone_time = 0.0; RETURN str; } void SCS(finish_cone)(ScsConeWork *c) { DEBUG_FUNC #ifdef USE_LAPACK if (c->Xs) { scs_free(c->Xs); } if (c->Z) { scs_free(c->Z); } if (c->e) { scs_free(c->e); } if (c->work) { scs_free(c->work); } if (c->iwork) { scs_free(c->iwork); } #endif if (c) { scs_free(c); } RETURN; } char *SCS(get_cone_header)(const ScsCone *k) { char *tmp = (char *)scs_malloc(sizeof(char) * 512); scs_int i, soc_vars, soc_blks, sd_vars, sd_blks; sprintf(tmp, "Cones:"); if (k->f) { sprintf(tmp + strlen(tmp), "\tprimal zero / dual free vars: %li\n", (long)k->f); } if (k->l) { sprintf(tmp + strlen(tmp), "\tlinear vars: %li\n", (long)k->l); } soc_vars = 0; soc_blks = 0; if (k->qsize && k->q) { soc_blks = k->qsize; for (i = 0; i < k->qsize; i++) { soc_vars += k->q[i]; } sprintf(tmp + strlen(tmp), "\tsoc vars: %li, soc blks: %li\n", (long)soc_vars, (long)soc_blks); } sd_vars = 0; sd_blks = 0; if (k->ssize && k->s) { sd_blks = k->ssize; for (i = 0; i < k->ssize; i++) { sd_vars += get_sd_cone_size(k->s[i]); } sprintf(tmp + strlen(tmp), "\tsd vars: %li, sd blks: %li\n", (long)sd_vars, (long)sd_blks); } if (k->ep || k->ed) { sprintf(tmp + strlen(tmp), "\texp vars: %li, dual exp vars: %li\n", (long)(3 * k->ep), (long)(3 * k->ed)); } if (k->psize && k->p) { sprintf(tmp + strlen(tmp), "\tprimal + dual power vars: %li\n", (long)(3 * k->psize)); } RETURN tmp; } static scs_int is_simple_semi_definite_cone(scs_int *s, scs_int ssize) { scs_int i; for (i = 0; i < ssize; i++) { if (s[i] > 2) { RETURN 0; /* false */ } } RETURN 1; /* true */ } static scs_float exp_newton_one_d(scs_float rho, scs_float y_hat, scs_float z_hat) { scs_float t = MAX(-z_hat, 1e-6); scs_float f, fp; scs_int i; for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) { f = t * (t + z_hat) / rho / rho - y_hat / rho + log(t / rho) + 1; fp = (2 * t + z_hat) / rho / rho + 1 / t; t = t - f / fp; if (t <= -z_hat) { RETURN 0; } else if (t <= 0) { RETURN z_hat; } else if (ABS(f) < CONE_TOL) { break; } } RETURN t + z_hat; } static void exp_solve_for_x_with_rho(scs_float *v, scs_float *x, scs_float rho) { x[2] = exp_newton_one_d(rho, v[1], v[2]); x[1] = (x[2] - v[2]) * x[2] / rho; x[0] = v[0] - rho; } static scs_float exp_calc_grad(scs_float *v, scs_float *x, scs_float rho) { exp_solve_for_x_with_rho(v, x, rho); if (x[1] <= 1e-12) { RETURN x[0]; } RETURN x[0] + x[1] * log(x[1] / x[2]); } static void exp_get_rho_ub(scs_float *v, scs_float *x, scs_float *ub, scs_float *lb) { *lb = 0; *ub = 0.125; while (exp_calc_grad(v, x, *ub) > 0) { *lb = *ub; (*ub) *= 2; } } /* project onto the exponential cone, v has dimension *exactly* 3 */ static scs_int proj_exp_cone(scs_float *v) { scs_int i; scs_float ub, lb, rho, g, x[3]; scs_float r = v[0], s = v[1], t = v[2]; scs_float tol = CONE_TOL; /* iter < 0 ? 
CONE_TOL : MAX(CONE_TOL, 1 / POWF((iter + 1), CONE_RATE)); */ /* v in cl(Kexp) */ if ((s * exp(r / s) - t <= CONE_THRESH && s > 0) || (r <= 0 && s == 0 && t >= 0)) { RETURN 0; } /* -v in Kexp^* */ if ((-r < 0 && r * exp(s / r) + exp(1) * t <= CONE_THRESH) || (-r == 0 && -s >= 0 && -t >= 0)) { memset(v, 0, 3 * sizeof(scs_float)); RETURN 0; } /* special case with analytical solution */ if (r < 0 && s < 0) { v[1] = 0.0; v[2] = MAX(v[2], 0); RETURN 0; } /* iterative procedure to find projection, bisects on dual variable: */ exp_get_rho_ub(v, x, &ub, &lb); /* get starting upper and lower bounds */ for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) { rho = (ub + lb) / 2; /* halfway between upper and lower bounds */ g = exp_calc_grad(v, x, rho); /* calculates gradient wrt dual var */ if (g > 0) { lb = rho; } else { ub = rho; } if (ub - lb < tol) { break; } } /* #if EXTRA_VERBOSE > 0 scs_printf("exponential cone proj iters %i\n", i); #endif */ v[0] = x[0]; v[1] = x[1]; v[2] = x[2]; RETURN 0; } static scs_int set_up_sd_cone_work_space(ScsConeWork *c, const ScsCone *k) { #ifdef USE_LAPACK scs_int i; blas_int n_max = 0; scs_float eig_tol = 1e-8; blas_int neg_one = -1; blas_int m = 0; blas_int info = 0; scs_float wkopt = 0.0; #if EXTRA_VERBOSE > 0 #define _STR_EXPAND(tok) #tok #define _STR(tok) _STR_EXPAND(tok) scs_printf("BLAS(func) = '%s'\n", _STR(BLAS(func))); #endif /* eigenvector decomp workspace */ for (i = 0; i < k->ssize; ++i) { if (k->s[i] > n_max) { n_max = (blas_int)k->s[i]; } } c->Xs = (scs_float *)scs_calloc(n_max * n_max, sizeof(scs_float)); c->Z = (scs_float *)scs_calloc(n_max * n_max, sizeof(scs_float)); c->e = (scs_float *)scs_calloc(n_max, sizeof(scs_float)); c->liwork = 0; BLAS(syevr) ("Vectors", "All", "Lower", &n_max, c->Xs, &n_max, SCS_NULL, SCS_NULL, SCS_NULL, SCS_NULL, &eig_tol, &m, c->e, c->Z, &n_max, SCS_NULL, &wkopt, &neg_one, &(c->liwork), &neg_one, &info); if (info != 0) { scs_printf("FATAL: syevr failure, info = %li\n", (long)info); RETURN - 1; } c->lwork = (blas_int)(wkopt + 0.01); /* 0.01 for int casting safety */ c->work = (scs_float *)scs_calloc(c->lwork, sizeof(scs_float)); c->iwork = (blas_int *)scs_calloc(c->liwork, sizeof(blas_int)); if (!c->Xs || !c->Z || !c->e || !c->work || !c->iwork) { RETURN - 1; } RETURN 0; #else scs_printf( "FATAL: Cannot solve SDPs with > 2x2 matrices without linked " "blas+lapack libraries\n"); scs_printf( "Install blas+lapack and re-compile SCS with blas+lapack libray " "locations\n"); RETURN - 1; #endif } ScsConeWork *SCS(init_cone)(const ScsCone *k) { ScsConeWork *c = (ScsConeWork *)scs_calloc(1, sizeof(ScsConeWork)); #if EXTRA_VERBOSE > 0 scs_printf("init_cone\n"); #endif c->total_cone_time = 0.0; if (k->ssize && k->s) { if (!is_simple_semi_definite_cone(k->s, k->ssize) && set_up_sd_cone_work_space(c, k) < 0) { SCS(finish_cone)(c); RETURN SCS_NULL; } } #if EXTRA_VERBOSE > 0 scs_printf("init_cone complete\n"); #ifdef MATLAB_MEX_FILE mexEvalString("drawnow;"); #endif #endif RETURN c; } static scs_int project_2x2_sdc(scs_float *X) { scs_float a, b, d, l1, l2, x1, x2, rad; scs_float sqrt2 = SQRTF(2.0); a = X[0]; b = X[1] / sqrt2; d = X[2]; if (ABS(b) < 1e-6) { /* diagonal matrix */ X[0] = MAX(a, 0); X[1] = 0; X[2] = MAX(d, 0); RETURN 0; } rad = SQRTF((a - d) * (a - d) + 4 * b * b); /* l1 >= l2 always, since rad >= 0 */ l1 = 0.5 * (a + d + rad); l2 = 0.5 * (a + d - rad); #if EXTRA_VERBOSE > 0 scs_printf( "2x2 SD: a = %4f, b = %4f, (X[1] = %4f, X[2] = %4f), d = %4f, " "rad = %4f, l1 = %4f, l2 = %4f\n", a, b, X[1], X[2], d, rad, l1, l2); #endif if (l2 
>= 0) { /* both eigs positive already */ RETURN 0; } if (l1 <= 0) { /* both eigs negative, set to 0 */ X[0] = 0; X[1] = 0; X[2] = 0; RETURN 0; } /* l1 pos, l2 neg */ x1 = 1 / SQRTF(1 + (l1 - a) * (l1 - a) / b / b); x2 = x1 * (l1 - a) / b; X[0] = l1 * x1 * x1; X[1] = (l1 * x1 * x2) * sqrt2; X[2] = l1 * x2 * x2; RETURN 0; } /* size of X is get_sd_cone_size(n) */ static scs_int proj_semi_definite_cone(scs_float *X, const scs_int n, ScsConeWork *c) { /* project onto the positive semi-definite cone */ #ifdef USE_LAPACK scs_int i; blas_int one = 1; blas_int m = 0; blas_int nb = (blas_int)n; blas_int nb_plus_one = (blas_int)(n + 1); blas_int cone_sz = (blas_int)(get_sd_cone_size(n)); scs_float sqrt2 = SQRTF(2.0); scs_float sqrt2Inv = 1.0 / sqrt2; scs_float *Xs = c->Xs; scs_float *Z = c->Z; scs_float *e = c->e; scs_float *work = c->work; blas_int *iwork = c->iwork; blas_int lwork = c->lwork; blas_int liwork = c->liwork; scs_float eig_tol = CONE_TOL; /* iter < 0 ? CONE_TOL : MAX(CONE_TOL, 1 / POWF(iter + 1, CONE_RATE)); */ scs_float zero = 0.0; blas_int info = 0; scs_float vupper = 0.0; #endif if (n == 0) { RETURN 0; } if (n == 1) { if (X[0] < 0.0) { X[0] = 0.0; } RETURN 0; } if (n == 2) { RETURN project_2x2_sdc(X); } #ifdef USE_LAPACK memset(Xs, 0, n * n * sizeof(scs_float)); /* expand lower triangular matrix to full matrix */ for (i = 0; i < n; ++i) { memcpy(&(Xs[i * (n + 1)]), &(X[i * n - ((i - 1) * i) / 2]), (n - i) * sizeof(scs_float)); } /* rescale so projection works, and matrix norm preserved see http://www.seas.ucla.edu/~vandenbe/publications/mlbook.pdf pg 3 */ /* scale diags by sqrt(2) */ BLAS(scal)(&nb, &sqrt2, Xs, &nb_plus_one); /* not n_squared */ /* max-eig upper bounded by frobenius norm */ vupper = 1.1 * sqrt2 * BLAS(nrm2)(&cone_sz, X, &one); /* mult by factor to make sure is upper bound */ vupper = MAX(vupper, 0.01); #if EXTRA_VERBOSE > 0 SCS(print_array)(Xs, n * n, "Xs"); SCS(print_array)(X, get_sd_cone_size(n), "X"); #endif /* Solve eigenproblem, reuse workspaces */ BLAS(syevr) ("Vectors", "VInterval", "Lower", &nb, Xs, &nb, &zero, &vupper, SCS_NULL, SCS_NULL, &eig_tol, &m, e, Z, &nb, SCS_NULL, work, &lwork, iwork, &liwork, &info); #if EXTRA_VERBOSE > 0 if (info != 0) { scs_printf("WARN: LAPACK syevr error, info = %i\n", info); } scs_printf("syevr input parameter dump:\n"); scs_printf("nb = %li\n", (long)nb); scs_printf("lwork = %li\n", (long)lwork); scs_printf("liwork = %li\n", (long)liwork); scs_printf("vupper = %f\n", vupper); scs_printf("eig_tol = %e\n", eig_tol); SCS(print_array)(e, m, "e"); SCS(print_array)(Z, m * n, "Z"); #endif if (info < 0) { RETURN - 1; } memset(Xs, 0, n * n * sizeof(scs_float)); for (i = 0; i < m; ++i) { scs_float a = e[i]; BLAS(syr)("Lower", &nb, &a, &(Z[i * n]), &one, Xs, &nb); } /* scale diags by 1/sqrt(2) */ BLAS(scal)(&nb, &sqrt2Inv, Xs, &nb_plus_one); /* not n_squared */ /* extract just lower triangular matrix */ for (i = 0; i < n; ++i) { memcpy(&(X[i * n - ((i - 1) * i) / 2]), &(Xs[i * (n + 1)]), (n - i) * sizeof(scs_float)); } #if EXTRA_VERBOSE > 0 SCS(print_array)(Xs, n * n, "Xs"); SCS(print_array)(X, get_sd_cone_size(n), "X"); #endif #else scs_printf( "FAILURE: solving SDP with > 2x2 matrices, but no blas/lapack " "libraries were linked!\n"); scs_printf("SCS will RETURN nonsense!\n"); SCS(scale_array)(X, NAN, n); RETURN - 1; #endif RETURN 0; } static scs_float pow_calc_x(scs_float r, scs_float xh, scs_float rh, scs_float a) { scs_float x = 0.5 * (xh + SQRTF(xh * xh + 4 * a * (rh - r) * r)); RETURN MAX(x, 1e-12); } static scs_float 
pow_calcdxdr(scs_float x, scs_float xh, scs_float rh, scs_float r, scs_float a) { RETURN a *(rh - 2 * r) / (2 * x - xh); } static scs_float pow_calc_f(scs_float x, scs_float y, scs_float r, scs_float a) { RETURN POWF(x, a) * POWF(y, (1 - a)) - r; } static scs_float pow_calc_fp(scs_float x, scs_float y, scs_float dxdr, scs_float dydr, scs_float a) { RETURN POWF(x, a) * POWF(y, (1 - a)) * (a * dxdr / x + (1 - a) * dydr / y) - 1; } static void proj_power_cone(scs_float *v, scs_float a) { scs_float xh = v[0], yh = v[1], rh = ABS(v[2]); scs_float x = 0.0, y = 0.0, r; scs_int i; /* v in K_a */ if (xh >= 0 && yh >= 0 && CONE_THRESH + POWF(xh, a) * POWF(yh, (1 - a)) >= rh) { RETURN; } /* -v in K_a^* */ if (xh <= 0 && yh <= 0 && CONE_THRESH + POWF(-xh, a) * POWF(-yh, 1 - a) >= rh * POWF(a, a) * POWF(1 - a, 1 - a)) { v[0] = v[1] = v[2] = 0; RETURN; } r = rh / 2; for (i = 0; i < POW_CONE_MAX_ITERS; ++i) { scs_float f, fp, dxdr, dydr; x = pow_calc_x(r, xh, rh, a); y = pow_calc_x(r, yh, rh, 1 - a); f = pow_calc_f(x, y, r, a); if (ABS(f) < CONE_TOL) { break; } dxdr = pow_calcdxdr(x, xh, rh, r, a); dydr = pow_calcdxdr(y, yh, rh, r, (1 - a)); fp = pow_calc_fp(x, y, dxdr, dydr, a); r = MAX(r - f / fp, 0); r = MIN(r, rh); } v[0] = x; v[1] = y; v[2] = (v[2] < 0) ? -(r) : (r); } /* outward facing cone projection routine, iter is outer algorithm iteration, if iter < 0 then iter is ignored warm_start contains guess of projection (can be set to SCS_NULL) */ scs_int SCS(proj_dual_cone)(scs_float *x, const ScsCone *k, ScsConeWork *c, const scs_float *warm_start, scs_int iter) { DEBUG_FUNC scs_int i; scs_int count = (k->f ? k->f : 0); SCS(timer) cone_timer; #if EXTRA_VERBOSE > 0 SCS(timer) proj_timer; SCS(tic)(&proj_timer); #endif SCS(tic)(&cone_timer); if (k->l) { /* project onto positive orthant */ for (i = count; i < count + k->l; ++i) { if (x[i] < 0.0) { x[i] = 0.0; } /* x[i] = (x[i] < 0.0) ? 
0.0 : x[i]; */ } count += k->l; #if EXTRA_VERBOSE > 0 scs_printf("pos orthant proj time: %1.2es\n", SCS(tocq)(&proj_timer) / 1e3); SCS(tic)(&proj_timer); #endif } if (k->qsize && k->q) { /* project onto SOC */ for (i = 0; i < k->qsize; ++i) { if (k->q[i] == 0) { continue; } if (k->q[i] == 1) { if (x[count] < 0.0) { x[count] = 0.0; } } else { scs_float v1 = x[count]; scs_float s = SCS(norm)(&(x[count + 1]), k->q[i] - 1); scs_float alpha = (s + v1) / 2.0; if (s <= v1) { /* do nothing */ } else if (s <= -v1) { memset(&(x[count]), 0, k->q[i] * sizeof(scs_float)); } else { x[count] = alpha; SCS(scale_array)(&(x[count + 1]), alpha / s, k->q[i] - 1); } } count += k->q[i]; } #if EXTRA_VERBOSE > 0 scs_printf("SOC proj time: %1.2es\n", SCS(tocq)(&proj_timer) / 1e3); SCS(tic)(&proj_timer); #endif } if (k->ssize && k->s) { /* project onto PSD cone */ for (i = 0; i < k->ssize; ++i) { #if EXTRA_VERBOSE > 0 scs_printf("SD proj size %li\n", (long)k->s[i]); #endif if (k->s[i] == 0) { continue; } if (proj_semi_definite_cone(&(x[count]), k->s[i], c) < 0) { RETURN - 1; } count += get_sd_cone_size(k->s[i]); } #if EXTRA_VERBOSE > 0 scs_printf("SD proj time: %1.2es\n", SCS(tocq)(&proj_timer) / 1e3); SCS(tic)(&proj_timer); #endif } if (k->ep) { scs_float r, s, t; scs_int idx; /* * exponential cone is not self dual, if s \in K * then y \in K^* and so if K is the primal cone * here we project onto K^*, via Moreau * \Pi_C^*(y) = y + \Pi_C(-y) */ SCS(scale_array)(&(x[count]), -1, 3 * k->ep); /* x = -x; */ #ifdef _OPENMP #pragma omp parallel for private(r, s, t, idx) #endif for (i = 0; i < k->ep; ++i) { idx = count + 3 * i; r = x[idx]; s = x[idx + 1]; t = x[idx + 2]; proj_exp_cone(&(x[idx])); x[idx] -= r; x[idx + 1] -= s; x[idx + 2] -= t; } count += 3 * k->ep; #if EXTRA_VERBOSE > 0 scs_printf("EP proj time: %1.2es\n", SCS(tocq)(&proj_timer) / 1e3); SCS(tic)(&proj_timer); #endif } if (k->ed) { /* exponential cone: */ #ifdef _OPENMP #pragma omp parallel for #endif for (i = 0; i < k->ed; ++i) { proj_exp_cone(&(x[count + 3 * i])); } count += 3 * k->ed; #if EXTRA_VERBOSE > 0 scs_printf("ED proj time: %1.2es\n", SCS(tocq)(&proj_timer) / 1e3); SCS(tic)(&proj_timer); #endif } if (k->psize && k->p) { scs_float v[3]; scs_int idx; /* don't use openmp for power cone ifdef _OPENMP pragma omp parallel for private(v, idx) endif */ for (i = 0; i < k->psize; ++i) { idx = count + 3 * i; if (k->p[i] <= 0) { /* dual power cone */ proj_power_cone(&(x[idx]), -k->p[i]); } else { /* primal power cone, using Moreau */ v[0] = -x[idx]; v[1] = -x[idx + 1]; v[2] = -x[idx + 2]; proj_power_cone(v, k->p[i]); x[idx] += v[0]; x[idx + 1] += v[1]; x[idx + 2] += v[2]; } } count += 3 * k->psize; #if EXTRA_VERBOSE > 0 scs_printf("Power cone proj time: %1.2es\n", SCS(tocq)(&proj_timer) / 1e3); SCS(tic)(&proj_timer); #endif } /* project onto OTHER cones */ if (c) { c->total_cone_time += SCS(tocq)(&cone_timer); } RETURN 0; }
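The second-order cone branch of SCS(proj_dual_cone) above is easiest to see in isolation. This is a standalone sketch of that projection, i.e. projecting (z[0], z[1..n-1]) onto { (t, x) : ||x||_2 <= t }, written with plain C doubles instead of scs_float and SCS(norm) so it is self-contained; the three cases mirror the "do nothing" / "set to zero" / "scale onto the boundary" logic in the solver code.

#include <math.h>
#include <string.h>

/* Project z (length n) onto the second-order cone, in place. */
static void proj_soc(double *z, int n)
{
  if (n == 0) return;
  if (n == 1) { if (z[0] < 0.0) z[0] = 0.0; return; }

  double t = z[0];
  double s = 0.0;                       /* s = ||z[1..n-1]||_2 */
  for (int i = 1; i < n; ++i) s += z[i] * z[i];
  s = sqrt(s);

  if (s <= t) return;                   /* already inside the cone: keep as is */
  if (s <= -t) { memset(z, 0, n * sizeof(double)); return; }  /* in the polar cone: project to 0 */

  double alpha = 0.5 * (s + t);         /* otherwise the projection lies on the cone boundary */
  z[0] = alpha;
  for (int i = 1; i < n; ++i) z[i] *= alpha / s;
}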
mpm_search_element_utility.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ \. // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Bodhinanda Chandra // #ifndef KRATOS_MPM_SEARCH_ELEMENT_UTILITY #define KRATOS_MPM_SEARCH_ELEMENT_UTILITY // System includes // External includes // Project includes #include "includes/define.h" #include "utilities/binbased_fast_point_locator.h" #include "utilities/quadrature_points_utility.h" #include "particle_mechanics_application_variables.h" #include "geometries/geometry_shape_function_container.h" #include "custom_geometries/quadrature_point_partitioned_geometry.h" #include "geometries/geometry.h" #include "includes/model_part.h" #include "boost/geometry/geometry.hpp" #include "boost/geometry/geometries/register/point.hpp" #include "boost/geometry/geometries/register/ring.hpp" namespace Kratos { namespace MPMSearchElementUtility { // Standard types typedef std::size_t IndexType; typedef std::size_t SizeType; typedef Node<3> NodeType; typedef typename ModelPart::GeometryType GeometryType; // Boost Polygon typedef boost::geometry::model::point<double, 2, boost::geometry::cs::cartesian> Boost2DPointType; typedef boost::geometry::model::polygon<Boost2DPointType> Boost2DPolygonType; // Container types typedef typename GeometryShapeFunctionContainer<GeometryData::IntegrationMethod>::IntegrationPointsArrayType IntegrationPointsArrayType; typedef typename GeometryShapeFunctionContainer<GeometryData::IntegrationMethod>::IntegrationPointsContainerType IntegrationPointsContainerType; typedef typename GeometryShapeFunctionContainer<GeometryData::IntegrationMethod>::ShapeFunctionsValuesContainerType ShapeFunctionsValuesContainerType; typedef typename GeometryShapeFunctionContainer<GeometryData::IntegrationMethod>::ShapeFunctionsLocalGradientsContainerType ShapeFunctionsLocalGradientsContainerType; inline double CrossProductDet2D(array_1d<double, 3> VectorA, array_1d<double, 3> VectorB) { return (VectorA[0] * VectorB[1] - VectorB[0] * VectorA[1]); } inline bool CheckIsInside(const GeometryType& rGeom, array_1d<double, 3>& LocalCoords, const array_1d<double, 3>& Coords, const double Tolerance, const bool IsCalcLocalCoords = true) { bool is_inside = true; if (rGeom.Dimension() == 2) { is_inside = true; // Do walk around method Vector cross_products(rGeom.PointsNumber()); for (size_t i = 0; i < rGeom.PointsNumber(); ++i) { if (rGeom.Points()[i].Coordinates()[2] != 0.0) { return rGeom.IsInside(Coords, LocalCoords, Tolerance); break; } cross_products[i] = CrossProductDet2D(Coords - rGeom.Points()[i].Coordinates(), rGeom.Points()[(i + 1) % rGeom.PointsNumber()].Coordinates() - rGeom.Points()[i].Coordinates()); } for (size_t i = 1; i < cross_products.size(); ++i) { if (cross_products[i] * cross_products[0] < -std::abs(Tolerance)) { is_inside = false; break; } } } else return rGeom.IsInside(Coords, LocalCoords, Tolerance); if (is_inside) { if (IsCalcLocalCoords) return rGeom.IsInside(Coords, LocalCoords, Tolerance); else return true; } return false; } inline void ConstructNeighbourRelations(GeometryType& rGeom, const ModelPart& rBackgroundGridModelPart) { std::vector<typename Geometry<Node<3>>::Pointer> geometry_neighbours; for (IndexType j = 0; j < rBackgroundGridModelPart.NumberOfElements(); j++) { auto p_geometry_neighbour = (rBackgroundGridModelPart.ElementsBegin() + j)->pGetGeometry(); if (p_geometry_neighbour->Id() != rGeom.Id()) // dont add the parent as its own neighbour { for (IndexType n = 0; n 
< p_geometry_neighbour->size(); n++) { for (IndexType k = 0; k < rGeom.size(); k++) { if (rGeom[k].Id() == (*p_geometry_neighbour)[n].Id()) { // Prevent duplicate additions bool add_entry = true; for (size_t i = 0; i < geometry_neighbours.size(); i++) { if (geometry_neighbours[i]->Id() == p_geometry_neighbour->Id()) { add_entry = false; break; } } if (add_entry) { geometry_neighbours.push_back(p_geometry_neighbour); } break; } } } } } #pragma omp critical rGeom.SetValue(GEOMETRY_NEIGHBOURS, geometry_neighbours); } inline void CreateBoundingBoxPoints(std::vector<array_1d<double, 3>>& rPointVector, const array_1d<double, 3>& rCenter, const double SideHalfLength, const SizeType WorkingDim) { KRATOS_TRY if (WorkingDim == 2) { if (rPointVector.size() != 4) rPointVector.resize(4); for (size_t i = 0; i < 4; ++i) { rPointVector[i].clear(); rPointVector[i] += rCenter; } rPointVector[0][0] -= SideHalfLength; rPointVector[1][0] += SideHalfLength; rPointVector[2][0] += SideHalfLength; rPointVector[3][0] -= SideHalfLength; rPointVector[0][1] -= SideHalfLength; rPointVector[1][1] -= SideHalfLength; rPointVector[2][1] += SideHalfLength; rPointVector[3][1] += SideHalfLength; } else { if (rPointVector.size() != 8) rPointVector.resize(8); for (size_t i = 0; i < 8; ++i) { rPointVector[i].clear(); rPointVector[i] += rCenter; } rPointVector[0][0] -= SideHalfLength; rPointVector[1][0] += SideHalfLength; rPointVector[2][0] += SideHalfLength; rPointVector[3][0] -= SideHalfLength; rPointVector[4][0] -= SideHalfLength; rPointVector[5][0] += SideHalfLength; rPointVector[6][0] += SideHalfLength; rPointVector[7][0] -= SideHalfLength; rPointVector[0][1] -= SideHalfLength; rPointVector[1][1] -= SideHalfLength; rPointVector[2][1] += SideHalfLength; rPointVector[3][1] += SideHalfLength; rPointVector[4][1] -= SideHalfLength; rPointVector[5][1] -= SideHalfLength; rPointVector[6][1] += SideHalfLength; rPointVector[7][1] += SideHalfLength; rPointVector[0][2] -= SideHalfLength; rPointVector[1][2] -= SideHalfLength; rPointVector[2][2] -= SideHalfLength; rPointVector[3][2] -= SideHalfLength; rPointVector[4][2] += SideHalfLength; rPointVector[5][2] += SideHalfLength; rPointVector[6][2] += SideHalfLength; rPointVector[7][2] += SideHalfLength; } KRATOS_CATCH("") } inline void CheckPQMPM(IntegrationPointsArrayType& rIntergrationSubPoints, const double Tolerance, const Matrix& rN, const DenseVector<Matrix>& rDN_De) { KRATOS_TRY if (rIntergrationSubPoints.size() != rN.size1()) { KRATOS_INFO("MPMSearchElementUtility::Check - ") << "Shape function rows must equal number of sub-points!"; KRATOS_ERROR << "ERROR"; } for (size_t i = 0; i < rIntergrationSubPoints.size(); ++i) { if (rIntergrationSubPoints[i].Weight() < Tolerance) { KRATOS_INFO("MPMSearchElementUtility::Check - ") << "Volume fraction of sub-points is too small!"; KRATOS_ERROR << "ERROR"; } if (rIntergrationSubPoints[i].Weight() > 1.0) { KRATOS_INFO("MPMSearchElementUtility::Check - ") << "Volume fraction of sub-points is too large!"; KRATOS_ERROR << "ERROR"; } } for (size_t j = 0; j < rN.size2(); ++j) { SizeType nonzero_entries = 0; for (size_t i = 0; i < rIntergrationSubPoints.size(); i++) if (rN(i, j) > 0.0) nonzero_entries += 1; if (nonzero_entries != 1) { KRATOS_INFO("MPMSearchElementUtility::Check - ") << "There must be only one nonzero entry per shape function column!" 
<< "\nrN = " << rN; KRATOS_ERROR << "ERROR"; } } KRATOS_CATCH("") } inline bool CheckAllPointsAreInGeom( const std::vector<array_1d<double, 3>>& rPoints, const GeometryType& rReferenceGeom, const double Tolerance) { KRATOS_TRY array_1d<double, 3> dummy_local_coords; for (size_t i = 0; i < rPoints.size(); ++i) { if (!CheckIsInside(rReferenceGeom, dummy_local_coords, rPoints[i], Tolerance, false)) { return false; } } return true; KRATOS_CATCH("") } inline void Check3DBackGroundMeshIsCubicAxisAligned(const std::vector<GeometryType*>& rIntersectedGeometries) { KRATOS_TRY NodeType point_low, point_high; for (size_t i = 0; i < rIntersectedGeometries.size(); ++i) { if (rIntersectedGeometries[i]->GetGeometryType() != GeometryData::Kratos_Hexahedra3D8) { #pragma omp single KRATOS_ERROR << "MPMSearchElementUtility::Check3DBackGroundMeshIsCubicAxisAligned - " << "3D PQMPM CAN ONLY BE USED FOR AXIS-ALIGNED RECTANGULAR-PRISM BACKGROUND GRIDS" << std::endl; } rIntersectedGeometries[i]->BoundingBox(point_low, point_high); for (size_t j = 0; j < rIntersectedGeometries[i]->PointsNumber(); ++j) { for (size_t k = 0; k < 3; ++k) { if (rIntersectedGeometries[i]->GetPoint(j).Coordinates()[k] != point_low[k]) { if (rIntersectedGeometries[i]->GetPoint(j).Coordinates()[k] != point_high[k]) { #pragma omp single KRATOS_ERROR << "MPMSearchElementUtility::Check3DBackGroundMeshIsCubicAxisAligned - " << "3D PQMPM CAN ONLY BE USED FOR AXIS-ALIGNED RECTANGULAR-PRISM BACKGROUND GRIDS" << std::endl; } } } } } KRATOS_CATCH("") } inline Boost2DPolygonType Create2DPolygonBoundingSquareFromPointsFast(const std::vector<array_1d<double, 3>>& rPoints, const bool XActive = true, const bool YActive = true, const bool ZActive = false) { KRATOS_TRY Boost2DPolygonType rPolygon; std::vector<Boost2DPointType> rPolygonPoints(5); if (!XActive || !YActive || ZActive) if (rPoints.size() != 8) { KRATOS_INFO("MPMSearchElementUtility::Create2DPolygonBoundingSquareFromPointsFast - ") << "ALL BOUNDING SQUARES SHOULD BE CONSTRUCTED IN XY SPACE EXCEPT FOR HEX BACKGROUND GRID\n"; KRATOS_ERROR << "ERROR"; } if (XActive && YActive && !ZActive) { for (size_t i = 0; i < 4; ++i) { rPolygonPoints[i] = Boost2DPointType(rPoints[i][0], rPoints[i][1]); } } else if (!XActive && YActive && ZActive) // 3D case only! 
{ rPolygonPoints[0] = Boost2DPointType(rPoints[0][1], rPoints[0][2]); rPolygonPoints[1] = Boost2DPointType(rPoints[4][1], rPoints[4][2]); rPolygonPoints[2] = Boost2DPointType(rPoints[7][1], rPoints[7][2]); rPolygonPoints[3] = Boost2DPointType(rPoints[3][1], rPoints[3][2]); // as per Hexahedra3D8 node ordering } else if (XActive && !YActive && ZActive) { rPolygonPoints[0] = Boost2DPointType(rPoints[0][0], rPoints[0][2]); rPolygonPoints[1] = Boost2DPointType(rPoints[1][0], rPoints[1][2]); rPolygonPoints[2] = Boost2DPointType(rPoints[5][0], rPoints[5][2]); rPolygonPoints[3] = Boost2DPointType(rPoints[4][0], rPoints[4][2]); } else { KRATOS_INFO("MPMSearchElementUtility::Create2DPolygonBoundingSquareFromPointsFast - ") << "INVALID PLANE TO MAKE 2D POLYGON IN\n"; KRATOS_ERROR << "ERROR"; } rPolygonPoints[4] = rPolygonPoints[0]; rPolygon.outer().assign(rPolygonPoints.begin(), rPolygonPoints.end()); boost::geometry::correct(rPolygon); // to close the polygon return rPolygon; KRATOS_CATCH("") } inline Boost2DPolygonType Create2DPolygonFromGeometryFast(const GeometryType& rGeom, const bool XActive = true, const bool YActive = true, const bool ZActive = false) { KRATOS_TRY Boost2DPolygonType rPolygon; if (rGeom.WorkingSpaceDimension() == 3) { std::vector<Boost2DPointType> rPolygonPoints(5); NodeType point_low, point_high; rGeom.BoundingBox(point_low, point_high); if (XActive && YActive && !ZActive) { rPolygonPoints[0] = Boost2DPointType(point_low[0], point_low[1]); rPolygonPoints[1] = Boost2DPointType(point_high[0], point_low[1]); rPolygonPoints[2] = Boost2DPointType(point_high[0], point_high[1]); rPolygonPoints[3] = Boost2DPointType(point_low[0], point_high[1]); } else if (XActive && !YActive && ZActive) { rPolygonPoints[0] = Boost2DPointType(point_low[0], point_low[2]); rPolygonPoints[1] = Boost2DPointType(point_high[0], point_low[2]); rPolygonPoints[2] = Boost2DPointType(point_high[0], point_high[2]); rPolygonPoints[3] = Boost2DPointType(point_low[0], point_high[2]); } else if (!XActive && YActive && ZActive) { rPolygonPoints[0] = Boost2DPointType(point_low[1], point_low[2]); rPolygonPoints[1] = Boost2DPointType(point_high[1], point_low[2]); rPolygonPoints[2] = Boost2DPointType(point_high[1], point_high[2]); rPolygonPoints[3] = Boost2DPointType(point_low[1], point_high[2]); } else { KRATOS_INFO("MPMSearchElementUtility::Create2DPolygonFromGeometryFast - ") << "INVALID PLANE TO MAKE 2D POLYGON IN\n"; KRATOS_ERROR << "ERROR"; } rPolygonPoints[4] = rPolygonPoints[0]; rPolygon.outer().assign(rPolygonPoints.begin(), rPolygonPoints.end()); } else { std::vector<Boost2DPointType> rPolygonPoints(rGeom.PointsNumber() + 1); for (size_t i = 0; i < rGeom.PointsNumber(); ++i) { rPolygonPoints[i] = Boost2DPointType(rGeom.GetPoint(i).X(), rGeom.GetPoint(i).Y()); } rPolygonPoints[rGeom.PointsNumber()] = rPolygonPoints[0]; rPolygon.outer().assign(rPolygonPoints.begin(), rPolygonPoints.end()); } boost::geometry::correct(rPolygon); // to close the polygon return rPolygon; KRATOS_CATCH("") } inline IntegrationPoint<3> CreateSubPoint(const array_1d<double, 3>& rGlobalCoords, const double rVolumeFraction, const GeometryType& rBackgroundGridElementGeom, Vector& rN, Matrix& rDN_De) { KRATOS_TRY array_1d<double, 3> local_coordinates; rBackgroundGridElementGeom.PointLocalCoordinates(local_coordinates, rGlobalCoords); rBackgroundGridElementGeom.ShapeFunctionsValues(rN, local_coordinates); rBackgroundGridElementGeom.ShapeFunctionsLocalGradients(rDN_De, local_coordinates); return IntegrationPoint<3>(local_coordinates, 
rVolumeFraction); KRATOS_CATCH("") } inline void Determine2DSubPoint(const GeometryType& rGridElement, const std::vector<array_1d<double, 3>>& rMasterDomainPoints, array_1d<double, 3>& rSubPointCoord, double& rSubPointVolume) { KRATOS_TRY // make boost polygon of current background element geometry Boost2DPolygonType polygon_grid = Create2DPolygonFromGeometryFast(rGridElement); // make boost polygon of bounding box Boost2DPolygonType polygon_box = Create2DPolygonBoundingSquareFromPointsFast(rMasterDomainPoints); // make boost polygon result container std::vector<Boost2DPolygonType> polygon_result_container; // reset accumulated quantities rSubPointVolume = 0.0; rSubPointCoord.clear(); Boost2DPointType centroid_result; // accumulate result over intersected sub-polygons if (boost::geometry::intersection(polygon_grid, polygon_box, polygon_result_container)) { for (auto& polygon_result : polygon_result_container) { rSubPointVolume += boost::geometry::area(polygon_result); boost::geometry::centroid(polygon_result, centroid_result); rSubPointCoord[0] += centroid_result.get<0>(); rSubPointCoord[1] += centroid_result.get<1>(); } } else { KRATOS_INFO("MPMSearchElementUtility::Determine2DSubPoint - ") << "BOOST INTERSECTION FAILED ALTHOUGH KRATOS INTERSECTION WORKED\n"; KRATOS_ERROR << "ERROR"; } rSubPointCoord /= double(polygon_result_container.size()); KRATOS_CATCH("") } inline void Determine3DSubPoint(const GeometryType& rGridElement, const std::vector<array_1d<double, 3>>& rMasterDomainPoints, array_1d<double, 3>& rSubPointCoord, double& rSubPointVolume) { KRATOS_TRY // NOTE: THIS FUNCTION ASSUMES THE BACKGROUND GRID ELEMENT IS PERFECTLY CUBIC // AND THE RESULTING INTERSECTION VOLUME IS A RECTANGULAR PRISM // make boost xy polygon of current background element geometry Boost2DPolygonType polygon_grid_xy = Create2DPolygonFromGeometryFast(rGridElement); // make boost yz polygon of current background element geometry Boost2DPolygonType polygon_grid_yz = Create2DPolygonFromGeometryFast(rGridElement, false, true, true); // make boost xy polygon of bounding box Boost2DPolygonType polygon_box_xy = Create2DPolygonBoundingSquareFromPointsFast(rMasterDomainPoints); // make boost yz polygon of bounding box Boost2DPolygonType polygon_box_yz = Create2DPolygonBoundingSquareFromPointsFast(rMasterDomainPoints, false, true, true); // make boost polygon result container std::vector<Boost2DPolygonType> polygon_xy_result_container; std::vector<Boost2DPolygonType> polygon_yz_result_container; // reset accumulated quantities double sub_volume_area = 0.0; rSubPointCoord.clear(); Boost2DPointType centroid_result; // Determine area and x y coordinates from xy polygons if (boost::geometry::intersection(polygon_grid_xy, polygon_box_xy, polygon_xy_result_container)) { for (auto& polygon_result : polygon_xy_result_container) { sub_volume_area += boost::geometry::area(polygon_result); boost::geometry::centroid(polygon_result, centroid_result); rSubPointCoord[0] += centroid_result.get<0>(); rSubPointCoord[1] += centroid_result.get<1>(); } } else { KRATOS_INFO("MPMSearchElementUtility::Determine3DSubPoint - ") << "BOOST INTERSECTION FAILED ALTHOUGH KRATOS INTERSECTION WORKED\n"; KRATOS_ERROR << "ERROR"; } rSubPointCoord /= double(polygon_xy_result_container.size()); // at the moment this is just the xy coords! 
// Perform yz polygon intersection to determine depth and z-position of sub-point // local x = global y // local y = global z array_1d<double, 2> sub_point_z_coord = ZeroVector(2); bool is_initialized = false; double min_z = 0.0; double max_z = 0.0; if (boost::geometry::intersection(polygon_grid_yz, polygon_box_yz, polygon_yz_result_container)) { for (auto& polygon_result : polygon_yz_result_container) { for (auto& result_point : polygon_result.outer()) { if (!is_initialized) { min_z = result_point.get<1>(); max_z = result_point.get<1>(); is_initialized = true; } else if (result_point.get<1>() < min_z) min_z = result_point.get<1>(); else if (result_point.get<1>() > max_z) max_z = result_point.get<1>(); } } } else { KRATOS_INFO("MPMSearchElementUtility::Determine3DSubPoint - ") << "BOOST INTERSECTION FAILED ALTHOUGH KRATOS INTERSECTION WORKED\n"; KRATOS_ERROR << "ERROR"; } rSubPointCoord[2] = 0.5 * (min_z + max_z); rSubPointVolume = sub_volume_area * (max_z - min_z); KRATOS_CATCH("") } inline typename Geometry<Node<3>>::Pointer CreateCustomQuadraturePoint( SizeType WorkingSpaceDimension, SizeType LocalSpaceDimension, GeometryShapeFunctionContainer<GeometryData::IntegrationMethod>& rShapeFunctionContainer, typename Geometry<Node<3>>::PointsArrayType rPoints, GeometryType* pGeometryParent) { KRATOS_TRY if (WorkingSpaceDimension == 1 && LocalSpaceDimension == 1) return Kratos::make_shared< QuadraturePointPartitionedGeometry<Node<3>, 1>>( rPoints, rShapeFunctionContainer, pGeometryParent); else if (WorkingSpaceDimension == 2 && LocalSpaceDimension == 1) return Kratos::make_shared< QuadraturePointPartitionedGeometry<Node<3>, 2, 1>>( rPoints, rShapeFunctionContainer, pGeometryParent); else if (WorkingSpaceDimension == 2 && LocalSpaceDimension == 2) return Kratos::make_shared< QuadraturePointPartitionedGeometry<Node<3>, 2>>( rPoints, rShapeFunctionContainer, pGeometryParent); else if (WorkingSpaceDimension == 3 && LocalSpaceDimension == 2) return Kratos::make_shared< QuadraturePointPartitionedGeometry<Node<3>, 3, 2>>( rPoints, rShapeFunctionContainer, pGeometryParent); else if (WorkingSpaceDimension == 3 && LocalSpaceDimension == 3) return Kratos::make_shared< QuadraturePointPartitionedGeometry<Node<3>, 3>>( rPoints, rShapeFunctionContainer, pGeometryParent); else { KRATOS_ERROR << "Working/Local space dimension combinations are " << "not provided for QuadraturePointGeometry. WorkingSpaceDimension: " << WorkingSpaceDimension << ", LocalSpaceDimension: " << LocalSpaceDimension << std::endl; } KRATOS_CATCH("") } inline bool DetermineIfDomainOverlapsBoundaryConditions(std::vector<GeometryType*>& IntersectedGeometries, const array_1d<double, 3>& rCoordinates, const double SideHalfLength) { const double reach = (IntersectedGeometries[0]->WorkingSpaceDimension() == 3) ? 
1.7321 * SideHalfLength : 1.414214 * SideHalfLength; for (size_t i = 0; i < IntersectedGeometries.size(); ++i) { for (size_t j = 0; j < IntersectedGeometries[i]->PointsNumber(); ++j) { auto node_it = IntersectedGeometries[i]->pGetPoint(j); bool is_fixed = false; if (node_it->IsFixed(DISPLACEMENT_X)) is_fixed = true; else if (node_it->IsFixed(DISPLACEMENT_Y)) is_fixed = true; else if (node_it->HasDofFor(DISPLACEMENT_Z)) if (node_it->IsFixed(DISPLACEMENT_Z)) is_fixed = true; if (is_fixed) { const double fixed_point_to_cog = norm_2(node_it->Coordinates() - rCoordinates); if (fixed_point_to_cog <= reach) return true; } } } return false; } inline bool IntersectionCheckWithBoundingBox(const GeometryType& rGeom, const array_1d<double, 3>& rCoord, const double SideHalfLength) { const double z_coord = (rGeom.WorkingSpaceDimension() == 3) ? SideHalfLength : 0.0; const Point point_low(rCoord[0] - SideHalfLength, rCoord[1] - SideHalfLength, rCoord[2] - z_coord); const Point point_high(rCoord[0] + SideHalfLength, rCoord[1] + SideHalfLength, rCoord[2] + z_coord); NodeType ele_point_low, ele_point_high; const double dimension_45_degree_factor = (rGeom.WorkingSpaceDimension() == 3) ? 1.7321 : 1.414214; double center_to_center = norm_2(rGeom.Center() - rCoord); rGeom.BoundingBox(ele_point_low, ele_point_high); double maximum_contact_range = dimension_45_degree_factor * SideHalfLength + norm_2(ele_point_high - ele_point_low); if (center_to_center <= maximum_contact_range) return true; return false; } inline void RecursivePQMPMNeighbourSearch(const ModelPart& rBackgroundGridModelPart, std::vector<GeometryType*>& rIntersectedGeometries, const Point& rPointLow, const Point& rPointHigh, IndexType& RecursionCount, const array_1d<double, 3>& rCoordinates, const double SideHalfLength, const SizeType MaxRecursions = 100) { RecursionCount += 1; if (RecursionCount < MaxRecursions) { if (!rIntersectedGeometries.back()->Has(GEOMETRY_NEIGHBOURS)) ConstructNeighbourRelations(*rIntersectedGeometries.back(), rBackgroundGridModelPart); auto& geometry_neighbours = rIntersectedGeometries.back()->GetValue(GEOMETRY_NEIGHBOURS); bool check_geom; for (size_t i = 0; i < geometry_neighbours.size(); ++i) { // dont check elements we have already intersected with check_geom = true; for (size_t j = 0; j < rIntersectedGeometries.size(); ++j) { if (geometry_neighbours[i]->Id() == rIntersectedGeometries[j]->Id()) { check_geom = false; break; } } if (check_geom) { // check if this background grid and the MP domain overlap if (IntersectionCheckWithBoundingBox(*geometry_neighbours[i], rCoordinates, SideHalfLength)) { if (geometry_neighbours[i]->HasIntersection(rPointLow, rPointHigh)) { // add to container and then search its neighbours rIntersectedGeometries.push_back(geometry_neighbours[i].get()); RecursivePQMPMNeighbourSearch(rBackgroundGridModelPart, rIntersectedGeometries, rPointLow, rPointHigh, RecursionCount, rCoordinates, SideHalfLength, MaxRecursions); } } } } } else KRATOS_INFO("RecursivePQMPMNeighbourSearch:: ") << "Recursion count of " << MaxRecursions << " exceeded\n" << std::endl; } inline bool CheckFixedNodesWithinBoundingBox(const PointerVector<Node<3>>& rNodesList, const Point& rPointHigh, const Point& rPointLow, const SizeType WorkingDim) { for (auto& node_it : rNodesList) { bool is_fixed = false; if (node_it.IsFixed(DISPLACEMENT_X)) is_fixed = true; else if (node_it.IsFixed(DISPLACEMENT_Y)) is_fixed = true; else if (WorkingDim == 3) if (node_it.IsFixed(DISPLACEMENT_Z)) is_fixed = true; if (is_fixed) { bool is_inside = true; 
for (size_t i = 0; i < WorkingDim; i++) { if (rPointLow.Coordinates()[i] <= node_it.Coordinates()[i] && node_it.Coordinates()[i] <= rPointHigh.Coordinates()[i]) { // we are inside for this dimension } else { is_inside = false; break; } } if (is_inside) return true; } } return false; } inline void PartitionMasterMaterialPointsIntoSubPoints(const ModelPart& rBackgroundGridModelPart, const array_1d<double, 3>& rCoordinates, const array_1d<double, 3>& rLocalCoords, Element& rMasterMaterialPoint, typename GeometryType::Pointer pQuadraturePointGeometry, const double Tolerance) { KRATOS_TRY; GeometryType& rParentGeom = pQuadraturePointGeometry->GetGeometryParent(0); // If axisymmetric make normal MP if (rBackgroundGridModelPart.GetProcessInfo().Has(IS_AXISYMMETRIC)) { if (rBackgroundGridModelPart.GetProcessInfo().GetValue(IS_AXISYMMETRIC)) { CreateQuadraturePointsUtility<Node<3>>::UpdateFromLocalCoordinates( pQuadraturePointGeometry, rLocalCoords, rMasterMaterialPoint.GetGeometry().IntegrationPoints()[0].Weight(), rParentGeom); return; } } const SizeType working_dim = rParentGeom.WorkingSpaceDimension(); const double pqmpm_min_fraction = (rBackgroundGridModelPart.GetProcessInfo().Has(PQMPM_SUBPOINT_MIN_VOLUME_FRACTION)) ? std::max(rBackgroundGridModelPart.GetProcessInfo()[PQMPM_SUBPOINT_MIN_VOLUME_FRACTION], std::numeric_limits<double>::epsilon()) : std::numeric_limits<double>::epsilon(); // Get volume and set up master domain bounding points std::vector<double> mp_volume_vec; rMasterMaterialPoint.CalculateOnIntegrationPoints(MP_VOLUME, mp_volume_vec, rBackgroundGridModelPart.GetProcessInfo()); if (rBackgroundGridModelPart.GetProcessInfo()[DOMAIN_SIZE] == 2 && rMasterMaterialPoint.GetProperties().Has(THICKNESS)) mp_volume_vec[0] /= rMasterMaterialPoint.GetProperties()[THICKNESS]; const double side_half_length = std::pow(mp_volume_vec[0], 1.0 / double(working_dim)) / 2.0; std::vector<array_1d<double, 3>> master_domain_points(std::pow(2.0, working_dim)); CreateBoundingBoxPoints(master_domain_points, rCoordinates, side_half_length, working_dim); // Initially check if the bounding box volume scalar is less than the element volume scalar if (mp_volume_vec[0] <= rParentGeom.DomainSize()) { if (CheckAllPointsAreInGeom(master_domain_points, rParentGeom, Tolerance)) { CreateQuadraturePointsUtility<Node<3>>::UpdateFromLocalCoordinates( pQuadraturePointGeometry, rLocalCoords, rMasterMaterialPoint.GetGeometry().IntegrationPoints()[0].Weight(), rParentGeom); return; } } // we need to do splitting. 
Initially determine all grid elements we intersect with Point point_low(rCoordinates[0] - side_half_length, rCoordinates[1] - side_half_length, rCoordinates[2]); if (working_dim == 3) point_low[2] -= side_half_length; Point point_high(rCoordinates[0] + side_half_length, rCoordinates[1] + side_half_length, rCoordinates[2]); if (working_dim == 3) point_high[2] += side_half_length; SizeType number_of_nodes = 0; std::vector<GeometryType*> intersected_geometries; // Do neighbour searching to determine the intersected geometries IndexType recursion_count = 0; intersected_geometries.push_back(&rParentGeom); RecursivePQMPMNeighbourSearch(rBackgroundGridModelPart, intersected_geometries, point_low , point_high , recursion_count, rCoordinates, side_half_length); for (size_t i = 0; i < intersected_geometries.size(); ++i) number_of_nodes += intersected_geometries[i]->PointsNumber(); // If we are 3D, check background mesh are axis-aligned perfect rectangular prisms if (working_dim == 3) Check3DBackGroundMeshIsCubicAxisAligned(intersected_geometries); // Prepare containers to hold all sub-points const SizeType number_of_sub_material_points = intersected_geometries.size(); PointerVector<Node<3>> nodes_list(number_of_nodes); IntegrationPointsArrayType ips(number_of_sub_material_points); Matrix N_matrix(number_of_sub_material_points, number_of_nodes, -1.0); DenseVector<Matrix> DN_De_vector(number_of_sub_material_points); // Temporary local containers double sub_point_volume; array_1d<double, 3> sub_point_position; IndexType active_node_index = 0; IndexType active_subpoint_index = 0; // Loop over all intersected grid elements and make subpoints in each for (size_t i = 0; i < number_of_sub_material_points; ++i) { Matrix DN_De(intersected_geometries[i]->PointsNumber(), working_dim); Vector N(intersected_geometries[i]->PointsNumber()); sub_point_position.clear(); sub_point_volume = 0.0; IntegrationPoint<3> trial_subpoint; if (working_dim == 2) { Determine2DSubPoint(*intersected_geometries[i], master_domain_points, sub_point_position, sub_point_volume); sub_point_position[2] = rCoordinates[2]; // set z coord of sub point to that of the master } else Determine3DSubPoint(*intersected_geometries[i], master_domain_points, sub_point_position, sub_point_volume); trial_subpoint = CreateSubPoint(sub_point_position, sub_point_volume / mp_volume_vec[0], *intersected_geometries[i], N, DN_De); // Transfer local data to containers if (trial_subpoint.Weight() > pqmpm_min_fraction) { ips[active_subpoint_index] = trial_subpoint; DN_De_vector[active_subpoint_index] = DN_De; for (size_t j = 0; j < N.size(); ++j) { N_matrix(active_subpoint_index, active_node_index) = N[j]; nodes_list(active_node_index) = intersected_geometries[i]->pGetPoint(j); active_node_index += 1; } active_subpoint_index += 1; } } if (active_subpoint_index == 1) { CreateQuadraturePointsUtility<Node<3>>::UpdateFromLocalCoordinates( pQuadraturePointGeometry, rLocalCoords, rMasterMaterialPoint.GetGeometry().IntegrationPoints()[0].Weight(), rParentGeom); return; } IntegrationPointsArrayType ips_active(active_subpoint_index); PointerVector<Node<3>> nodes_list_active(active_node_index); if (ips_active.size() == ips.size()) { ips_active = ips; nodes_list_active = nodes_list; } else { N_matrix.resize(active_subpoint_index, active_node_index, true); DN_De_vector.resize(active_subpoint_index, true); for (size_t i = 0; i < active_subpoint_index; ++i) ips_active[i] = ips[i]; for (size_t i = 0; i < active_node_index; ++i) nodes_list_active(i) = nodes_list(i); } // check 
if there are any fixed nodes within the bounding box if (CheckFixedNodesWithinBoundingBox(nodes_list_active, point_high, point_low, working_dim)) { CreateQuadraturePointsUtility<Node<3>>::UpdateFromLocalCoordinates( pQuadraturePointGeometry, rLocalCoords, rMasterMaterialPoint.GetGeometry().IntegrationPoints()[0].Weight(), rParentGeom); return; } // Check volume fractions sum to unity double vol_sum = 0.0; for (size_t i = 0; i < ips_active.size(); ++i) vol_sum += ips_active[i].Weight(); if (std::abs(vol_sum - 1.0) > Tolerance) { const bool is_pqmpm_fallback = (rBackgroundGridModelPart.GetProcessInfo().Has(IS_MAKE_NORMAL_MP_IF_PQMPM_FAILS)) ? rBackgroundGridModelPart.GetProcessInfo().GetValue(IS_MAKE_NORMAL_MP_IF_PQMPM_FAILS) : false; if (is_pqmpm_fallback) { CreateQuadraturePointsUtility<Node<3>>::UpdateFromLocalCoordinates( pQuadraturePointGeometry, rLocalCoords, rMasterMaterialPoint.GetGeometry().IntegrationPoints()[0].Weight(), rParentGeom); return; } else { #pragma omp critical KRATOS_INFO("MPMSearchElementUtility::Check") << "Volume fraction of sub-points does not approximately sum to 1.0." << " This probably means the background grid is not big enough." << "\nPosition = " << rCoordinates << "\nNumber of active sub points = " << ips_active.size() << "\nNumber of trial sub points = " << ips.size() << "\nMaterial point volume = " << mp_volume_vec[0] << "\nTotal volume fraction = " << vol_sum << "\nIndividual volume fractions:\n"; for (size_t i = 0; i < ips_active.size(); ++i) std::cout << "\t" << ips_active[i].Weight() << "\t\t" << ips_active[i].Coordinates() << std::endl; KRATOS_ERROR << "ERROR"; } } else CheckPQMPM(ips_active, std::numeric_limits<double>::epsilon(), N_matrix, DN_De_vector); // Transfer data over GeometryData::IntegrationMethod ThisDefaultMethod = pQuadraturePointGeometry->GetDefaultIntegrationMethod(); IntegrationPointsContainerType ips_container; ips_container[ThisDefaultMethod] = ips_active; ShapeFunctionsValuesContainerType shape_function_container; shape_function_container[ThisDefaultMethod] = N_matrix; ShapeFunctionsLocalGradientsContainerType shape_function_derivatives_container; shape_function_derivatives_container[ThisDefaultMethod] = DN_De_vector; GeometryShapeFunctionContainer<GeometryData::IntegrationMethod> data_container( ThisDefaultMethod, ips_container, shape_function_container, shape_function_derivatives_container); for (size_t i = 0; i < nodes_list_active.size(); ++i) nodes_list_active[i].Set(ACTIVE); if (pQuadraturePointGeometry->IntegrationPointsNumber() == 1) { pQuadraturePointGeometry = CreateCustomQuadraturePoint(working_dim, pQuadraturePointGeometry->LocalSpaceDimension(), data_container, nodes_list_active, &rParentGeom); rMasterMaterialPoint.SetGeometry(pQuadraturePointGeometry); } pQuadraturePointGeometry->SetGeometryShapeFunctionContainer(data_container); pQuadraturePointGeometry->Points() = nodes_list_active; KRATOS_CATCH(""); } inline bool IsExplicitAndNeedsCorrection(GeometryType::Pointer pQuadraturePoint, const ProcessInfo& rProcessInfo) { if (rProcessInfo.Has(IS_FIX_EXPLICIT_MP_ON_GRID_EDGE)) { if (rProcessInfo.GetValue(IS_FIX_EXPLICIT_MP_ON_GRID_EDGE)) { if (pQuadraturePoint->IntegrationPointsNumber() == 1) { for (size_t i = 0; i < pQuadraturePoint->ShapeFunctionsValues().size2(); ++i) { if (pQuadraturePoint->ShapeFunctionsValues()(0, i) < std::numeric_limits<double>::epsilon()) return true; } } } } return false; } inline GeometryType& FindGridGeom(GeometryType& rParentGeom, const ModelPart& rBackgroundGridModelPart, const double 
Tolerance, const array_1d<double, 3>& xg, array_1d<double, 3>& rLocalCoords, const ProcessInfo& rProcessInfo, bool& IsFound) { IsFound = false; if (CheckIsInside(rParentGeom, rLocalCoords, xg, Tolerance)) { IsFound = true; return rParentGeom; } else { if (!rParentGeom.Has(GEOMETRY_NEIGHBOURS)) ConstructNeighbourRelations(rParentGeom, rBackgroundGridModelPart); auto& geometry_neighbours = rParentGeom.GetValue(GEOMETRY_NEIGHBOURS); for (IndexType k = 0; k < geometry_neighbours.size(); ++k) { if (CheckIsInside(*geometry_neighbours[k], rLocalCoords, xg, Tolerance)) { IsFound = true; return *(geometry_neighbours[k].get()); } } } return rParentGeom; } inline void UpdatePartitionedQuadraturePoint(const ModelPart& rBackgroundGridModelPart, const array_1d<double, 3>& rCoordinates, Element& rMasterMaterialPoint, typename GeometryType::Pointer pQuadraturePointGeometry, const double Tolerance) { array_1d<double, 3> local_coords; pQuadraturePointGeometry->IsInside(rCoordinates, local_coords, Tolerance); PartitionMasterMaterialPointsIntoSubPoints(rBackgroundGridModelPart, rCoordinates, local_coords, rMasterMaterialPoint, pQuadraturePointGeometry, Tolerance); } inline void NeighbourSearchElements(const ModelPart& rMPMModelPart, const ModelPart& rBackgroundGridModelPart, std::vector<typename Element::Pointer>& rMissingElements, const double Tolerance) { #pragma omp parallel for for (int i = 0; i < static_cast<int>(rMPMModelPart.Elements().size()); ++i) { auto element_itr = (rMPMModelPart.ElementsBegin() + i); array_1d<double, 3> local_coordinates; bool is_found = false; std::vector<array_1d<double, 3>> xg; element_itr->CalculateOnIntegrationPoints(MP_COORD, xg, rBackgroundGridModelPart.GetProcessInfo()); GeometryType& r_found_geom = FindGridGeom(element_itr->GetGeometry().GetGeometryParent(0), rBackgroundGridModelPart, Tolerance, xg[0], local_coordinates, rMPMModelPart.GetProcessInfo(), is_found); if (is_found) { const bool is_pqmpm = (rBackgroundGridModelPart.GetProcessInfo().Has(IS_PQMPM)) ? rBackgroundGridModelPart.GetProcessInfo().GetValue(IS_PQMPM) : false; if (is_pqmpm) { // Updates the quadrature point geometry. 
PartitionMasterMaterialPointsIntoSubPoints(rBackgroundGridModelPart, xg[0], local_coordinates, *element_itr, element_itr->pGetGeometry(), Tolerance); } else { CreateQuadraturePointsUtility<Node<3>>::UpdateFromLocalCoordinates( element_itr->pGetGeometry(), local_coordinates, element_itr->GetGeometry().IntegrationPoints()[0].Weight(), r_found_geom); } if (IsExplicitAndNeedsCorrection(element_itr->pGetGeometry(), rBackgroundGridModelPart.GetProcessInfo())) is_found = false; else { for (IndexType j = 0; j < r_found_geom.PointsNumber(); ++j) r_found_geom.Points()[j].Set(ACTIVE); } } if(!is_found) { #pragma omp critical rMissingElements.push_back(&*element_itr); } } } // inline void NeighbourSearchConditions(const ModelPart& rMPMModelPart, const ModelPart& rBackgroundGridModelPart, std::vector<typename Condition::Pointer>& rMissingConditions, const double Tolerance) { #pragma omp parallel for for (int i = 0; i < static_cast<int>(rMPMModelPart.Conditions().size()); ++i) { auto condition_itr = rMPMModelPart.Conditions().begin() + i; std::vector<array_1d<double, 3>> xg; condition_itr->CalculateOnIntegrationPoints(MPC_COORD, xg, rMPMModelPart.GetProcessInfo()); if (xg.size() > 0 && condition_itr->Is(BOUNDARY)) { array_1d<double, 3> local_coordinates; bool is_found = false; GeometryType& r_found_geom = FindGridGeom(condition_itr->GetGeometry(), rBackgroundGridModelPart, Tolerance, xg[0], local_coordinates, rMPMModelPart.GetProcessInfo(), is_found); if (is_found) { condition_itr->GetGeometry() = r_found_geom; for (IndexType j = 0; j < r_found_geom.PointsNumber(); ++j) r_found_geom[j].Set(ACTIVE); } else { #pragma omp critical rMissingConditions.push_back(&*condition_itr); } } } } inline bool IsFixExplicitAndOnElementEdge(const Vector& N, const ProcessInfo& rProcessInfo) { if (rProcessInfo.Has(IS_FIX_EXPLICIT_MP_ON_GRID_EDGE)) { if (rProcessInfo.GetValue(IS_FIX_EXPLICIT_MP_ON_GRID_EDGE)) { // check if MP is exactly on the edge of the element, this gives spurious strains in explicit for (SizeType i = 0; i < N.size(); ++i) { if (std::abs(N[i]) < std::numeric_limits<double>::epsilon()) { return true; } } } } return false; } template <std::size_t TDimension> void BinBasedSearchElementsAndConditions(ModelPart& rMPMModelPart, ModelPart& rBackgroundGridModelPart, std::vector<typename Element::Pointer>& rMissingElements, std::vector<typename Condition::Pointer>& rMissingConditions, const std::size_t MaxNumberOfResults, const double Tolerance) { const ProcessInfo& r_process_info = rBackgroundGridModelPart.GetProcessInfo(); bool is_pqmpm = (r_process_info.Has(IS_PQMPM)) ? 
r_process_info.GetValue(IS_PQMPM) : false; // Search background grid and make element active Vector N; const int max_result = 1000; #pragma omp parallel { BinBasedFastPointLocator<TDimension> SearchStructure(rBackgroundGridModelPart); SearchStructure.UpdateSearchDatabase(); typename BinBasedFastPointLocator<TDimension>::ResultContainerType results(max_result); // Element search and assign background grid #pragma omp for for (int i = 0; i < static_cast<int>(rMissingElements.size()); ++i) { auto element_itr = *(rMissingElements.begin() + i); std::vector<array_1d<double, 3>> xg; element_itr->CalculateOnIntegrationPoints(MP_COORD, xg, rMPMModelPart.GetProcessInfo()); typename BinBasedFastPointLocator<TDimension>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelem; // FindPointOnMesh find the background element in which a given point falls and the relative shape functions bool is_found = SearchStructure.FindPointOnMesh(xg[0], N, pelem, result_begin, MaxNumberOfResults, Tolerance); if (is_found == true) { if (IsFixExplicitAndOnElementEdge(N, r_process_info) && !is_pqmpm) { // MP is exactly on the edge. Now we give it a little 'nudge' array_1d<double, 3> xg_nudged = array_1d<double, 3>(xg[0]); std::vector<array_1d<double, 3>> mp_vel; element_itr->CalculateOnIntegrationPoints(MP_VELOCITY, mp_vel, rMPMModelPart.GetProcessInfo()); xg_nudged += r_process_info[DELTA_TIME] / 1000.0 * mp_vel[0]; if (SearchStructure.FindPointOnMesh(xg_nudged, N, pelem, result_begin, MaxNumberOfResults, Tolerance)) { element_itr->SetValuesOnIntegrationPoints(MP_COORD, { xg_nudged }, rMPMModelPart.GetProcessInfo()); KRATOS_INFO("MPMSearchElementUtility") << "WARNING: To prevent spurious explicit stresses, Material Point " << element_itr->Id() << " was nudged." << std::endl; } else { is_found = SearchStructure.FindPointOnMesh(xg[0], N, pelem, result_begin, MaxNumberOfResults, Tolerance); KRATOS_INFO("MPMSearchElementUtility") << "WARNING: Material Point " << element_itr->Id() << " lies exactly on an element edge and may give spurious results." << std::endl; } } pelem->Set(ACTIVE); const bool is_pqmpm = (rBackgroundGridModelPart.GetProcessInfo().Has(IS_PQMPM)) ? rBackgroundGridModelPart.GetProcessInfo().GetValue(IS_PQMPM) : false; if (is_pqmpm) { // Updates the quadrature point geometry. UpdatePartitionedQuadraturePoint(rBackgroundGridModelPart, xg[0], *element_itr, pelem->pGetGeometry(), Tolerance); } else { auto p_quadrature_point_geometry = element_itr->pGetGeometry(); array_1d<double, 3> local_coordinates; p_quadrature_point_geometry->PointLocalCoordinates(local_coordinates, xg[0]); CreateQuadraturePointsUtility<Node<3>>::UpdateFromLocalCoordinates( p_quadrature_point_geometry, local_coordinates, p_quadrature_point_geometry->IntegrationPoints()[0].Weight(), pelem->GetGeometry()); } auto& r_geometry = element_itr->GetGeometry(); for (IndexType j = 0; j < r_geometry.PointsNumber(); ++j) r_geometry[j].Set(ACTIVE); } else { KRATOS_INFO("MPMSearchElementUtility") << "WARNING: Search Element for Material Point: " << element_itr->Id() << " is failed. Geometry is cleared." 
<< std::endl; element_itr->GetGeometry().clear(); element_itr->Reset(ACTIVE); element_itr->Set(TO_ERASE); } } // Condition search and assign background grid #pragma omp for for (int i = 0; i < static_cast<int>(rMissingConditions.size()); ++i) { auto condition_itr = *(rMissingConditions.begin() + i); std::vector<array_1d<double, 3>> xg; condition_itr->CalculateOnIntegrationPoints(MPC_COORD, xg, rMPMModelPart.GetProcessInfo()); if (xg.size() > 0) { // Only search for particle based BCs! // Grid BCs are still applied on MP_model_part but we don't want to search for them. typename BinBasedFastPointLocator<TDimension>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelem; // FindPointOnMesh find the background element in which a given point falls and the relative shape functions bool is_found = SearchStructure.FindPointOnMesh(xg[0], N, pelem, result_begin, MaxNumberOfResults, Tolerance); if (is_found == true) { condition_itr->GetGeometry() = pelem->GetGeometry(); auto& r_geometry = condition_itr->GetGeometry(); for (IndexType j = 0; j < r_geometry.PointsNumber(); ++j) r_geometry[j].Set(ACTIVE); } else { KRATOS_INFO("MPMSearchElementUtility") << "WARNING: Search Element for Material Point Condition: " << condition_itr->Id() << " is failed. Geometry is cleared." << std::endl; condition_itr->GetGeometry().clear(); condition_itr->Reset(ACTIVE); condition_itr->Set(TO_ERASE); } } } } } inline void ResetElementsAndNodes(ModelPart& rBackgroundGridModelPart) { #pragma omp parallel for for (int i = 0; i < static_cast<int>(rBackgroundGridModelPart.Elements().size()); ++i) { auto element_itr = rBackgroundGridModelPart.Elements().begin() + i; auto& r_geometry = element_itr->GetGeometry(); element_itr->Reset(ACTIVE); for (IndexType j = 0; j < r_geometry.PointsNumber(); ++j) r_geometry[j].Reset(ACTIVE); } } /** * @brief Search element connectivity for each particle * @details A search is performed to know in which grid element the material point falls. * If one or more material points fall in the grid element, the grid element is * set to be active and its connectivity is associated to the material point * element. * STEPS: * 1) All the elements are set to be INACTIVE * 2) A searching is performed and the grid elements which contain at least a MP are set to be ACTIVE * */ template<std::size_t TDimension> void SearchElement(ModelPart& rBackgroundGridModelPart, ModelPart& rMPMModelPart, const std::size_t MaxNumberOfResults, const double Tolerance) { ResetElementsAndNodes(rBackgroundGridModelPart); std::vector<typename Element::Pointer> missing_elements; std::vector<typename Condition::Pointer> missing_conditions; NeighbourSearchElements(rMPMModelPart, rBackgroundGridModelPart, missing_elements, Tolerance); NeighbourSearchConditions(rMPMModelPart, rBackgroundGridModelPart, missing_conditions, Tolerance); if (missing_conditions.size() > 0 || missing_elements.size() > 0) BinBasedSearchElementsAndConditions<TDimension>(rMPMModelPart, rBackgroundGridModelPart, missing_elements, missing_conditions, MaxNumberOfResults, Tolerance); } } // end namespace MPMSearchElementUtility } // end namespace Kratos #endif // KRATOS_MPM_SEARCH_ELEMENT_UTILITY
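/* Illustration only: a minimal sketch of the two-stage strategy used by
   SearchElement() above -- first a cheap check against the previously
   assigned geometry, then a global (bin-based) fallback for the misses.
   Particle, FindInPreviousCell and FindByGlobalSearch are hypothetical
   stand-ins, not Kratos types or functions. */
#include <vector>

struct Particle { double x[3]; int cell_id = -1; };

// Placeholder for the cheap neighbour/previous-cell test.
static bool FindInPreviousCell(const Particle& p) { return p.cell_id >= 0; }
// Placeholder for the expensive global search (e.g. a bin-based point locator).
static bool FindByGlobalSearch(Particle& p) { p.cell_id = 0; return true; }

static void SearchParticles(std::vector<Particle>& particles)
{
    std::vector<Particle*> missing;

    // Fast pass: most particles are found where they were last time step;
    // only the misses are collected (under a critical section, as above).
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(particles.size()); ++i) {
        if (!FindInPreviousCell(particles[i])) {
            #pragma omp critical
            missing.push_back(&particles[i]);
        }
    }

    // Fallback pass: resolve only the (hopefully short) list of misses.
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(missing.size()); ++i)
        FindByGlobalSearch(*missing[i]);
}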
chunk_reduction.h
/* Copyright 2013 IST Austria Contributed by: Ulrich Bauer, Michael Kerber, Jan Reininghaus This file is part of PHAT. PHAT is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. PHAT is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with PHAT. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #include "../helpers/misc.h" #include "../boundary_matrix.h" namespace phat { template <bool use_sqrt = false> class chunk_reduction_impl { public: enum column_type { GLOBAL , LOCAL_POSITIVE , LOCAL_NEGATIVE }; public: template< typename Representation > void operator() ( boundary_matrix< Representation >& boundary_matrix ) { const index nr_columns = boundary_matrix.get_num_cols(); if( omp_get_max_threads( ) > nr_columns ) omp_set_num_threads( 1 ); const dimension max_dim = boundary_matrix.get_max_dim(); std::vector< index > lowest_one_lookup( nr_columns, -1 ); std::vector < column_type > column_type( nr_columns, GLOBAL ); std::vector< char > is_active( nr_columns, false ); const index chunk_size = use_sqrt ? (index)sqrt( (double)nr_columns ) : nr_columns / omp_get_max_threads(); std::vector< index > chunk_boundaries; for( index cur_boundary = 0; cur_boundary < nr_columns; cur_boundary += chunk_size ) chunk_boundaries.push_back( cur_boundary ); chunk_boundaries.push_back( nr_columns ); for( dimension cur_dim = max_dim; cur_dim >= 1; cur_dim-- ) { // Phase 1: Reduce chunks locally -- 1st pass #pragma omp parallel for schedule( guided, 1 ) for( index chunk_id = 0; chunk_id < (index)chunk_boundaries.size() - 1; chunk_id++ ) _local_chunk_reduction( boundary_matrix, lowest_one_lookup, column_type, cur_dim, chunk_boundaries[ chunk_id ], chunk_boundaries[ chunk_id + 1 ], chunk_boundaries[ chunk_id ] ); boundary_matrix.sync(); // Phase 1: Reduce chunks locally -- 2nd pass #pragma omp parallel for schedule( guided, 1 ) for( index chunk_id = 1; chunk_id < (index)chunk_boundaries.size( ) - 1; chunk_id++ ) _local_chunk_reduction( boundary_matrix, lowest_one_lookup, column_type, cur_dim, chunk_boundaries[ chunk_id ], chunk_boundaries[ chunk_id + 1 ], chunk_boundaries[ chunk_id - 1 ] ); boundary_matrix.sync( ); } // get global columns std::vector< index > global_columns; for( index cur_col_idx = 0; cur_col_idx < nr_columns; cur_col_idx++ ) if( column_type[ cur_col_idx ] == GLOBAL ) global_columns.push_back( cur_col_idx ); // get active columns #pragma omp parallel for for( index idx = 0; idx < (index)global_columns.size(); idx++ ) is_active[ global_columns[ idx ] ] = true; _get_active_columns( boundary_matrix, lowest_one_lookup, column_type, global_columns, is_active ); // Phase 2+3: Simplify columns and reduce them for( dimension cur_dim = max_dim; cur_dim >= 1; cur_dim-- ) { // Phase 2: Simplify columns std::vector< index > temp_col; #pragma omp parallel for schedule( guided, 1 ), private( temp_col ) for( index idx = 0; idx < (index)global_columns.size(); idx++ ) if( boundary_matrix.get_dim( global_columns[ idx ] ) == cur_dim ) _global_column_simplification( global_columns[ idx ], boundary_matrix, lowest_one_lookup, column_type, is_active, temp_col ); boundary_matrix.sync(); 
// Phase 3: Reduce columns for( index idx = 0; idx < (index)global_columns.size(); idx++ ) { index cur_col = global_columns[ idx ]; if( boundary_matrix.get_dim( cur_col ) == cur_dim && column_type[ cur_col ] == GLOBAL ) { index lowest_one = boundary_matrix.get_max_index( cur_col ); while( lowest_one != -1 && lowest_one_lookup[ lowest_one ] != -1 ) { boundary_matrix.add_to( lowest_one_lookup[ lowest_one ], cur_col ); lowest_one = boundary_matrix.get_max_index( cur_col ); } if( lowest_one != -1 ) { lowest_one_lookup[ lowest_one ] = cur_col; boundary_matrix.clear( lowest_one ); } boundary_matrix.finalize( cur_col ); } } } boundary_matrix.sync(); } protected: template< typename Representation > void _local_chunk_reduction( boundary_matrix< Representation >& boundary_matrix , std::vector<index>& lowest_one_lookup , std::vector< column_type >& column_type , const dimension cur_dim , const index chunk_begin , const index chunk_end , const index row_begin ) { for( index cur_col = chunk_begin; cur_col < chunk_end; cur_col++ ) { if( column_type[ cur_col ] == GLOBAL && boundary_matrix.get_dim( cur_col ) == cur_dim ) { index lowest_one = boundary_matrix.get_max_index( cur_col ); while( lowest_one != -1 && lowest_one >= row_begin && lowest_one_lookup[ lowest_one ] != -1 ) { boundary_matrix.add_to( lowest_one_lookup[ lowest_one ], cur_col ); lowest_one = boundary_matrix.get_max_index( cur_col ); } if( lowest_one >= row_begin ) { lowest_one_lookup[ lowest_one ] = cur_col; column_type[ cur_col ] = LOCAL_NEGATIVE; column_type[ lowest_one ] = LOCAL_POSITIVE; boundary_matrix.clear( lowest_one ); boundary_matrix.finalize( cur_col ); } } } } template< typename Representation > void _get_active_columns( const boundary_matrix< Representation >& boundary_matrix , const std::vector< index >& lowest_one_lookup , const std::vector< column_type >& column_type , const std::vector< index >& global_columns , std::vector< char >& is_active ) { const index nr_columns = boundary_matrix.get_num_cols(); std::vector< char > finished( nr_columns, false ); std::vector< std::pair < index, index > > stack; std::vector< index > cur_col_values; #pragma omp parallel for schedule( guided, 1 ), private( stack, cur_col_values ) for( index idx = 0; idx < (index)global_columns.size(); idx++ ) { bool pop_next = false; index start_col = global_columns[ idx ]; stack.push_back( std::pair< index, index >( start_col, -1 ) ); while( !stack.empty() ) { index cur_col = stack.back().first; index prev_col = stack.back().second; if( pop_next ) { stack.pop_back(); pop_next = false; if( prev_col != -1 ) { if( is_active[ cur_col ] ) { is_active[ prev_col ] = true; } if( prev_col == stack.back().first ) { finished[ prev_col ] = true; pop_next = true; } } } else { pop_next = true; boundary_matrix.get_col( cur_col, cur_col_values ); for( index idx = 0; idx < (index) cur_col_values.size(); idx++ ) { index cur_row = cur_col_values[ idx ]; if( ( column_type[ cur_row ] == GLOBAL ) ) { is_active[ cur_col ] = true; } else if( column_type[ cur_row ] == LOCAL_POSITIVE ) { index next_col = lowest_one_lookup[ cur_row ]; if( next_col != cur_col && !finished[ cur_col ] ) { stack.push_back( std::make_pair( next_col, cur_col ) ); pop_next = false; } } } } } } } template< typename Representation > void _global_column_simplification( const index col_idx , boundary_matrix< Representation >& boundary_matrix , const std::vector< index >& lowest_one_lookup , const std::vector< column_type >& column_type , const std::vector< char >& is_active , std::vector< index >& temp_col 
) { temp_col.clear(); while( !boundary_matrix.is_empty( col_idx ) ) { index cur_row = boundary_matrix.get_max_index( col_idx ); switch( column_type[ cur_row ] ) { case GLOBAL: temp_col.push_back( cur_row ); boundary_matrix.remove_max( col_idx ); break; case LOCAL_NEGATIVE: boundary_matrix.remove_max( col_idx ); break; case LOCAL_POSITIVE: if( is_active[ lowest_one_lookup[ cur_row ] ] ) boundary_matrix.add_to( lowest_one_lookup[ cur_row ], col_idx ); else boundary_matrix.remove_max( col_idx ); break; } } std::reverse( temp_col.begin(), temp_col.end() ); boundary_matrix.set_col( col_idx, temp_col ); } }; class chunk_reduction : public chunk_reduction_impl<false> {}; class chunk_reduction_sqrt : public chunk_reduction_impl<true> {}; }
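/* Usage sketch (not part of PHAT itself): how the chunk algorithm above is
   typically driven, assuming the standard PHAT headers and the
   compute_persistence_pairs helper that ships with the library. The input
   is the boundary matrix of a single filtered triangle. */
#include <vector>
#include <phat/compute_persistence_pairs.h>
#include <phat/representations/vector_vector.h>
#include <phat/algorithms/chunk_reduction.h>

int main()
{
    phat::boundary_matrix< phat::vector_vector > boundary_matrix;
    boundary_matrix.set_num_cols( 7 );

    std::vector< phat::index > col;
    for( phat::index i = 0; i < 3; i++ ) {              // three vertices
        boundary_matrix.set_dim( i, 0 );
        boundary_matrix.set_col( i, col );
    }
    phat::index edges[ 3 ][ 2 ] = { { 0, 1 }, { 1, 2 }, { 0, 2 } };
    for( phat::index i = 0; i < 3; i++ ) {              // three edges
        boundary_matrix.set_dim( 3 + i, 1 );
        col.assign( edges[ i ], edges[ i ] + 2 );
        boundary_matrix.set_col( 3 + i, col );
    }
    boundary_matrix.set_dim( 6, 2 );                    // the triangle
    col = { 3, 4, 5 };
    boundary_matrix.set_col( 6, col );

    // Runs the two local passes and the global phase shown above; the
    // OpenMP parallel loops pick up the available threads automatically.
    phat::persistence_pairs pairs;
    phat::compute_persistence_pairs< phat::chunk_reduction >( pairs, boundary_matrix );
    return 0;
}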
video_mlv.c
#include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <strings.h> #include <math.h> #include <time.h> #include <stdint.h> #include "video_mlv.h" /* Lossless decompression */ #include "liblj92/lj92.h" #include "liblj92/lj92.c" #define ROR32(v,a) ((v) >> (a) | (v) << (32-(a))) static void mlv_read_audio(mlv_header_t *video) { if(!((video->MLVI.audioClass) && video->audios && (video->WAVI.channels != 0))) return; int fread_err = 1; uint64_t mlv_audio_buffer_offset = 0; uint64_t mlv_audio_size = 0; for (uint32_t i = 0; i < video->audios; ++i) mlv_audio_size += video->audio_index[i].frame_size; uint8_t *mlv_audio_buffer = malloc(mlv_audio_size); for (uint32_t i = 0; i < video->audios; ++i) { /* Go to audio block position */ fseek(video->file[video->audio_index[i].chunk_num], video->audio_index[i].frame_offset, SEEK_SET); /* Read to location of audio */ fread_err &= fread(mlv_audio_buffer + mlv_audio_buffer_offset, video->audio_index[i].frame_size, 1, video->file[video->audio_index[i].chunk_num]); /* New audio position */ mlv_audio_buffer_offset += video->audio_index[i].frame_size; } if(!fread_err) { free(mlv_audio_buffer); return; } /* Calculate the sum of audio sample sizes for all audio channels */ uint64_t audio_sample_size = (video)->WAVI.channels * (video)->WAVI.bitsPerSample / 8; /* Calculate the audio alignement block size in bytes */ uint16_t block_align = audio_sample_size * 1024; /* Get time difference of first video and audio frames and calculate the sync offset */ uint64_t negative_offset = 0; uint64_t positive_offset = 0; int64_t sync_offset = (int64_t)( ( (double)video->video_index[0].frame_time - (double)video->audio_index[0].frame_time ) * (double)( video->WAVI.samplingRate * audio_sample_size / 1000000.0 ) ); if(sync_offset >= 0) negative_offset = (uint64_t)sync_offset - ((uint64_t)sync_offset % audio_sample_size); // Make sure value is multiple of sum of all channel sample sizes else positive_offset = (uint64_t)(-sync_offset) - ((uint64_t)(-sync_offset) % audio_sample_size); /* Calculate synced audio size */ uint64_t synced_audio_size = mlv_audio_size - negative_offset + positive_offset; /* Check if synced_audio_size is multiple of 'block_align' bytes and add one more block */ uint64_t synced_audio_size_aligned = synced_audio_size - (synced_audio_size % block_align) + block_align; /* Allocate synced audio buffer */ video->audio_data = calloc( synced_audio_size_aligned, 1 ); /* Copy cut/shifted audio data to the synced audio buffer */ memcpy(video->audio_data + positive_offset, mlv_audio_buffer + negative_offset, mlv_audio_size - negative_offset); free(mlv_audio_buffer); /* Calculate theoretical audio size according to fps */ uint64_t theoretic_size = (uint64_t)( (double)( video->WAVI.samplingRate * audio_sample_size * video->frames ) / ((double)(video->MLVI.sourceFpsNom / (double)video->MLVI.sourceFpsDenom)) ); /* Check if theoretic_size is multiple of 'block_align' bytes and add one more block */ uint64_t theoretic_size_aligned = theoretic_size - (theoretic_size % block_align) + block_align; /* Check calculated synced_audio_size_aligned against theoretic_size_aligned */ uint64_t final_audio_size_aligned = MIN(theoretic_size_aligned, synced_audio_size_aligned); video->audio_buffer_size = synced_audio_size_aligned; video->audio_size = final_audio_size_aligned; } static int seek_to_next_known_block(FILE * in_file) { uint64_t read_ahead_size = 128 * 1024 * 1024; uint8_t *ahead = malloc(read_ahead_size); uint64_t read = fread(ahead, 1, 
read_ahead_size, in_file); fseek(in_file, -read, SEEK_CUR); for (uint64_t i = 0; i < read; i++) { if (memcmp(ahead + i, "VIDF", 4) == 0 || memcmp(ahead + i, "AUDF", 4) == 0 || memcmp(ahead + i, "NULL", 4) == 0 || memcmp(ahead + i, "RTCI", 4) == 0) { fseek(in_file, i, SEEK_CUR); free(ahead); return 1; } } free(ahead); return 0; } /* Spanned multichunk MLV file handling */ static FILE **load_all_chunks(const char *base_filename, int *entries) { int seq_number = 0; int max_name_len = strlen(base_filename) + 16; char *filename = alloca(max_name_len); strncpy(filename, base_filename, max_name_len - 1); FILE **files = malloc(sizeof(FILE*)); files[0] = fopen(filename, "rb"); if(!files[0]) { free(files); return NULL; } // FIXME i think this test is broken and should probably return 0 anyways /* get extension and check if it is a .MLV */ char *dot = strrchr(filename, '.'); if(dot && strcasecmp(dot+1, "mlv")) seq_number = 100; (*entries)++; while(seq_number < 99) { FILE **realloc_files = realloc(files, (*entries + 1) * sizeof(FILE*)); if(!realloc_files) { free(files); return NULL; } files = realloc_files; /* check for the next file M00, M01 etc */ char seq_name[8]; sprintf(seq_name, "%02d", seq_number); seq_number++; strcpy(&filename[strlen(filename) - 2], seq_name); /* try to open */ files[*entries] = fopen(filename, "rb"); if(files[*entries]) (*entries)++; else break; } return files; } static void close_all_chunks(FILE ** files, int entries) { for(int i = 0; i < entries; i++) if(files[i]) fclose(files[i]); if(files) free(files); } // TODO: replace by qsort static void frame_index_sort(mlv_frame_index_t *frame_index, uint32_t entries) { if (!entries) return; uint32_t n = entries; do { uint32_t new_n = 1; for (uint32_t i = 0; i < n-1; ++i) { if (frame_index[i].frame_time > frame_index[i+1].frame_time) { mlv_frame_index_t tmp = frame_index[i+1]; frame_index[i+1] = frame_index[i]; frame_index[i] = tmp; new_n = i + 1; } } n = new_n; } while (n > 1); } /* Unpack or decompress original raw data */ int mlv_get_frame( mlv_header_t *video, uint64_t frame_index, uint16_t *unpackedFrame) { int bitdepth = video->RAWI.raw_info.bits_per_pixel; int width = video->RAWI.xRes; int height = video->RAWI.yRes; int pixel_cnt = width * height; int chunk = video->video_index[frame_index].chunk_num; uint32_t frame_size = video->video_index[frame_index].frame_size; uint64_t frame_offset = video->video_index[frame_index].frame_offset; uint64_t frame_header_offset = video->video_index[frame_index].block_offset; /* How many bytes is RAW frame */ int raw_frame_size = (width * height * bitdepth) / 8; /* Memory buffer for original RAW data */ uint8_t *raw_frame = malloc(raw_frame_size + 4); // additional 4 bytes for safety FILE *file = video->file[chunk]; fseek(file, frame_header_offset, SEEK_SET); if(fread(&video->VIDF, sizeof(mlv_vidf_hdr_t), 1, file) != 1) { free(raw_frame); return 1; } fseek(file, frame_offset, SEEK_SET); if (video->MLVI.videoClass & MLV_VIDEO_CLASS_FLAG_LJ92) { if(fread(raw_frame, frame_size, 1, file) != 1) { // frame data read error free(raw_frame); return 1; } int components = 1; lj92 decoder_object; int ret = lj92_open(&decoder_object, raw_frame, frame_size, &width, &height, &bitdepth, &components); if(ret != LJ92_ERROR_NONE) { // lj92 decoding failed free(raw_frame); return 1; } else { ret = lj92_decode(decoder_object, unpackedFrame, width * height * components, 0, NULL, 0); if(ret != LJ92_ERROR_NONE) { // lj92 failure free(raw_frame); return 1; } } lj92_close(decoder_object); } else /* If not compressed 
just unpack to 16bit */ { if(fread(raw_frame, raw_frame_size, 1, file) != 1) { // can't read frame data free(raw_frame); return 1; } uint32_t mask = (1 << bitdepth) - 1; #pragma omp parallel for for (int i = 0; i < pixel_cnt; ++i) { uint32_t bits_offset = i * bitdepth; uint32_t bits_address = bits_offset / 16; uint32_t bits_shift = bits_offset % 16; uint32_t rotate_value = 16 + ((32 - bitdepth) - bits_shift); uint32_t uncorrected_data = *((uint32_t *)&((uint16_t *)raw_frame)[bits_address]); uint32_t data = ROR32(uncorrected_data, rotate_value); unpackedFrame[i] = ((uint16_t)(data & mask)); } } free(raw_frame); return 0; } void mlv_header_init(mlv_header_t *video) { memset(video, 0, sizeof(*video)); } /* Free all memory and close file */ void mlv_header_cleanup(mlv_header_t *video) { /* Close all MLV file chunks */ if(video->file) close_all_chunks(video->file, video->filenum); /* Free all memory */ free(video->video_index); free(video->audio_index); free(video->vers_index); free(video->audio_data); memset(video, 0, sizeof(*video)); } int mlv_open_clip( mlv_header_t *video, const char *filename, int open_mode) { video->file = load_all_chunks(filename, &video->filenum); if(!video->file) return MLV_ERR_OPEN; // can not open file uint64_t block_num = 0; /* Number of blocks in file */ mlv_hdr_t block_header; /* Basic MLV block header */ uint64_t video_frames = 0; /* Number of frames in video */ uint64_t audio_frames = 0; /* Number of audio blocks in video */ uint32_t vers_blocks = 0; /* Number of VERS blocks in MLV */ uint64_t video_index_max = 0; /* initial size of frame index */ uint64_t audio_index_max = 0; /* initial size of audio index */ uint32_t vers_index_max = 0; /* initial size of VERS index */ int mlvi_read = 0; /* Flips to 1 if 1st chunk MLVI block was read */ int rtci_read = 0; /* Flips to 1 if 1st RTCI block was read */ int lens_read = 0; /* Flips to 1 if 1st LENS block was read */ int elns_read = 0; /* Flips to 1 if 1st ELNS block was read */ int wbal_read = 0; /* Flips to 1 if 1st WBAL block was read */ int styl_read = 0; /* Flips to 1 if 1st STYL block was read */ int fread_err = 1; for(int i = 0; i < video->filenum; i++) { /* Getting size of file in bytes */ fseek(video->file[i], 0, SEEK_END); uint64_t file_size = ftell(video->file[i]); if(!file_size) { --video->filenum; return MLV_ERR_INVALID; } fseek(video->file[i], 0, SEEK_SET); /* Start of file */ /* Read file header */ if(fread(&block_header, sizeof(mlv_hdr_t), 1, video->file[i]) != 1) { --video->filenum; return MLV_ERR_INVALID; } fseek(video->file[i], 0, SEEK_SET); /* Start of file */ if(memcmp(block_header.blockType, "MLVI", 4) == 0) { if( !mlvi_read ) { fread_err &= fread(&video->MLVI, sizeof(mlv_file_hdr_t), 1, video->file[i]); mlvi_read = 1; // read MLVI only for first chunk } } else { --video->filenum; return MLV_ERR_INVALID; } while ( ftell(video->file[i]) < file_size ) /* Check if were at end of file yet */ { /* Record position to go back to it later if block is read */ uint64_t block_start = ftell(video->file[i]); /* Read block header */ fread_err &= fread(&block_header, sizeof(mlv_hdr_t), 1, video->file[i]); if(block_header.blockSize < sizeof(mlv_hdr_t)) { // invalid block size --video->filenum; return MLV_ERR_INVALID; } /* Next block location */ uint64_t next_block = (uint64_t)block_start + (uint64_t)block_header.blockSize; /* Go back to start of block for next bit */ fseek(video->file[i], block_start, SEEK_SET); /* Now check what kind of block it is and read it in to the mlv object */ if ( 
memcmp(block_header.blockType, "NULL", 4) == 0 || memcmp(block_header.blockType, "BKUP", 4) == 0) { /* do nothing, skip this block */ } else if ( memcmp(block_header.blockType, "VIDF", 4) == 0 ) { fread_err &= fread(&video->VIDF, sizeof(mlv_vidf_hdr_t), 1, video->file[i]); /* Dynamically resize the frame index buffer */ if(!video_index_max) { video_index_max = 128; video->video_index = calloc(video_index_max, sizeof(mlv_frame_index_t)); } else if(video_frames >= video_index_max - 1) { uint64_t video_index_new_size = video_index_max * 2; mlv_frame_index_t * video_index_new = calloc(video_index_new_size, sizeof(mlv_frame_index_t)); memcpy(video_index_new, video->video_index, video_index_max * sizeof(mlv_frame_index_t)); free(video->video_index); video->video_index = video_index_new; video_index_max = video_index_new_size; } /* Fill frame index */ video->video_index[video_frames].frame_type = 1; video->video_index[video_frames].chunk_num = i; video->video_index[video_frames].frame_size = video->VIDF.blockSize - sizeof(mlv_vidf_hdr_t) - video->VIDF.frameSpace; video->video_index[video_frames].frame_offset = ftell(video->file[i]) + video->VIDF.frameSpace; video->video_index[video_frames].frame_number = video->VIDF.frameNumber; video->video_index[video_frames].frame_time = video->VIDF.timestamp; video->video_index[video_frames].block_offset = block_start; /* Count actual video frames */ video_frames++; /* In preview mode exit loop after first videf read */ if(open_mode == MLV_OPEN_PREVIEW) { video->frames = video_frames; video->audios = audio_frames; goto preview_out; } } else if ( memcmp(block_header.blockType, "AUDF", 4) == 0 ) { fread_err &= fread(&video->AUDF, sizeof(mlv_audf_hdr_t), 1, video->file[i]); /* Dynamically resize the audio index buffer */ if(!audio_index_max) { audio_index_max = 32; video->audio_index = malloc(sizeof(mlv_frame_index_t) * audio_index_max); } else if(audio_frames >= audio_index_max - 1) { uint64_t audio_index_new_size = audio_index_max * 2; mlv_frame_index_t * audio_index_new = calloc(audio_index_new_size, sizeof(mlv_frame_index_t)); memcpy(audio_index_new, video->audio_index, audio_index_max * sizeof(mlv_frame_index_t)); free(video->audio_index); video->audio_index = audio_index_new; audio_index_max = audio_index_new_size; } /* Fill audio index */ video->audio_index[audio_frames].frame_type = 2; video->audio_index[audio_frames].chunk_num = i; video->audio_index[audio_frames].frame_size = video->AUDF.blockSize - sizeof(mlv_audf_hdr_t) - video->AUDF.frameSpace; video->audio_index[audio_frames].frame_offset = ftell(video->file[i]) + video->AUDF.frameSpace; video->audio_index[audio_frames].frame_number = video->AUDF.frameNumber; video->audio_index[audio_frames].frame_time = video->AUDF.timestamp; video->audio_index[audio_frames].block_offset = block_start; /* Count actual audio frames */ audio_frames++; } else if ( memcmp(block_header.blockType, "RAWI", 4) == 0 ) { fread_err &= fread(&video->RAWI, sizeof(mlv_rawi_hdr_t), 1, video->file[i]); } else if ( memcmp(block_header.blockType, "RAWC", 4) == 0 ) { fread_err &= fread(&video->RAWC, sizeof(mlv_rawc_hdr_t), 1, video->file[i]); } else if ( memcmp(block_header.blockType, "WAVI", 4) == 0 ) { fread_err &= fread(&video->WAVI, sizeof(mlv_wavi_hdr_t), 1, video->file[i]); } else if ( memcmp(block_header.blockType, "EXPO", 4) == 0 ) { fread_err &= fread(&video->EXPO, sizeof(mlv_expo_hdr_t), 1, video->file[i]); } else if ( memcmp(block_header.blockType, "LENS", 4) == 0 ) { if( !lens_read ) { fread_err &= fread(&video->LENS, 
sizeof(mlv_lens_hdr_t), 1, video->file[i]); lens_read = 1; //read only first one //Terminate string, if it isn't terminated. for( int n = 0; n < 32; n++ ) { if( video->LENS.lensName[n] == '\0' ) break; if( n == 31 ) video->LENS.lensName[n] = '\0'; } } } else if ( memcmp(block_header.blockType, "ELNS", 4) == 0 ) { if( !elns_read ) { fread_err &= fread(&video->ELNS, sizeof(mlv_elns_hdr_t), 1, video->file[i]); elns_read = 1; //read only first one } } else if ( memcmp(block_header.blockType, "WBAL", 4) == 0 ) { if( !wbal_read ) { fread_err &= fread(&video->WBAL, sizeof(mlv_wbal_hdr_t), 1, video->file[i]); wbal_read = 1; //read only first one } } else if ( memcmp(block_header.blockType, "STYL", 4) == 0 ) { if( !styl_read ) { fread_err &= fread(&video->STYL, sizeof(mlv_styl_hdr_t), 1, video->file[i]); styl_read = 1; //read only first one } } else if ( memcmp(block_header.blockType, "RTCI", 4) == 0 ) { if( !rtci_read ) { fread_err &= fread(&video->RTCI, sizeof(mlv_rtci_hdr_t), 1, video->file[i]); rtci_read = 1; //read only first one } } else if ( memcmp(block_header.blockType, "IDNT", 4) == 0 ) { fread_err &= fread(&video->IDNT, sizeof(mlv_idnt_hdr_t), 1, video->file[i]); } else if ( memcmp(block_header.blockType, "INFO", 4) == 0 ) { fread_err &= fread(&video->INFO, sizeof(mlv_info_hdr_t), 1, video->file[i]); if(video->INFO.blockSize > sizeof(mlv_info_hdr_t)) { fread_err &= fread(&video->INFO_STRING, video->INFO.blockSize - sizeof(mlv_info_hdr_t), 1, video->file[i]); } } else if ( memcmp(block_header.blockType, "DISO", 4) == 0 ) { fread_err &= fread(&video->DISO, sizeof(mlv_diso_hdr_t), 1, video->file[i]); } else if ( memcmp(block_header.blockType, "MARK", 4) == 0 ) { /* do nothing atm */ //fread(&video->MARK, sizeof(mlv_mark_hdr_t), 1, video->file[i]); } else if ( memcmp(block_header.blockType, "ELVL", 4) == 0 ) { /* do nothing atm */ //fread(&video->ELVL, sizeof(mlv_elvl_hdr_t), 1, video->file[i]); } else if ( memcmp(block_header.blockType, "DEBG", 4) == 0 ) { /* do nothing atm */ //fread(&video->DEBG, sizeof(mlv_debg_hdr_t), 1, video->file[i]); } else if ( memcmp(block_header.blockType, "VERS", 4) == 0 ) { /* Find all VERS blocks and make index for them */ fread_err &= fread(&video->VERS, sizeof(mlv_vers_hdr_t), 1, video->file[i]); /* Dynamically resize the index buffer */ if(!vers_index_max) { vers_index_max = 128; video->vers_index = calloc(vers_index_max, sizeof(mlv_frame_index_t)); } else if(vers_blocks >= vers_index_max - 1) { uint64_t vers_index_new_size = vers_index_max * 2; mlv_frame_index_t * vers_index_new = calloc(vers_index_new_size, sizeof(mlv_frame_index_t)); memcpy(vers_index_new, video->vers_index, vers_index_max * sizeof(mlv_frame_index_t)); free(video->vers_index); video->vers_index = vers_index_new; vers_index_max = vers_index_new_size; } /* Fill frame index */ video->vers_index[vers_blocks].frame_type = 3; video->vers_index[vers_blocks].chunk_num = i; video->vers_index[vers_blocks].frame_size = video->VERS.blockSize - sizeof(mlv_vers_hdr_t); video->vers_index[vers_blocks].frame_offset = ftell(video->file[i]); video->vers_index[vers_blocks].frame_number = vers_blocks; video->vers_index[vers_blocks].frame_time = video->VERS.timestamp; video->vers_index[vers_blocks].block_offset = block_start; /* Count actual VERS blocks */ vers_blocks++; } else if ( memcmp(block_header.blockType, "DARK", 4) == 0 ) { fread_err &= fread(&video->DARK, sizeof(mlv_dark_hdr_t), 1, video->file[i]); video->dark_frame_offset = ftell(video->file[i]); } else { // block name is wrong, so try to brute 
force the position of next valid block if(!seek_to_next_known_block(video->file[i])) { // unknown block type --video->filenum; return MLV_ERR_CORRUPTED; } continue; } /* Move to next block */ fseek(video->file[i], next_block, SEEK_SET); block_num++; } } if(!fread_err) { --video->filenum; return MLV_ERR_IO; } if(!video_frames) { --video->filenum; return MLV_ERR_INVALID; } /* Set total block amount in mlv */ video->block_num = block_num; /* Sort video and audio frames by time stamp */ if(video_frames) frame_index_sort(video->video_index, video_frames); if(audio_frames) frame_index_sort(video->audio_index, audio_frames); /* Set frame count in video object */ video->frames = video_frames; /* Set audio count in video object */ video->audios = audio_frames; /* Set VERS block count in video object */ video->vers_blocks = vers_blocks; /* Reads MLV audio into buffer (video->audio_data) and sync it, * set full audio buffer size (video->audio_buffer_size) and * aligned usable audio data size (video->audio_size) */ mlv_read_audio(video); preview_out: /* NON compressed frame size */ video->frame_size = video->RAWI.xRes * video->RAWI.yRes * video->RAWI.raw_info.bits_per_pixel / 8; video->frame_rate = (double)(video->MLVI.sourceFpsNom / (double)video->MLVI.sourceFpsDenom); return MLV_ERR_NONE; }
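/* Illustration only: the uncompressed branch of mlv_get_frame() extracts
   bitdepth-wide samples from a packed buffer with a rotate-and-mask trick.
   The standalone sketch below shows the same idea (N-bit -> 16-bit
   unpacking) with straightforward bit addressing, assuming samples are
   packed back to back, most significant bit first; the real MLV layout
   additionally addresses the buffer in 16-bit words, so the byte order of
   actual files differs. get_packed_sample is a hypothetical helper. */
#include <cstdint>
#include <cstdio>

static uint16_t get_packed_sample(const uint8_t *buf, int index, int bitdepth)
{
    uint32_t value = 0;
    int bit_offset = index * bitdepth;
    for (int b = 0; b < bitdepth; ++b) {
        int pos = bit_offset + b;
        int bit = (buf[pos / 8] >> (7 - (pos % 8))) & 1;  // MSB-first within a byte
        value = (value << 1) | bit;
    }
    return (uint16_t)value;
}

int main()
{
    // Two 12-bit samples 0xABC and 0x123 packed into three bytes: AB C1 23.
    uint8_t packed[3] = { 0xAB, 0xC1, 0x23 };
    printf("%03X %03X\n", get_packed_sample(packed, 0, 12),
                          get_packed_sample(packed, 1, 12));  // prints "ABC 123"
    return 0;
}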
task_target_device_codegen.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[ .].+[.|,]" --prefix-filecheck-ir-name _ // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -fopenmp-version=50 -x c -emit-llvm %s -o - | FileCheck %s // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp-simd -fopenmp-version=50 -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s // SIMD-ONLY0-NOT: {{__kmpc|__tgt}} // expected-no-diagnostics #ifndef HEADER #define HEADER void test_task_affinity() { int t; #pragma omp task { #pragma omp target device(t) ; } } #endif // CHECK-LABEL: define {{[^@]+}}@test_task_affinity // CHECK-SAME: () #[[ATTR0:[0-9]+]] { // CHECK-NEXT: entry: // CHECK-NEXT: [[T:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) // CHECK-NEXT: [[TMP1:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1, i64 48, i64 0, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*)) // CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct.kmp_task_t_with_privates* // CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP2]], i32 0, i32 0 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP2]], i32 0, i32 1 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP4]], i32 0, i32 0 // CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[T]], align 4 // CHECK-NEXT: store i32 [[TMP6]], i32* [[TMP5]], align 8 // CHECK-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* [[TMP1]]) // CHECK-NEXT: ret void // // // CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_test_task_affinity_l18 // CHECK-SAME: () #[[ATTR1:[0-9]+]] { // CHECK-NEXT: entry: // CHECK-NEXT: ret void // // // CHECK-LABEL: define {{[^@]+}}@.omp_task_privates_map. 
// CHECK-SAME: (%struct..kmp_privates.t* noalias [[TMP0:%.*]], i32** noalias [[TMP1:%.*]]) #[[ATTR2:[0-9]+]] { // CHECK-NEXT: entry: // CHECK-NEXT: [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8 // CHECK-NEXT: [[DOTADDR1:%.*]] = alloca i32**, align 8 // CHECK-NEXT: store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8 // CHECK-NEXT: store i32** [[TMP1]], i32*** [[DOTADDR1]], align 8 // CHECK-NEXT: [[TMP2:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP2]], i32 0, i32 0 // CHECK-NEXT: [[TMP4:%.*]] = load i32**, i32*** [[DOTADDR1]], align 8 // CHECK-NEXT: store i32* [[TMP3]], i32** [[TMP4]], align 8 // CHECK-NEXT: ret void // // // CHECK-LABEL: define {{[^@]+}}@.omp_task_entry. // CHECK-SAME: (i32 [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] { // CHECK-NEXT: entry: // CHECK-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8 // CHECK-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8 // CHECK-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8 // CHECK-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8 // CHECK-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8 // CHECK-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i32*, align 8 // CHECK-NEXT: [[DOTCAPTURE_EXPR__I:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8 // CHECK-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4 // CHECK-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8 // CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4 // CHECK-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2 // CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0 // CHECK-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 // CHECK-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon* // CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1 // CHECK-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8* // CHECK-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8* // CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) // CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) // CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) // CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) // CHECK-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 // CHECK-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 // 
CHECK-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i32**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i32**)* // CHECK-NEXT: call void [[TMP15]](i8* [[TMP14]], i32** [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4:[0-9]+]] // CHECK-NEXT: [[TMP16:%.*]] = load i32*, i32** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4 // CHECK-NEXT: store i32 [[TMP17]], i32* [[DOTCAPTURE_EXPR__I]], align 4, !noalias !12 // CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_test_task_affinity_l18() #[[ATTR4]] // CHECK-NEXT: ret i32 0 //
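/* Illustration only (hypothetical example, not part of the test): a
   host-runnable variant of the construct the CHECK lines verify. As shown
   above, clang stores 't' into the task's privates right after
   __kmpc_omp_task_alloc and reloads it in the task entry to evaluate the
   device() expression; on a machine without offload targets the target
   region simply falls back to the host. */
#include <cstdio>
#include <omp.h>

int main()
{
    int t = omp_get_default_device();
    #pragma omp task
    {
        // 't' is captured into the task's private storage, per the codegen above.
        #pragma omp target device(t)
        { }
    }
    #pragma omp taskwait
    printf("done\n");
    return 0;
}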
ast-dump-openmp-taskloop-simd.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp taskloop simd for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp taskloop simd for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp taskloop simd collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp taskloop simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp taskloop simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-taskloop-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:4:1, col:26> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 
'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:10:1, col:26> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 
'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:17:1, col:38> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:27, col:37> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:36> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:36> 'int' 1 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // 
CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:24:1, col:38> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:27, col:37> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:36> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:36> 'int' 2 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr 
{{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 
'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPTaskLoopSimdDirective {{.*}} <line:31:1, col:38> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:27, col:37> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:36> 'int' // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:36> 'int' 2 // CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | 
|-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
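The CHECK lines above describe the AST clang emits for the last function in ast-dump-openmp-taskloop-simd.c: a "#pragma omp taskloop simd collapse(2)" over three nested canonical loops with an empty body. A minimal sketch of source with that shape, reconstructed from the dump (it is not copied from the test file, so treat it as an approximation):

// Hedged reconstruction of the shape the dump describes: a taskloop simd
// directive with collapse(2) over three perfectly nested canonical loops.
void test_five(int x, int y, int z) {
#pragma omp taskloop simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      for (int i = 0; i < z; i++)
        ;
}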
trmm_x_coo_u_lo_row.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #include <memory.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy) { ALPHA_INT m = mat->rows; ALPHA_INT n = columns; ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT r = 0; r < m; r++) for (ALPHA_INT c = 0; c < columns; c++) { ALPHA_Number t1, t2; alpha_mul(t1, y[r * ldy + c], beta); alpha_mul(t2, alpha, x[index2(r, c, ldx)]); alpha_add(y[r * ldy + c], t1, t2); } #ifdef _OPENMP #pragma omp parallel num_threads(num_threads) #endif { ALPHA_INT tid = alpha_get_thread_id(); for (ALPHA_INT ai = 0; ai < mat->nnz; ++ai) { ALPHA_INT cr = mat->row_indx[ai]; if (cr % num_threads != tid) continue; ALPHA_Number *Y = &y[index2(cr, 0, ldy)]; if (mat->col_indx[ai] < cr) { ALPHA_Number val; alpha_mul(val, alpha, mat->values[ai]); const ALPHA_Number *X = &x[index2(mat->col_indx[ai], 0, ldx)]; for (ALPHA_INT c = 0; c < n; ++c) alpha_madde(Y[c], val, X[c]); } } } return ALPHA_SPARSE_STATUS_SUCCESS; }
updater_basemaker-inl.h
/*! * Copyright 2014 by Contributors * \file updater_basemaker-inl.h * \brief implement a common tree constructor * \author Tianqi Chen */ #ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_ #define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_ #include <rabit/rabit.h> #include <vector> #include <algorithm> #include <string> #include <limits> #include <utility> #include "xgboost/base.h" #include "xgboost/json.h" #include "xgboost/tree_updater.h" #include "param.h" #include "constraints.h" #include "../common/io.h" #include "../common/random.h" #include "../common/quantile.h" #include "../common/threading_utils.h" namespace xgboost { namespace tree { /*! * \brief base tree maker class that defines common operation * needed in tree making */ class BaseMaker: public TreeUpdater { public: void Configure(const Args& args) override { param_.UpdateAllowUnknown(args); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); FromJson(config.at("train_param"), &this->param_); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["train_param"] = ToJson(param_); } protected: // helper to collect and query feature meta information struct FMetaHelper { public: /*! \brief find type of each feature, use column format */ inline void InitByCol(DMatrix* p_fmat, const RegTree& tree) { fminmax_.resize(tree.param.num_feature * 2); std::fill(fminmax_.begin(), fminmax_.end(), -std::numeric_limits<bst_float>::max()); // start accumulating statistics for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) { auto page = batch.GetView(); for (bst_uint fid = 0; fid < batch.Size(); ++fid) { auto c = page[fid]; if (c.size() != 0) { CHECK_LT(fid * 2, fminmax_.size()); fminmax_[fid * 2 + 0] = std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]); fminmax_[fid * 2 + 1] = std::max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]); } } } } /*! \brief synchronize the information */ inline void SyncInfo() { rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size()); } // get feature type, 0:empty 1:binary 2:real inline int Type(bst_uint fid) const { CHECK_LT(fid * 2 + 1, fminmax_.size()) << "FeatHelper fid exceed query bound "; bst_float a = fminmax_[fid * 2]; bst_float b = fminmax_[fid * 2 + 1]; if (a == -std::numeric_limits<bst_float>::max()) return 0; if (-a == b) { return 1; } else { return 2; } } bst_float MaxValue(bst_uint fid) const { return fminmax_[fid *2 + 1]; } void SampleCol(float p, std::vector<bst_feature_t> *p_findex) const { std::vector<bst_feature_t> &findex = *p_findex; findex.clear(); for (size_t i = 0; i < fminmax_.size(); i += 2) { const auto fid = static_cast<bst_uint>(i / 2); if (this->Type(fid) != 0) findex.push_back(fid); } auto n = static_cast<unsigned>(p * findex.size()); std::shuffle(findex.begin(), findex.end(), common::GlobalRandom()); findex.resize(n); // sync the findex if it is subsample std::string s_cache; common::MemoryBufferStream fc(&s_cache); dmlc::Stream& fs = fc; if (rabit::GetRank() == 0) { fs.Write(findex); } rabit::Broadcast(&s_cache, 0); fs.Read(&findex); } private: std::vector<bst_float> fminmax_; }; // ------static helper functions ------ // helper function to get to next level of the tree /*! 
\brief this is helper function for row based data*/ inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) { const RegTree::Node &n = tree[nid]; bst_uint findex = n.SplitIndex(); for (const auto& ins : inst) { if (findex == ins.index) { if (ins.fvalue < n.SplitCond()) { return n.LeftChild(); } else { return n.RightChild(); } } } return n.DefaultChild(); } // ------class member helpers--------- /*! \brief initialize temp data structure */ inline void InitData(const std::vector<GradientPair> &gpair, const DMatrix &fmat, const RegTree &tree) { { // setup position position_.resize(gpair.size()); std::fill(position_.begin(), position_.end(), 0); // mark delete for the deleted datas for (size_t i = 0; i < position_.size(); ++i) { if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i]; } // mark subsample if (param_.subsample < 1.0f) { CHECK_EQ(param_.sampling_method, TrainParam::kUniform) << "Only uniform sampling is supported, " << "gradient-based sampling is only support by GPU Hist."; std::bernoulli_distribution coin_flip(param_.subsample); auto& rnd = common::GlobalRandom(); for (size_t i = 0; i < position_.size(); ++i) { if (gpair[i].GetHess() < 0.0f) continue; if (!coin_flip(rnd)) position_[i] = ~position_[i]; } } } { // expand query qexpand_.reserve(256); qexpand_.clear(); qexpand_.push_back(0); this->UpdateNode2WorkIndex(tree); } this->interaction_constraints_.Configure(param_, fmat.Info().num_col_); } /*! \brief update queue expand add in new leaves */ inline void UpdateQueueExpand(const RegTree &tree) { std::vector<int> newnodes; for (int nid : qexpand_) { if (!tree[nid].IsLeaf()) { newnodes.push_back(tree[nid].LeftChild()); newnodes.push_back(tree[nid].RightChild()); } } // use new nodes for qexpand qexpand_ = newnodes; this->UpdateNode2WorkIndex(tree); } // return decoded position inline int DecodePosition(bst_uint ridx) const { const int pid = position_[ridx]; return pid < 0 ? ~pid : pid; } // encode the encoded position value for ridx inline void SetEncodePosition(bst_uint ridx, int nid) { if (position_[ridx] < 0) { position_[ridx] = ~nid; } else { position_[ridx] = nid; } } /*! * \brief This is a helper function that uses a column based data structure * and reset the positions to the latest one * \param nodes the set of nodes that contains the split to be used * \param p_fmat feature matrix needed for tree construction * \param tree the regression tree structure */ inline void ResetPositionCol(const std::vector<int> &nodes, DMatrix *p_fmat, const RegTree &tree) { // set the positions in the nondefault this->SetNonDefaultPositionCol(nodes, p_fmat, tree); this->SetDefaultPostion(p_fmat, tree); } /*! * \brief helper function to set the non-leaf positions to default direction. * This function can be applied multiple times and will get the same result. 
* \param p_fmat feature matrix needed for tree construction * \param tree the regression tree structure */ inline void SetDefaultPostion(DMatrix *p_fmat, const RegTree &tree) { // set default direct nodes to default // for leaf nodes that are not fresh, mark then to ~nid, // so that they are ignored in future statistics collection const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_); common::ParallelFor(ndata, [&](bst_omp_uint ridx) { const int nid = this->DecodePosition(ridx); if (tree[nid].IsLeaf()) { // mark finish when it is not a fresh leaf if (tree[nid].RightChild() == -1) { position_[ridx] = ~nid; } } else { // push to default branch if (tree[nid].DefaultLeft()) { this->SetEncodePosition(ridx, tree[nid].LeftChild()); } else { this->SetEncodePosition(ridx, tree[nid].RightChild()); } } }); } /*! * \brief this is helper function uses column based data structure, * to CORRECT the positions of non-default directions that WAS set to default * before calling this function. * \param batch The column batch * \param sorted_split_set The set of index that contains split solutions. * \param tree the regression tree structure */ inline void CorrectNonDefaultPositionByBatch( const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set, const RegTree &tree) { auto page = batch.GetView(); for (size_t fid = 0; fid < batch.Size(); ++fid) { auto col = page[fid]; auto it = std::lower_bound(sorted_split_set.begin(), sorted_split_set.end(), fid); if (it != sorted_split_set.end() && *it == fid) { const auto ndata = static_cast<bst_omp_uint>(col.size()); common::ParallelFor(ndata, [&](bst_omp_uint j) { const bst_uint ridx = col[j].index; const bst_float fvalue = col[j].fvalue; const int nid = this->DecodePosition(ridx); CHECK(tree[nid].IsLeaf()); int pid = tree[nid].Parent(); // go back to parent, correct those who are not default if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) { if (fvalue < tree[pid].SplitCond()) { this->SetEncodePosition(ridx, tree[pid].LeftChild()); } else { this->SetEncodePosition(ridx, tree[pid].RightChild()); } } }); } } } /*! * \brief this is helper function uses column based data structure, * \param nodes the set of nodes that contains the split to be used * \param tree the regression tree structure * \param out_split_set The split index set */ inline void GetSplitSet(const std::vector<int> &nodes, const RegTree &tree, std::vector<unsigned>* out_split_set) { std::vector<unsigned>& fsplits = *out_split_set; fsplits.clear(); // step 1, classify the non-default data into right places for (int nid : nodes) { if (!tree[nid].IsLeaf()) { fsplits.push_back(tree[nid].SplitIndex()); } } std::sort(fsplits.begin(), fsplits.end()); fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin()); } /*! 
* \brief this is helper function uses column based data structure, * update all positions into nondefault branch, if any, ignore the default branch * \param nodes the set of nodes that contains the split to be used * \param p_fmat feature matrix needed for tree construction * \param tree the regression tree structure */ virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes, DMatrix *p_fmat, const RegTree &tree) { std::vector<unsigned> fsplits; this->GetSplitSet(nodes, tree, &fsplits); for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) { auto page = batch.GetView(); for (auto fid : fsplits) { auto col = page[fid]; const auto ndata = static_cast<bst_omp_uint>(col.size()); common::ParallelFor(ndata, [&](bst_omp_uint j) { const bst_uint ridx = col[j].index; const bst_float fvalue = col[j].fvalue; const int nid = this->DecodePosition(ridx); // go back to parent, correct those who are not default if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) { if (fvalue < tree[nid].SplitCond()) { this->SetEncodePosition(ridx, tree[nid].LeftChild()); } else { this->SetEncodePosition(ridx, tree[nid].RightChild()); } } }); } } } /*! \brief helper function to get statistics from a tree */ template<typename TStats> inline void GetNodeStats(const std::vector<GradientPair> &gpair, const DMatrix &fmat, const RegTree &tree, std::vector< std::vector<TStats> > *p_thread_temp, std::vector<TStats> *p_node_stats) { std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp; thread_temp.resize(omp_get_max_threads()); p_node_stats->resize(tree.param.num_nodes); dmlc::OMPException exc; #pragma omp parallel { exc.Run([&]() { const int tid = omp_get_thread_num(); thread_temp[tid].resize(tree.param.num_nodes, TStats()); for (unsigned int nid : qexpand_) { thread_temp[tid][nid] = TStats(); } }); } exc.Rethrow(); // setup position const auto ndata = static_cast<bst_omp_uint>(fmat.Info().num_row_); common::ParallelFor(ndata, [&](bst_omp_uint ridx) { const int nid = position_[ridx]; const int tid = omp_get_thread_num(); if (nid >= 0) { thread_temp[tid][nid].Add(gpair[ridx]); } }); // sum the per thread statistics together for (int nid : qexpand_) { TStats &s = (*p_node_stats)[nid]; s = TStats(); for (size_t tid = 0; tid < thread_temp.size(); ++tid) { s.Add(thread_temp[tid][nid]); } } } /*! \brief common helper data structure to build sketch */ struct SketchEntry { /*! \brief total sum of amount to be met */ double sum_total; /*! \brief statistics used in the sketch */ double rmin, wmin; /*! \brief last seen feature value */ bst_float last_fvalue; /*! \brief current size of sketch */ double next_goal; // pointer to the sketch to put things in common::WXQuantileSketch<bst_float, bst_float> *sketch; // initialize the space inline void Init(unsigned max_size) { next_goal = -1.0f; rmin = wmin = 0.0f; sketch->temp.Reserve(max_size + 1); sketch->temp.size = 0; } /*! 
* \brief push a new element to sketch * \param fvalue feature value, comes in sorted ascending order * \param w weight * \param max_size */ inline void Push(bst_float fvalue, bst_float w, unsigned max_size) { if (next_goal == -1.0f) { next_goal = 0.0f; last_fvalue = fvalue; wmin = w; return; } if (last_fvalue != fvalue) { double rmax = rmin + wmin; if (rmax >= next_goal && sketch->temp.size != max_size) { if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) { // push to sketch sketch->temp.data[sketch->temp.size] = common::WXQuantileSketch<bst_float, bst_float>:: Entry(static_cast<bst_float>(rmin), static_cast<bst_float>(rmax), static_cast<bst_float>(wmin), last_fvalue); CHECK_LT(sketch->temp.size, max_size) << "invalid maximum size max_size=" << max_size << ", stemp.size" << sketch->temp.size; ++sketch->temp.size; } if (sketch->temp.size == max_size) { next_goal = sum_total * 2.0f + 1e-5f; } else { next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size); } } else { if (rmax >= next_goal) { LOG(TRACKER) << "INFO: rmax=" << rmax << ", sum_total=" << sum_total << ", naxt_goal=" << next_goal << ", size=" << sketch->temp.size; } } rmin = rmax; wmin = w; last_fvalue = fvalue; } else { wmin += w; } } /*! \brief push final unfinished value to the sketch */ inline void Finalize(unsigned max_size) { double rmax = rmin + wmin; if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) { CHECK_LE(sketch->temp.size, max_size) << "Finalize: invalid maximum size, max_size=" << max_size << ", stemp.size=" << sketch->temp.size; // push to sketch sketch->temp.data[sketch->temp.size] = common::WXQuantileSketch<bst_float, bst_float>:: Entry(static_cast<bst_float>(rmin), static_cast<bst_float>(rmax), static_cast<bst_float>(wmin), last_fvalue); ++sketch->temp.size; } sketch->PushTemp(); } }; /*! \brief training parameter of tree grower */ TrainParam param_; /*! \brief queue of nodes to be expanded */ std::vector<int> qexpand_; /*! * \brief map active node to is working index offset in qexpand, * can be -1, which means the node is node actively expanding */ std::vector<int> node2workindex_; /*! * \brief position of each instance in the tree * can be negative, which means this position is no longer expanding * see also Decode/EncodePosition */ std::vector<int> position_; FeatureInteractionConstraintHost interaction_constraints_; private: inline void UpdateNode2WorkIndex(const RegTree &tree) { // update the node2workindex std::fill(node2workindex_.begin(), node2workindex_.end(), -1); node2workindex_.resize(tree.param.num_nodes); for (size_t i = 0; i < qexpand_.size(); ++i) { node2workindex_[qexpand_[i]] = static_cast<int>(i); } } }; } // namespace tree } // namespace xgboost #endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
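DecodePosition and SetEncodePosition above rely on a sign-bit trick: position_[ridx] holds the node id directly while a row is still active, and its bitwise complement (~nid, always negative for nid >= 0) once the row should be ignored by later statistics passes. A small standalone sketch of that convention:

// Minimal sketch of the ~nid encoding used by position_ (assumes nid >= 0, as in the updater).
#include <cassert>
#include <vector>

// A negative entry means "this row is parked at node ~entry and is no longer active".
inline int decode_position(int encoded) { return encoded < 0 ? ~encoded : encoded; }
inline bool is_active(int encoded) { return encoded >= 0; }

// Move a row to node nid while preserving its active/inactive flag, mirroring SetEncodePosition.
inline void set_encoded_position(std::vector<int> &position, int ridx, int nid)
{
    position[ridx] = (position[ridx] < 0) ? ~nid : nid;
}

inline void position_encoding_demo()
{
    std::vector<int> position = {0, ~0, 3};   // row 1 starts out inactive at node 0
    set_encoded_position(position, 1, 5);     // row 1 stays inactive, now parked at node 5
    assert(decode_position(position[1]) == 5);
    assert(!is_active(position[1]));
}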
GB_unaryop__lnot_uint32_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint32_int8 // op(A') function: GB_tran__lnot_uint32_int8 // C type: uint32_t // A type: int8_t // cast: uint32_t cij = (uint32_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int8_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ uint32_t z = (uint32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint32_int8 ( uint32_t *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint32_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
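When GB_DISABLE is false, GB_CAST_OP(p, p) in the apply loop expands to a load of the int8_t entry, a cast to uint32_t, and the logical-not operator. A sketch of what the expanded loop looks like (the function name is illustrative; the generated kernel above is GB_unop__lnot_uint32_int8):

// Expanded form of the generated Cx = lnot(cast(Ax)) loop.
#include <stdint.h>

static void lnot_uint32_from_int8_sketch(uint32_t *Cx, const int8_t *Ax,
                                         int64_t anz, int nthreads)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0; p < anz; p++) {
        int8_t aij = Ax[p];           // GB_GETA
        uint32_t x = (uint32_t) aij;  // GB_CASTING
        Cx[p] = !(x != 0);            // GB_OP
    }
}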
GB_unop__minv_int8_int8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__minv_int8_int8) // op(A') function: GB (_unop_tran__minv_int8_int8) // C type: int8_t // A type: int8_t // cast: int8_t cij = aij // unaryop: cij = GB_IMINV_SIGNED (aij, 8) #define GB_ATYPE \ int8_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 8) ; // casting #define GB_CAST(z, aij) \ int8_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int8_t z = aij ; \ Cx [pC] = GB_IMINV_SIGNED (z, 8) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__minv_int8_int8) ( int8_t *Cx, // Cx and Ax may be aliased const int8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; int8_t z = aij ; Cx [p] = GB_IMINV_SIGNED (z, 8) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int8_t aij = Ax [p] ; int8_t z = aij ; Cx [p] = GB_IMINV_SIGNED (z, 8) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__minv_int8_int8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
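Compared with the older generated kernel above it, this version adds a bitmap path: when Ab (A->b) is non-NULL, position p holds an entry only if Ab[p] is nonzero, and absent positions are skipped; the comment notes that A->b has already been memcpy'd into C->b. A reduced sketch of that skip pattern (the operator is left as a pass-through placeholder; the real kernel applies GB_IMINV_SIGNED):

// Bitmap-aware unary apply sketch: skip positions whose bitmap bit is clear.
#include <stdint.h>
#include <stddef.h>

static void apply_unop_bitmap_sketch(int8_t *Cx, const int8_t *Ax, const int8_t *Ab,
                                     int64_t anz, int nthreads)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0; p < anz; p++) {
        if (Ab != NULL && !Ab[p]) continue;  // position p holds no entry
        Cx[p] = Ax[p];                       // placeholder; the kernel applies GB_IMINV_SIGNED(aij, 8)
    }
}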
dataset.h
#ifndef LIGHTGBM_DATASET_H_ #define LIGHTGBM_DATASET_H_ #include <LightGBM/utils/random.h> #include <LightGBM/utils/text_reader.h> #include <LightGBM/meta.h> #include <LightGBM/config.h> #include <LightGBM/feature_group.h> #include <vector> #include <utility> #include <functional> #include <string> #include <unordered_set> #include <mutex> namespace LightGBM { /*! \brief forward declaration */ class DatasetLoader; /*! * \brief This class is used to store some meta(non-feature) data for training data, * e.g. labels, weights, initial scores, qurey level informations. * * Some details: * 1. Label, used for traning. * 2. Weights, weighs of records, optional * 3. Query Boundaries, necessary for lambdarank. * The documents of i-th query is in [ query_boundarise[i], query_boundarise[i+1] ) * 4. Query Weights, auto calculate by weights and query_boundarise(if both of them are existed) * the weight for i-th query is sum(query_boundarise[i] , .., query_boundarise[i+1]) / (query_boundarise[i + 1] - query_boundarise[i+1]) * 5. Initial score. optional. if exsitng, the model will boost from this score, otherwise will start from 0. */ class Metadata { public: /*! * \brief Null costructor */ Metadata(); /*! * \brief Initialization will load qurey level informations, since it is need for sampling data * \param data_filename Filename of data * \param init_score_filename Filename of initial score */ void Init(const char* data_filename); /*! * \brief init as subset * \param metadata Filename of data * \param used_indices * \param num_used_indices */ void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices); /*! * \brief Initial with binary memory * \param memory Pointer to memory */ void LoadFromMemory(const void* memory); /*! \brief Destructor */ ~Metadata(); /*! * \brief Initial work, will allocate space for label, weight(if exists) and query(if exists) * \param num_data Number of training data * \param weight_idx Index of weight column, < 0 means doesn't exists * \param query_idx Index of query id column, < 0 means doesn't exists */ void Init(data_size_t num_data, int weight_idx, int query_idx); /*! * \brief Partition label by used indices * \param used_indices Indice of local used */ void PartitionLabel(const std::vector<data_size_t>& used_indices); /*! * \brief Partition meta data according to local used indices if need * \param num_all_data Number of total training data, including other machines' data on parallel learning * \param used_data_indices Indices of local used training data */ void CheckOrPartition(data_size_t num_all_data, const std::vector<data_size_t>& used_data_indices); void SetLabel(const float* label, data_size_t len); void SetWeights(const float* weights, data_size_t len); void SetQuery(const data_size_t* query, data_size_t len); /*! * \brief Set initial scores * \param init_score Initial scores, this class will manage memory for init_score. */ void SetInitScore(const double* init_score, data_size_t len); /*! * \brief Save binary data to file * \param file File want to write */ void SaveBinaryToFile(FILE* file) const; /*! * \brief Get sizes in byte of this object */ size_t SizesInByte() const; /*! * \brief Get pointer of label * \return Pointer of label */ inline const float* label() const { return label_.data(); } /*! * \brief Set label for one record * \param idx Index of this record * \param value Label value of this record */ inline void SetLabelAt(data_size_t idx, float value) { label_[idx] = value; } /*! 
* \brief Set Weight for one record * \param idx Index of this record * \param value Weight value of this record */ inline void SetWeightAt(data_size_t idx, float value) { weights_[idx] = value; } /*! * \brief Set Query Id for one record * \param idx Index of this record * \param value Query Id value of this record */ inline void SetQueryAt(data_size_t idx, data_size_t value) { queries_[idx] = static_cast<data_size_t>(value); } /*! * \brief Get weights, if not exists, will return nullptr * \return Pointer of weights */ inline const float* weights() const { if (!weights_.empty()) { return weights_.data(); } else { return nullptr; } } /*! * \brief Get data boundaries on queries, if not exists, will return nullptr * we assume data will order by query, * the interval of [query_boundaris[i], query_boundaris[i+1]) * is the data indices for query i. * \return Pointer of data boundaries on queries */ inline const data_size_t* query_boundaries() const { if (!query_boundaries_.empty()) { return query_boundaries_.data(); } else { return nullptr; } } /*! * \brief Get Number of queries * \return Number of queries */ inline data_size_t num_queries() const { return num_queries_; } /*! * \brief Get weights for queries, if not exists, will return nullptr * \return Pointer of weights for queries */ inline const float* query_weights() const { if (!query_weights_.empty()) { return query_weights_.data(); } else { return nullptr; } } /*! * \brief Get initial scores, if not exists, will return nullptr * \return Pointer of initial scores */ inline const double* init_score() const { if (!init_score_.empty()) { return init_score_.data(); } else { return nullptr; } } /*! * \brief Get size of initial scores */ inline int64_t num_init_score() const { return num_init_score_; } /*! \brief Disable copy */ Metadata& operator=(const Metadata&) = delete; /*! \brief Disable copy */ Metadata(const Metadata&) = delete; private: /*! \brief Load initial scores from file */ void LoadInitialScore(); /*! \brief Load wights from file */ void LoadWeights(); /*! \brief Load query boundaries from file */ void LoadQueryBoundaries(); /*! \brief Load query wights */ void LoadQueryWeights(); /*! \brief Filename of current data */ const char* data_filename_; /*! \brief Number of data */ data_size_t num_data_; /*! \brief Number of weights, used to check correct weight file */ data_size_t num_weights_; /*! \brief Label data */ std::vector<float> label_; /*! \brief Weights data */ std::vector<float> weights_; /*! \brief Query boundaries */ std::vector<data_size_t> query_boundaries_; /*! \brief Query weights */ std::vector<float> query_weights_; /*! \brief Number of querys */ data_size_t num_queries_; /*! \brief Number of Initial score, used to check correct weight file */ int64_t num_init_score_; /*! \brief Initial score */ std::vector<double> init_score_; /*! \brief Queries data */ std::vector<data_size_t> queries_; /*! \brief mutex for threading safe call */ std::mutex mutex_; bool weight_load_from_file_; bool query_load_from_file_; bool init_score_load_from_file_; }; /*! \brief Interface for Parser */ class Parser { public: /*! \brief virtual destructor */ virtual ~Parser() {} /*! * \brief Parse one line with label * \param str One line record, string format, should end with '\0' * \param out_features Output columns, store in (column_idx, values) * \param out_label Label will store to this if exists */ virtual void ParseOneLine(const char* str, std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0; /*! 
* \brief Create a object of parser, will auto choose the format depend on file * \param filename One Filename of data * \param num_features Pass num_features of this data file if you know, <=0 means don't know * \param label_idx index of label column * \return Object of parser */ static Parser* CreateParser(const char* filename, bool has_header, int num_features, int label_idx); }; /*! \brief The main class of data set, * which are used to traning or validation */ class Dataset { public: friend DatasetLoader; LIGHTGBM_EXPORT Dataset(); LIGHTGBM_EXPORT Dataset(data_size_t num_data); void Construct( std::vector<std::unique_ptr<BinMapper>>& bin_mappers, const std::vector<std::vector<int>>& sample_non_zero_indices, size_t total_sample_cnt, const IOConfig& io_config); /*! \brief Destructor */ LIGHTGBM_EXPORT ~Dataset(); LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const { if (num_features_ != other.num_features_) { return false; } if (num_total_features_ != other.num_total_features_) { return false; } if (label_idx_ != other.label_idx_) { return false; } for (int i = 0; i < num_features_; ++i) { if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) { return false; } } return true; } inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) { if (is_finish_load_) { return; } for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) { int feature_idx = used_feature_map_[i]; if (feature_idx >= 0) { const int group = feature2group_[feature_idx]; const int sub_feature = feature2subfeature_[feature_idx]; feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]); } } } inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) { if (is_finish_load_) { return; } for (auto& inner_data : feature_values) { if (inner_data.first >= num_total_features_) { continue; } int feature_idx = used_feature_map_[inner_data.first]; if (feature_idx >= 0) { const int group = feature2group_[feature_idx]; const int sub_feature = feature2subfeature_[feature_idx]; feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second); } } } inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) { feature_groups_[group]->PushData(tid, sub_feature, row_idx, value); } inline int RealFeatureIndex(int fidx) const { return real_feature_idx_[fidx]; } inline int InnerFeatureIndex(int col_idx) const { return used_feature_map_[col_idx]; } inline int Feature2Group(int feature_idx) const { return feature2group_[feature_idx]; } inline int Feture2SubFeature(int feature_idx) const { return feature2subfeature_[feature_idx]; } inline uint64_t NumTotalBin() const { return group_bin_boundaries_.back(); } void ReSize(data_size_t num_data); void CopySubset(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data); LIGHTGBM_EXPORT void FinishLoad(); LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element); LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element); LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element); LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr); LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** 
out_ptr); LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr); /*! * \brief Save current dataset into binary file, will save to "filename.bin" */ LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename); LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset); LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset); void ConstructHistograms( const std::vector<int8_t>& is_feature_used, const data_size_t* data_indices, data_size_t num_data, int leaf_idx, std::vector<std::unique_ptr<OrderedBin>>& ordered_bins, const score_t* gradients, const score_t* hessians, score_t* ordered_gradients, score_t* ordered_hessians, HistogramBinEntry* histogram_data) const; void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, data_size_t num_data, HistogramBinEntry* data) const; inline data_size_t Split( int feature, uint32_t threshold, data_size_t* data_indices, data_size_t num_data, data_size_t* lte_indices, data_size_t* gt_indices) const { const int group = feature2group_[feature]; const int sub_feature = feature2subfeature_[feature]; return feature_groups_[group]->Split(sub_feature, threshold, data_indices, num_data, lte_indices, gt_indices); } inline int SubFeatureBinOffset(int i) const { const int sub_feature = feature2subfeature_[i]; if (sub_feature == 0) { return 1; } else { return 0; } } inline int FeatureNumBin(int i) const { const int group = feature2group_[i]; const int sub_feature = feature2subfeature_[i]; return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin(); } inline const BinMapper* FeatureBinMapper(int i) const { const int group = feature2group_[i]; const int sub_feature = feature2subfeature_[i]; return feature_groups_[group]->bin_mappers_[sub_feature].get(); } inline BinIterator* FeatureIterator(int i) const { const int group = feature2group_[i]; const int sub_feature = feature2subfeature_[i]; return feature_groups_[group]->SubFeatureIterator(sub_feature); } inline double RealThreshold(int i, uint32_t threshold) const { const int group = feature2group_[i]; const int sub_feature = feature2subfeature_[i]; return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold); } inline void CreateOrderedBins(std::vector<std::unique_ptr<OrderedBin>>* ordered_bins) const { ordered_bins->resize(num_groups_); #pragma omp parallel for schedule(guided) for (int i = 0; i < num_groups_; ++i) { ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin()); } } /*! * \brief Get meta data pointer * \return Pointer of meta data */ inline const Metadata& metadata() const { return metadata_; } /*! \brief Get Number of used features */ inline int num_features() const { return num_features_; } /*! \brief Get Number of total features */ inline int num_total_features() const { return num_total_features_; } /*! \brief Get the index of label column */ inline int label_idx() const { return label_idx_; } /*! 
\brief Get names of current data set */ inline const std::vector<std::string>& feature_names() const { return feature_names_; } inline void set_feature_names(const std::vector<std::string>& feature_names) { if (feature_names.size() != static_cast<size_t>(num_total_features_)) { Log::Warning("size of feature_names error, should equal with total number of features"); return; } feature_names_ = std::vector<std::string>(feature_names); } inline std::vector<std::string> feature_infos() const { std::vector<std::string> bufs; for (int i = 0; i < num_total_features_; i++) { int fidx = used_feature_map_[i]; if (fidx == -1) { bufs.push_back("none"); } else { const auto bin_mapper = FeatureBinMapper(fidx); bufs.push_back(bin_mapper->bin_info()); } } return bufs; } /*! \brief Get Number of data */ inline data_size_t num_data() const { return num_data_; } /*! \brief Disable copy */ Dataset& operator=(const Dataset&) = delete; /*! \brief Disable copy */ Dataset(const Dataset&) = delete; private: const char* data_filename_; /*! \brief Store used features */ std::vector<std::unique_ptr<FeatureGroup>> feature_groups_; /*! \brief Mapper from real feature index to used index*/ std::vector<int> used_feature_map_; /*! \brief Number of used features*/ int num_features_; /*! \brief Number of total features*/ int num_total_features_; /*! \brief Number of total data*/ data_size_t num_data_; /*! \brief Store some label level data*/ Metadata metadata_; /*! \brief index of label column */ int label_idx_ = 0; /*! \brief store feature names */ std::vector<std::string> feature_names_; /*! \brief store feature names */ static const char* binary_file_token; int num_groups_; std::vector<int> real_feature_idx_; std::vector<int> feature2group_; std::vector<int> feature2subfeature_; std::vector<uint64_t> group_bin_boundaries_; std::vector<int> group_feature_start_; std::vector<int> group_feature_cnt_; bool is_finish_load_; }; } // namespace LightGBM #endif // LightGBM_DATA_H_
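The Metadata comments describe the query layout used for ranking: the documents of query i occupy the index range [query_boundaries[i], query_boundaries[i+1]), and the per-query weight appears to be the mean of the record weights over that range (the comment's formula is garbled, so this is a hedged reading). A small sketch of that computation, with plain int in place of data_size_t:

// Per-query mean weight from record weights and query boundaries (hedged reading
// of the Metadata comment; not taken verbatim from the LightGBM implementation).
#include <vector>

static std::vector<float> mean_query_weights(const std::vector<float> &weights,
                                             const std::vector<int> &qb)
{
    std::vector<float> qw;
    for (size_t i = 0; i + 1 < qb.size(); ++i) {
        float sum = 0.0f;
        for (int j = qb[i]; j < qb[i + 1]; ++j) sum += weights[j];   // records of query i
        qw.push_back(sum / static_cast<float>(qb[i + 1] - qb[i]));
    }
    return qw;
}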
rawBLAKE2_512_fmt_plug.c
/* * This file is part of John the Ripper password cracker, * Copyright (c) 2012 by Solar Designer * based on rawMD4_fmt.c code, with trivial changes by groszek. * * Re-used for BLAKE2 by Dhiru Kholia (dhiru at openwall.com) */ #if FMT_EXTERNS_H extern struct fmt_main fmt_rawBLAKE2; #elif FMT_REGISTERS_H john_register_one(&fmt_rawBLAKE2); #else #include "arch.h" #include "blake2.h" #include "params.h" #include "common.h" #include "formats.h" #include <string.h> #if !FAST_FORMATS_OMP #undef _OPENMP #endif #ifdef _OPENMP #ifndef OMP_SCALE #define OMP_SCALE 2048 #endif #include <omp.h> #endif #include "memdbg.h" #define FORMAT_LABEL "Raw-Blake2" #define FORMAT_NAME "" #if defined(__AVX__) #define ALGORITHM_NAME "128/128 AVX" #elif defined(__XOP__) #define ALGORITHM_NAME "128/128 XOP" #elif defined(__SSE4_1__) #define ALGORITHM_NAME "128/128 SSE4.1" #elif defined(__SSSE3__) #define ALGORITHM_NAME "128/128 SSSE3" #elif defined(__SSE2__) #define ALGORITHM_NAME "128/128 SSE2" #else #define ALGORITHM_NAME "32/" ARCH_BITS_STR #endif #define FORMAT_TAG "$BLAKE2$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define CIPHERTEXT_LENGTH 128 #define BINARY_SIZE 64 #define SALT_SIZE 0 #define BINARY_ALIGN 4 #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests tests[] = { {"4245af08b46fbb290222ab8a68613621d92ce78577152d712467742417ebc1153668f1c9e1ec1e152a32a9c242dc686d175e087906377f0c483c5be2cb68953e", "blake2"}, {"$BLAKE2$021ced8799296ceca557832ab941a50b4a11f83478cf141f51f933f653ab9fbcc05a037cddbed06e309bf334942c4e58cdf1a46e237911ccd7fcf9787cbc7fd0", "hello world"}, /* hash generated by multiple versions (in C and Go) of b2sum program */ {"$BLAKE2$1f7d9b7c9a90f7bfc66e52b69f3b6c3befbd6aee11aac860e99347a495526f30c9e51f6b0db01c24825092a09dd1a15740f0ade8def87e60c15da487571bcef7", "verystrongandlongpassword"}, /* test vectors from Wikipedia */ {"$BLAKE2$a8add4bdddfd93e4877d2746e62817b116364a1fa7bc148d95090bc7333b3673f82401cf7aa2e4cb1ecd90296e3f14cb5413f8ed77be73045b13914cdcd6a918", "The quick brown fox jumps over the lazy dog"}, {"$BLAKE2$786a02f742015903c6c6fd852552d272912f4740e15847618a86e217f71f5419d25e1031afee585313896444934eb04b903a685b1448b755d56f701afe9be2ce", ""}, {"$BLAKE2$da40d8f48e9e7560c56e2b92205aed6342a276994ca0287ea4f8c1423ef07d519ecb4bf8668c118379a36be8aa6c077bbc6213fa81fbb332fad9d8a19a7756e6", "UPPERCASE"}, {"$BLAKE2$f5ab8bafa6f2f72b431188ac38ae2de7bb618fb3d38b6cbf639defcdd5e10a86b22fccff571da37e42b23b80b657ee4d936478f582280a87d6dbb1da73f5c47d", "123456789"}, {NULL} }; static int (*saved_len); static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out) [(BINARY_SIZE + sizeof(ARCH_WORD_32) - 1) / sizeof(ARCH_WORD_32)]; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); MEM_FREE(saved_len); } static int valid(char *ciphertext, struct fmt_main *self) { char *p, *q; p = ciphertext; if (!strncmp(p, FORMAT_TAG, FORMAT_TAG_LEN)) p += FORMAT_TAG_LEN; q = p; while (atoi16[ARCH_INDEX(*q)] != 0x7F) 
q++; return !*q && q - p == CIPHERTEXT_LENGTH; } static char *split(char *ciphertext, int index, struct fmt_main *pFmt) { static char out[FORMAT_TAG_LEN + CIPHERTEXT_LENGTH + 1]; if (!strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) ciphertext += FORMAT_TAG_LEN; memcpy(out, FORMAT_TAG, FORMAT_TAG_LEN); memcpy(out + FORMAT_TAG_LEN, ciphertext, CIPHERTEXT_LENGTH + 1); strlwr(out + FORMAT_TAG_LEN); return out; } static void *get_binary(char *ciphertext) { static unsigned char *out; char *p; int i; if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD); p = ciphertext + FORMAT_TAG_LEN; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_key(char *key, int index) { int len = strlen(key); saved_len[index] = len; if (len > PLAINTEXT_LENGTH) len = saved_len[index] = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, len); } static char *get_key(int index) { saved_key[index][saved_len[index]] = 0; return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { (void)blake2b((uint8_t *)crypt_out[index], saved_key[index], NULL, 64, saved_len[index], 0); } return count; } static int cmp_all(void *binary, int count) { int index = 0; #ifdef _OPENMP for (; index < count; index++) #endif if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_rawBLAKE2 = { { FORMAT_LABEL, FORMAT_NAME, "BLAKE2b 512 " ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #ifdef _OPENMP FMT_OMP | FMT_OMP_BAD | #endif FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE, { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, get_binary, fmt_default_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
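get_binary() turns the 128 hex digits that follow the $BLAKE2$ tag into the 64-byte digest using John's atoi16 lookup table. An equivalent standalone sketch with an ordinary nibble decoder (the function names below are illustrative, not part of the format plugin):

// Decode the 128 hex chars after "$BLAKE2$" into 64 raw bytes.
#include <string.h>

static int hex_nibble(char c)
{
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    if (c >= 'A' && c <= 'F') return c - 'A' + 10;
    return -1;
}

static int blake2_tag_to_binary(const char *ciphertext, unsigned char out[64])
{
    const char *p = ciphertext;
    if (strncmp(p, "$BLAKE2$", 8) == 0) p += 8;   // optional format tag
    for (int i = 0; i < 64; i++) {
        const int hi = hex_nibble(p[2 * i]);
        const int lo = hex_nibble(p[2 * i + 1]);
        if (hi < 0 || lo < 0) return 0;           // not a valid hash string
        out[i] = (unsigned char) ((hi << 4) | lo);
    }
    return 1;
}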
GB_unop__one_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__one_fp64_fp64 // op(A') function: GB_unop_tran__one_fp64_fp64 // C type: double // A type: double // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CAST(z, aij) \ ; ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ ; ; \ /* Cx [pC] = op (cast (aij)) */ \ ; ; \ Cx [pC] = 1 ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__one_fp64_fp64 ( double *Cx, // Cx and Ax may be aliased const double *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__one_fp64_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
ColorUtils.h
#ifndef CAPTURE3_COLOR_UTILS_H
#define CAPTURE3_COLOR_UTILS_H

#include <cmath>
#include <vector>
#include <omp.h>

namespace Capture3
{

    static double applyGamma(const double value)
    {
        return value > 0.0031308 ? 1.055 * std::pow(value, 1.0 / 2.4) - 0.055 : 12.92 * value;
    }

    static void applyGamma(double *data, const unsigned int bytes)
    {
        #pragma omp parallel for schedule(static)
        for (unsigned int i = 0; i < bytes; i++) {
            double color = data[i];
            color = color > 0.0031308 ? 1.055 * std::pow(color, 1.0 / 2.4) - 0.055 : 12.92 * color;
            data[i] = color;
        }
    }

    static double applyInverseGamma(const double value)
    {
        return value > 0.04045 ? std::pow((value + 0.055) / 1.055, 2.4) : value / 12.92;
    }

    static void applyInverseGamma(double *data, const unsigned int bytes)
    {
        #pragma omp parallel for schedule(static)
        for (unsigned int i = 0; i < bytes; i++) {
            double color = data[i];
            color = color > 0.04045 ? std::pow((color + 0.055) / 1.055, 2.4) : color / 12.92;
            data[i] = color;
        }
    }

    static unsigned char to8Bit(const double value)
    {
        auto result = (int) lround(value * 255.0);
        result = result < 0 ? 0 : result;
        result = result > 255 ? 255 : result;
        return (unsigned char) result;
    }

    static void toGreyscale(
        const double *input, double *output, const unsigned int size,
        const double minR = 0, const double minG = 0, const double minB = 0,
        const double maxR = 1, const double maxG = 1, const double maxB = 1
    )
    {
        // Calculate normalized greyscale image
        #pragma omp parallel for schedule(static)
        for (unsigned int i = 0; i < size; i++) {
            const unsigned int index = i * 3;
            const double colorR = (input[index + 0] - minR) / (maxR - minR);
            const double colorG = (input[index + 1] - minG) / (maxG - minG);
            const double colorB = (input[index + 2] - minB) / (maxB - minB);
            const double luma = (0.2126 * colorR) + (0.7152 * colorG) + (0.0722 * colorB);
            output[i] = luma;
        }
    }
}

#endif // CAPTURE3_COLOR_UTILS_H
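applyGamma and applyInverseGamma implement the two halves of the sRGB transfer curve, so encoding followed by decoding should return the original linear value up to rounding. A round-trip sanity sketch using the scalar overloads (it assumes the header above is included as ColorUtils.h):

// Round-trip check for the sRGB transfer pair defined in ColorUtils.h.
#include <cassert>
#include <cmath>
#include "ColorUtils.h"   // assumed include path for the header above

static void gamma_round_trip_check()
{
    for (double v = 0.0; v <= 1.0; v += 0.1) {
        const double encoded = Capture3::applyGamma(v);          // linear -> sRGB
        const double decoded = Capture3::applyInverseGamma(encoded);  // sRGB -> linear
        assert(std::fabs(decoded - v) < 1e-9);
    }
}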
cpl_fft-test.c
/* * This file is part of the ESO Common Pipeline Library * Copyright (C) 2001-2017 European Southern Observatory * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifdef HAVE_CONFIG_H # include <config.h> #endif /*----------------------------------------------------------------------------- Includes -----------------------------------------------------------------------------*/ #include "cpl_fft.h" #include "cpl_test.h" #include "cpl_image_io_impl.h" /*----------------------------------------------------------------------------- Defines -----------------------------------------------------------------------------*/ #ifndef IMAGESZ #define IMAGESZ 10 #endif #ifndef IMAGENZ #define IMAGENZ 5 #endif #ifndef CONSTANT #define CONSTANT 200 #endif /*----------------------------------------------------------------------------*/ /** * @defgroup cpl_fft_test Unit tests of the CPL FFT functions */ /*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------- Private Function prototypes -----------------------------------------------------------------------------*/ static void cpl_fft_image_test(void); #if defined CPL_FFTWF_INSTALLED || defined CPL_FFTW_INSTALLED static void cpl_fft_image_test_one(cpl_size, cpl_size, cpl_type); static void cpl_fft_imagelist_test_one(cpl_size, cpl_size, cpl_size, cpl_type); static void cpl_fft_imagelist_test_image(cpl_size, cpl_size, cpl_size, cpl_type); static void cpl_fft_image_test_correlate(cpl_size, cpl_size, cpl_type); #endif /*----------------------------------------------------------------------------*/ /** @brief Unit tests of cpl_fft module **/ /*----------------------------------------------------------------------------*/ int main(void) { const cpl_type imtypes[] = {CPL_TYPE_DOUBLE, CPL_TYPE_FLOAT}; cpl_boolean do_bench; int i; cpl_test_init(PACKAGE_BUGREPORT, CPL_MSG_WARNING); do_bench = cpl_msg_get_level() <= CPL_MSG_INFO; /* Insert tests below */ #ifdef _OPENMP #pragma omp parallel for private(i) #endif for (i = 0; i < 2; i++) { const cpl_size mz = do_bench ? 
10 * IMAGENZ : IMAGENZ; cpl_size nz; /* Collect wisdom */ #ifdef CPL_FFTWF_INSTALLED if (imtypes[i] == CPL_TYPE_FLOAT) { cpl_fft_image_test_one( 16, 16, imtypes[i]); cpl_fft_image_test_one( 4, 32, imtypes[i]); cpl_fft_image_test_one( 4, 4, imtypes[i]); cpl_fft_image_test_one( 2, 128, imtypes[i]); cpl_fft_image_test_one(128, 2, imtypes[i]); cpl_fft_image_test_one(128, 1, imtypes[i]); cpl_fft_image_test_one( 1, 128, imtypes[i]); cpl_fft_image_test_correlate( 16, 16, imtypes[i]); cpl_fft_image_test_correlate( 64, 128, imtypes[i]); cpl_fft_image_test_correlate(128, 64, imtypes[i]); cpl_fft_image_test_correlate(128, 128, imtypes[i]); if (do_bench) { cpl_fft_image_test_one(256, 256, imtypes[i]); cpl_fft_image_test_correlate(512, 512, imtypes[i]); } } #endif #ifdef CPL_FFTW_INSTALLED if (imtypes[i] == CPL_TYPE_DOUBLE) { cpl_fft_image_test_one( 16, 16, imtypes[i]); cpl_fft_image_test_one( 32, 4, imtypes[i]); cpl_fft_image_test_one( 4, 4, imtypes[i]); cpl_fft_image_test_one( 2, 128, imtypes[i]); cpl_fft_image_test_one(128, 2, imtypes[i]); cpl_fft_image_test_one(128, 1, imtypes[i]); cpl_fft_image_test_one( 1, 128, imtypes[i]); cpl_fft_image_test_correlate( 16, 16, imtypes[i]); cpl_fft_image_test_correlate( 64, 128, imtypes[i]); cpl_fft_image_test_correlate(128, 64, imtypes[i]); cpl_fft_image_test_correlate(128, 128, imtypes[i]); if (do_bench) { cpl_fft_image_test_one(256, 256, imtypes[i]); cpl_fft_image_test_correlate(512, 512, imtypes[i]); } } #endif for (nz = 1; nz <= 1 + mz; nz+= mz) { #ifdef CPL_FFTWF_INSTALLED if (imtypes[i] == CPL_TYPE_FLOAT) { cpl_fft_imagelist_test_image( 16, 16, nz, imtypes[i]); cpl_fft_imagelist_test_image( 4, 32, nz, imtypes[i]); cpl_fft_imagelist_test_image( 4, 4, nz, imtypes[i]); cpl_fft_imagelist_test_image( 2, 128, nz, imtypes[i]); cpl_fft_imagelist_test_image(128, 2, nz, imtypes[i]); cpl_fft_imagelist_test_image(128, 1, nz, imtypes[i]); cpl_fft_imagelist_test_image( 1, 128, nz, imtypes[i]); if (do_bench) { cpl_fft_imagelist_test_image(256, 256, nz, imtypes[i]); } } #endif #ifdef CPL_FFTW_INSTALLED if (imtypes[i] == CPL_TYPE_DOUBLE) { cpl_fft_imagelist_test_image( 16, 16, nz, imtypes[i]); cpl_fft_imagelist_test_image( 32, 4, nz, imtypes[i]); cpl_fft_imagelist_test_image( 4, 4, nz, imtypes[i]); cpl_fft_imagelist_test_image( 2, 128, nz, imtypes[i]); cpl_fft_imagelist_test_image(128, 2, nz, imtypes[i]); cpl_fft_imagelist_test_image(128, 1, nz, imtypes[i]); cpl_fft_imagelist_test_image( 1, 128, nz, imtypes[i]); if (do_bench) { cpl_fft_imagelist_test_image(256, 256, nz, imtypes[i]); } } #endif } } cpl_fft_image_test(); /* End of tests */ return cpl_test_end(0); } /*----------------------------------------------------------------------------*/ /** @internal @brief Unit tests of the function @see cpl_fft_image() **/ /*----------------------------------------------------------------------------*/ static void cpl_fft_image_test(void) { const cpl_type imtypes[] = {CPL_TYPE_DOUBLE, CPL_TYPE_FLOAT, CPL_TYPE_INT, CPL_TYPE_DOUBLE_COMPLEX, CPL_TYPE_FLOAT_COMPLEX}; int ityp; int nok = 0; /* Number of successful calls */ /* Insert tests below */ /* Iterate through all pixel types */ for (ityp = 0; ityp < (int)(sizeof(imtypes)/sizeof(imtypes[0])); ityp++) { const cpl_type imtype = imtypes[ityp]; int ityp2; cpl_image * img1 = cpl_image_new(IMAGESZ, IMAGESZ, imtype); cpl_image * img3 = cpl_image_new(IMAGESZ, IMAGESZ, imtype); cpl_error_code error; /* Various error checks */ error = cpl_fft_image(img3, NULL, CPL_FFT_FORWARD); cpl_test_eq_error(error, CPL_ERROR_NULL_INPUT); error = 
cpl_fft_image(NULL, img3, CPL_FFT_FORWARD); cpl_test_eq_error(error, CPL_ERROR_NULL_INPUT); error = cpl_fft_image(img3, img3, CPL_FFT_FORWARD | CPL_FFT_BACKWARD); if (imtype & CPL_TYPE_COMPLEX) { if (imtype & CPL_TYPE_DOUBLE) { #ifdef CPL_FFTW_INSTALLED cpl_test_eq_error(error, CPL_ERROR_ILLEGAL_INPUT); #else cpl_test_eq_error(error, CPL_ERROR_UNSUPPORTED_MODE); #endif } else if (imtype & CPL_TYPE_FLOAT) { #ifdef CPL_FFTWF_INSTALLED cpl_test_eq_error(error, CPL_ERROR_ILLEGAL_INPUT); #else cpl_test_eq_error(error, CPL_ERROR_UNSUPPORTED_MODE); #endif } else { cpl_test_eq_error(error, CPL_ERROR_ILLEGAL_INPUT); } } else if (imtype == CPL_TYPE_DOUBLE) { #ifdef CPL_FFTW_INSTALLED cpl_test_eq_error(error, CPL_ERROR_TYPE_MISMATCH); #else cpl_test_eq_error(error, CPL_ERROR_UNSUPPORTED_MODE); #endif } else if (imtype == CPL_TYPE_FLOAT) { #ifdef CPL_FFTWF_INSTALLED cpl_test_eq_error(error, CPL_ERROR_TYPE_MISMATCH); #else cpl_test_eq_error(error, CPL_ERROR_UNSUPPORTED_MODE); #endif } else { cpl_test_eq_error(error, CPL_ERROR_TYPE_MISMATCH); } if (!(imtype & CPL_TYPE_COMPLEX)) { error = cpl_image_fill_noise_uniform(img1, 0, CONSTANT); cpl_test_eq_error(error, CPL_ERROR_NONE); } for (ityp2 = 0; ityp2 < (int)(sizeof(imtypes)/sizeof(imtypes[0])); ityp2++) { const cpl_type imtype2 = imtypes[ityp2]; cpl_image * img2 = cpl_image_new(IMAGESZ, IMAGESZ, imtype2); const cpl_image * imgin = img3; cpl_image * imgout = img2; int idir; /* No scaling on the forward transform has no effect */ unsigned mode = CPL_FFT_FORWARD | CPL_FFT_NOSCALE; error = cpl_image_copy(img3, img1, 1, 1); cpl_test_eq_error(error, CPL_ERROR_NONE); /* Transform first forward, then backward */ /* Those two iterations will succeed iff the input image and output image have matching non-integer precision */ for (idir = 0; idir < 2; idir++, mode = CPL_FFT_BACKWARD, imgin = img2, imgout = img3) { error = cpl_fft_image(imgout, imgin, mode); if (cpl_image_get_type(img3) == CPL_TYPE_FLOAT && cpl_image_get_type(img2) == (CPL_TYPE_FLOAT | CPL_TYPE_COMPLEX)) { #ifdef CPL_FFTWF_INSTALLED cpl_test_eq_error(CPL_ERROR_NONE, error); nok++; if (mode == CPL_FFT_BACKWARD) { /* Transformed forward and backwards, so the result should equal the original input */ cpl_test_image_abs(img1, img3, 3.0 * FLT_EPSILON * CONSTANT); } #else cpl_test_eq_error(CPL_ERROR_UNSUPPORTED_MODE, error); #endif } else if (cpl_image_get_type(img3) == CPL_TYPE_DOUBLE && cpl_image_get_type(img2) == (CPL_TYPE_DOUBLE | CPL_TYPE_COMPLEX)) { #ifdef CPL_FFTW_INSTALLED cpl_test_eq_error(CPL_ERROR_NONE, error); nok++; if (mode == CPL_FFT_BACKWARD) { /* Transformed forward and backwards, so the result should equal the original input */ cpl_test_image_abs(img1, img3, 5.0 * DBL_EPSILON * CONSTANT); } #else cpl_test_eq_error(CPL_ERROR_UNSUPPORTED_MODE, error); #endif } else if (cpl_image_get_type(img3) == (CPL_TYPE_DOUBLE | CPL_TYPE_COMPLEX) && cpl_image_get_type(img2) == (CPL_TYPE_DOUBLE | CPL_TYPE_COMPLEX)) { #ifdef CPL_FFTW_INSTALLED cpl_test_eq_error(CPL_ERROR_NONE, error); #else cpl_test_eq_error(CPL_ERROR_UNSUPPORTED_MODE, error); #endif } else if (cpl_image_get_type(img3) == (CPL_TYPE_FLOAT | CPL_TYPE_COMPLEX) && cpl_image_get_type(img2) == (CPL_TYPE_FLOAT | CPL_TYPE_COMPLEX)) { #ifdef CPL_FFTWF_INSTALLED cpl_test_eq_error(CPL_ERROR_NONE, error); #else cpl_test_eq_error(CPL_ERROR_UNSUPPORTED_MODE, error); #endif } else if ((imtype & CPL_TYPE_INT) || (imtype2 & CPL_TYPE_INT)) { cpl_test_eq_error(CPL_ERROR_TYPE_MISMATCH, error); } else if ((imtype & (CPL_TYPE_FLOAT | CPL_TYPE_DOUBLE | 
CPL_TYPE_INT)) != (imtype2 & (CPL_TYPE_FLOAT | CPL_TYPE_DOUBLE | CPL_TYPE_INT))) { cpl_test_eq_error(CPL_ERROR_TYPE_MISMATCH, error); } else if (!((imtype & CPL_TYPE_COMPLEX) ^ (imtype2 & CPL_TYPE_COMPLEX))) { /* None or both are complex */ if (imtype == CPL_TYPE_DOUBLE) { #ifdef CPL_FFTW_INSTALLED cpl_test_eq_error(CPL_ERROR_TYPE_MISMATCH, error); #else cpl_test_eq_error(CPL_ERROR_UNSUPPORTED_MODE, error); #endif } else if (imtype == CPL_TYPE_FLOAT) { #ifdef CPL_FFTWF_INSTALLED cpl_test_eq_error(CPL_ERROR_TYPE_MISMATCH, error); #else cpl_test_eq_error(CPL_ERROR_UNSUPPORTED_MODE, error); #endif } else { cpl_test_eq_error(CPL_ERROR_TYPE_MISMATCH, error); } } else if (imtype & CPL_TYPE_DOUBLE) { #ifdef CPL_FFTW_INSTALLED cpl_test_eq_error(CPL_ERROR_TYPE_MISMATCH, error); #else cpl_test_eq_error(CPL_ERROR_UNSUPPORTED_MODE, error); #endif } else if (imtype & CPL_TYPE_FLOAT) { #ifdef CPL_FFTWF_INSTALLED cpl_test_eq_error(CPL_ERROR_TYPE_MISMATCH, error); #else cpl_test_eq_error(CPL_ERROR_UNSUPPORTED_MODE, error); #endif } else { cpl_test_eq_error(CPL_ERROR_TYPE_MISMATCH, error); } } cpl_image_delete(img2); } cpl_image_delete(img1); cpl_image_delete(img3); } #if defined CPL_FFTWF_INSTALLED && defined CPL_FFTW_INSTALLED cpl_test_eq(nok, 4); /* Forward and backward of float and double */ #elif defined CPL_FFTWF_INSTALLED cpl_msg_warning(cpl_func, "Double precision FFT not available for " "unit testing"); cpl_test_eq(nok, 2); /* Forward and backward of type float */ #elif defined CPL_FFTW_INSTALLED cpl_msg_warning(cpl_func, "Single precision FFT not available for " "unit testing"); cpl_test_eq(nok, 2); /* Forward and backward of type double */ #else cpl_msg_warning(cpl_func, "FFT not available for unit testing"); cpl_test_zero(nok); #endif } #if defined CPL_FFTWF_INSTALLED || defined CPL_FFTW_INSTALLED /*----------------------------------------------------------------------------*/ /** @internal @brief Unit tests of the function @param nx Size in x (the number of columns) @param ny Size in y (the number of rows) @param type One of CPL_TYPE_DOUBLE or CPL_TYPE_FLOAT @see cpl_fft_image() **/ /*----------------------------------------------------------------------------*/ static void cpl_fft_image_test_one(cpl_size nx, cpl_size ny, cpl_type type) { const int rigor = CPL_FFT_FIND_MEASURE; cpl_image * image1r = cpl_image_new(nx, ny, type); cpl_image * image1c; cpl_image * image2 = cpl_image_new(nx, ny, type); cpl_image * image3r = cpl_image_new(nx, ny, type | CPL_TYPE_COMPLEX); cpl_image * image3c = cpl_image_new(nx, ny, type | CPL_TYPE_COMPLEX); cpl_image * image3h = cpl_image_new(nx/2+1, ny, type | CPL_TYPE_COMPLEX); cpl_image * image4; cpl_image * image4r; cpl_image * image4c; cpl_image * image5 = cpl_image_new(nx, ny, type); cpl_error_code error; error = cpl_image_fill_noise_uniform(image1r, 0.0, 1.0); cpl_test_eq_error(error, CPL_ERROR_NONE); image1c = cpl_image_cast(image1r, type | CPL_TYPE_COMPLEX); /* Real-to-complex, both full size */ error = cpl_fft_image(image3r, image1r, CPL_FFT_FORWARD | rigor); cpl_test_eq_error(error, CPL_ERROR_NONE); /* Extract half of r2c transform */ image4 = cpl_image_extract(image3r, 1, 1, nx/2 + 1, ny); /* Real-to-complex, complex is half size */ error = cpl_fft_image(image3h, image1r, CPL_FFT_FORWARD | rigor); cpl_test_eq_error(error, CPL_ERROR_NONE); /* That half has to match the transform onto the half-sized image */ cpl_test_image_abs(image3h, image4, 80.0 * (type == CPL_TYPE_DOUBLE ? 
DBL_EPSILON : FLT_EPSILON)); /* Complex-to-complex of same real values */ error = cpl_fft_image(image3c, image1c, CPL_FFT_FORWARD | rigor); cpl_test_eq_error(error, CPL_ERROR_NONE); /* In-place complex-to-complex of same real values */ error = cpl_fft_image(image1c, image1c, CPL_FFT_FORWARD | rigor); cpl_test_eq_error(error, CPL_ERROR_NONE); cpl_test_image_abs(image3c, image1c, type == CPL_TYPE_DOUBLE ? 128.0 * DBL_EPSILON : 40.0 * FLT_EPSILON); /* Extract half of c2c transform */ cpl_image_delete(image4); image4 = cpl_image_extract(image3c, 1, 1, nx/2 + 1, ny); cpl_test_image_abs(image3h, image4, 128.0 * nx * (type == CPL_TYPE_DOUBLE ? DBL_EPSILON : FLT_EPSILON)); /* Complex-to-real, both full size */ error = cpl_fft_image(image2, image3r, CPL_FFT_BACKWARD | rigor); cpl_test_eq_error(error, CPL_ERROR_NONE); /* The back-transformed must match the original image */ cpl_test_image_abs(image1r, image2, 6.0 * (type == CPL_TYPE_DOUBLE ? DBL_EPSILON : FLT_EPSILON)); /* Complex-to-real, complex is half size */ error = cpl_fft_image(image2, image3h, CPL_FFT_BACKWARD | rigor); cpl_test_eq_error(error, CPL_ERROR_NONE); /* The back-transformed must match the original image */ cpl_test_image_abs(image1r, image2, 6.0 * (type == CPL_TYPE_DOUBLE ? DBL_EPSILON : FLT_EPSILON)); /* Complex-to-complex of same real values */ error = cpl_fft_image(image3r, image3c, CPL_FFT_BACKWARD | rigor); cpl_test_eq_error(error, CPL_ERROR_NONE); /* In-place complex-to-complex of same real values */ error = cpl_fft_image(image3c, image3c, CPL_FFT_BACKWARD | rigor); cpl_test_eq_error(error, CPL_ERROR_NONE); cpl_test_image_abs(image3r, image3c, 3.2 * (type == CPL_TYPE_DOUBLE ? DBL_EPSILON : FLT_EPSILON)); /* The back-transformed must match the original image - on the real part */ image4r = cpl_image_extract_real(image3r); cpl_test_image_abs(image1r, image4r, 6.0 * (type == CPL_TYPE_DOUBLE ? DBL_EPSILON : FLT_EPSILON)); /* The back-transformed must have a zero-valued imaginary part */ image4c = cpl_image_extract_imag(image3r); cpl_image_delete(image4); image4 = cpl_image_new(nx, ny, type); cpl_test_image_abs(image4c, image4, 2.0 * (type == CPL_TYPE_DOUBLE ? 
DBL_EPSILON : FLT_EPSILON)); cpl_image_delete(image1r); cpl_image_delete(image1c); cpl_image_delete(image2); cpl_image_delete(image3r); cpl_image_delete(image3c); cpl_image_delete(image3h); cpl_image_delete(image4); cpl_image_delete(image4r); cpl_image_delete(image4c); cpl_image_delete(image5); } /*----------------------------------------------------------------------------*/ /** @internal @brief Unit tests of the function @param nx Size in x (the number of columns) @param ny Size in y (the number of rows) @param nz Size in z (the number of planes/images) @param type One of CPL_TYPE_DOUBLE or CPL_TYPE_FLOAT @see cpl_fft_image() **/ /*----------------------------------------------------------------------------*/ static void cpl_fft_imagelist_test_one(cpl_size nx, cpl_size ny, cpl_size nz, cpl_type type) { const int rigor = CPL_FFT_FIND_MEASURE; cpl_imagelist * ilist1r = cpl_imagelist_new(); cpl_imagelist * ilist1c = cpl_imagelist_new(); cpl_imagelist * ilist2 = cpl_imagelist_new(); cpl_imagelist * ilist3r = cpl_imagelist_new(); cpl_imagelist * ilist3c = cpl_imagelist_new(); cpl_imagelist * ilist3h = cpl_imagelist_new(); cpl_imagelist * ilist4 = cpl_imagelist_new(); cpl_imagelist * ilist4r = cpl_imagelist_new(); cpl_imagelist * ilist4c = cpl_imagelist_new(); cpl_imagelist * ilistr = cpl_imagelist_new(); cpl_imagelist * ilistc = cpl_imagelist_new(); cpl_imagelist * ilist5 = cpl_imagelist_new(); cpl_error_code error; cpl_size i; for (i = 0; i < nz; i++) { cpl_image * image1r = cpl_image_new(nx, ny, type); cpl_image * image1c; cpl_image * image2 = cpl_image_new(nx, ny, type); cpl_image * image3r = cpl_image_new(nx, ny, type | CPL_TYPE_COMPLEX); cpl_image * image3c; cpl_image * image3h = cpl_image_new(nx/2+1, ny, type | CPL_TYPE_COMPLEX); cpl_image * image5 = cpl_image_new(nx, ny, type); error = cpl_image_fill_noise_uniform(image1r, 0.0, 1.0); cpl_test_eq_error(error, CPL_ERROR_NONE); error = cpl_imagelist_set(ilist1r, image1r, i); cpl_test_eq_error(error, CPL_ERROR_NONE); image1c = cpl_image_cast(image1r, type | CPL_TYPE_COMPLEX); error = cpl_imagelist_set(ilist1c, image1c, i); cpl_test_eq_error(error, CPL_ERROR_NONE); error = cpl_imagelist_set(ilist2 , image2, i); cpl_test_eq_error(error, CPL_ERROR_NONE); error = cpl_imagelist_set(ilist3r, image3r, i); cpl_test_eq_error(error, CPL_ERROR_NONE); image3c = cpl_image_new(nx, ny, type | CPL_TYPE_COMPLEX); error = cpl_imagelist_set(ilist3c, image3c, i); cpl_test_eq_error(error, CPL_ERROR_NONE); error = cpl_imagelist_set(ilist3h, image3h, i); cpl_test_eq_error(error, CPL_ERROR_NONE); error = cpl_imagelist_set(ilist5, image5, i); cpl_test_eq_error(error, CPL_ERROR_NONE); } /* Real-to-complex, both full size */ error = cpl_fft_imagelist(ilist3r, ilist1r, CPL_FFT_FORWARD | rigor); cpl_test_eq_error(error, CPL_ERROR_NONE); /* Extract half of r2c transform */ for (i = 0; i < nz; i++) { const cpl_image * image3r = cpl_imagelist_get_const(ilist3r, i); cpl_image * image4 = cpl_image_extract(image3r, 1, 1, nx/2 + 1, ny); error = cpl_imagelist_set(ilist4, image4, i); cpl_test_eq_error(error, CPL_ERROR_NONE); } /* Real-to-complex, complex is half size */ error = cpl_fft_imagelist(ilist3h, ilist1r, CPL_FFT_FORWARD | rigor); cpl_test_eq_error(error, CPL_ERROR_NONE); /* That half has to match the transform onto the half-sized image */ cpl_test_imagelist_abs(ilist3h, ilist4, 80.0 * (type == CPL_TYPE_DOUBLE ? 
DBL_EPSILON : FLT_EPSILON)); /* Complex-to-complex of same real values */ error = cpl_fft_imagelist(ilist3c, ilist1c, CPL_FFT_FORWARD | rigor); cpl_test_eq_error(error, CPL_ERROR_NONE); /* In-place complex-to-complex of same real values */ error = cpl_fft_imagelist(ilist1c, ilist1c, CPL_FFT_FORWARD | rigor); cpl_test_eq_error(error, CPL_ERROR_NONE); cpl_test_imagelist_abs(ilist3c, ilist1c, 2.0 * (nx + ny) * (type == CPL_TYPE_DOUBLE ? DBL_EPSILON : FLT_EPSILON)); /* Extract half of c2c transform */ cpl_imagelist_empty(ilist4); for (i = 0; i < nz; i++) { const cpl_image * image3c = cpl_imagelist_get_const(ilist3c, i); cpl_image * image4 = cpl_image_extract(image3c, 1, 1, nx/2 + 1, ny); error = cpl_imagelist_set(ilist4, image4, i); cpl_test_eq_error(error, CPL_ERROR_NONE); } cpl_test_imagelist_abs(ilist3h, ilist4, 128.0 * nx * (type == CPL_TYPE_DOUBLE ? DBL_EPSILON : FLT_EPSILON)); /* Complex-to-real, both full size */ error = cpl_fft_imagelist(ilist2, ilist3r, CPL_FFT_BACKWARD | rigor); cpl_test_eq_error(error, CPL_ERROR_NONE); /* The back-transformed must match the original image */ cpl_test_imagelist_abs(ilist1r, ilist2, 6.0 * (type == CPL_TYPE_DOUBLE ? DBL_EPSILON : FLT_EPSILON)); /* Complex-to-real, complex is half size */ error = cpl_fft_imagelist(ilist2, ilist3h, CPL_FFT_BACKWARD | rigor); cpl_test_eq_error(error, CPL_ERROR_NONE); /* The back-transformed must match the original image */ cpl_test_imagelist_abs(ilist1r, ilist2, 6.0 * (type == CPL_TYPE_DOUBLE ? DBL_EPSILON : FLT_EPSILON)); /* Complex-to-complex of same real values */ error = cpl_fft_imagelist(ilist3r, ilist3c, CPL_FFT_BACKWARD | rigor); cpl_test_eq_error(error, CPL_ERROR_NONE); /* In-place complex-to-complex of same real values */ error = cpl_fft_imagelist(ilist3c, ilist3c, CPL_FFT_BACKWARD | rigor); cpl_test_eq_error(error, CPL_ERROR_NONE); cpl_test_imagelist_abs(ilist3r, ilist3c, 8.0 * (type == CPL_TYPE_DOUBLE ? DBL_EPSILON : FLT_EPSILON)); /* The back-transformed must match the original image - on the real part */ /* - and the back-transformed must have a zero-valued imaginary part */ cpl_imagelist_empty(ilist4); for (i = 0; i < nz; i++) { const cpl_image * image3r = cpl_imagelist_get_const(ilist3r, i); cpl_image * image4r = cpl_image_extract_real(image3r); cpl_image * image4c = cpl_image_extract_imag(image3r); cpl_image * image4 = cpl_image_new(nx, ny, type); error = cpl_imagelist_set(ilist4r, image4r, i); cpl_test_eq_error(error, CPL_ERROR_NONE); error = cpl_imagelist_set(ilist4c, image4c, i); cpl_test_eq_error(error, CPL_ERROR_NONE); error = cpl_imagelist_set(ilist4, image4, i); cpl_test_eq_error(error, CPL_ERROR_NONE); } cpl_test_imagelist_abs(ilist1r, ilist4r, 6.0 * (type == CPL_TYPE_DOUBLE ? DBL_EPSILON : FLT_EPSILON)); cpl_test_imagelist_abs(ilist4c, ilist4, 2.0 * (type == CPL_TYPE_DOUBLE ? 
DBL_EPSILON : FLT_EPSILON)); cpl_imagelist_delete(ilist1r); cpl_imagelist_delete(ilist1c); cpl_imagelist_delete(ilist2); cpl_imagelist_delete(ilist3r); cpl_imagelist_delete(ilist3c); cpl_imagelist_delete(ilist3h); cpl_imagelist_delete(ilist4); cpl_imagelist_delete(ilist4r); cpl_imagelist_delete(ilist4c); cpl_imagelist_delete(ilistr); cpl_imagelist_delete(ilistc); cpl_imagelist_delete(ilist5); } /*----------------------------------------------------------------------------*/ /** @internal @brief Benchmark cpl_fft_imagelist() aginst cpl_fft_image() @param nx Size in x (the number of columns) @param ny Size in y (the number of rows) @param nz Size in z (the number of planes/images) @param type One of CPL_TYPE_DOUBLE or CPL_TYPE_FLOAT @see cpl_fft_imagelist_test_one() cpl_fft_image_test_one() **/ /*----------------------------------------------------------------------------*/ static void cpl_fft_imagelist_test_image(cpl_size nx, cpl_size ny, cpl_size nz, cpl_type type) { cpl_flops flopl0, flopl1, flopi0, flopi1; double timel0, timel1, timei0, timei1; cpl_size i; flopl0 = cpl_test_get_flops(); timel0 = cpl_test_get_cputime(); cpl_fft_imagelist_test_one(nx, ny, nz, type); flopl1 = cpl_test_get_flops() - flopl0; timel1 = cpl_test_get_cputime() - timel0; flopi0 = cpl_test_get_flops(); timei0 = cpl_test_get_cputime(); for (i = 0; i < nz; i++) { cpl_fft_image_test_one(nx, ny, type); } flopi1 = cpl_test_get_flops() - flopi0; timei1 = cpl_test_get_cputime() - timei0; if (timei1 > 0.0 && timel1 > 0.0) { cpl_msg_info(cpl_func, "List vs single %d X %d X %d (%s): %g <=> %g " "[s] (%g <=> %g [MFLOP/s])", (int)nx, (int)ny, (int)nz, cpl_type_get_name(type), timel1, timei1, 1e-6*(double)flopl1/timel1, 1e-6*(double)flopi1/timei1); } else { cpl_msg_info(cpl_func, "List vs single %d X %d X %d (%s): %g <=> %g " "[s] (%g <=> %g [MFLOP])", (int)nx, (int)ny, (int)nz, cpl_type_get_name(type), timel1, timei1, 1e-6*(double)flopl1, 1e-6*(double)flopi1); } } /*----------------------------------------------------------------------------*/ /** @internal @brief Try to use the FFT for correlation @param nx Size in x (the number of columns) @param ny Size in y (the number of rows) @param type One of CPL_TYPE_DOUBLE or CPL_TYPE_FLOAT @see cpl_fft_image_test_one() **/ /*----------------------------------------------------------------------------*/ static void cpl_fft_image_test_correlate(cpl_size nx, cpl_size ny, cpl_type type) { cpl_image * ia = cpl_image_new(nx, ny, type); cpl_image * ib = cpl_image_new(nx, ny, type); cpl_image * ic = cpl_image_new(nx, ny, type); cpl_image * fa = cpl_image_new(nx, ny, type | CPL_TYPE_COMPLEX); cpl_image * fb = cpl_image_new(nx, ny, type | CPL_TYPE_COMPLEX); cpl_image * fc = cpl_image_new(nx, ny, type | CPL_TYPE_COMPLEX); cpl_imagelist * iab = cpl_imagelist_new(); cpl_imagelist * fab = cpl_imagelist_new(); cpl_size xmax, ymax; cpl_error_code code; code = cpl_imagelist_set(iab, ia, 0); cpl_test_eq_error(code, CPL_ERROR_NONE); code = cpl_imagelist_set(iab, ib, 1); cpl_test_eq_error(code, CPL_ERROR_NONE); code = cpl_imagelist_set(fab, fa, 0); cpl_test_eq_error(code, CPL_ERROR_NONE); code = cpl_imagelist_set(fab, fb, 1); cpl_test_eq_error(code, CPL_ERROR_NONE); code = cpl_image_fill_gaussian(ia, nx/2.0, ny/2.0, 1.0, 1.0, 1.0); cpl_test_eq_error(code, CPL_ERROR_NONE); code = cpl_image_copy(ib, ia, 1, 1); cpl_test_eq_error(code, CPL_ERROR_NONE); code = cpl_image_shift(ib, nx/4, ny/4); cpl_test_eq_error(code, CPL_ERROR_NONE); code = cpl_fft_imagelist(fab, iab, CPL_FFT_FORWARD); 
cpl_test_eq_error(code, CPL_ERROR_NONE); /* Auto-correlate */ code = cpl_image_conjugate(fc, fa); cpl_test_eq_error(code, CPL_ERROR_NONE); code = cpl_image_multiply(fc, fa); cpl_test_eq_error(code, CPL_ERROR_NONE); code = cpl_fft_image(ic, fc, CPL_FFT_BACKWARD | CPL_FFT_NOSCALE); cpl_test_eq_error(code, CPL_ERROR_NONE); code = cpl_image_get_maxpos(ic, &xmax, &ymax); cpl_test_eq_error(code, CPL_ERROR_NONE); cpl_test_eq(xmax, 1); cpl_test_eq(ymax, 1); /* Cross-correlate */ code = cpl_image_conjugate(fc, fb); cpl_test_eq_error(code, CPL_ERROR_NONE); code = cpl_image_multiply(fc, fa); cpl_test_eq_error(code, CPL_ERROR_NONE); code = cpl_fft_image(ic, fc, CPL_FFT_BACKWARD | CPL_FFT_NOSCALE); cpl_test_eq_error(code, CPL_ERROR_NONE); code = cpl_image_get_maxpos(ic, &xmax, &ymax); cpl_test_eq_error(code, CPL_ERROR_NONE); cpl_test_eq(xmax, 1 + nx/2 + nx/4); cpl_test_eq(ymax, 1 + ny/2 + ny/4); cpl_imagelist_delete(iab); cpl_imagelist_delete(fab); cpl_image_delete(ic); cpl_image_delete(fc); } #endif
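The correlation test above relies on the convolution theorem: transform both images, multiply one spectrum by the conjugate of the other, back-transform, and the peak of the result marks the relative shift. Below is a minimal, hedged sketch of that property, independent of CPL/FFTW: a naive O(n^2) circular cross-correlation in 1-D whose peak index equals the applied shift. All names and the bump shape are illustrative only, not part of the CPL API.

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    const int n = 128;
    const int shift = n / 4;                    // mirrors the nx/4 shift used in the test
    std::vector<double> a(n, 0.0), b(n, 0.0);
    for (int i = 0; i < n; ++i) {
        const double d = i - n / 2.0;           // narrow bump centred at n/2
        a[i] = std::exp(-d * d);
        b[(i + shift) % n] = a[i];              // circularly shifted copy of a
    }
    // Naive circular cross-correlation c[k] = sum_i a[i] * b[(i + k) mod n];
    // the FFT-based version in the test computes the same quantity in O(n log n).
    int kmax = 0;
    double cmax = -1.0;
    for (int k = 0; k < n; ++k) {
        double c = 0.0;
        for (int i = 0; i < n; ++i) c += a[i] * b[(i + k) % n];
        if (c > cmax) { cmax = c; kmax = k; }
    }
    std::printf("correlation peak at k = %d (applied shift = %d)\n", kmax, shift);
    return 0;
}

The CPL test reports the peak at 1 + nx/2 + nx/4 rather than at the raw shift because image indices are 1-based and the reference Gaussian is centred at (nx/2, ny/2); the underlying mechanism is the same.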
radix_sort.h
#ifndef _PCL_RADIX_SORT_ #define _PCL_RADIX_SORT_ #include <utility> #include <limits> #include "utils.h" #ifndef BKT_BITS #define BKT_BITS 12 #endif template<typename T> using Key_Value_Pair = std::pair<T, T>; template<typename T> Key_Value_Pair<T>* radix_sort_parallel(Key_Value_Pair<T>* inp_buf, Key_Value_Pair<T>* tmp_buf, int64_t elements_count, int64_t max_value) { constexpr int bkt_bits = BKT_BITS; constexpr int nbkts = (1 << bkt_bits); constexpr int bkt_mask = (nbkts - 1); int maxthreads = omp_get_max_threads(); int histogram[nbkts*maxthreads], histogram_ps[nbkts*maxthreads + 1]; if(max_value == 0) return inp_buf; int num_bits = 64; if(sizeof(T) == 8 && max_value > std::numeric_limits<int>::max()) { num_bits = sizeof(T) * 8 - __builtin_clzll(max_value); } else { num_bits = 32 - __builtin_clz((unsigned int)max_value); } int num_passes = (num_bits + bkt_bits - 1) / bkt_bits; #pragma omp parallel { int tid = omp_get_thread_num(); int nthreads = omp_get_num_threads(); int * local_histogram = &histogram[nbkts*tid]; int * local_histogram_ps = &histogram_ps[nbkts*tid]; int elements_count_4 = elements_count/4*4; Key_Value_Pair<T> * input = inp_buf; Key_Value_Pair<T> * output = tmp_buf; for(unsigned int pass = 0; pass < num_passes; pass++) { auto t1 = get_time(); /* Step 1: compute histogram Reset histogram */ for(int i = 0; i < nbkts; i++) local_histogram[i] = 0; #pragma omp for schedule(static) for(int64_t i = 0; i < elements_count_4; i+=4) { T val_1 = input[i].first; T val_2 = input[i+1].first; T val_3 = input[i+2].first; T val_4 = input[i+3].first; local_histogram[ (val_1>>(pass*bkt_bits)) & bkt_mask]++; local_histogram[ (val_2>>(pass*bkt_bits)) & bkt_mask]++; local_histogram[ (val_3>>(pass*bkt_bits)) & bkt_mask]++; local_histogram[ (val_4>>(pass*bkt_bits)) & bkt_mask]++; } if(tid == (nthreads -1)) { for(int64_t i = elements_count_4; i < elements_count; i++) { T val = input[i].first; local_histogram[ (val>>(pass*bkt_bits)) & bkt_mask]++; } } #pragma omp barrier auto t11 = get_time(); /* Step 2: prefix sum */ if(tid == 0) { int sum = 0, prev_sum = 0; for(int bins = 0; bins < nbkts; bins++) for(int t = 0; t < nthreads; t++) { sum += histogram[t*nbkts + bins]; histogram_ps[t*nbkts + bins] = prev_sum; prev_sum = sum; } histogram_ps[nbkts*nthreads] = prev_sum; if(prev_sum != elements_count) { printf("Error1!\n"); exit(123); } } #pragma omp barrier auto t12 = get_time(); /* Step 3: scatter */ #pragma omp for schedule(static) for(int64_t i = 0; i < elements_count_4; i+=4) { T val_1 = input[i].first; T val_2 = input[i+1].first; T val_3 = input[i+2].first; T val_4 = input[i+3].first; T bin_1 = (val_1>>(pass*bkt_bits)) & bkt_mask; T bin_2 = (val_2>>(pass*bkt_bits)) & bkt_mask; T bin_3 = (val_3>>(pass*bkt_bits)) & bkt_mask; T bin_4 = (val_4>>(pass*bkt_bits)) & bkt_mask; int pos; pos = local_histogram_ps[bin_1]++; output[pos] = input[i]; pos = local_histogram_ps[bin_2]++; output[pos] = input[i+1]; pos = local_histogram_ps[bin_3]++; output[pos] = input[i+2]; pos = local_histogram_ps[bin_4]++; output[pos] = input[i+3]; } if(tid == (nthreads -1)) { for(int64_t i = elements_count_4; i < elements_count; i++) { T val = input[i].first; int pos = local_histogram_ps[ (val>>(pass*bkt_bits)) & bkt_mask]++; output[pos] = input[i]; } } Key_Value_Pair<T> * temp = input; input = output; output = temp; #pragma omp barrier auto t2 = get_time(); #ifdef DEBUG_TIME if (tid == 0) printf("pass = %d total time = %.3f step1 = %.3f step2 = %.3f %.3f\n", pass, t2-t1, t11-t1, t12-t11, t2-t12); #endif } } return (num_passes % 2 
== 0 ? inp_buf : tmp_buf); } #endif
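A hedged usage sketch for the header above. The caller owns both buffers, the function sorts the pairs by their .first member in bkt_bits-bit digit passes, and the sorted data may end up in either buffer, so the returned pointer must be used. The key distribution and sizes here are illustrative only; the header itself still requires utils.h (for get_time) and OpenMP.

#include <cstdint>
#include <cstdio>
#include <vector>
#include "radix_sort.h"   // the header above

int main() {
    const int64_t n = 1 << 20;
    const int64_t max_key = 1 << 18;
    std::vector<Key_Value_Pair<int64_t>> buf(n), tmp(n);
    for (int64_t i = 0; i < n; ++i)
        buf[i] = {(i * 2654435761LL) % max_key, i};   // scrambled key, original index as value
    // Sorts by .first; returns whichever buffer holds the result after the final pass.
    Key_Value_Pair<int64_t>* sorted =
        radix_sort_parallel<int64_t>(buf.data(), tmp.data(), n, max_key);
    std::printf("first key = %lld, last key = %lld\n",
                (long long)sorted[0].first, (long long)sorted[n - 1].first);
    return 0;
}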
cloudsc_validate.c
/* * (C) Copyright 1988- ECMWF. * * This software is licensed under the terms of the Apache Licence Version 2.0 * which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. * In applying this licence, ECMWF does not waive the privileges and immunities * granted to it by virtue of its status as an intergovernmental organisation * nor does it submit to any jurisdiction. */ #include "cloudsc_validate.h" #include <float.h> #include <math.h> #define min(a, b) (((a) < (b)) ? (a) : (b)) #define max(a, b) (((a) > (b)) ? (a) : (b)) void print_error(const char *name, double zminval, double zmaxval, double zmaxerr, double zerrsum, double zsum, double zavgpgp, int ndim) { double zrelerr, zeps = DBL_EPSILON; int iopt = 0; if (zerrsum < zeps) { zrelerr = 0.0; iopt = 1; } else if (zsum < zeps) { zrelerr = zerrsum / (1.0 + zsum); iopt = 2; } else { zrelerr = zerrsum / zsum; iopt = 3; } //-- If you get 4 exclamation marks next to your error output, // then it is likely that some uninitialized variables exists or // some other screw-up -- watch out this !!!! char *clwarn; clwarn = (zrelerr > 10.0 * zeps) ? " !!!!" : " "; zrelerr = 100.0 * zrelerr; printf(" %+20s %dD%d %20.13le %20.13le %20.13le %20.13le %20.13le %s\n", name, ndim, iopt, zminval, zmaxval, zmaxerr, zavgpgp, zrelerr, clwarn); } void validate_1d(const char *name, double * v_ref, double * v_field, int nlon, int ngptot, int nblocks) { /* Computes and prints errors in the "L2 norm sense" */ int b, bsize, jk; double zminval, zmaxval, zdiff, zmaxerr, zerrsum, zsum, zrelerr, zavgpgp; double (*field)[nlon] = (double (*)[nlon]) v_field; double (*reference)[nlon] = (double (*)[nlon]) v_ref; zminval = +DBL_MAX; zmaxval = -DBL_MAX; zmaxerr = 0.0; zerrsum = 0.0; zsum = 0.0; #pragma omp parallel for default(shared) private(b, bsize, jk) \ reduction(min:zminval) reduction(max:zmaxval,zmaxerr) reduction(+:zerrsum,zsum) for (b = 0; b < nblocks; b++) { bsize = min(nlon, ngptot - b*nlon); // field block size for (jk = 0; jk < bsize; jk++) { zminval = fmin(zminval, field[b][jk]); zmaxval = fmax(zmaxval, field[b][jk]); // Difference against reference result in one-norm sense zdiff = fabs(field[b][jk] - reference[b][jk]); zmaxerr = fmax(zmaxerr, zdiff); zerrsum = zerrsum + zdiff; zsum = zsum + abs(reference[b][jk]); } } zavgpgp = zerrsum / (double) ngptot; print_error(name, zminval, zmaxval, zmaxerr, zerrsum, zsum, zavgpgp, 2); } void validate_2d(const char *name, double *v_ref, double *v_field, int nlon, int nlev, int ngptot, int nblocks) { /* Computes and prints errors in the "L2 norm sense" */ int b, bsize, jl, jk; double zminval, zmaxval, zdiff, zmaxerr, zerrsum, zsum, zrelerr, zavgpgp; double (*field)[nlev][nlon] = (double (*)[nlev][nlon]) v_field; double (*reference)[nlev][nlon] = (double (*)[nlev][nlon]) v_ref; zminval = +DBL_MAX; zmaxval = -DBL_MAX; zmaxerr = 0.0; zerrsum = 0.0; zsum = 0.0; #pragma omp parallel for default(shared) private(b, bsize, jl, jk) \ reduction(min:zminval) reduction(max:zmaxval,zmaxerr) reduction(+:zerrsum,zsum) for (b = 0; b < nblocks; b++) { bsize = min(nlon, ngptot - b*nlon); // field block size for (jl = 0; jl < nlev; jl++) { for (jk = 0; jk < bsize; jk++) { zminval = fmin(zminval, field[b][jl][jk]); zmaxval = fmax(zmaxval, field[b][jl][jk]); // Difference against reference result in one-norm sense zdiff = fabs(field[b][jl][jk] - reference[b][jl][jk]); zmaxerr = fmax(zmaxerr, zdiff); zerrsum = zerrsum + zdiff; zsum = zsum + abs(reference[b][jl][jk]); } } } zavgpgp = zerrsum / (double) ngptot; print_error(name, zminval, 
zmaxval, zmaxerr, zerrsum, zsum, zavgpgp, 2); } void validate_3d(const char *name, double *v_ref, double *v_field, int nlon, int nlev, int nclv, int ngptot, int nblocks) { /* Computes and prints errors in the "L2 norm sense" */ int b, bsize, jl, jk, jm; double zminval, zmaxval, zdiff, zmaxerr, zerrsum, zsum, zrelerr, zavgpgp; double (*field)[nclv][nlev][nlon] = (double (*)[nclv][nlev][nlon]) v_field; double (*reference)[nclv][nlev][nlon] = (double (*)[nclv][nlev][nlon]) v_ref; zminval = +DBL_MAX; zmaxval = -DBL_MAX; zmaxerr = 0.0; zerrsum = 0.0; zsum = 0.0; #pragma omp parallel for default(shared) private(b, bsize, jl, jk, jm) \ reduction(min:zminval) reduction(max:zmaxval,zmaxerr) reduction(+:zerrsum,zsum) for (b = 0; b < nblocks; b++) { bsize = min(nlon, ngptot - b*nlon); // field block size for (jm = 0; jm < nclv; jm++) { for (jl = 0; jl < nlev; jl++) { for (jk = 0; jk < bsize; jk++) { zminval = fmin(zminval, field[b][jm][jl][jk]); zmaxval = fmax(zmaxval, field[b][jm][jl][jk]); // Difference against reference result in one-norm sense zdiff = fabs(field[b][jm][jl][jk] - reference[b][jm][jl][jk]); zmaxerr = fmax(zmaxerr, zdiff); zerrsum = zerrsum + zdiff; zsum = zsum + abs(reference[b][jm][jl][jk]); } } } } zavgpgp = zerrsum / (double) ngptot; print_error(name, zminval, zmaxval, zmaxerr, zerrsum, zsum, zavgpgp, 2); } int cloudsc_validate(const int nlon, const int nlev, const int nclv, const int ngptot, const int nproma, double *plude, double *pcovptot, double *prainfrac_toprfz, double *pfsqlf, double *pfsqif, double *pfcqlng, double *pfcqnng, double *pfsqrf, double *pfsqsf, double *pfcqrng, double *pfcqsng, double *pfsqltur, double *pfsqitur, double *pfplsl, double *pfplsn, double *pfhpsl, double *pfhpsn, double *tend_loc_a, double *tend_loc_q, double *tend_loc_t, double *tend_loc_cld) { const int nblocks = (ngptot / nproma) + min(ngptot % nproma, 1); double *ref_plude = (double*) malloc( sizeof(double) * nblocks*nlev*nproma ); double *ref_pcovptot = (double*) malloc( sizeof(double) * nblocks*nlev*nproma ); double *ref_prainfrac_toprfz = (double*) malloc( sizeof(double) * nblocks*nproma ); double *ref_pfsqlf = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma ); double *ref_pfsqif = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma ); double *ref_pfcqlng = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma ); double *ref_pfcqnng = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma ); double *ref_pfsqrf = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma ); double *ref_pfsqsf = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma ); double *ref_pfcqrng = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma ); double *ref_pfcqsng = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma ); double *ref_pfsqltur = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma ); double *ref_pfsqitur = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma ); double *ref_pfplsl = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma ); double *ref_pfplsn = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma ); double *ref_pfhpsl = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma ); double *ref_pfhpsn = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma ); double *ref_tend_loc_a = (double*) malloc( sizeof(double) * nblocks*nlev*nproma ); double *ref_tend_loc_q = (double*) malloc( sizeof(double) * nblocks*nlev*nproma ); double *ref_tend_loc_t = (double*) malloc( sizeof(double) * nblocks*nlev*nproma ); double 
*ref_tend_loc_cld = (double*) malloc( sizeof(double) * nblocks*nclv*nlev*nproma ); load_reference(nlon, nlev, nclv, ngptot, nproma, ref_plude, ref_pcovptot, ref_prainfrac_toprfz, ref_pfsqlf, ref_pfsqif, ref_pfcqlng, ref_pfcqnng, ref_pfsqrf, ref_pfsqsf, ref_pfcqrng, ref_pfcqsng, ref_pfsqltur, ref_pfsqitur, ref_pfplsl, ref_pfplsn, ref_pfhpsl, ref_pfhpsn, ref_tend_loc_a, ref_tend_loc_q, ref_tend_loc_t, ref_tend_loc_cld); printf(" %+20s %s %+20s %+20s %+20s %+20s %+20s\n", "Variable", "Dim", "MinValue", "MaxValue", "AbsMaxErr", "AvgAbsErr/GP", "MaxRelErr-%"); validate_2d("PLUDE", ref_plude, plude, nproma, nlev, ngptot, nblocks); validate_2d("PCOVPTOT", ref_pcovptot, pcovptot, nproma, nlev, ngptot, nblocks); validate_1d("PRAINFRAC_TOPRFZ", ref_prainfrac_toprfz, prainfrac_toprfz, nproma, ngptot, nblocks); validate_2d("PFSQLF", ref_pfsqlf, pfsqlf, nproma, nlev+1, ngptot, nblocks); validate_2d("PFSQIF", ref_pfsqif, pfsqif, nproma, nlev+1, ngptot, nblocks); validate_2d("PFCQLNG", ref_pfcqlng, pfcqlng, nproma, nlev+1, ngptot, nblocks); validate_2d("PFCQNNG", ref_pfcqnng, pfcqnng, nproma, nlev+1, ngptot, nblocks); validate_2d("PFSQRF", ref_pfsqrf, pfsqrf, nproma, nlev+1, ngptot, nblocks); validate_2d("PFSQSF", ref_pfsqsf, pfsqsf, nproma, nlev+1, ngptot, nblocks); validate_2d("PFCQRNG", ref_pfcqrng, pfcqrng, nproma, nlev+1, ngptot, nblocks); validate_2d("PFCQSNG", ref_pfcqsng, pfcqsng, nproma, nlev+1, ngptot, nblocks); validate_2d("PFSQLTUR", ref_pfsqltur, pfsqltur, nproma, nlev+1, ngptot, nblocks); validate_2d("PFSQITUR", ref_pfsqitur, pfsqitur, nproma, nlev+1, ngptot, nblocks); validate_2d("PFPLSL", ref_pfplsl, pfplsl, nproma, nlev+1, ngptot, nblocks); validate_2d("PFPLSN", ref_pfplsn, pfplsn, nproma, nlev+1, ngptot, nblocks); validate_2d("PFHPSL", ref_pfhpsl, pfhpsl, nproma, nlev+1, ngptot, nblocks); validate_2d("PFHPSN", ref_pfhpsn, pfhpsn, nproma, nlev+1, ngptot, nblocks); validate_2d("TENDENCY_LOC%A", ref_tend_loc_a, tend_loc_a, nproma, nlev, ngptot, nblocks); validate_2d("TENDENCY_LOC%Q", ref_tend_loc_q, tend_loc_q, nproma, nlev, ngptot, nblocks); validate_2d("TENDENCY_LOC%T", ref_tend_loc_t, tend_loc_t, nproma, nlev, ngptot, nblocks); validate_3d("TENDENCY_LOC%CLD", ref_tend_loc_cld, tend_loc_cld, nproma, nlev, nclv, ngptot, nblocks); free(ref_plude); free(ref_pcovptot); free(ref_prainfrac_toprfz); free(ref_pfsqlf); free(ref_pfsqif); free(ref_pfcqlng); free(ref_pfcqnng); free(ref_pfsqrf); free(ref_pfsqsf); free(ref_pfcqrng); free(ref_pfcqsng); free(ref_pfsqltur); free(ref_pfsqitur); free(ref_pfplsl); free(ref_pfplsn); free(ref_pfhpsl); free(ref_pfhpsn); free(ref_tend_loc_a); free(ref_tend_loc_q); free(ref_tend_loc_t); free(ref_tend_loc_cld); }
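The validate_* routines above all reduce a field against its reference in the same way: track the field's min and max, accumulate the one-norm error and the reference one-norm, take the maximum absolute error, and let print_error turn the sums into an average error per grid point and a relative error in percent. The following is a minimal single-threaded sketch of that metric on a flat array (no OpenMP, no blocking); variable names mirror the source, and the data values are made up for illustration.

#include <cfloat>
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
    std::vector<double> ref  = {1.0, 2.0, 3.0, 4.0};
    std::vector<double> test = {1.0, 2.0 + 1e-9, 3.0, 4.0 - 2e-9};
    double zminval = +DBL_MAX, zmaxval = -DBL_MAX;
    double zmaxerr = 0.0, zerrsum = 0.0, zsum = 0.0;
    for (std::size_t i = 0; i < ref.size(); ++i) {
        zminval = std::fmin(zminval, test[i]);
        zmaxval = std::fmax(zmaxval, test[i]);
        const double zdiff = std::fabs(test[i] - ref[i]);  // one-norm difference per element
        zmaxerr = std::fmax(zmaxerr, zdiff);
        zerrsum += zdiff;
        zsum    += std::fabs(ref[i]);                      // fabs, not the integer abs()
    }
    const double zavgpgp = zerrsum / static_cast<double>(ref.size());
    // Relative-error classification mirrored from print_error():
    const double zeps = DBL_EPSILON;
    double zrelerr;
    if (zerrsum < zeps)    zrelerr = 0.0;                  // errors below machine epsilon
    else if (zsum < zeps)  zrelerr = zerrsum / (1.0 + zsum);
    else                   zrelerr = zerrsum / zsum;
    std::printf("min %.3e max %.3e max-abs-err %.3e avg/gp %.3e rel-err %.3e %%\n",
                zminval, zmaxval, zmaxerr, zavgpgp, 100.0 * zrelerr);
    return 0;
}

Note the sketch uses std::fabs for the reference sum; the reference implementation's abs() call on a double relies on implicit conversion and is flagged with a warning by most compilers.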
HybridRepSetReader.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2019 QMCPACK developers. // // File developed by: Ye Luo, [email protected], Argonne National Laboratory // // File created by: Ye Luo, [email protected], Argonne National Laboratory ////////////////////////////////////////////////////////////////////////////////////// /** @file * * derived from SplineSetReader */ #ifndef QMCPLUSPLUS_HYBRIDREP_READER_H #define QMCPLUSPLUS_HYBRIDREP_READER_H #include "Numerics/Quadrature.h" #include "Numerics/Bessel.h" #include "QMCWaveFunctions/BsplineFactory/HybridRepCenterOrbitals.h" #include "OhmmsData/AttributeSet.h" #include "config/stdlib/math.hpp" //#include "QMCHamiltonians/Ylm.h" //#define PRINT_RADIAL namespace qmcplusplus { template<typename ST, typename LT> struct Gvectors { typedef TinyVector<ST, 3> PosType; typedef std::complex<ST> ValueType; const LT& Lattice; std::vector<PosType> gvecs_cart; //Cartesian. std::vector<ST> gmag; const size_t NumGvecs; Gvectors(const std::vector<TinyVector<int, 3>>& gvecs_in, const LT& Lattice_in, const TinyVector<int, 3>& HalfG, size_t first, size_t last) : Lattice(Lattice_in), NumGvecs(last - first) { gvecs_cart.resize(NumGvecs); gmag.resize(NumGvecs); #pragma omp parallel for for (size_t ig = 0; ig < NumGvecs; ig++) { TinyVector<ST, 3> gvec_shift; gvec_shift = gvecs_in[ig + first] + HalfG * 0.5; gvecs_cart[ig] = Lattice.k_cart(gvec_shift); gmag[ig] = std::sqrt(dot(gvecs_cart[ig], gvecs_cart[ig])); } } template<typename YLM_ENGINE, typename VVT> void calc_Ylm_G(const size_t ig, YLM_ENGINE& Ylm, VVT& YlmG) const { PosType Ghat(0.0, 0.0, 1.0); if (gmag[ig] > 0) Ghat = gvecs_cart[ig] / gmag[ig]; Ylm.evaluateV(Ghat[0], Ghat[1], Ghat[2], YlmG.data()); } template<typename VVT> inline void calc_jlm_G(const int lmax, ST& r, const size_t ig, VVT& j_lm_G) const { bessel_steed_array_cpu(lmax, gmag[ig] * r, j_lm_G.data()); for (size_t l = lmax; l > 0; l--) for (size_t lm = l * l; lm < (l + 1) * (l + 1); lm++) j_lm_G[lm] = j_lm_G[l]; } template<typename PT, typename VT> inline void calc_phase_shift(const PT& RSoA, const size_t ig, VT& phase_shift_real, VT& phase_shift_imag) const { const ST* restrict px = RSoA.data(0); const ST* restrict py = RSoA.data(1); const ST* restrict pz = RSoA.data(2); ST* restrict v_r = phase_shift_real.data(); ST* restrict v_i = phase_shift_imag.data(); const ST& gv_x = gvecs_cart[ig][0]; const ST& gv_y = gvecs_cart[ig][1]; const ST& gv_z = gvecs_cart[ig][2]; #pragma omp simd aligned(px, py, pz, v_r, v_i) for (size_t iat = 0; iat < RSoA.size(); iat++) qmcplusplus::sincos(px[iat] * gv_x + py[iat] * gv_y + pz[iat] * gv_z, v_i + iat, v_r + iat); } template<typename PT> ValueType evaluate_psi_r(const Vector<std::complex<double>>& cG, const PT& pos) { assert(cG.size() == NumGvecs); std::complex<ST> val(0.0, 0.0); for (size_t ig = 0; ig < NumGvecs; ig++) { ST s, c; qmcplusplus::sincos(dot(gvecs_cart[ig], pos), &s, &c); ValueType pw0(c, s); val += cG[ig] * pw0; } return val; } template<typename PT> void evaluate_psi_r(const Vector<std::complex<double>>& cG, const PT& pos, ValueType& phi, ValueType& d2phi) { assert(cG.size() == NumGvecs); d2phi = phi = 0.0; for (size_t ig = 0; ig < NumGvecs; ig++) { ST s, c; qmcplusplus::sincos(dot(gvecs_cart[ig], pos), &s, &c); ValueType pw0(c, s); phi += cG[ig] * pw0; d2phi += cG[ig] * pw0 * (-dot(gvecs_cart[ig], 
gvecs_cart[ig])); } } double evaluate_KE(const Vector<std::complex<double>>& cG) { assert(cG.size() == NumGvecs); double KE = 0; for (size_t ig = 0; ig < NumGvecs; ig++) KE += dot(gvecs_cart[ig], gvecs_cart[ig]) * (cG[ig].real() * cG[ig].real() + cG[ig].imag() * cG[ig].imag()); return KE / 2.0; } }; /** General HybridRepSetReader to handle any unitcell */ template<typename SA> struct HybridRepSetReader : public SplineSetReader<SA> { typedef SplineSetReader<SA> BaseReader; using BaseReader::bspline; using BaseReader::mybuilder; using BaseReader::rotate_phase_i; using BaseReader::rotate_phase_r; using typename BaseReader::DataType; HybridRepSetReader(EinsplineSetBuilder* e) : BaseReader(e) {} /** initialize basic parameters of atomic orbitals */ void initialize_hybridrep_atomic_centers() override { OhmmsAttributeSet a; std::string scheme_name("Consistent"); std::string s_function_name("LEKS2018"); a.add(scheme_name, "smoothing_scheme"); a.add(s_function_name, "smoothing_function"); a.put(mybuilder->XMLRoot); // assign smooth_scheme if (scheme_name == "Consistent") bspline->smooth_scheme = SA::smoothing_schemes::CONSISTENT; else if (scheme_name == "SmoothAll") bspline->smooth_scheme = SA::smoothing_schemes::SMOOTHALL; else if (scheme_name == "SmoothPartial") bspline->smooth_scheme = SA::smoothing_schemes::SMOOTHPARTIAL; else APP_ABORT("initialize_hybridrep_atomic_centers wrong smoothing_scheme name! Only allows Consistent, SmoothAll or " "SmoothPartial."); // assign smooth_function if (s_function_name == "LEKS2018") bspline->smooth_func_id = smoothing_functions::LEKS2018; else if (s_function_name == "coscos") bspline->smooth_func_id = smoothing_functions::COSCOS; else if (s_function_name == "linear") bspline->smooth_func_id = smoothing_functions::LINEAR; else APP_ABORT( "initialize_hybridrep_atomic_centers wrong smoothing_function name! Only allows LEKS2018, coscos or linear."); app_log() << "Hybrid orbital representation uses " << scheme_name << " smoothing scheme and " << s_function_name << " smoothing function." 
<< std::endl; bspline->set_info(*(mybuilder->SourcePtcl), mybuilder->TargetPtcl, mybuilder->Super2Prim); auto& centers = bspline->AtomicCenters; auto& ACInfo = mybuilder->AtomicCentersInfo; // load atomic center info only when it is not initialized if (centers.size() == 0) { bool success = true; app_log() << "Reading atomic center info for hybrid representation" << std::endl; for (int center_idx = 0; center_idx < ACInfo.Ncenters; center_idx++) { const int my_GroupID = ACInfo.GroupID[center_idx]; if (ACInfo.cutoff[center_idx] < 0) { app_error() << "Hybrid orbital representation needs parameter 'cutoff_radius' for atom " << center_idx << std::endl; success = false; } if (ACInfo.inner_cutoff[center_idx] < 0) { const double inner_cutoff = std::max(ACInfo.cutoff[center_idx] - 0.3, 0.0); app_log() << "Hybrid orbital representation setting 'inner_cutoff' to " << inner_cutoff << " for group " << my_GroupID << " as atom " << center_idx << std::endl; // overwrite the inner_cutoff of all the atoms of the same species for (int id = 0; id < ACInfo.Ncenters; id++) if (my_GroupID == ACInfo.GroupID[id]) ACInfo.inner_cutoff[id] = inner_cutoff; } else if (ACInfo.inner_cutoff[center_idx] > ACInfo.cutoff[center_idx]) { app_error() << "Hybrid orbital representation 'inner_cutoff' must be smaller than 'spline_radius' for atom " << center_idx << std::endl; success = false; } if (ACInfo.cutoff[center_idx] > 0) { if (ACInfo.lmax[center_idx] < 0) { app_error() << "Hybrid orbital representation needs parameter 'lmax' for atom " << center_idx << std::endl; success = false; } if (ACInfo.spline_radius[center_idx] < 0 && ACInfo.spline_npoints[center_idx] < 0) { app_log() << "Parameters 'spline_radius' and 'spline_npoints' for group " << my_GroupID << " as atom " << center_idx << " are not specified." 
<< std::endl; const double delta = std::min(0.02, ACInfo.cutoff[center_idx] / 4.0); const int n_grid_point = std::ceil((ACInfo.cutoff[center_idx] + 1e-4) / delta) + 3; for (int id = 0; id < ACInfo.Ncenters; id++) if (my_GroupID == ACInfo.GroupID[id]) { ACInfo.spline_npoints[id] = n_grid_point; ACInfo.spline_radius[id] = (n_grid_point - 1) * delta; } app_log() << " Based on default grid point distance " << delta << std::endl; app_log() << " Setting 'spline_npoints' to " << ACInfo.spline_npoints[center_idx] << std::endl; app_log() << " Setting 'spline_radius' to " << ACInfo.spline_radius[center_idx] << std::endl; } else { if (ACInfo.spline_radius[center_idx] < 0) { app_error() << "Hybrid orbital representation needs parameter 'spline_radius' for atom " << center_idx << std::endl; success = false; } if (ACInfo.spline_npoints[center_idx] < 0) { app_error() << "Hybrid orbital representation needs parameter 'spline_npoints' for atom " << center_idx << std::endl; success = false; } } // check maximally allowed cutoff_radius double max_allowed_cutoff = ACInfo.spline_radius[center_idx] - 2.0 * ACInfo.spline_radius[center_idx] / (ACInfo.spline_npoints[center_idx] - 1); if (success && ACInfo.cutoff[center_idx] > max_allowed_cutoff) { app_error() << "Hybrid orbital representation requires cutoff_radius<=" << max_allowed_cutoff << " calculated by spline_radius-2*spline_radius/(spline_npoints-1) for atom " << center_idx << std::endl; success = false; } } else { // no atomic regions for this atom type ACInfo.spline_radius[center_idx] = 0.0; ACInfo.spline_npoints[center_idx] = 0; ACInfo.lmax[center_idx] = 0; } } if (!success) BaseReader::myComm->barrier_and_abort("initialize_hybridrep_atomic_centers Failed to initialize atomic centers " "in hybrid orbital representation!"); for (int center_idx = 0; center_idx < ACInfo.Ncenters; center_idx++) { AtomicOrbitals<DataType> oneCenter(ACInfo.lmax[center_idx]); oneCenter.set_info(ACInfo.ion_pos[center_idx], ACInfo.cutoff[center_idx], ACInfo.inner_cutoff[center_idx], ACInfo.spline_radius[center_idx], ACInfo.non_overlapping_radius[center_idx], ACInfo.spline_npoints[center_idx]); centers.push_back(oneCenter); } } } /** initialize construct atomic orbital radial functions from plane waves */ inline void create_atomic_centers_Gspace(Vector<std::complex<double>>& cG, Communicate& band_group_comm, int iorb) override { band_group_comm.bcast(rotate_phase_r); band_group_comm.bcast(rotate_phase_i); band_group_comm.bcast(cG); //distribute G-vectors over processor groups const int Ngvecs = mybuilder->Gvecs[0].size(); const int Nprocs = band_group_comm.size(); const int Ngvecgroups = std::min(Ngvecs, Nprocs); Communicate gvec_group_comm(band_group_comm, Ngvecgroups); std::vector<int> gvec_groups(Ngvecgroups + 1, 0); FairDivideLow(Ngvecs, Ngvecgroups, gvec_groups); const int gvec_first = gvec_groups[gvec_group_comm.getGroupID()]; const int gvec_last = gvec_groups[gvec_group_comm.getGroupID() + 1]; // prepare Gvecs Ylm(G) typedef typename EinsplineSetBuilder::UnitCellType UnitCellType; Gvectors<double, UnitCellType> Gvecs(mybuilder->Gvecs[0], mybuilder->PrimCell, bspline->HalfG, gvec_first, gvec_last); // if(band_group_comm.isGroupLeader()) std::cout << "print band=" << iorb << " KE=" << Gvecs.evaluate_KE(cG) << std::endl; std::vector<AtomicOrbitals<DataType>>& centers = bspline->AtomicCenters; app_log() << "Transforming band " << iorb << " on Rank 0" << std::endl; // collect atomic centers by group std::vector<int> uniq_species; for (int center_idx = 0; center_idx < 
centers.size(); center_idx++) { auto& ACInfo = mybuilder->AtomicCentersInfo; const int my_GroupID = ACInfo.GroupID[center_idx]; int found_idx = -1; for (size_t idx = 0; idx < uniq_species.size(); idx++) if (my_GroupID == uniq_species[idx]) { found_idx = idx; break; } if (found_idx < 0) uniq_species.push_back(my_GroupID); } // construct group list std::vector<std::vector<int>> group_list(uniq_species.size()); for (int center_idx = 0; center_idx < centers.size(); center_idx++) { auto& ACInfo = mybuilder->AtomicCentersInfo; const int my_GroupID = ACInfo.GroupID[center_idx]; for (size_t idx = 0; idx < uniq_species.size(); idx++) if (my_GroupID == uniq_species[idx]) { group_list[idx].push_back(center_idx); break; } } for (int group_idx = 0; group_idx < group_list.size(); group_idx++) { const auto& mygroup = group_list[group_idx]; const double spline_radius = centers[mygroup[0]].getSplineRadius(); const int spline_npoints = centers[mygroup[0]].getSplineNpoints(); const int lmax = centers[mygroup[0]].getLmax(); const double delta = spline_radius / static_cast<double>(spline_npoints - 1); const int lm_tot = (lmax + 1) * (lmax + 1); const size_t natoms = mygroup.size(); const int policy = lm_tot > natoms ? 0 : 1; std::vector<std::complex<double>> i_power(lm_tot); // rotate phase is introduced here. std::complex<double> i_temp(rotate_phase_r, rotate_phase_i); for (size_t l = 0; l <= lmax; l++) { for (size_t lm = l * l; lm < (l + 1) * (l + 1); lm++) i_power[lm] = i_temp; i_temp *= std::complex<double>(0.0, 1.0); } std::vector<Matrix<double>> all_vals(natoms); std::vector<std::vector<aligned_vector<double>>> vals_local(spline_npoints * omp_get_max_threads()); VectorSoaContainer<double, 3> myRSoA(natoms); for (size_t idx = 0; idx < natoms; idx++) { all_vals[idx].resize(spline_npoints, lm_tot * 2); all_vals[idx] = 0.0; myRSoA(idx) = centers[mygroup[idx]].getCenterPos(); } #pragma omp parallel { const size_t tid = omp_get_thread_num(); const size_t nt = omp_get_num_threads(); for (int ip = 0; ip < spline_npoints; ip++) { const size_t ip_idx = tid * spline_npoints + ip; if (policy == 1) { vals_local[ip_idx].resize(lm_tot * 2); for (size_t lm = 0; lm < lm_tot * 2; lm++) { auto& vals = vals_local[ip_idx][lm]; vals.resize(natoms); std::fill(vals.begin(), vals.end(), 0.0); } } else { vals_local[ip_idx].resize(natoms * 2); for (size_t iat = 0; iat < natoms * 2; iat++) { auto& vals = vals_local[ip_idx][iat]; vals.resize(lm_tot); std::fill(vals.begin(), vals.end(), 0.0); } } } const size_t size_pw_tile = 32; const size_t num_pw_tiles = (Gvecs.NumGvecs + size_pw_tile - 1) / size_pw_tile; aligned_vector<double> j_lm_G(lm_tot, 0.0); std::vector<aligned_vector<double>> phase_shift_r(size_pw_tile); std::vector<aligned_vector<double>> phase_shift_i(size_pw_tile); std::vector<aligned_vector<double>> YlmG(size_pw_tile); for (size_t ig = 0; ig < size_pw_tile; ig++) { phase_shift_r[ig].resize(natoms); phase_shift_i[ig].resize(natoms); YlmG[ig].resize(lm_tot); } SoaSphericalTensor<double> Ylm(lmax); #pragma omp for for (size_t tile_id = 0; tile_id < num_pw_tiles; tile_id++) { const size_t ig_first = tile_id * size_pw_tile; const size_t ig_last = std::min((tile_id + 1) * size_pw_tile, Gvecs.NumGvecs); for (size_t ig = ig_first; ig < ig_last; ig++) { const size_t ig_local = ig - ig_first; // calculate phase shift for all the centers of this group Gvecs.calc_phase_shift(myRSoA, ig, phase_shift_r[ig_local], phase_shift_i[ig_local]); Gvecs.calc_Ylm_G(ig, Ylm, YlmG[ig_local]); } for (int ip = 0; ip < spline_npoints; ip++) { 
double r = delta * static_cast<double>(ip); const size_t ip_idx = tid * spline_npoints + ip; for (size_t ig = ig_first; ig < ig_last; ig++) { const size_t ig_local = ig - ig_first; // calculate spherical bessel function Gvecs.calc_jlm_G(lmax, r, ig, j_lm_G); for (size_t lm = 0; lm < lm_tot; lm++) j_lm_G[lm] *= YlmG[ig_local][lm]; const double cG_r = cG[ig + gvec_first].real(); const double cG_i = cG[ig + gvec_first].imag(); if (policy == 1) { for (size_t lm = 0; lm < lm_tot; lm++) { double* restrict vals_r = vals_local[ip_idx][lm * 2].data(); double* restrict vals_i = vals_local[ip_idx][lm * 2 + 1].data(); const double* restrict ps_r_ptr = phase_shift_r[ig_local].data(); const double* restrict ps_i_ptr = phase_shift_i[ig_local].data(); double cG_j_r = cG_r * j_lm_G[lm]; double cG_j_i = cG_i * j_lm_G[lm]; #pragma omp simd aligned(vals_r, vals_i, ps_r_ptr, ps_i_ptr) for (size_t idx = 0; idx < natoms; idx++) { const double ps_r = ps_r_ptr[idx]; const double ps_i = ps_i_ptr[idx]; vals_r[idx] += cG_j_r * ps_r - cG_j_i * ps_i; vals_i[idx] += cG_j_i * ps_r + cG_j_r * ps_i; } } } else { for (size_t idx = 0; idx < natoms; idx++) { double* restrict vals_r = vals_local[ip_idx][idx * 2].data(); double* restrict vals_i = vals_local[ip_idx][idx * 2 + 1].data(); const double* restrict j_lm_G_ptr = j_lm_G.data(); double cG_ps_r = cG_r * phase_shift_r[ig_local][idx] - cG_i * phase_shift_i[ig_local][idx]; double cG_ps_i = cG_i * phase_shift_r[ig_local][idx] + cG_r * phase_shift_i[ig_local][idx]; #pragma omp simd aligned(vals_r, vals_i, j_lm_G_ptr) for (size_t lm = 0; lm < lm_tot; lm++) { const double jlm = j_lm_G_ptr[lm]; vals_r[lm] += cG_ps_r * jlm; vals_i[lm] += cG_ps_i * jlm; } } } } } } #pragma omp for collapse(2) for (int ip = 0; ip < spline_npoints; ip++) for (size_t idx = 0; idx < natoms; idx++) { double* vals = all_vals[idx][ip]; for (size_t tid = 0; tid < nt; tid++) for (size_t lm = 0; lm < lm_tot; lm++) { double vals_th_r, vals_th_i; const size_t ip_idx = tid * spline_npoints + ip; if (policy == 1) { vals_th_r = vals_local[ip_idx][lm * 2][idx]; vals_th_i = vals_local[ip_idx][lm * 2 + 1][idx]; } else { vals_th_r = vals_local[ip_idx][idx * 2][lm]; vals_th_i = vals_local[ip_idx][idx * 2 + 1][lm]; } const double real_tmp = 4.0 * M_PI * i_power[lm].real(); const double imag_tmp = 4.0 * M_PI * i_power[lm].imag(); vals[lm] += vals_th_r * real_tmp - vals_th_i * imag_tmp; vals[lm + lm_tot] += vals_th_i * real_tmp + vals_th_r * imag_tmp; } } } //app_log() << "Building band " << iorb << " at center " << center_idx << std::endl; for (size_t idx = 0; idx < natoms; idx++) { // reduce all_vals band_group_comm.reduce_in_place(all_vals[idx].data(), all_vals[idx].size()); if (!band_group_comm.isGroupLeader()) continue; #pragma omp parallel for for (int lm = 0; lm < lm_tot; lm++) { auto& mycenter = centers[mygroup[idx]]; aligned_vector<double> splineData_r(spline_npoints); UBspline_1d_d* atomic_spline_r; for (size_t ip = 0; ip < spline_npoints; ip++) splineData_r[ip] = all_vals[idx][ip][lm]; atomic_spline_r = einspline::create(atomic_spline_r, 0.0, spline_radius, spline_npoints, splineData_r.data(), ((lm == 0) || (lm > 3))); if (!bspline->is_complex) { mycenter.set_spline(atomic_spline_r, lm, iorb); einspline::destroy(atomic_spline_r); } else { aligned_vector<double> splineData_i(spline_npoints); UBspline_1d_d* atomic_spline_i; for (size_t ip = 0; ip < spline_npoints; ip++) splineData_i[ip] = all_vals[idx][ip][lm + lm_tot]; atomic_spline_i = einspline::create(atomic_spline_i, 0.0, spline_radius, spline_npoints, 
splineData_i.data(), ((lm == 0) || (lm > 3))); mycenter.set_spline(atomic_spline_r, lm, iorb * 2); mycenter.set_spline(atomic_spline_i, lm, iorb * 2 + 1); einspline::destroy(atomic_spline_r); einspline::destroy(atomic_spline_i); } } } #ifdef PRINT_RADIAL char fname[64]; sprintf(fname, "band_%d_center_%d_pw.dat", iorb, center_idx); FILE* fout_pw = fopen(fname, "w"); sprintf(fname, "band_%d_center_%d_spline_v.dat", iorb, center_idx); FILE* fout_spline_v = fopen(fname, "w"); sprintf(fname, "band_%d_center_%d_spline_g.dat", iorb, center_idx); FILE* fout_spline_g = fopen(fname, "w"); sprintf(fname, "band_%d_center_%d_spline_l.dat", iorb, center_idx); FILE* fout_spline_l = fopen(fname, "w"); fprintf(fout_pw, "# r vals(lm)\n"); fprintf(fout_spline_v, "# r vals(lm)\n"); fprintf(fout_spline_g, "# r grads(lm)\n"); fprintf(fout_spline_l, "# r lapls(lm)\n"); // write to file for plotting for (int ip = 0; ip < spline_npoints - 1; ip++) { double r = delta * static_cast<double>(ip); mycenter.SplineInst->evaluate_vgl(r, mycenter.localV, mycenter.localG, mycenter.localL); fprintf(fout_pw, "%15.10lf ", r); fprintf(fout_spline_v, "%15.10lf ", r); fprintf(fout_spline_g, "%15.10lf ", r); fprintf(fout_spline_l, "%15.10lf ", r); for (int lm = 0; lm < lm_tot; lm++) { fprintf(fout_pw, "%15.10lf %15.10lf ", all_vals[center_idx][ip][lm].real(), all_vals[center_idx][ip][lm].imag()); fprintf(fout_spline_v, "%15.10lf %15.10lf ", mycenter.localV[lm * mycenter.Npad + iorb * 2], mycenter.localV[lm * mycenter.Npad + iorb * 2 + 1]); fprintf(fout_spline_g, "%15.10lf %15.10lf ", mycenter.localG[lm * mycenter.Npad + iorb * 2], mycenter.localG[lm * mycenter.Npad + iorb * 2 + 1]); fprintf(fout_spline_l, "%15.10lf %15.10lf ", mycenter.localL[lm * mycenter.Npad + iorb * 2], mycenter.localL[lm * mycenter.Npad + iorb * 2 + 1]); } fprintf(fout_pw, "\n"); fprintf(fout_spline_v, "\n"); fprintf(fout_spline_g, "\n"); fprintf(fout_spline_l, "\n"); } fclose(fout_pw); fclose(fout_spline_v); fclose(fout_spline_g); fclose(fout_spline_l); #endif } } }; } // namespace qmcplusplus #endif
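The reader above projects each plane-wave orbital onto atomic centers using the Rayleigh expansion e^{iG.r} = 4*pi * sum_{l,m} i^l j_l(|G| r) Y_lm(G_hat) Y_lm(r_hat), which is where the j_lm_G Bessel values, the YlmG spherical harmonics, and the 4*pi*i^l factors in the reduction come from. A small hedged numerical check of the l = 0 piece of that identity is sketched below: the spherical average of a plane wave over directions of r equals j_0(|G| r) = sin(|G| r)/(|G| r). The Monte-Carlo averaging and all names are illustrative assumptions, not code from the reader.

#include <cmath>
#include <complex>
#include <cstdio>
#include <random>

int main() {
    const double G[3] = {1.3, -0.7, 2.1};                 // arbitrary G-vector
    const double Gmag = std::sqrt(G[0]*G[0] + G[1]*G[1] + G[2]*G[2]);
    const double r = 0.8;                                 // fixed radius
    std::mt19937 gen(42);
    std::normal_distribution<double> nd(0.0, 1.0);
    std::complex<double> avg(0.0, 0.0);
    const int nsamples = 200000;
    for (int s = 0; s < nsamples; ++s) {
        // Uniform random direction from a normalized Gaussian 3-vector.
        double u[3] = {nd(gen), nd(gen), nd(gen)};
        const double norm = std::sqrt(u[0]*u[0] + u[1]*u[1] + u[2]*u[2]);
        const double phase = (G[0]*u[0] + G[1]*u[1] + G[2]*u[2]) * (r / norm);
        avg += std::complex<double>(std::cos(phase), std::sin(phase));
    }
    avg /= static_cast<double>(nsamples);
    const double j0 = std::sin(Gmag * r) / (Gmag * r);    // l = 0 spherical Bessel function
    std::printf("<e^{iG.r}> over directions = %.5f%+.5fi,  j0(|G|r) = %.5f\n",
                avg.real(), avg.imag(), j0);
    return 0;
}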
DRB052-indirectaccesssharebase-orig-no.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: [email protected], [email protected], [email protected], [email protected], [email protected]) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. 
DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* This example is to mimic a memory access pattern extracted from an LLNL proxy app. Two pointers have distance of 12. They are used as base addresses of two arrays, indexed through an index set. The index set has no two indices with distance of 12. So there is no loop carried dependence. */ #include <assert.h> #include <stdio.h> #include <stdlib.h> int indexSet[180] = {521, 523, 525, 527, 529, 531, 547, 549, 551, 553, 555, 557, 573, 575, 577, 579, 581, 583, 599, 601, 603, 605, 607, 609, 625, 627, 629, 631, 633, 635, 651, 653, 655, 657, 659, 661, 859, 861, 863, 865, 867, 869, 885, 887, 889, 891, 893, 895, 911, 913, 915, 917, 919, 921, 937, 939, 941, 943, 945, 947, 963, 965, 967, 969, 971, 973, 989, 991, 993, 995, 997, 999, 1197, 1199, 1201, 1203, 1205, 1207, 1223, 1225, 1227, 1229, 1231, 1233, 1249, 1251, 1253, 1255, 1257, 1259, 1275, 1277, 1279, 1281, 1283, 1285, 1301, 1303, 1305, 1307, 1309, 1311, 1327, 1329, 1331, 1333, 1335, 1337, 1535, 1537, 1539, 1541, 1543, 1545, 1561, 1563, 1565, 1567, 1569, 1571, 1587, 1589, 1591, 1593, 1595, 1597, 1613, 1615, 1617, 1619, 1621, 1623, 1639, 1641, 1643, 1645, 1647, 1649, 1665, 1667, 1669, 1671, 1673, 1675, 1873, 1875, 1877, 1879, 1881, 1883, 1899, 1901, 1903, 1905, 1907, 1909, 1925, 1927, 1929, 1931, 1933, 1935, 1951, 1953, 1955, 1957, 1959, 1961, 1977, 1979, 1981, 1983, 1985, 1987, 2003, 2005, 2007, 2009, 2011, 2013}; int main(int argc, char * argv[]) { double * base = (double * )malloc(sizeof (double)*((2013+12)+1)); double * xa1 = base; double * xa2 = base+12; int i; int _ret_val_0; if (base==0) { printf("Error, malloc() returns NULL. End execution. \n"); _ret_val_0=1; return _ret_val_0; } #pragma loop name main#0 #pragma cetus parallel #pragma omp parallel for for (i=521; i<=2025; ++ i) { base[i]=0.0; } /* this level of loop has no loop carried dependence */ #pragma loop name main#1 for (i=0; i<180; ++ i) { int idx = indexSet[i]; xa1[idx]+=1.0; xa2[idx]+=3.0; } /* verify the results, no overlapping of xa1 vs. xa2, no addition happens to the same element twice */ #pragma loop name main#2 for (i=521; i<=2025; ++ i) { /* printf ("%f ", base[i]); */ (((void)sizeof ((base[i]!=4.0) ? 1 : 0)), ({ if (base[i]!=4.0) { ; } else { __assert_fail("base[i]!=4.0", "DRB052-indirectaccesssharebase-orig-no.c", 126, __PRETTY_FUNCTION__); } })); } free(base); _ret_val_0=0; return _ret_val_0; }
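The race-freedom argument in the file above rests on one property of indexSet: no two entries differ by exactly 12, so base[idx] accessed through xa1 never aliases base[idx + 12] accessed through xa2, and the indirect-access loop carries no dependence. A small hedged sketch of that check is below; it uses only a short illustrative subset of the 180-entry table (substitute the full indexSet from the file to verify the real case).

#include <cstdio>

int main() {
    // Illustrative subset of the indexSet from DRB052.
    const int indexSet[] = {521, 523, 525, 527, 529, 531, 547, 549, 551, 553};
    const int n = sizeof(indexSet) / sizeof(indexSet[0]);
    int conflicts = 0;
    for (int i = 0; i < n; ++i)
        for (int j = 0; j < n; ++j)
            if (indexSet[i] + 12 == indexSet[j])   // xa2[indexSet[i]] would alias xa1[indexSet[j]]
                ++conflicts;
    std::printf("distance-12 conflicts: %d (0 => the indexSet loop is safe to parallelize)\n",
                conflicts);
    return 0;
}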
common.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_UTILS_COMMON_FUN_H_ #define LIGHTGBM_UTILS_COMMON_FUN_H_ #include <LightGBM/utils/log.h> #include <LightGBM/utils/openmp_wrapper.h> #include <limits> #include <string> #include <algorithm> #include <cmath> #include <cstdint> #include <cstdio> #include <functional> #include <iomanip> #include <iterator> #include <memory> #include <sstream> #include <type_traits> #include <utility> #include <vector> #ifdef _MSC_VER #include "intrin.h" #endif namespace LightGBM { namespace Common { inline static char tolower(char in) { if (in <= 'Z' && in >= 'A') return in - ('Z' - 'z'); return in; } inline static std::string Trim(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1); str.erase(0, str.find_first_not_of(" \f\n\r\t\v")); return str; } inline static std::string RemoveQuotationSymbol(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of("'\"") + 1); str.erase(0, str.find_first_not_of("'\"")); return str; } inline static bool StartsWith(const std::string& str, const std::string prefix) { if (str.substr(0, prefix.size()) == prefix) { return true; } else { return false; } } inline static std::vector<std::string> Split(const char* c_str, char delimiter) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == delimiter) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> SplitLines(const char* c_str) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == '\n' || str[pos] == '\r') { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } // skip the line endings while (str[pos] == '\n' || str[pos] == '\r') ++pos; // new begin i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { bool met_delimiters = false; for (int j = 0; delimiters[j] != '\0'; ++j) { if (str[pos] == delimiters[j]) { met_delimiters = true; break; } } if (met_delimiters) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } template<typename T> inline static const char* Atoi(const char* p, T* out) { int sign; T value; while (*p == ' ') { ++p; } sign = 1; if (*p == '-') { sign = -1; ++p; } else if (*p == '+') { ++p; } for (value = 0; *p >= '0' && *p <= '9'; ++p) { value = value * 10 + (*p - '0'); } *out = static_cast<T>(sign * value); while (*p == ' ') { ++p; } return p; } template<typename T> inline static double Pow(T base, int power) { if (power < 0) { return 1.0 / Pow(base, -power); } else if (power == 0) { return 1; } else if (power % 2 == 0) { return Pow(base*base, power / 2); } else if (power % 3 == 0) { return Pow(base*base*base, power / 3); } else { return base * Pow(base, power - 1); } } inline static const char* Atof(const char* p, double* out) { int frac; double sign, value, scale; *out = NAN; // Skip leading white space, if any. 
while (*p == ' ') { ++p; } // Get sign, if any. sign = 1.0; if (*p == '-') { sign = -1.0; ++p; } else if (*p == '+') { ++p; } // is a number if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') { // Get digits before decimal point or exponent, if any. for (value = 0.0; *p >= '0' && *p <= '9'; ++p) { value = value * 10.0 + (*p - '0'); } // Get digits after decimal point, if any. if (*p == '.') { double right = 0.0; int nn = 0; ++p; while (*p >= '0' && *p <= '9') { right = (*p - '0') + right * 10.0; ++nn; ++p; } value += right / Pow(10.0, nn); } // Handle exponent, if any. frac = 0; scale = 1.0; if ((*p == 'e') || (*p == 'E')) { uint32_t expon; // Get sign of exponent, if any. ++p; if (*p == '-') { frac = 1; ++p; } else if (*p == '+') { ++p; } // Get digits of exponent, if any. for (expon = 0; *p >= '0' && *p <= '9'; ++p) { expon = expon * 10 + (*p - '0'); } if (expon > 308) expon = 308; // Calculate scaling factor. while (expon >= 50) { scale *= 1E50; expon -= 50; } while (expon >= 8) { scale *= 1E8; expon -= 8; } while (expon > 0) { scale *= 10.0; expon -= 1; } } // Return signed and scaled floating point result. *out = sign * (frac ? (value / scale) : (value * scale)); } else { size_t cnt = 0; while (*(p + cnt) != '\0' && *(p + cnt) != ' ' && *(p + cnt) != '\t' && *(p + cnt) != ',' && *(p + cnt) != '\n' && *(p + cnt) != '\r' && *(p + cnt) != ':') { ++cnt; } if (cnt > 0) { std::string tmp_str(p, cnt); std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower); if (tmp_str == std::string("na") || tmp_str == std::string("nan") || tmp_str == std::string("null")) { *out = NAN; } else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) { *out = sign * 1e308; } else { Log::Fatal("Unknown token %s in data file", tmp_str.c_str()); } p += cnt; } } while (*p == ' ') { ++p; } return p; } inline static bool AtoiAndCheck(const char* p, int* out) { const char* after = Atoi(p, out); if (*after != '\0') { return false; } return true; } inline static bool AtofAndCheck(const char* p, double* out) { const char* after = Atof(p, out); if (*after != '\0') { return false; } return true; } inline static unsigned CountDecimalDigit32(uint32_t n) { #if defined(_MSC_VER) || defined(__GNUC__) static const uint32_t powers_of_10[] = { 0, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 }; #ifdef _MSC_VER unsigned long i = 0; _BitScanReverse(&i, n | 1); uint32_t t = (i + 1) * 1233 >> 12; #elif __GNUC__ uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12; #endif return t - (n < powers_of_10[t]) + 1; #else if (n < 10) return 1; if (n < 100) return 2; if (n < 1000) return 3; if (n < 10000) return 4; if (n < 100000) return 5; if (n < 1000000) return 6; if (n < 10000000) return 7; if (n < 100000000) return 8; if (n < 1000000000) return 9; return 10; #endif } inline static void Uint32ToStr(uint32_t value, char* buffer) { const char kDigitsLut[200] = { '0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6', '0', '7', '0', '8', '0', '9', '1', '0', '1', '1', '1', '2', '1', '3', '1', '4', '1', '5', '1', '6', '1', '7', '1', '8', '1', '9', '2', '0', '2', '1', '2', '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7', '2', '8', '2', '9', '3', '0', '3', '1', '3', '2', '3', '3', '3', '4', '3', '5', '3', '6', '3', '7', '3', '8', '3', '9', '4', '0', '4', '1', '4', '2', '4', '3', '4', '4', '4', '5', '4', '6', '4', '7', '4', '8', '4', '9', '5', '0', '5', '1', '5', '2', '5', '3', '5', '4', '5', '5', '5', '6', '5', '7', '5', '8', '5', '9', '6', 
'0', '6', '1', '6', '2', '6', '3', '6', '4', '6', '5', '6', '6', '6', '7', '6', '8', '6', '9', '7', '0', '7', '1', '7', '2', '7', '3', '7', '4', '7', '5', '7', '6', '7', '7', '7', '8', '7', '9', '8', '0', '8', '1', '8', '2', '8', '3', '8', '4', '8', '5', '8', '6', '8', '7', '8', '8', '8', '9', '9', '0', '9', '1', '9', '2', '9', '3', '9', '4', '9', '5', '9', '6', '9', '7', '9', '8', '9', '9' }; unsigned digit = CountDecimalDigit32(value); buffer += digit; *buffer = '\0'; while (value >= 100) { const unsigned i = (value % 100) << 1; value /= 100; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } if (value < 10) { *--buffer = static_cast<char>(value) + '0'; } else { const unsigned i = value << 1; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } } inline static void Int32ToStr(int32_t value, char* buffer) { uint32_t u = static_cast<uint32_t>(value); if (value < 0) { *buffer++ = '-'; u = ~u + 1; } Uint32ToStr(u, buffer); } inline static void DoubleToStr(double value, char* buffer, size_t #ifdef _MSC_VER buffer_len #endif ) { #ifdef _MSC_VER sprintf_s(buffer, buffer_len, "%.17g", value); #else sprintf(buffer, "%.17g", value); #endif } inline static const char* SkipSpaceAndTab(const char* p) { while (*p == ' ' || *p == '\t') { ++p; } return p; } inline static const char* SkipReturn(const char* p) { while (*p == '\n' || *p == '\r' || *p == ' ') { ++p; } return p; } template<typename T, typename T2> inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) { std::vector<T2> ret(arr.size()); for (size_t i = 0; i < arr.size(); ++i) { ret[i] = static_cast<T2>(arr[i]); } return ret; } template<typename T, bool is_float, bool is_unsign> struct __TToStringHelperFast { void operator()(T value, char* buffer, size_t) const { Int32ToStr(value, buffer); } }; template<typename T> struct __TToStringHelperFast<T, true, false> { void operator()(T value, char* buffer, size_t #ifdef _MSC_VER buf_len #endif ) const { #ifdef _MSC_VER sprintf_s(buffer, buf_len, "%g", value); #else sprintf(buffer, "%g", value); #endif } }; template<typename T> struct __TToStringHelperFast<T, false, true> { void operator()(T value, char* buffer, size_t) const { Uint32ToStr(value, buffer); } }; template<typename T> inline static std::string ArrayToStringFast(const std::vector<T>& arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } __TToStringHelperFast<T, std::is_floating_point<T>::value, std::is_unsigned<T>::value> helper; const size_t buf_len = 16; std::vector<char> buffer(buf_len); std::stringstream str_buf; helper(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { helper(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } inline static std::string ArrayToString(const std::vector<double>& arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } const size_t buf_len = 32; std::vector<char> buffer(buf_len); std::stringstream str_buf; DoubleToStr(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { DoubleToStr(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } template<typename T, bool is_float> struct __StringToTHelper { T operator()(const std::string& str) const { T ret = 0; Atoi(str.c_str(), &ret); return ret; } }; template<typename T> struct __StringToTHelper<T, true> { T operator()(const std::string& str) const { return static_cast<T>(std::stod(str)); } }; 
template<typename T> inline static std::vector<T> StringToArray(const std::string& str, char delimiter) { std::vector<std::string> strs = Split(str.c_str(), delimiter); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T> inline static std::vector<T> StringToArray(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } std::vector<std::string> strs = Split(str.c_str(), ' '); CHECK(strs.size() == static_cast<size_t>(n)); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T, bool is_float> struct __StringToTHelperFast { const char* operator()(const char*p, T* out) const { return Atoi(p, out); } }; template<typename T> struct __StringToTHelperFast<T, true> { const char* operator()(const char*p, T* out) const { double tmp = 0.0f; auto ret = Atof(p, &tmp); *out = static_cast<T>(tmp); return ret; } }; template<typename T> inline static std::vector<T> StringToArrayFast(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } auto p_str = str.c_str(); __StringToTHelperFast<T, std::is_floating_point<T>::value> helper; std::vector<T> ret(n); for (int i = 0; i < n; ++i) { p_str = helper(p_str, &ret[i]); } return ret; } template<typename T> inline static std::string Join(const std::vector<T>& strs, const char* delimiter) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[0]; for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } template<> inline std::string Join<int8_t>(const std::vector<int8_t>& strs, const char* delimiter) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << static_cast<int16_t>(strs[0]); for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << static_cast<int16_t>(strs[i]); } return str_buf.str(); } template<typename T> inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) { if (end - start <= 0) { return std::string(""); } start = std::min(start, static_cast<size_t>(strs.size()) - 1); end = std::min(end, static_cast<size_t>(strs.size())); std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[start]; for (size_t i = start + 1; i < end; ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } inline static int64_t Pow2RoundUp(int64_t x) { int64_t t = 1; for (int i = 0; i < 64; ++i) { if (t >= x) { return t; } t <<= 1; } return 0; } /*! * \brief Do inplace softmax transformation on p_rec * \param p_rec The input/output vector of the values. 
*/ inline static void Softmax(std::vector<double>* p_rec) { std::vector<double> &rec = *p_rec; double wmax = rec[0]; for (size_t i = 1; i < rec.size(); ++i) { wmax = std::max(rec[i], wmax); } double wsum = 0.0f; for (size_t i = 0; i < rec.size(); ++i) { rec[i] = std::exp(rec[i] - wmax); wsum += rec[i]; } for (size_t i = 0; i < rec.size(); ++i) { rec[i] /= static_cast<double>(wsum); } } inline static void Softmax(const double* input, double* output, int len) { double wmax = input[0]; for (int i = 1; i < len; ++i) { wmax = std::max(input[i], wmax); } double wsum = 0.0f; for (int i = 0; i < len; ++i) { output[i] = std::exp(input[i] - wmax); wsum += output[i]; } for (int i = 0; i < len; ++i) { output[i] /= static_cast<double>(wsum); } } template<typename T> std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) { std::vector<const T*> ret; for (auto t = input.begin(); t !=input.end(); ++t) { ret.push_back(t->get()); } return ret; } template<typename T1, typename T2> inline static void SortForPair(std::vector<T1>* keys, std::vector<T2>* values, size_t start, bool is_reverse = false) { std::vector<std::pair<T1, T2>> arr; auto& ref_key = *keys; auto& ref_value = *values; for (size_t i = start; i < keys->size(); ++i) { arr.emplace_back(ref_key[i], ref_value[i]); } if (!is_reverse) { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first < b.first; }); } else { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first > b.first; }); } for (size_t i = start; i < arr.size(); ++i) { ref_key[i] = arr[i].first; ref_value[i] = arr[i].second; } } template <typename T> inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>* data) { std::vector<T*> ptr(data->size()); auto& ref_data = *data; for (size_t i = 0; i < data->size(); ++i) { ptr[i] = ref_data[i].data(); } return ptr; } template <typename T> inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) { std::vector<int> ret(data.size()); for (size_t i = 0; i < data.size(); ++i) { ret[i] = static_cast<int>(data[i].size()); } return ret; } inline static double AvoidInf(double x) { if (std::isnan(x)) { return 0.0; } else if (x >= 1e300) { return 1e300; } else if (x <= -1e300) { return -1e300; } else { return x; } } inline static float AvoidInf(float x) { if (std::isnan(x)) { return 0.0f; } else if (x >= 1e38) { return 1e38f; } else if (x <= -1e38) { return -1e38f; } else { return x; } } template<typename _Iter> inline static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) { return (0); } template<typename _RanIt, typename _Pr, typename _VTRanIt> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) { size_t len = _Last - _First; const size_t kMinInnerLen = 1024; int num_threads = 1; #pragma omp parallel #pragma omp master { num_threads = omp_get_num_threads(); } if (len <= kMinInnerLen || num_threads <= 1) { std::sort(_First, _Last, _Pred); return; } size_t inner_size = (len + num_threads - 1) / num_threads; inner_size = std::max(inner_size, kMinInnerLen); num_threads = static_cast<int>((len + inner_size - 1) / inner_size); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < num_threads; ++i) { size_t left = inner_size*i; size_t right = left + inner_size; right = std::min(right, len); if (right > left) { std::sort(_First + left, _First + right, _Pred); } } // Buffer for merge. 
std::vector<_VTRanIt> temp_buf(len); _RanIt buf = temp_buf.begin(); size_t s = inner_size; // Recursive merge while (s < len) { int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2)); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < loop_size; ++i) { size_t left = i * 2 * s; size_t mid = left + s; size_t right = mid + s; right = std::min(len, right); if (mid >= right) { continue; } std::copy(_First + left, _First + mid, buf + left); std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred); } s *= 2; } } template<typename _RanIt, typename _Pr> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) { return ParallelSort(_First, _Last, _Pred, IteratorValType(_First)); } // Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not template <typename T> inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) { auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) { std::ostringstream os; os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]"; Log::Fatal(os.str().c_str(), callername, i); }; for (int i = 1; i < ny; i += 2) { if (y[i - 1] < y[i]) { if (y[i - 1] < ymin) { fatal_msg(i - 1); } else if (y[i] > ymax) { fatal_msg(i); } } else { if (y[i - 1] > ymax) { fatal_msg(i - 1); } else if (y[i] < ymin) { fatal_msg(i); } } } if (ny & 1) { // odd if (y[ny - 1] < ymin || y[ny - 1] > ymax) { fatal_msg(ny - 1); } } } // One-pass scan over array w with nw elements: find min, max and sum of elements; // this is useful for checking weight requirements. template <typename T1, typename T2> inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) { T1 minw; T1 maxw; T1 sumw; int i; if (nw & 1) { // odd minw = w[0]; maxw = w[0]; sumw = w[0]; i = 2; } else { // even if (w[0] < w[1]) { minw = w[0]; maxw = w[1]; } else { minw = w[1]; maxw = w[0]; } sumw = w[0] + w[1]; i = 3; } for (; i < nw; i += 2) { if (w[i - 1] < w[i]) { minw = std::min(minw, w[i - 1]); maxw = std::max(maxw, w[i]); } else { minw = std::min(minw, w[i]); maxw = std::max(maxw, w[i - 1]); } sumw += w[i - 1] + w[i]; } if (mi != nullptr) { *mi = minw; } if (ma != nullptr) { *ma = maxw; } if (su != nullptr) { *su = static_cast<T2>(sumw); } } inline static std::vector<uint32_t> EmptyBitset(int n) { int size = n / 32; if (n % 32 != 0) ++size; return std::vector<uint32_t>(size); } template<typename T> inline static void InsertBitset(std::vector<uint32_t>* vec, const T val) { auto& ref_v = *vec; int i1 = val / 32; int i2 = val % 32; if (static_cast<int>(vec->size()) < i1 + 1) { vec->resize(i1 + 1, 0); } ref_v[i1] |= (1 << i2); } template<typename T> inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) { std::vector<uint32_t> ret; for (int i = 0; i < n; ++i) { int i1 = vals[i] / 32; int i2 = vals[i] % 32; if (static_cast<int>(ret.size()) < i1 + 1) { ret.resize(i1 + 1, 0); } ret[i1] |= (1 << i2); } return ret; } template<typename T> inline static bool FindInBitset(const uint32_t* bits, int n, T pos) { int i1 = pos / 32; if (i1 >= n) { return false; } int i2 = pos % 32; return (bits[i1] >> i2) & 1; } inline static bool CheckDoubleEqualOrdered(double a, double b) { double upper = std::nextafter(a, INFINITY); return b <= upper; } inline static double GetDoubleUpperBound(double a) { return std::nextafter(a, INFINITY);; } inline static size_t GetLine(const char* str) { auto start = str; while (*str != '\0' && *str != 
'\n' && *str != '\r') { ++str; } return str - start; } inline static const char* SkipNewLine(const char* str) { if (*str == '\r') { ++str; } if (*str == '\n') { ++str; } return str; } template <typename T> static int Sign(T x) { return (x > T(0)) - (x < T(0)); } template <typename T> static T SafeLog(T x) { if (x > 0) { return std::log(x); } else { return -INFINITY; } } inline bool CheckASCII(const std::string& s) { for (auto c : s) { if (static_cast<unsigned char>(c) > 127) { return false; } } return true; } inline bool CheckAllowedJSON(const std::string& s) { unsigned char char_code; for (auto c : s) { char_code = static_cast<unsigned char>(c); if (char_code == 34 // " || char_code == 44 // , || char_code == 58 // : || char_code == 91 // [ || char_code == 93 // ] || char_code == 123 // { || char_code == 125 // } ) { return false; } } return true; } } // namespace Common } // namespace LightGBM #endif // LightGBM_UTILS_COMMON_FUN_H_
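A small usage sketch for a few of the helpers above. It is not part of LightGBM; it assumes the header is reachable as <LightGBM/utils/common.h> together with its log.h and openmp_wrapper.h dependencies.

#include <LightGBM/utils/common.h>
#include <cstdio>
#include <string>
#include <vector>

int main() {
  using namespace LightGBM;
  // Tokenize a comma-separated record; consecutive delimiters yield no empty tokens.
  std::vector<std::string> fields = Common::Split("3.14,,nan,42", ',');
  // Parse the first field as a double (Atof also understands "nan"/"inf" tokens).
  double x = 0.0;
  Common::Atof(fields[0].c_str(), &x);
  // Join a numeric vector back into a space-separated string.
  std::vector<int> ids = {7, 11, 13};
  std::string joined = Common::Join(ids, " ");
  // In-place softmax over a small score vector.
  std::vector<double> scores = {1.0, 2.0, 3.0};
  Common::Softmax(&scores);
  std::printf("x=%g joined=%s p0=%g\n", x, joined.c_str(), scores[0]);
  return 0;
}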
pr67521.c
/* PR middle-end/67521 */ /* { dg-do compile } */ /* { dg-options "-fopenmp" } */ void foo (int x) { int i = 0; #pragma omp parallel for simd for (i = (i & x); i < 10; i = i + 2) /* { dg-error "initializer expression refers to iteration variable" } */ ; i = 0; #pragma omp parallel for simd for (i = 0; i < (i & x) + 10; i = i + 2) /* { dg-error "condition expression refers to iteration variable" } */ ; i = 0; #pragma omp parallel for simd for (i = 0; i < 10; i = i + ((i & x) + 2)) /* { dg-error "increment expression refers to iteration variable" } */ ; }
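For contrast, a variant (hypothetical, not from the GCC testsuite) whose init, condition, and increment stay within the canonical OpenMP loop form and do not otherwise read the iteration variable, so it is accepted under -fopenmp:

void bar (int x)
{
  int i;
  #pragma omp parallel for simd
  for (i = x & 1; i < 10; i = i + 2)  /* only the canonical uses of i remain */
    ;
}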
GB_unop__identity_uint64_uint8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint64_uint8) // op(A') function: GB (_unop_tran__identity_uint64_uint8) // C type: uint64_t // A type: uint8_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint64_t z = (uint64_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint64_t z = (uint64_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint64_uint8) ( uint64_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; uint64_t z = (uint64_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint8_t aij = Ax [p] ; uint64_t z = (uint64_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint64_uint8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
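As a usage sketch (a hypothetical driver, not generated code), the kernel above is the kind of specialization SuiteSparse:GraphBLAS can select when GrB_IDENTITY_UINT64 is applied to a GrB_UINT8 matrix through the public API; the uint8-to-uint64 typecast happens inside the apply.

#include <stdio.h>
#include "GraphBLAS.h"

int main (void)
{
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Matrix A, C ;
    GrB_Matrix_new (&A, GrB_UINT8, 4, 4) ;
    GrB_Matrix_new (&C, GrB_UINT64, 4, 4) ;
    GrB_Matrix_setElement_UINT8 (A, 200, 1, 2) ;
    // C = identity (A), typecasting uint8_t entries to uint64_t
    GrB_Matrix_apply (C, NULL, NULL, GrB_IDENTITY_UINT64, A, NULL) ;
    uint64_t x = 0 ;
    GrB_Matrix_extractElement_UINT64 (&x, C, 1, 2) ;
    printf ("C(1,2) = %llu\n", (unsigned long long) x) ;
    GrB_Matrix_free (&A) ;
    GrB_Matrix_free (&C) ;
    GrB_finalize ( ) ;
    return 0 ;
}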
GB_unaryop__minv_int8_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int8_int8 // op(A') function: GB_tran__minv_int8_int8 // C type: int8_t // A type: int8_t // cast: int8_t cij = (int8_t) aij // unaryop: cij = GB_IMINV_SIGNED (aij, 8) #define GB_ATYPE \ int8_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 8) ; // casting #define GB_CASTING(z, aij) \ int8_t z = (int8_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int8_int8 ( int8_t *Cx, // Cx and Ax may be aliased int8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int8_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
dpado.202001272104.batch_id_back.h
// // Created by Zhen Peng on 1/6/20. // #ifndef PADO_DPADO_H #define PADO_DPADO_H #include <vector> //#include <unordered_map> #include <map> #include <algorithm> #include <iostream> #include <limits.h> //#include <xmmintrin.h> #include <immintrin.h> #include <bitset> #include <math.h> #include <fstream> #include <omp.h> #include "globals.h" #include "dglobals.h" #include "dgraph.h" namespace PADO { template <VertexID BATCH_SIZE = 1024> class DistBVCPLL { private: static const VertexID BITPARALLEL_SIZE = 50; const inti THRESHOLD_PARALLEL = 0; // Structure for the type of label struct IndexType { struct Batch { // VertexID batch_id; // Batch ID VertexID start_index; // Index to the array distances where the batch starts VertexID size; // Number of distances element in this batch Batch() = default; Batch(VertexID start_index_, VertexID size_): start_index(start_index_), size(size_) { } // Batch(VertexID batch_id_, VertexID start_index_, VertexID size_): // batch_id(batch_id_), start_index(start_index_), size(size_) // { } }; struct DistanceIndexType { VertexID start_index; // Index to the array vertices where the same-distance vertices start VertexID size; // Number of the same-distance vertices UnweightedDist dist; // The real distance DistanceIndexType() = default; DistanceIndexType(VertexID start_index_, VertexID size_, UnweightedDist dist_): start_index(start_index_), size(size_), dist(dist_) { } }; // Bit-parallel Labels UnweightedDist bp_dist[BITPARALLEL_SIZE]; uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0} std::vector<Batch> batches; // Batch info std::vector<DistanceIndexType> distances; // Distance info std::vector<VertexID> vertices; // Vertices in the label, presented as temporary ID size_t get_size_in_bytes() const { return sizeof(bp_dist) + sizeof(bp_sets) + // batches.size() * sizeof(Batch) + distances.size() * sizeof(DistanceIndexType) + vertices.size() * sizeof(VertexID); } void clean_all_indices() { std::vector<Batch>().swap(batches); std::vector<DistanceIndexType>().swap(distances); std::vector<VertexID>().swap(vertices); } }; //__attribute__((aligned(64))); struct ShortIndex { // I use BATCH_SIZE + 1 bit for indicator bit array. // The v.indicator[BATCH_SIZE] is set if in current batch v has got any new labels already. // In this way, it helps update_label_indices() and can be reset along with other indicator elements. // std::bitset<BATCH_SIZE + 1> indicator; // Global indicator, indicator[r] (0 <= r < BATCH_SIZE) is set means root r once selected as candidate already // If the Batch structure is not used, the indicator could just be BATCH_SIZE long. 
// std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE, 0); std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE + 1, 0); // Use a queue to store candidates std::vector<VertexID> candidates_que = std::vector<VertexID>(BATCH_SIZE); VertexID end_candidates_que = 0; std::vector<uint8_t> is_candidate = std::vector<uint8_t>(BATCH_SIZE, 0); void indicator_reset() { std::fill(indicator.begin(), indicator.end(), 0); } }; //__attribute__((aligned(64))); // Type of Bit-Parallel Label struct BPLabelType { UnweightedDist bp_dist[BITPARALLEL_SIZE] = { 0 }; uint64_t bp_sets[BITPARALLEL_SIZE][2] = { {0} }; // [0]: S^{-1}, [1]: S^{0} }; // Type of Label Message Unit, for initializing distance table struct LabelTableUnit { VertexID root_id; VertexID label_global_id; UnweightedDist dist; LabelTableUnit() = default; LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) : root_id(r), label_global_id(l), dist(d) {} }; // Type of BitParallel Label Message Unit for initializing bit-parallel labels struct MsgBPLabel { VertexID r_root_id; UnweightedDist bp_dist[BITPARALLEL_SIZE]; uint64_t bp_sets[BITPARALLEL_SIZE][2]; MsgBPLabel() = default; MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2]) : r_root_id(r) { memcpy(bp_dist, dist, sizeof(bp_dist)); memcpy(bp_sets, sets, sizeof(bp_sets)); } }; VertexID num_v = 0; VertexID num_masters = 0; // VertexID BATCH_SIZE = 0; int host_id = 0; int num_hosts = 0; MPI_Datatype V_ID_Type; std::vector<IndexType> L; inline void bit_parallel_push_labels( const DistGraph &G, VertexID v_global, // std::vector<VertexID> &tmp_que, // VertexID &end_tmp_que, // std::vector< std::pair<VertexID, VertexID> > &sibling_es, // VertexID &num_sibling_es, // std::vector< std::pair<VertexID, VertexID> > &child_es, // VertexID &num_child_es, std::vector<VertexID> &tmp_q, VertexID &size_tmp_q, std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es, VertexID &size_tmp_sibling_es, std::vector< std::pair<VertexID, VertexID> > &tmp_child_es, VertexID &size_tmp_child_es, const VertexID &offset_tmp_q, std::vector<UnweightedDist> &dists, UnweightedDist iter); inline void bit_parallel_labeling( const DistGraph &G, std::vector<uint8_t> &used_bp_roots); // inline void bit_parallel_push_labels( // const DistGraph &G, // VertexID v_global, // std::vector<VertexID> &tmp_que, // VertexID &end_tmp_que, // std::vector< std::pair<VertexID, VertexID> > &sibling_es, // VertexID &num_sibling_es, // std::vector< std::pair<VertexID, VertexID> > &child_es, // VertexID &num_child_es, // std::vector<UnweightedDist> &dists, // UnweightedDist iter); // inline void bit_parallel_labeling( // const DistGraph &G, //// std::vector<IndexType> &L, // std::vector<uint8_t> &used_bp_roots); inline void batch_process( const DistGraph &G, // const VertexID b_id, const VertexID roots_start, const VertexID roots_size, const std::vector<uint8_t> &used_bp_roots, std::vector<VertexID> &active_queue, VertexID &end_active_queue, std::vector<VertexID> &got_candidates_queue, VertexID &end_got_candidates_queue, std::vector<ShortIndex> &short_index, std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::vector<VertexID> > &recved_dist_table, std::vector<BPLabelType> &bp_labels_table, std::vector<uint8_t> &got_candidates, // std::vector<bool> &got_candidates, std::vector<uint8_t> &is_active, // std::vector<bool> &is_active, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated); // std::vector<bool> 
&once_candidated); inline VertexID initialization( const DistGraph &G, std::vector<ShortIndex> &short_index, std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::vector<VertexID> > &recved_dist_table, std::vector<BPLabelType> &bp_labels_table, std::vector<VertexID> &active_queue, VertexID &end_active_queue, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated, // std::vector<bool> &once_candidated, // VertexID b_id, VertexID roots_start, VertexID roots_size, // std::vector<VertexID> &roots_master_local, const std::vector<uint8_t> &used_bp_roots); // inline void push_single_label( // VertexID v_head_global, // VertexID label_root_id, // VertexID roots_start, // const DistGraph &G, // std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<bool> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<bool> &once_candidated, // const std::vector<BPLabelType> &bp_labels_table, // const std::vector<uint8_t> &used_bp_roots, // UnweightedDist iter); inline void schedule_label_pushing_para( const DistGraph &G, const VertexID roots_start, const std::vector<uint8_t> &used_bp_roots, const std::vector<VertexID> &active_queue, const VertexID global_start, const VertexID global_size, const VertexID local_size, // const VertexID start_active_queue, // const VertexID size_active_queue, std::vector<VertexID> &got_candidates_queue, VertexID &end_got_candidates_queue, std::vector<ShortIndex> &short_index, const std::vector<BPLabelType> &bp_labels_table, std::vector<uint8_t> &got_candidates, std::vector<uint8_t> &is_active, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated, const UnweightedDist iter); inline void local_push_labels_seq( VertexID v_head_global, EdgeID start_index, EdgeID bound_index, VertexID roots_start, const std::vector<VertexID> &labels_buffer, const DistGraph &G, std::vector<ShortIndex> &short_index, std::vector<VertexID> &got_candidates_queue, VertexID &end_got_candidates_queue, std::vector<uint8_t> &got_candidates, // std::vector<bool> &got_candidates, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated, // std::vector<bool> &once_candidated, const std::vector<BPLabelType> &bp_labels_table, const std::vector<uint8_t> &used_bp_roots, const UnweightedDist iter); inline void local_push_labels_para( const VertexID v_head_global, const EdgeID start_index, const EdgeID bound_index, const VertexID roots_start, const std::vector<VertexID> &labels_buffer, const DistGraph &G, std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, std::vector<VertexID> &tmp_got_candidates_queue, VertexID &size_tmp_got_candidates_queue, const VertexID offset_tmp_queue, std::vector<uint8_t> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, std::vector<VertexID> &tmp_once_candidated_queue, VertexID &size_tmp_once_candidated_queue, std::vector<uint8_t> &once_candidated, const std::vector<BPLabelType> &bp_labels_table, const std::vector<uint8_t> &used_bp_roots, const UnweightedDist iter); // inline void local_push_labels( // VertexID v_head_local, // VertexID roots_start, // const DistGraph &G, // std::vector<ShortIndex> 
&short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<bool> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<bool> &once_candidated, // const std::vector<BPLabelType> &bp_labels_table, // const std::vector<uint8_t> &used_bp_roots, // UnweightedDist iter); inline void schedule_label_inserting_para( const DistGraph &G, const VertexID roots_start, const VertexID roots_size, std::vector<ShortIndex> &short_index, const std::vector< std::vector<UnweightedDist> > &dist_table, const std::vector<VertexID> &got_candidates_queue, const VertexID start_got_candidates_queue, const VertexID size_got_candidates_queue, std::vector<uint8_t> &got_candidates, std::vector<VertexID> &active_queue, VertexID &end_active_queue, std::vector<uint8_t> &is_active, std::vector< std::pair<VertexID, VertexID> > &buffer_send, const VertexID iter); inline bool distance_query( VertexID cand_root_id, VertexID v_id, VertexID roots_start, // const std::vector<IndexType> &L, const std::vector< std::vector<UnweightedDist> > &dist_table, UnweightedDist iter); inline void insert_label_only_seq( VertexID cand_root_id, // VertexID cand_root_id, VertexID v_id_local, VertexID roots_start, VertexID roots_size, const DistGraph &G, // std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::pair<VertexID, VertexID> > &buffer_send); // UnweightedDist iter); inline void insert_label_only_para( VertexID cand_root_id, VertexID v_id_local, VertexID roots_start, VertexID roots_size, const DistGraph &G, // std::vector< std::pair<VertexID, VertexID> > &buffer_send) std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send, EdgeID &size_tmp_buffer_send, const EdgeID offset_tmp_buffer_send); inline void update_label_indices( const VertexID v_id, const VertexID inserted_count, // std::vector<IndexType> &L, std::vector<ShortIndex> &short_index, // VertexID b_id, const UnweightedDist iter); inline void reset_at_end( const DistGraph &G, // VertexID roots_start, // const std::vector<VertexID> &roots_master_local, std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::vector<VertexID> > &recved_dist_table, std::vector<BPLabelType> &bp_labels_table, const std::vector<VertexID> &once_candidated_queue, const VertexID end_once_candidated_queue); // template <typename E_T, typename F> // inline void every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun); template <typename E_T> inline void one_host_bcasts_buffer_to_buffer( int root, std::vector<E_T> &buffer_send, std::vector<E_T> &buffer_recv); // // Function: get the destination host id which is i hop from this host. // // For example, 1 hop from host 2 is host 0 (assume total 3 hosts); // // -1 hop from host 0 is host 2. // int hop_2_me_host_id(int hop) const // { // assert(hop >= -(num_hosts - 1) && hop < num_hosts && hop != 0); // return (host_id + hop + num_hosts) % num_hosts; // } // // Function: get the destination host id which is i hop from the root. // // For example, 1 hop from host 2 is host 0 (assume total 3 hosts); // // -1 hop from host 0 is host 2. 
// int hop_2_root_host_id(int hop, int root) const // { // assert(hop >= -(num_hosts - 1) && hop < num_hosts && hop != 0); // assert(root >= 0 && root < num_hosts); // return (root + hop + num_hosts) % num_hosts; // } size_t get_index_size() { size_t bytes = 0; for (VertexID v_i = 0; v_i < num_masters; ++v_i) { bytes += L[v_i].get_size_in_bytes(); } return bytes; } // Test only // uint64_t normal_hit_count = 0; // uint64_t bp_hit_count = 0; // uint64_t total_check_count = 0; // uint64_t normal_check_count = 0; // uint64_t total_candidates_num = 0; // uint64_t set_candidates_num = 0; // double initializing_time = 0; // double candidating_time = 0; // double adding_time = 0; // double distance_query_time = 0; // double init_index_time = 0; // double init_dist_matrix_time = 0; // double init_start_reset_time = 0; // double init_indicators_time = 0; //L2CacheMissRate cache_miss; // double message_time = 0; // double bp_labeling_time = 0; // double initializing_time = 0; // double scatter_time = 0; // double gather_time = 0; // double clearup_time = 0; // TotalInstructsExe candidating_ins_count; // TotalInstructsExe adding_ins_count; // TotalInstructsExe bp_labeling_ins_count; // TotalInstructsExe bp_checking_ins_count; // TotalInstructsExe dist_query_ins_count; // uint64_t caller_line = 0; // End test public: // std::pair<uint64_t, uint64_t> length_larger_than_16 = std::make_pair(0, 0); DistBVCPLL() = default; explicit DistBVCPLL( const DistGraph &G); // UnweightedDist dist_distance_query_pair( // VertexID a_global, // VertexID b_global, // const DistGraph &G); }; // class DistBVCPLL template <VertexID BATCH_SIZE> DistBVCPLL<BATCH_SIZE>:: DistBVCPLL( const DistGraph &G) { num_v = G.num_v; assert(num_v >= BATCH_SIZE); num_masters = G.num_masters; host_id = G.host_id; // { // if (1 == host_id) { // volatile int i = 0; // while (i == 0) { // sleep(5); // } // } // } num_hosts = G.num_hosts; V_ID_Type = G.V_ID_Type; // L.resize(num_v); L.resize(num_masters); VertexID remainer = num_v % BATCH_SIZE; VertexID b_i_bound = num_v / BATCH_SIZE; std::vector<uint8_t> used_bp_roots(num_v, 0); //cache_miss.measure_start(); double time_labeling = -WallTimer::get_time_mark(); // bp_labeling_time -= WallTimer::get_time_mark(); bit_parallel_labeling(G, used_bp_roots); // bp_labeling_time += WallTimer::get_time_mark(); {//test //#ifdef DEBUG_MESSAGES_ON if (0 == host_id) { printf("host_id: %u bp_labeling_finished.\n", host_id); } //#endif } std::vector<VertexID> active_queue(num_masters); // Any vertex v who is active should be put into this queue. VertexID end_active_queue = 0; std::vector<uint8_t> is_active(num_masters, false);// is_active[v] is true means vertex v is in the active queue. // std::vector<bool> is_active(num_masters, false);// is_active[v] is true means vertex v is in the active queue. std::vector<VertexID> got_candidates_queue(num_masters); // Any vertex v who got candidates should be put into this queue. 
VertexID end_got_candidates_queue = 0; std::vector<uint8_t> got_candidates(num_masters, false); // got_candidates[v] is true means vertex v is in the queue got_candidates_queue // std::vector<bool> got_candidates(num_masters, false); // got_candidates[v] is true means vertex v is in the queue got_candidates_queue std::vector<ShortIndex> short_index(num_masters); std::vector< std::vector<UnweightedDist> > dist_table(BATCH_SIZE, std::vector<UnweightedDist>(num_v, MAX_UNWEIGHTED_DIST)); std::vector<VertexID> once_candidated_queue(num_masters); // if short_index[v].indicator.any() is true, v is in the queue. // Used mainly for resetting short_index[v].indicator. VertexID end_once_candidated_queue = 0; std::vector<uint8_t> once_candidated(num_masters, false); // std::vector<bool> once_candidated(num_masters, false); std::vector< std::vector<VertexID> > recved_dist_table(BATCH_SIZE); // Some distances are from other hosts. This is used to reset the dist_table. std::vector<BPLabelType> bp_labels_table(BATCH_SIZE); // All roots' bit-parallel labels //printf("b_i_bound: %u\n", b_i_bound);//test for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // {// Batch number limit // if (10 == b_i) { // remainer = 0; // break; // } // } // { ////#ifdef DEBUG_MESSAGES_ON if (0 == host_id) { printf("b_i: %u\n", b_i);//test } ////#endif // } batch_process( G, // b_i, b_i * BATCH_SIZE, BATCH_SIZE, // L, used_bp_roots, active_queue, end_active_queue, got_candidates_queue, end_got_candidates_queue, short_index, dist_table, recved_dist_table, bp_labels_table, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated); // exit(EXIT_SUCCESS); //test } if (remainer != 0) { { //#ifdef DEBUG_MESSAGES_ON if (0 == host_id) { printf("b_i: %u\n", b_i_bound);//test } //#endif } batch_process( G, // b_i_bound, b_i_bound * BATCH_SIZE, remainer, // L, used_bp_roots, active_queue, end_active_queue, got_candidates_queue, end_got_candidates_queue, short_index, dist_table, recved_dist_table, bp_labels_table, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated); } time_labeling += WallTimer::get_time_mark(); //cache_miss.measure_stop(); // Test setlocale(LC_NUMERIC, ""); if (0 == host_id) { printf("BATCH_SIZE: %u ", BATCH_SIZE); printf("BP_Size: %u\n", BITPARALLEL_SIZE); } {// Total Number of Labels EdgeID local_num_labels = 0; for (VertexID v_global = 0; v_global < num_v; ++v_global) { if (G.get_master_host_id(v_global) != host_id) { continue; } local_num_labels += L[G.get_local_vertex_id(v_global)].vertices.size(); } EdgeID global_num_labels; MPI_Allreduce(&local_num_labels, &global_num_labels, 1, MPI_Instance::get_mpi_datatype<EdgeID>(), MPI_SUM, MPI_COMM_WORLD); // printf("host_id: %u local_num_labels: %lu %.2f%%\n", host_id, local_num_labels, 100.0 * local_num_labels / global_num_labels); MPI_Barrier(MPI_COMM_WORLD); if (0 == host_id) { printf("Global_num_labels: %lu average: %f\n", global_num_labels, 1.0 * global_num_labels / num_v); } // VertexID local_num_batches = 0; // VertexID local_num_distances = 0; //// double local_avg_distances_per_batches = 0; // for (VertexID v_global = 0; v_global < num_v; ++v_global) { // if (G.get_master_host_id(v_global) != host_id) { // continue; // } // VertexID v_local = G.get_local_vertex_id(v_global); // local_num_batches += L[v_local].batches.size(); // local_num_distances += L[v_local].distances.size(); //// double avg_d_p_b = 0; //// for (VertexID i_b = 0; i_b < L[v_local].batches.size(); ++i_b) { //// avg_d_p_b += 
L[v_local].batches[i_b].size; //// } //// avg_d_p_b /= L[v_local].batches.size(); //// local_avg_distances_per_batches += avg_d_p_b; // } //// local_avg_distances_per_batches /= num_masters; //// double local_avg_batches = local_num_batches * 1.0 / num_masters; //// double local_avg_distances = local_num_distances * 1.0 / num_masters; // uint64_t global_num_batches = 0; // uint64_t global_num_distances = 0; // MPI_Allreduce( // &local_num_batches, // &global_num_batches, // 1, // MPI_UINT64_T, // MPI_SUM, // MPI_COMM_WORLD); //// global_avg_batches /= num_hosts; // MPI_Allreduce( // &local_num_distances, // &global_num_distances, // 1, // MPI_UINT64_T, // MPI_SUM, // MPI_COMM_WORLD); //// global_avg_distances /= num_hosts; // double global_avg_d_p_b = global_num_distances * 1.0 / global_num_batches; // double global_avg_l_p_d = global_num_labels * 1.0 / global_num_distances; // double global_avg_batches = global_num_batches / num_v; // double global_avg_distances = global_num_distances / num_v; //// MPI_Allreduce( //// &local_avg_distances_per_batches, //// &global_avg_d_p_b, //// 1, //// MPI_DOUBLE, //// MPI_SUM, //// MPI_COMM_WORLD); //// global_avg_d_p_b /= num_hosts; // MPI_Barrier(MPI_COMM_WORLD); // if (0 == host_id) { // printf("global_avg_batches: %f " // "global_avg_distances: %f " // "global_avg_distances_per_batch: %f " // "global_avg_labels_per_distance: %f\n", // global_avg_batches, // global_avg_distances, // global_avg_d_p_b, // global_avg_l_p_d); // } } // printf("BP_labeling: %f %.2f%%\n", bp_labeling_time, bp_labeling_time / time_labeling * 100); // printf("Initializing: %f %.2f%%\n", initializing_time, initializing_time / time_labeling * 100); // printf("\tinit_start_reset_time: %f (%f%%)\n", init_start_reset_time, init_start_reset_time / initializing_time * 100); // printf("\tinit_index_time: %f (%f%%)\n", init_index_time, init_index_time / initializing_time * 100); // printf("\t\tinit_indicators_time: %f (%f%%)\n", init_indicators_time, init_indicators_time / init_index_time * 100); // printf("\tinit_dist_matrix_time: %f (%f%%)\n", init_dist_matrix_time, init_dist_matrix_time / initializing_time * 100); // printf("Candidating: %f %.2f%%\n", candidating_time, candidating_time / time_labeling * 100); // printf("Adding: %f %.2f%%\n", adding_time, adding_time / time_labeling * 100); // printf("distance_query_time: %f %.2f%%\n", distance_query_time, distance_query_time / time_labeling * 100); // uint64_t total_check_count = bp_hit_count + normal_check_count; // printf("total_check_count: %'llu\n", total_check_count); // printf("bp_hit_count: %'llu %.2f%%\n", // bp_hit_count, // bp_hit_count * 100.0 / total_check_count); // printf("normal_check_count: %'llu %.2f%%\n", normal_check_count, normal_check_count * 100.0 / total_check_count); // printf("total_candidates_num: %'llu set_candidates_num: %'llu %.2f%%\n", // total_candidates_num, // set_candidates_num, // set_candidates_num * 100.0 / total_candidates_num); // printf("\tnormal_hit_count (to total_check, to normal_check): %llu (%f%%, %f%%)\n", // normal_hit_count, // normal_hit_count * 100.0 / total_check_count, // normal_hit_count * 100.0 / (total_check_count - bp_hit_count)); //cache_miss.print(); // printf("Candidating: "); candidating_ins_count.print(); // printf("Adding: "); adding_ins_count.print(); // printf("BP_Labeling: "); bp_labeling_ins_count.print(); // printf("BP_Checking: "); bp_checking_ins_count.print(); // printf("distance_query: "); dist_query_ins_count.print(); // printf("num_hosts: %u host_id: %u\n" 
// "Local_labeling_time: %.2f seconds\n" // "bp_labeling_time: %.2f %.2f%%\n" // "initializing_time: %.2f %.2f%%\n" // "scatter_time: %.2f %.2f%%\n" // "gather_time: %.2f %.2f%%\n" // "clearup_time: %.2f %.2f%%\n" // "message_time: %.2f %.2f%%\n", // num_hosts, host_id, // time_labeling, // bp_labeling_time, 100.0 * bp_labeling_time / time_labeling, // initializing_time, 100.0 * initializing_time / time_labeling, // scatter_time, 100.0 * scatter_time / time_labeling, // gather_time, 100.0 * gather_time / time_labeling, // clearup_time, 100.0 * clearup_time / time_labeling, // message_time, 100.0 * message_time / time_labeling); double global_time_labeling; MPI_Allreduce(&time_labeling, &global_time_labeling, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD); if (0 == host_id) { printf("num_hosts: %d " "num_threads: %d " "Global_labeling_time: %.2f seconds\n", num_hosts, NUM_THREADS, global_time_labeling); } // End test } //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::bit_parallel_labeling( // const DistGraph &G, // std::vector<uint8_t> &used_bp_roots) //{ //// VertexID num_v = G.num_v; // EdgeID num_e = G.num_e; // // std::vector<UnweightedDist> tmp_d(num_v); // distances from the root to every v // std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} // std::vector<VertexID> que(num_v); // active queue // std::vector<std::pair<VertexID, VertexID> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0) // std::vector<std::pair<VertexID, VertexID> > child_es(num_e); // child and father, their distances to the root have difference of 1. // // VertexID r = 0; // root r // for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { // while (r < num_v && used_bp_roots[r]) { // ++r; // } // if (r == num_v) { // for (VertexID v = 0; v < num_v; ++v) { // L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST; // } // continue; // } // used_bp_roots[r] = true; // // fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST); // fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); // // VertexID que_t0 = 0, que_t1 = 0, que_h = 0; // que[que_h++] = r; // tmp_d[r] = 0; // que_t1 = que_h; // // int ns = 0; // number of selected neighbor, default 64 // // the edge of one vertex in G is ordered decreasingly to rank, lower rank first, so here need to traverse edges backward // // There was a bug cost countless time: the unsigned iterator i might decrease to zero and then flip to the INF. 
//// VertexID i_bound = G.vertices[r] - 1; //// VertexID i_start = i_bound + G.out_degrees[r]; //// for (VertexID i = i_start; i > i_bound; --i) { // //int i_bound = G.vertices[r]; // //int i_start = i_bound + G.out_degrees[r] - 1; // //for (int i = i_start; i >= i_bound; --i) { // VertexID d_i_bound = G.local_out_degrees[r]; // EdgeID i_start = G.vertices_idx[r] + d_i_bound - 1; // for (VertexID d_i = 0; d_i < d_i_bound; ++d_i) { // EdgeID i = i_start - d_i; // VertexID v = G.out_edges[i]; // if (!used_bp_roots[v]) { // used_bp_roots[v] = true; // // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set) // que[que_h++] = v; // tmp_d[v] = 1; // tmp_s[v].first = 1ULL << ns; // if (++ns == 64) break; // } // } // //} //// } // // for (UnweightedDist d = 0; que_t0 < que_h; ++d) { // VertexID num_sibling_es = 0, num_child_es = 0; // // for (VertexID que_i = que_t0; que_i < que_t1; ++que_i) { // VertexID v = que[que_i]; //// bit_parallel_push_labels(G, //// v, //// que, //// que_h, //// sibling_es, //// num_sibling_es, //// child_es, //// num_child_es, //// tmp_d, //// d); // EdgeID i_start = G.vertices_idx[v]; // EdgeID i_bound = i_start + G.local_out_degrees[v]; // for (EdgeID i = i_start; i < i_bound; ++i) { // VertexID tv = G.out_edges[i]; // UnweightedDist td = d + 1; // // if (d > tmp_d[tv]) { // ; // } // else if (d == tmp_d[tv]) { // if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph. // sibling_es[num_sibling_es].first = v; // sibling_es[num_sibling_es].second = tv; // ++num_sibling_es; // } // } else { // d < tmp_d[tv] // if (tmp_d[tv] == MAX_UNWEIGHTED_DIST) { // que[que_h++] = tv; // tmp_d[tv] = td; // } // child_es[num_child_es].first = v; // child_es[num_child_es].second = tv; // ++num_child_es; // } // } // } // // for (VertexID i = 0; i < num_sibling_es; ++i) { // VertexID v = sibling_es[i].first, w = sibling_es[i].second; // tmp_s[v].second |= tmp_s[w].first; // tmp_s[w].second |= tmp_s[v].first; // } // for (VertexID i = 0; i < num_child_es; ++i) { // VertexID v = child_es[i].first, c = child_es[i].second; // tmp_s[c].first |= tmp_s[v].first; // tmp_s[c].second |= tmp_s[v].second; // } // // {// test // printf("iter %u @%u host_id: %u num_sibling_es: %u num_child_es: %u\n", d, __LINE__, host_id, num_sibling_es, num_child_es); //// if (4 == d) { //// exit(EXIT_SUCCESS); //// } // } // // que_t0 = que_t1; // que_t1 = que_h; // } // // for (VertexID v = 0; v < num_v; ++v) { // L[v].bp_dist[i_bpspt] = tmp_d[v]; // L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1} // L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1} // } // } // //} template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: bit_parallel_push_labels( const DistGraph &G, const VertexID v_global, // std::vector<VertexID> &tmp_que, // VertexID &end_tmp_que, // std::vector< std::pair<VertexID, VertexID> > &sibling_es, // VertexID &num_sibling_es, // std::vector< std::pair<VertexID, VertexID> > &child_es, // VertexID &num_child_es, std::vector<VertexID> &tmp_q, VertexID &size_tmp_q, std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es, VertexID &size_tmp_sibling_es, std::vector< std::pair<VertexID, VertexID> > &tmp_child_es, VertexID &size_tmp_child_es, const VertexID &offset_tmp_q, std::vector<UnweightedDist> &dists, const UnweightedDist iter) { EdgeID i_start = G.vertices_idx[v_global]; EdgeID i_bound = i_start + G.local_out_degrees[v_global]; // {//test // 
printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, G.local_out_degrees[v_global]); // } for (EdgeID i = i_start; i < i_bound; ++i) { VertexID tv_global = G.out_edges[i]; VertexID tv_local = G.get_local_vertex_id(tv_global); UnweightedDist td = iter + 1; if (iter > dists[tv_local]) { ; } else if (iter == dists[tv_local]) { if (v_global < tv_global) { // ??? Why need v < tv !!! Because it's a undirected graph. tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].first = v_global; tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].second = tv_global; ++size_tmp_sibling_es; // sibling_es[num_sibling_es].first = v_global; // sibling_es[num_sibling_es].second = tv_global; // ++num_sibling_es; } } else { // iter < dists[tv] if (dists[tv_local] == MAX_UNWEIGHTED_DIST) { if (CAS(dists.data() + tv_local, MAX_UNWEIGHTED_DIST, td)) { tmp_q[offset_tmp_q + size_tmp_q++] = tv_global; } } // if (dists[tv_local] == MAX_UNWEIGHTED_DIST) { // tmp_que[end_tmp_que++] = tv_global; // dists[tv_local] = td; // } tmp_child_es[offset_tmp_q + size_tmp_child_es].first = v_global; tmp_child_es[offset_tmp_q + size_tmp_child_es].second = tv_global; ++size_tmp_child_es; // child_es[num_child_es].first = v_global; // child_es[num_child_es].second = tv_global; // ++num_child_es; } } } template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: bit_parallel_labeling( const DistGraph &G, // std::vector<IndexType> &L, std::vector<uint8_t> &used_bp_roots) { // Class type of Bit-Parallel label message unit. struct MsgUnitBP { VertexID v_global; uint64_t S_n1; uint64_t S_0; MsgUnitBP() = default; // MsgUnitBP(MsgUnitBP&& other) = default; // MsgUnitBP(MsgUnitBP& other) = default; // MsgUnitBP& operator=(const MsgUnitBP& other) = default; // MsgUnitBP& operator=(MsgUnitBP&& other) = default; MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0) : v_global(v), S_n1(sn1), S_0(s0) { } }; // VertexID num_v = G.num_v; // EdgeID num_e = G.num_e; EdgeID local_num_edges = G.num_edges_local; std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every v std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} std::vector<VertexID> que(num_masters); // active queue VertexID end_que = 0; std::vector<VertexID> tmp_que(num_masters); // temporary queue, to be swapped with que VertexID end_tmp_que = 0; std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0) std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1. VertexID r_global = 0; // root r for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { // {// test // if (0 == host_id) { // printf("i_bpsp: %u\n", i_bpspt); // } // } // Select the root r_global if (0 == host_id) { while (r_global < num_v && used_bp_roots[r_global]) { ++r_global; } if (r_global == num_v) { for (VertexID v = 0; v < num_v; ++v) { L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST; } continue; } } // Broadcast the r here. 
// message_time -= WallTimer::get_time_mark(); MPI_Bcast(&r_global, 1, V_ID_Type, 0, MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); used_bp_roots[r_global] = 1; //#ifdef DEBUG_MESSAGES_ON // {//test // if (0 == host_id) { // printf("r_global: %u i_bpspt: %u\n", r_global, i_bpspt); // } // } //#endif // VertexID que_t0 = 0, que_t1 = 0, que_h = 0; fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST); fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); // Mark the r_global if (G.get_master_host_id(r_global) == host_id) { tmp_d[G.get_local_vertex_id(r_global)] = 0; que[end_que++] = r_global; } // Select the r_global's 64 neighbors { // Get r_global's neighbors into buffer_send, rank from high to low. VertexID local_degree = G.local_out_degrees[r_global]; std::vector<VertexID> buffer_send(local_degree); if (local_degree) { EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1; for (VertexID d_i = 0; d_i < local_degree; ++d_i) { EdgeID e_i = e_i_start - d_i; buffer_send[d_i] = G.out_edges[e_i]; } } // Get selected neighbors (up to 64) std::vector<VertexID> selected_nbrs; if (0 != host_id) { // Every host other than 0 sends neighbors to host 0 // message_time -= WallTimer::get_time_mark(); MPI_Instance::send_buffer_2_dst(buffer_send, 0, SENDING_ROOT_NEIGHBORS, SENDING_SIZE_ROOT_NEIGHBORS); // Receive selected neighbors from host 0 MPI_Instance::recv_buffer_from_src(selected_nbrs, 0, SENDING_SELECTED_NEIGHBORS, SENDING_SIZE_SELETED_NEIGHBORS); // message_time += WallTimer::get_time_mark(); } else { // Host 0 // Host 0 receives neighbors from others std::vector<VertexID> all_nbrs(buffer_send); std::vector<VertexID > buffer_recv; for (int loc = 0; loc < num_hosts - 1; ++loc) { // message_time -= WallTimer::get_time_mark(); MPI_Instance::recv_buffer_from_any(buffer_recv, SENDING_ROOT_NEIGHBORS, SENDING_SIZE_ROOT_NEIGHBORS); // message_time += WallTimer::get_time_mark(); if (buffer_recv.empty()) { continue; } buffer_send.resize(buffer_send.size() + buffer_recv.size()); std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin()); all_nbrs.resize(buffer_send.size()); all_nbrs.assign(buffer_send.begin(), buffer_send.end()); } assert(all_nbrs.size() == G.get_global_out_degree(r_global)); // Select 64 (or less) neighbors VertexID ns = 0; // number of selected neighbor, default 64 for (VertexID v_global : all_nbrs) { if (used_bp_roots[v_global]) { continue; } used_bp_roots[v_global] = 1; selected_nbrs.push_back(v_global); if (++ns == 64) { break; } } // Send selected neighbors to other hosts // message_time -= WallTimer::get_time_mark(); for (int dest = 1; dest < num_hosts; ++dest) { MPI_Instance::send_buffer_2_dst(selected_nbrs, dest, SENDING_SELECTED_NEIGHBORS, SENDING_SIZE_SELETED_NEIGHBORS); } // message_time += WallTimer::get_time_mark(); } // {//test // printf("host_id: %u selected_nbrs.size(): %lu\n", host_id, selected_nbrs.size()); // } // Synchronize the used_bp_roots. 
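// At this point every host holds the same selected_nbrs: each host sent its local slice
// of r_global's adjacency to host 0, host 0 merged the slices, skipped vertices that are
// already bit-parallel roots, kept at most 64, and sent the selection back. The first
// loop below re-marks used_bp_roots on all hosts so the flag array stays in sync; the
// second seeds the BFS, giving the v_i-th selected neighbor distance 1 and bit v_i of
// the root's S_r^{-1} set (tmp_s[v_global].first = 1ULL << v_i) on its master host.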
for (VertexID v_global : selected_nbrs) { used_bp_roots[v_global] = 1; } // Mark selected neighbors for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) { VertexID v_global = selected_nbrs[v_i]; if (host_id != G.get_master_host_id(v_global)) { continue; } tmp_que[end_tmp_que++] = v_global; tmp_d[G.get_local_vertex_id(v_global)] = 1; tmp_s[v_global].first = 1ULL << v_i; } } // Reduce the global number of active vertices VertexID global_num_actives = 1; UnweightedDist d = 0; while (global_num_actives) { {// Limit the distance if (d > 7) { break; } } //#ifdef DEBUG_MESSAGES_ON // {//test // if (0 == host_id) { // printf("d: %u que_size: %u\n", d, global_num_actives); // } // } //#endif // for (UnweightedDist d = 0; que_t0 < que_h; ++d) { VertexID num_sibling_es = 0, num_child_es = 0; // Send active masters to mirrors { std::vector<MsgUnitBP> buffer_send(end_que); for (VertexID que_i = 0; que_i < end_que; ++que_i) { VertexID v_global = que[que_i]; buffer_send[que_i] = MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second); } // {// test // printf("host_id: %u buffer_send.size(): %lu\n", host_id, buffer_send.size()); // } for (int root = 0; root < num_hosts; ++root) { std::vector<MsgUnitBP> buffer_recv; one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv); if (buffer_recv.empty()) { continue; } // For parallel adding to queue VertexID size_buffer_recv = buffer_recv.size(); std::vector<VertexID> offsets_tmp_q(size_buffer_recv); #pragma omp parallel for for (VertexID i_q = 0; i_q < size_buffer_recv; ++i_q) { offsets_tmp_q[i_q] = G.local_out_degrees[buffer_recv[i_q].v_global]; } VertexID num_neighbors = PADO::prefix_sum_for_offsets(offsets_tmp_q); std::vector<VertexID> tmp_q(num_neighbors); std::vector<VertexID> sizes_tmp_q(size_buffer_recv, 0); // For parallel adding to sibling_es std::vector< std::pair<VertexID, VertexID> > tmp_sibling_es(num_neighbors); std::vector<VertexID> sizes_tmp_sibling_es(size_buffer_recv, 0); // For parallel adding to child_es std::vector< std::pair<VertexID, VertexID> > tmp_child_es(num_neighbors); std::vector<VertexID> sizes_tmp_child_es(size_buffer_recv, 0); #pragma omp parallel for // for (const MsgUnitBP &m : buffer_recv) { for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) { const MsgUnitBP &m = buffer_recv[i_m]; VertexID v_global = m.v_global; if (!G.local_out_degrees[v_global]) { continue; } tmp_s[v_global].first = m.S_n1; tmp_s[v_global].second = m.S_0; // Push labels bit_parallel_push_labels( G, v_global, tmp_q, sizes_tmp_q[i_m], tmp_sibling_es, sizes_tmp_sibling_es[i_m], tmp_child_es, sizes_tmp_child_es[i_m], offsets_tmp_q[i_m], // tmp_que, // end_tmp_que, // sibling_es, // num_sibling_es, // child_es, // num_child_es, tmp_d, d); } {// From tmp_sibling_es to sibling_es idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_sibling_es); PADO::collect_into_queue( tmp_sibling_es, offsets_tmp_q, sizes_tmp_sibling_es, total_size_tmp, sibling_es, num_sibling_es); } {// From tmp_child_es to child_es idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_child_es); PADO::collect_into_queue( tmp_child_es, offsets_tmp_q, sizes_tmp_child_es, total_size_tmp, child_es, num_child_es); } {// From tmp_q to tmp_que idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_q); PADO::collect_into_queue( tmp_q, offsets_tmp_q, sizes_tmp_q, total_size_tmp, tmp_que, end_tmp_que); } // {// test // printf("host_id: %u root: %u done push.\n", host_id, root); // } } } // Update the sets in tmp_s { #pragma omp parallel for for (VertexID i = 0; 
i < num_sibling_es; ++i) { VertexID v = sibling_es[i].first, w = sibling_es[i].second; __atomic_or_fetch(&tmp_s[v].second, tmp_s[w].first, __ATOMIC_SEQ_CST); __atomic_or_fetch(&tmp_s[w].second, tmp_s[v].first, __ATOMIC_SEQ_CST); // tmp_s[v].second |= tmp_s[w].first; // !!! Need to send back!!! // tmp_s[w].second |= tmp_s[v].first; } // Put into the buffer sending to others std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es); #pragma omp parallel for for (VertexID i = 0; i < num_sibling_es; ++i) { VertexID v = sibling_es[i].first; VertexID w = sibling_es[i].second; buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second); buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second); } // Send the messages for (int root = 0; root < num_hosts; ++root) { std::vector< std::pair<VertexID, uint64_t> > buffer_recv; one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv); if (buffer_recv.empty()) { continue; } size_t i_m_bound = buffer_recv.size(); #pragma omp parallel for for (size_t i_m = 0; i_m < i_m_bound; ++i_m) { const auto &m = buffer_recv[i_m]; __atomic_or_fetch(&tmp_s[m.first].second, m.second, __ATOMIC_SEQ_CST); } // for (const std::pair<VertexID, uint64_t> &m : buffer_recv) { // tmp_s[m.first].second |= m.second; // } } #pragma omp parallel for for (VertexID i = 0; i < num_child_es; ++i) { VertexID v = child_es[i].first, c = child_es[i].second; __atomic_or_fetch(&tmp_s[c].first, tmp_s[v].first, __ATOMIC_SEQ_CST); __atomic_or_fetch(&tmp_s[c].second, tmp_s[v].second, __ATOMIC_SEQ_CST); // tmp_s[c].first |= tmp_s[v].first; // tmp_s[c].second |= tmp_s[v].second; } } //#ifdef DEBUG_MESSAGES_ON // {// test // VertexID global_num_sibling_es; // VertexID global_num_child_es; // MPI_Allreduce(&num_sibling_es, // &global_num_sibling_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // MPI_Allreduce(&num_child_es, // &global_num_child_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // if (0 == host_id) { // printf("iter: %u num_sibling_es: %u num_child_es: %u\n", d, global_num_sibling_es, global_num_child_es); // } // //// printf("iter %u @%u host_id: %u num_sibling_es: %u num_child_es: %u\n", d, __LINE__, host_id, num_sibling_es, num_child_es); //// if (0 == d) { //// exit(EXIT_SUCCESS); //// } // } //#endif // Swap que and tmp_que tmp_que.swap(que); end_que = end_tmp_que; end_tmp_que = 0; MPI_Allreduce(&end_que, &global_num_actives, 1, V_ID_Type, MPI_MAX, MPI_COMM_WORLD); // } ++d; } #pragma omp parallel for for (VertexID v_local = 0; v_local < num_masters; ++v_local) { VertexID v_global = G.get_global_vertex_id(v_local); L[v_local].bp_dist[i_bpspt] = tmp_d[v_local]; L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1} L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1} } } } //template <VertexID BATCH_SIZE> //inline void DistBVCPLL<BATCH_SIZE>:: //bit_parallel_push_labels( // const DistGraph &G, // const VertexID v_global, // std::vector<VertexID> &tmp_que, // VertexID &end_tmp_que, // std::vector< std::pair<VertexID, VertexID> > &sibling_es, // VertexID &num_sibling_es, // std::vector< std::pair<VertexID, VertexID> > &child_es, // VertexID &num_child_es, // std::vector<UnweightedDist> &dists, // const UnweightedDist iter) //{ // EdgeID i_start = G.vertices_idx[v_global]; // EdgeID i_bound = i_start + G.local_out_degrees[v_global]; //// {//test //// printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, 
G.local_out_degrees[v_global]); //// } // for (EdgeID i = i_start; i < i_bound; ++i) { // VertexID tv_global = G.out_edges[i]; // VertexID tv_local = G.get_local_vertex_id(tv_global); // UnweightedDist td = iter + 1; // // if (iter > dists[tv_local]) { // ; // } else if (iter == dists[tv_local]) { // if (v_global < tv_global) { // ??? Why need v < tv !!! Because it's a undirected graph. // sibling_es[num_sibling_es].first = v_global; // sibling_es[num_sibling_es].second = tv_global; // ++num_sibling_es; // } // } else { // iter < dists[tv] // if (dists[tv_local] == MAX_UNWEIGHTED_DIST) { // tmp_que[end_tmp_que++] = tv_global; // dists[tv_local] = td; // } // child_es[num_child_es].first = v_global; // child_es[num_child_es].second = tv_global; // ++num_child_es; //// { //// printf("host_id: %u num_child_es: %u v_global: %u tv_global: %u\n", host_id, num_child_es, v_global, tv_global);//test //// } // } // } // //} // //template <VertexID BATCH_SIZE> //inline void DistBVCPLL<BATCH_SIZE>:: //bit_parallel_labeling( // const DistGraph &G, //// std::vector<IndexType> &L, // std::vector<uint8_t> &used_bp_roots) //{ // // Class type of Bit-Parallel label message unit. // struct MsgUnitBP { // VertexID v_global; // uint64_t S_n1; // uint64_t S_0; // // MsgUnitBP() = default; //// MsgUnitBP(MsgUnitBP&& other) = default; //// MsgUnitBP(MsgUnitBP& other) = default; //// MsgUnitBP& operator=(const MsgUnitBP& other) = default; //// MsgUnitBP& operator=(MsgUnitBP&& other) = default; // MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0) // : v_global(v), S_n1(sn1), S_0(s0) { } // }; //// VertexID num_v = G.num_v; //// EdgeID num_e = G.num_e; // EdgeID local_num_edges = G.num_edges_local; // // std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every v // std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} // std::vector<VertexID> que(num_masters); // active queue // VertexID end_que = 0; // std::vector<VertexID> tmp_que(num_masters); // temporary queue, to be swapped with que // VertexID end_tmp_que = 0; // std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0) // std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1. // //// std::vector<UnweightedDist> tmp_d(num_v); // distances from the root to every v //// std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} //// std::vector<VertexID> que(num_v); // active queue //// std::vector<std::pair<VertexID, VertexID> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0) //// std::vector<std::pair<VertexID, VertexID> > child_es(num_e); // child and father, their distances to the root have difference of 1. // // VertexID r_global = 0; // root r // for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { // // Select the root r_global // if (0 == host_id) { // while (r_global < num_v && used_bp_roots[r_global]) { // ++r_global; // } // if (r_global == num_v) { // for (VertexID v = 0; v < num_v; ++v) { // L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST; // } // continue; // } // } // // Broadcast the r here. 
// message_time -= WallTimer::get_time_mark(); // MPI_Bcast(&r_global, // 1, // V_ID_Type, // 0, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // used_bp_roots[r_global] = 1; //#ifdef DEBUG_MESSAGES_ON // {//test // if (0 == host_id) { // printf("r_global: %u i_bpspt: %u\n", r_global, i_bpspt); // } // } //#endif // //// VertexID que_t0 = 0, que_t1 = 0, que_h = 0; // fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST); // fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); // // // Mark the r_global // if (G.get_master_host_id(r_global) == host_id) { // tmp_d[G.get_local_vertex_id(r_global)] = 0; // que[end_que++] = r_global; // } // // Select the r_global's 64 neighbors // { // // Get r_global's neighbors into buffer_send, rank from low to high. // VertexID local_degree = G.local_out_degrees[r_global]; // std::vector<VertexID> buffer_send(local_degree); // if (local_degree) { // EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1; // for (VertexID d_i = 0; d_i < local_degree; ++d_i) { // EdgeID e_i = e_i_start - d_i; // buffer_send[d_i] = G.out_edges[e_i]; // } // } // // // Get selected neighbors (up to 64) // std::vector<VertexID> selected_nbrs; // if (0 != host_id) { // // Every host other than 0 sends neighbors to host 0 // message_time -= WallTimer::get_time_mark(); // MPI_Instance::send_buffer_2_dst(buffer_send, // 0, // SENDING_ROOT_NEIGHBORS, // SENDING_SIZE_ROOT_NEIGHBORS); // // Receive selected neighbors from host 0 // MPI_Instance::recv_buffer_from_src(selected_nbrs, // 0, // SENDING_SELECTED_NEIGHBORS, // SENDING_SIZE_SELETED_NEIGHBORS); // message_time += WallTimer::get_time_mark(); // } else { // // Host 0 // // Host 0 receives neighbors from others // std::vector<VertexID> all_nbrs(buffer_send); // std::vector<VertexID > buffer_recv; // for (int loc = 0; loc < num_hosts - 1; ++loc) { // message_time -= WallTimer::get_time_mark(); // MPI_Instance::recv_buffer_from_any(buffer_recv, // SENDING_ROOT_NEIGHBORS, // SENDING_SIZE_ROOT_NEIGHBORS); //// MPI_Instance::receive_dynamic_buffer_from_any(buffer_recv, //// num_hosts, //// SENDING_ROOT_NEIGHBORS); // message_time += WallTimer::get_time_mark(); // if (buffer_recv.empty()) { // continue; // } // // buffer_send.resize(buffer_send.size() + buffer_recv.size()); // std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin()); // all_nbrs.resize(buffer_send.size()); // all_nbrs.assign(buffer_send.begin(), buffer_send.end()); // } // assert(all_nbrs.size() == G.get_global_out_degree(r_global)); // // Select 64 (or less) neighbors // VertexID ns = 0; // number of selected neighbor, default 64 // for (VertexID v_global : all_nbrs) { // if (used_bp_roots[v_global]) { // continue; // } // used_bp_roots[v_global] = 1; // selected_nbrs.push_back(v_global); // if (++ns == 64) { // break; // } // } // // Send selected neighbors to other hosts // message_time -= WallTimer::get_time_mark(); // for (int dest = 1; dest < num_hosts; ++dest) { // MPI_Instance::send_buffer_2_dst(selected_nbrs, // dest, // SENDING_SELECTED_NEIGHBORS, // SENDING_SIZE_SELETED_NEIGHBORS); // } // message_time += WallTimer::get_time_mark(); // } //// {//test //// printf("host_id: %u selected_nbrs.size(): %lu\n", host_id, selected_nbrs.size()); //// } // // // Synchronize the used_bp_roots. 
// for (VertexID v_global : selected_nbrs) { // used_bp_roots[v_global] = 1; // } // // // Mark selected neighbors // for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) { // VertexID v_global = selected_nbrs[v_i]; // if (host_id != G.get_master_host_id(v_global)) { // continue; // } // tmp_que[end_tmp_que++] = v_global; // tmp_d[G.get_local_vertex_id(v_global)] = 1; // tmp_s[v_global].first = 1ULL << v_i; // } // } // // // Reduce the global number of active vertices // VertexID global_num_actives = 1; // UnweightedDist d = 0; // while (global_num_actives) { //// for (UnweightedDist d = 0; que_t0 < que_h; ++d) { // VertexID num_sibling_es = 0, num_child_es = 0; // // // // Send active masters to mirrors // { // std::vector<MsgUnitBP> buffer_send(end_que); // for (VertexID que_i = 0; que_i < end_que; ++que_i) { // VertexID v_global = que[que_i]; // buffer_send[que_i] = MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second); // } //// {// test //// printf("host_id: %u buffer_send.size(): %lu\n", host_id, buffer_send.size()); //// } // // for (int root = 0; root < num_hosts; ++root) { // std::vector<MsgUnitBP> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const MsgUnitBP &m : buffer_recv) { // VertexID v_global = m.v_global; // if (!G.local_out_degrees[v_global]) { // continue; // } // tmp_s[v_global].first = m.S_n1; // tmp_s[v_global].second = m.S_0; // // Push labels // bit_parallel_push_labels(G, // v_global, // tmp_que, // end_tmp_que, // sibling_es, // num_sibling_es, // child_es, // num_child_es, // tmp_d, // d); // } //// {// test //// printf("host_id: %u root: %u done push.\n", host_id, root); //// } // } // } // // // Update the sets in tmp_s // { // // for (VertexID i = 0; i < num_sibling_es; ++i) { // VertexID v = sibling_es[i].first, w = sibling_es[i].second; // tmp_s[v].second |= tmp_s[w].first; // !!! Need to send back!!! 
// tmp_s[w].second |= tmp_s[v].first; // // } // // Put into the buffer sending to others // std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es); //// std::vector< std::vector<MPI_Request> > requests_list(num_hosts - 1); // for (VertexID i = 0; i < num_sibling_es; ++i) { // VertexID v = sibling_es[i].first; // VertexID w = sibling_es[i].second; //// buffer_send.emplace_back(v, tmp_s[v].second); //// buffer_send.emplace_back(w, tmp_s[w].second); // buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second); // buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second); // } // // Send the messages // for (int root = 0; root < num_hosts; ++root) { // std::vector< std::pair<VertexID, uint64_t> > buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const std::pair<VertexID, uint64_t> &m : buffer_recv) { // tmp_s[m.first].second |= m.second; // } // } // for (VertexID i = 0; i < num_child_es; ++i) { // VertexID v = child_es[i].first, c = child_es[i].second; // tmp_s[c].first |= tmp_s[v].first; // tmp_s[c].second |= tmp_s[v].second; // } // } ////#ifdef DEBUG_MESSAGES_ON // {// test // VertexID global_num_sibling_es; // VertexID global_num_child_es; // MPI_Allreduce(&num_sibling_es, // &global_num_sibling_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // MPI_Allreduce(&num_child_es, // &global_num_child_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // if (0 == host_id) { // printf("iter: %u num_sibling_es: %u num_child_es: %u\n", d, global_num_sibling_es, global_num_child_es); // } // } ////#endif // // // Swap que and tmp_que // tmp_que.swap(que); // end_que = end_tmp_que; // end_tmp_que = 0; // MPI_Allreduce(&end_que, // &global_num_actives, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // //// } // ++d; // } // // for (VertexID v_local = 0; v_local < num_masters; ++v_local) { // VertexID v_global = G.get_global_vertex_id(v_local); // L[v_local].bp_dist[i_bpspt] = tmp_d[v_local]; // L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1} // L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1} // } // } //} //// Function bit parallel checking: //// return false if shortest distance exits in bp labels, return true if bp labels cannot cover the distance //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline bool DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::bit_parallel_checking( // VertexID v_id, // VertexID w_id, // const std::vector<IndexType> &L, // UnweightedDist iter) //{ // // Bit Parallel Checking: if label_real_id to v_tail has shorter distance already // const IndexType &Lv = L[v_id]; // const IndexType &Lw = L[w_id]; // // _mm_prefetch(&Lv.bp_dist[0], _MM_HINT_T0); // _mm_prefetch(&Lv.bp_sets[0][0], _MM_HINT_T0); // _mm_prefetch(&Lw.bp_dist[0], _MM_HINT_T0); // _mm_prefetch(&Lw.bp_sets[0][0], _MM_HINT_T0); // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = Lv.bp_dist[i] + Lw.bp_dist[i]; // Use type VertexID in case of addition of two INF. // if (td - 2 <= iter) { // td += // (Lv.bp_sets[i][0] & Lw.bp_sets[i][0]) ? -2 : // ((Lv.bp_sets[i][0] & Lw.bp_sets[i][1]) | // (Lv.bp_sets[i][1] & Lw.bp_sets[i][0])) // ? 
-1 : 0; // if (td <= iter) { //// ++bp_hit_count; // return false; // } // } // } // return true; //} // Function for initializing at the begin of a batch // For a batch, initialize the temporary labels and real labels of roots; // traverse roots' labels to initialize distance buffer; // unset flag arrays is_active and got_labels template <VertexID BATCH_SIZE> inline VertexID DistBVCPLL<BATCH_SIZE>:: initialization( const DistGraph &G, std::vector<ShortIndex> &short_index, std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::vector<VertexID> > &recved_dist_table, std::vector<BPLabelType> &bp_labels_table, std::vector<VertexID> &active_queue, VertexID &end_active_queue, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated, // VertexID b_id, VertexID roots_start, VertexID roots_size, // std::vector<VertexID> &roots_master_local, const std::vector<uint8_t> &used_bp_roots) { // Get the roots_master_local, containing all local roots. std::vector<VertexID> roots_master_local; VertexID size_roots_master_local; VertexID roots_bound = roots_start + roots_size; try { for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) { if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) { roots_master_local.push_back(G.get_local_vertex_id(r_global)); // {//test // if (1024 == roots_start && 7 == host_id && 31600 == *roots_master_local.rbegin()) { // printf("S0.0 host_id: %d " // "31600 YES!\n", // host_id); // } // } } } size_roots_master_local = roots_master_local.size(); } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("initialization_roots_master_local: bad_alloc " "host_id: %d " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } // Short_index { if (end_once_candidated_queue >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) { VertexID v_local = once_candidated_queue[v_i]; short_index[v_local].indicator_reset(); once_candidated[v_local] = 0; } } else { for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) { VertexID v_local = once_candidated_queue[v_i]; short_index[v_local].indicator_reset(); once_candidated[v_local] = 0; } } end_once_candidated_queue = 0; if (size_roots_master_local >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) { VertexID r_local = roots_master_local[i_r]; short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself // short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels } } else { for (VertexID r_local : roots_master_local) { short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself // short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels } } } // // Real Index try { if (size_roots_master_local >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) { VertexID r_local = roots_master_local[i_r]; IndexType &Lr = L[r_local]; Lr.batches.emplace_back( // b_id, // Batch ID Lr.distances.size(), // start_index 1); // size Lr.distances.emplace_back( Lr.vertices.size(), // start_index 1, // size 0); // dist Lr.vertices.push_back(G.get_global_vertex_id(r_local)); // 
Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start); } } else { for (VertexID r_local : roots_master_local) { IndexType &Lr = L[r_local]; Lr.batches.emplace_back( // b_id, // Batch ID Lr.distances.size(), // start_index 1); // size Lr.distances.emplace_back( Lr.vertices.size(), // start_index 1, // size 0); // dist Lr.vertices.push_back(G.get_global_vertex_id(r_local)); // Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start); } } } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("initialization_real_index: bad_alloc " "host_id: %d " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } // Dist Table try { // struct LabelTableUnit { // VertexID root_id; // VertexID label_global_id; // UnweightedDist dist; // // LabelTableUnit() = default; // // LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) : // root_id(r), label_global_id(l), dist(d) {} // }; std::vector<LabelTableUnit> buffer_send; // buffer for sending // Dist_matrix { // Deprecated Old method: unpack the IndexType structure before sending. // Okay, it's back. if (size_roots_master_local >= THRESHOLD_PARALLEL) { // Offsets for adding labels to buffer_send in parallel std::vector<VertexID> offsets_beffer_send(size_roots_master_local); #pragma omp parallel for for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) { VertexID r_local = roots_master_local[i_r]; offsets_beffer_send[i_r] = L[r_local].vertices.size(); } EdgeID size_labels = PADO::prefix_sum_for_offsets(offsets_beffer_send); buffer_send.resize(size_labels); #pragma omp parallel for for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) { VertexID r_local = roots_master_local[i_r]; VertexID top_location = 0; IndexType &Lr = L[r_local]; VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start; VertexID b_i_bound = Lr.batches.size(); _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // Traverse batches array for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; VertexID dist_start_index = Lr.batches[b_i].start_index; VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse distances array for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // VertexID dist_bound_index = Lr.distances.size(); // for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) { VertexID v_start_index = Lr.distances[dist_i].start_index; VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; UnweightedDist dist = Lr.distances[dist_i].dist; // Traverse vertices array for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // Write into the dist_table // buffer_send[offsets_beffer_send[i_r] + top_location++] = // LabelTableUnit(r_root_id, Lr.vertices[v_i] + id_offset, dist); buffer_send[offsets_beffer_send[i_r] + top_location++] = LabelTableUnit(r_root_id, Lr.vertices[v_i], dist); } } } } } else { for (VertexID r_local : roots_master_local) { // The distance table. 
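// Same packing as the parallel branch above: walk this root's label index in its
// batches -> distances -> vertices layout and append one LabelTableUnit
// (r_root_id, label_global_id, dist) per stored label to buffer_send, which is later
// broadcast so every host can fill its dist_table rows for the batch roots.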
IndexType &Lr = L[r_local]; VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start; VertexID b_i_bound = Lr.batches.size(); _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // Traverse batches array for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; VertexID dist_start_index = Lr.batches[b_i].start_index; VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse distances array for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // VertexID dist_bound_index = Lr.distances.size(); // for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) { VertexID v_start_index = Lr.distances[dist_i].start_index; VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; UnweightedDist dist = Lr.distances[dist_i].dist; // Traverse vertices array for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // Write into the dist_table buffer_send.emplace_back(r_root_id, Lr.vertices[v_i], dist); // buffer for sending // buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset, // dist); // buffer for sending } } } } } } // Broadcast local roots labels for (int root = 0; root < num_hosts; ++root) { std::vector<LabelTableUnit> buffer_recv; one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv); if (buffer_recv.empty()) { continue; } EdgeID size_buffer_recv = buffer_recv.size(); if (size_buffer_recv >= THRESHOLD_PARALLEL) { std::vector<VertexID> sizes_recved_root_labels(roots_size, 0); #pragma omp parallel for for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) { const LabelTableUnit &l = buffer_recv[i_l]; VertexID root_id = l.root_id; VertexID label_global_id = l.label_global_id; UnweightedDist dist = l.dist; dist_table[root_id][label_global_id] = dist; // Record root_id's number of its received label, for later adding to recved_dist_table __atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST); // recved_dist_table[root_id].push_back(label_global_id); } // Record the received label in recved_dist_table, for later reset #pragma omp parallel for for (VertexID root_id = 0; root_id < roots_size; ++root_id) { VertexID &size = sizes_recved_root_labels[root_id]; if (size) { recved_dist_table[root_id].resize(size); size = 0; } } #pragma omp parallel for for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) { const LabelTableUnit &l = buffer_recv[i_l]; VertexID root_id = l.root_id; VertexID label_global_id = l.label_global_id; PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id], label_global_id); } } else { for (const LabelTableUnit &l : buffer_recv) { VertexID root_id = l.root_id; VertexID label_global_id = l.label_global_id; UnweightedDist dist = l.dist; dist_table[root_id][label_global_id] = dist; // Record the received label in recved_dist_table, for later reset recved_dist_table[root_id].push_back(label_global_id); } } } } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("initialization_dist_table: bad_alloc " "host_id: %d " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } // Build the Bit-Parallel Labels Table try { // struct MsgBPLabel { // VertexID r_root_id; // UnweightedDist bp_dist[BITPARALLEL_SIZE]; // uint64_t 
bp_sets[BITPARALLEL_SIZE][2]; // // MsgBPLabel() = default; // MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2]) // : r_root_id(r) // { // memcpy(bp_dist, dist, sizeof(bp_dist)); // memcpy(bp_sets, sets, sizeof(bp_sets)); // } // }; // std::vector<MPI_Request> requests_send(num_hosts - 1); std::vector<MsgBPLabel> buffer_send; std::vector<VertexID> roots_queue; for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) { if (G.get_master_host_id(r_global) != host_id) { continue; } roots_queue.push_back(r_global); } VertexID size_roots_queue = roots_queue.size(); if (size_roots_queue >= THRESHOLD_PARALLEL) { buffer_send.resize(size_roots_queue); #pragma omp parallel for for (VertexID i_r = 0; i_r < size_roots_queue; ++i_r) { VertexID r_global = roots_queue[i_r]; VertexID r_local = G.get_local_vertex_id(r_global); VertexID r_root = r_global - roots_start; // Prepare for sending // buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets); buffer_send[i_r] = MsgBPLabel(r_root, L[r_local].bp_dist, L[r_local].bp_sets); } } else { // for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) { // if (G.get_master_host_id(r_global) != host_id) { // continue; // } for (VertexID r_global : roots_queue) { VertexID r_local = G.get_local_vertex_id(r_global); VertexID r_root = r_global - roots_start; // Local roots // memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); // memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); // Prepare for sending buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets); } } for (int root = 0; root < num_hosts; ++root) { std::vector<MsgBPLabel> buffer_recv; one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv); if (buffer_recv.empty()) { continue; } VertexID size_buffer_recv = buffer_recv.size(); if (size_buffer_recv >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) { const MsgBPLabel &m = buffer_recv[i_m]; VertexID r_root = m.r_root_id; memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); } } else { for (const MsgBPLabel &m : buffer_recv) { VertexID r_root = m.r_root_id; memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); } } } } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("initialization_bp_labels_table: bad_alloc " "host_id: %d " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } // Active_queue VertexID global_num_actives = 0; // global number of active vertices. 
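// Each host enqueues only the batch roots it masters, so end_active_queue is a local
// count; the MPI_Allreduce below takes the maximum over hosts, which is enough for the
// caller to tell whether any host still has active vertices in this batch.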
{ if (size_roots_master_local >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) { VertexID r_local = roots_master_local[i_r]; active_queue[i_r] = r_local; } end_active_queue = size_roots_master_local; } else { for (VertexID r_local : roots_master_local) { active_queue[end_active_queue++] = r_local; } { } } // Get the global number of active vertices; // message_time -= WallTimer::get_time_mark(); MPI_Allreduce(&end_active_queue, &global_num_actives, 1, V_ID_Type, // MPI_SUM, MPI_MAX, MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); } return global_num_actives; } // Sequential Version //// Function for initializing at the begin of a batch //// For a batch, initialize the temporary labels and real labels of roots; //// traverse roots' labels to initialize distance buffer; //// unset flag arrays is_active and got_labels //template <VertexID BATCH_SIZE> //inline VertexID DistBVCPLL<BATCH_SIZE>:: //initialization( // const DistGraph &G, // std::vector<ShortIndex> &short_index, // std::vector< std::vector<UnweightedDist> > &dist_table, // std::vector< std::vector<VertexID> > &recved_dist_table, // std::vector<BPLabelType> &bp_labels_table, // std::vector<VertexID> &active_queue, // VertexID &end_active_queue, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<uint8_t> &once_candidated, // VertexID b_id, // VertexID roots_start, // VertexID roots_size, //// std::vector<VertexID> &roots_master_local, // const std::vector<uint8_t> &used_bp_roots) //{ // // Get the roots_master_local, containing all local roots. // std::vector<VertexID> roots_master_local; // VertexID roots_bound = roots_start + roots_size; // for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) { // if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) { // roots_master_local.push_back(G.get_local_vertex_id(r_global)); // } // } // // Short_index // { // for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) { // VertexID v_local = once_candidated_queue[v_i]; // short_index[v_local].indicator_reset(); // once_candidated[v_local] = 0; // } // end_once_candidated_queue = 0; // for (VertexID r_local : roots_master_local) { // short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself // short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels //// short_index[r_local].indicator.set(G.get_global_vertex_id(r_local) - roots_start); // v itself //// short_index[r_local].indicator.set(BATCH_SIZE); // v got labels // } // } //// // // Real Index // { // for (VertexID r_local : roots_master_local) { // IndexType &Lr = L[r_local]; // Lr.batches.emplace_back( // b_id, // Batch ID // Lr.distances.size(), // start_index // 1); // size // Lr.distances.emplace_back( // Lr.vertices.size(), // start_index // 1, // size // 0); // dist // Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start); // } // } // // // Dist Table // { //// struct LabelTableUnit { //// VertexID root_id; //// VertexID label_global_id; //// UnweightedDist dist; //// //// LabelTableUnit() = default; //// //// LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) : //// root_id(r), label_global_id(l), dist(d) {} //// }; // std::vector<LabelTableUnit> buffer_send; // buffer for sending // // Dist_matrix // { // // Deprecated Old method: unpack the IndexType structure before sending. 
// for (VertexID r_local : roots_master_local) { // // The distance table. // IndexType &Lr = L[r_local]; // VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start; // VertexID b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // // Traverse batches array // for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // VertexID dist_start_index = Lr.batches[b_i].start_index; // VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse distances array // for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // VertexID v_start_index = Lr.distances[dist_i].start_index; // VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; // UnweightedDist dist = Lr.distances[dist_i].dist; // // Traverse vertices array // for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // // Write into the dist_table //// dist_table[r_root_id][Lr.vertices[v_i] + id_offset] = dist; // distance table // buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset, // dist); // buffer for sending // } // } // } // } // } // // Broadcast local roots labels // for (int root = 0; root < num_hosts; ++root) { // std::vector<LabelTableUnit> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const LabelTableUnit &l : buffer_recv) { // VertexID root_id = l.root_id; // VertexID label_global_id = l.label_global_id; // UnweightedDist dist = l.dist; // dist_table[root_id][label_global_id] = dist; // // Record the received label in recved_dist_table, for later reset // recved_dist_table[root_id].push_back(label_global_id); // } // } // } // // // Build the Bit-Parallel Labels Table // { //// struct MsgBPLabel { //// VertexID r_root_id; //// UnweightedDist bp_dist[BITPARALLEL_SIZE]; //// uint64_t bp_sets[BITPARALLEL_SIZE][2]; //// //// MsgBPLabel() = default; //// MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2]) //// : r_root_id(r) //// { //// memcpy(bp_dist, dist, sizeof(bp_dist)); //// memcpy(bp_sets, sets, sizeof(bp_sets)); //// } //// }; //// std::vector<MPI_Request> requests_send(num_hosts - 1); // std::vector<MsgBPLabel> buffer_send; // for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) { // if (G.get_master_host_id(r_global) != host_id) { // continue; // } // VertexID r_local = G.get_local_vertex_id(r_global); // VertexID r_root = r_global - roots_start; // // Local roots //// memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); //// memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); // // Prepare for sending // buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets); // } // // for (int root = 0; root < num_hosts; ++root) { // std::vector<MsgBPLabel> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const MsgBPLabel &m : buffer_recv) { // VertexID r_root = m.r_root_id; // memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); // memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); // } // } // } // // // TODO: parallel enqueue // // 
Active_queue // VertexID global_num_actives = 0; // global number of active vertices. // { // for (VertexID r_local : roots_master_local) { // active_queue[end_active_queue++] = r_local; // } // // Get the global number of active vertices; // message_time -= WallTimer::get_time_mark(); // MPI_Allreduce(&end_active_queue, // &global_num_actives, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // } // // return global_num_actives; //} //// Function: push v_head_global's newly added labels to its all neighbors. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //push_single_label( // VertexID v_head_global, // VertexID label_root_id, // VertexID roots_start, // const DistGraph &G, // std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<bool> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<bool> &once_candidated, // const std::vector<BPLabelType> &bp_labels_table, // const std::vector<uint8_t> &used_bp_roots, // UnweightedDist iter) //{ // const BPLabelType &L_label = bp_labels_table[label_root_id]; // VertexID label_global_id = label_root_id + roots_start; // EdgeID e_i_start = G.vertices_idx[v_head_global]; // EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global]; // for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) { // VertexID v_tail_global = G.out_edges[e_i]; // if (used_bp_roots[v_tail_global]) { // continue; // } // if (v_tail_global < roots_start) { // all remaining v_tail_global has higher rank than any roots, then no roots can push new labels to it. // return; // } // // VertexID v_tail_local = G.get_local_vertex_id(v_tail_global); // const IndexType &L_tail = L[v_tail_local]; // if (v_tail_global <= label_global_id) { // // remaining v_tail_global has higher rank than the label // return; // } // ShortIndex &SI_v_tail = short_index[v_tail_local]; // if (SI_v_tail.indicator[label_root_id]) { // // The label is already selected before // continue; // } // // Record label_root_id as once selected by v_tail_global // SI_v_tail.indicator.set(label_root_id); // // Add into once_candidated_queue // // if (!once_candidated[v_tail_local]) { // // If v_tail_global is not in the once_candidated_queue yet, add it in // once_candidated[v_tail_local] = true; // once_candidated_queue[end_once_candidated_queue++] = v_tail_local; // } // // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already // // ++total_check_count; //// const IndexType &L_label = L[label_global_id]; //// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); //// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); //// bp_checking_ins_count.measure_start(); // bool no_need_add = false; // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i]; // if (td - 2 <= iter) { // td += // (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : // ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | // (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) // ? 
-1 : 0; // if (td <= iter) { // no_need_add = true; //// ++bp_hit_count; // break; // } // } // } // if (no_need_add) { //// bp_checking_ins_count.measure_stop(); // continue; // } //// bp_checking_ins_count.measure_stop(); // if (SI_v_tail.is_candidate[label_root_id]) { // continue; // } // SI_v_tail.is_candidate[label_root_id] = true; // SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; // // if (!got_candidates[v_tail_local]) { // // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate) // got_candidates[v_tail_local] = true; // got_candidates_queue[end_got_candidates_queue++] = v_tail_local; // } // } //// {// Just for the complain from the compiler //// assert(iter >= iter); //// } //} template<VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: schedule_label_pushing_para( const DistGraph &G, const VertexID roots_start, const std::vector<uint8_t> &used_bp_roots, const std::vector<VertexID> &active_queue, const VertexID global_start, const VertexID global_size, const VertexID local_size, // const VertexID start_active_queue, // const VertexID size_active_queue, std::vector<VertexID> &got_candidates_queue, VertexID &end_got_candidates_queue, std::vector<ShortIndex> &short_index, const std::vector<BPLabelType> &bp_labels_table, std::vector<uint8_t> &got_candidates, std::vector<uint8_t> &is_active, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated, const UnweightedDist iter) { std::vector<std::pair<VertexID, VertexID> > buffer_send_indices; //.first: Vertex ID //.second: size of labels std::vector<VertexID> buffer_send_labels; if (local_size) { const VertexID start_active_queue = global_start; const VertexID size_active_queue = global_size <= local_size ? 
global_size : local_size; const VertexID bound_active_queue = start_active_queue + size_active_queue; buffer_send_indices.resize(size_active_queue); // Prepare offset for inserting std::vector<VertexID> offsets_buffer_locs(size_active_queue); #pragma omp parallel for for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) { VertexID v_head_local = active_queue[i_q]; is_active[v_head_local] = 0; // reset is_active const IndexType &Lv = L[v_head_local]; offsets_buffer_locs[i_q - start_active_queue] = Lv.distances.rbegin()->size; } EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs); try { buffer_send_labels.resize(size_buffer_send_labels); } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("schedule_label_pushing_para.buffer_send_labels: bad_alloc " "host_id: %d " "size_buffer_send_labels: %lu " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, size_buffer_send_labels, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } // Build buffer_send_labels by parallel inserting #pragma omp parallel for for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) { VertexID v_head_local = active_queue[i_q]; is_active[v_head_local] = 0; // reset is_active VertexID v_head_global = G.get_global_vertex_id(v_head_local); const IndexType &Lv = L[v_head_local]; // Prepare the buffer_send_indices VertexID tmp_i_q = i_q - start_active_queue; buffer_send_indices[tmp_i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size); // These 2 index are used for traversing v_head's last inserted labels VertexID l_i_start = Lv.distances.rbegin()->start_index; VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size; VertexID top_labels = offsets_buffer_locs[tmp_i_q]; for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) { VertexID label_root_id = Lv.vertices[l_i] - roots_start; buffer_send_labels[top_labels++] = label_root_id; // buffer_send_labels.push_back(label_root_id); } } } //////////////////////////////////////////////// //// // const VertexID bound_active_queue = start_active_queue + size_active_queue; // std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(size_active_queue); // //.first: Vertex ID // //.second: size of labels // std::vector<VertexID> buffer_send_labels; // // Prepare masters' newly added labels for sending // // Parallel Version // // Prepare offset for inserting // std::vector<VertexID> offsets_buffer_locs(size_active_queue); //#pragma omp parallel for // for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) { // VertexID v_head_local = active_queue[i_q]; // is_active[v_head_local] = 0; // reset is_active // const IndexType &Lv = L[v_head_local]; // offsets_buffer_locs[i_q - start_active_queue] = Lv.distances.rbegin()->size; // } // EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs); //// {// test //// if (0 == host_id) { //// double memtotal = 0; //// double memfree = 0; //// double bytes_buffer_send_labels = size_buffer_send_labels * sizeof(VertexID); //// PADO::Utils::system_memory(memtotal, memfree); //// printf("bytes_buffer_send_labels: %fGB memtotal: %fGB memfree: %fGB\n", //// bytes_buffer_send_labels / (1 << 30), memtotal / 1024, memfree / 1024); //// } //// } // buffer_send_labels.resize(size_buffer_send_labels); //// {// test //// if (0 == host_id) { //// printf("buffer_send_labels created.\n"); //// } //// } // // // Build 
buffer_send_labels by parallel inserting //#pragma omp parallel for // for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) { // VertexID tmp_i_q = i_q - start_active_queue; // VertexID v_head_local = active_queue[i_q]; // is_active[v_head_local] = 0; // reset is_active // VertexID v_head_global = G.get_global_vertex_id(v_head_local); // const IndexType &Lv = L[v_head_local]; // // Prepare the buffer_send_indices // buffer_send_indices[tmp_i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size); // // These 2 index are used for traversing v_head's last inserted labels // VertexID l_i_start = Lv.distances.rbegin()->start_index; // VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size; // VertexID top_labels = offsets_buffer_locs[tmp_i_q]; // for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) { // VertexID label_root_id = Lv.vertices[l_i]; // buffer_send_labels[top_labels++] = label_root_id; //// buffer_send_labels.push_back(label_root_id); // } // } //// end_active_queue = 0; //// //////////////////////////////////////////////// for (int root = 0; root < num_hosts; ++root) { // Get the indices std::vector<std::pair<VertexID, VertexID> > indices_buffer; one_host_bcasts_buffer_to_buffer(root, buffer_send_indices, indices_buffer); if (indices_buffer.empty()) { continue; } // Get the labels std::vector<VertexID> labels_buffer; one_host_bcasts_buffer_to_buffer(root, buffer_send_labels, labels_buffer); VertexID size_indices_buffer = indices_buffer.size(); // Prepare the offsets for reading indices_buffer std::vector<EdgeID> starts_locs_index(size_indices_buffer); #pragma omp parallel for for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) { const std::pair<VertexID, VertexID> &e = indices_buffer[i_i]; starts_locs_index[i_i] = e.second; } EdgeID total_recved_labels = PADO::prefix_sum_for_offsets(starts_locs_index); // Prepare the offsets for inserting v_tails into queue std::vector<VertexID> offsets_tmp_queue(size_indices_buffer); #pragma omp parallel for for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) { const std::pair<VertexID, VertexID> &e = indices_buffer[i_i]; offsets_tmp_queue[i_i] = G.local_out_degrees[e.first]; } EdgeID num_ngbrs = PADO::prefix_sum_for_offsets(offsets_tmp_queue); std::vector<VertexID> tmp_got_candidates_queue; std::vector<VertexID> sizes_tmp_got_candidates_queue; std::vector<VertexID> tmp_once_candidated_queue; std::vector<VertexID> sizes_tmp_once_candidated_queue; try { tmp_got_candidates_queue.resize(num_ngbrs); sizes_tmp_got_candidates_queue.resize(size_indices_buffer, 0); tmp_once_candidated_queue.resize(num_ngbrs); sizes_tmp_once_candidated_queue.resize(size_indices_buffer, 0); } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("schedule_label_pushing_para.tmp_queues: bad_alloc " "host_id: %d " "num_ngbrs: %lu " "size_indices_buffer: %u " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, num_ngbrs, size_indices_buffer, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } #pragma omp parallel for for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) { VertexID v_head_global = indices_buffer[i_i].first; EdgeID start_index = starts_locs_index[i_i]; EdgeID bound_index = i_i != size_indices_buffer - 1 ? 
starts_locs_index[i_i + 1] : total_recved_labels; if (G.local_out_degrees[v_head_global]) { local_push_labels_para( v_head_global, start_index, bound_index, roots_start, labels_buffer, G, short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, tmp_got_candidates_queue, sizes_tmp_got_candidates_queue[i_i], offsets_tmp_queue[i_i], got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, tmp_once_candidated_queue, sizes_tmp_once_candidated_queue[i_i], once_candidated, bp_labels_table, used_bp_roots, iter); } } {// Collect elements from tmp_got_candidates_queue to got_candidates_queue VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_got_candidates_queue); PADO::collect_into_queue( tmp_got_candidates_queue, offsets_tmp_queue, // the locations for reading tmp_got_candidate_queue sizes_tmp_got_candidates_queue, // the locations for writing got_candidate_queue total_new, got_candidates_queue, end_got_candidates_queue); } {// Collect elements from tmp_once_candidated_queue to once_candidated_queue VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_once_candidated_queue); PADO::collect_into_queue( tmp_once_candidated_queue, offsets_tmp_queue, // the locations for reading tmp_once_candidats_queue sizes_tmp_once_candidated_queue, // the locations for writing once_candidated_queue total_new, once_candidated_queue, end_once_candidated_queue); } } } // Function: pushes v_head's labels to v_head's every (master) neighbor template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: local_push_labels_para( const VertexID v_head_global, const EdgeID start_index, const EdgeID bound_index, const VertexID roots_start, const std::vector<VertexID> &labels_buffer, const DistGraph &G, std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, std::vector<VertexID> &tmp_got_candidates_queue, VertexID &size_tmp_got_candidates_queue, const VertexID offset_tmp_queue, std::vector<uint8_t> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, std::vector<VertexID> &tmp_once_candidated_queue, VertexID &size_tmp_once_candidated_queue, std::vector<uint8_t> &once_candidated, const std::vector<BPLabelType> &bp_labels_table, const std::vector<uint8_t> &used_bp_roots, const UnweightedDist iter) { // Traverse v_head's every neighbor v_tail EdgeID e_i_start = G.vertices_idx[v_head_global]; EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global]; for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) { VertexID v_tail_global = G.out_edges[e_i]; if (used_bp_roots[v_tail_global]) { continue; } if (v_tail_global < roots_start) { // v_tail_global has higher rank than any roots, then no roots can push new labels to it. 
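            // (Added note, an assumption not stated in the original code: the early return here,
            // rather than a continue, is only correct if v_head's out-edges are ordered so that once
            // one neighbor precedes roots_start, all remaining neighbors do too. The sequential
            // version local_push_labels_seq below returns at the same point, which suggests this
            // ordering is indeed relied upon.)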
return; } VertexID v_tail_local = G.get_local_vertex_id(v_tail_global); const IndexType &L_tail = L[v_tail_local]; ShortIndex &SI_v_tail = short_index[v_tail_local]; // Traverse v_head's last inserted labels for (VertexID l_i = start_index; l_i < bound_index; ++l_i) { VertexID label_root_id = labels_buffer[l_i]; VertexID label_global_id = label_root_id + roots_start; if (v_tail_global <= label_global_id) { // v_tail_global has higher rank than the label continue; } // if (SI_v_tail.indicator[label_root_id]) { // // The label is already selected before // continue; // } // // Record label_root_id as once selected by v_tail_global // SI_v_tail.indicator[label_root_id] = 1; {// Deal with race condition if (!PADO::CAS(SI_v_tail.indicator.data() + label_root_id, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) { // The label is already selected before continue; } } // Add into once_candidated_queue if (!once_candidated[v_tail_local]) { // If v_tail_global is not in the once_candidated_queue yet, add it in if (PADO::CAS(once_candidated.data() + v_tail_local, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) { tmp_once_candidated_queue[offset_tmp_queue + size_tmp_once_candidated_queue++] = v_tail_local; } // once_candidated[v_tail_local] = 1; // once_candidated_queue[end_once_candidated_queue++] = v_tail_local; } // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already // const IndexType &L_label = L[label_global_id]; // _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); // _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); const BPLabelType &L_label = bp_labels_table[label_root_id]; bool no_need_add = false; for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i]; if (td - 2 <= iter) { td += (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) ? 
-1 : 0; if (td <= iter) { no_need_add = true; break; } } } if (no_need_add) { continue; } // if (SI_v_tail.is_candidate[label_root_id]) { // continue; // } // SI_v_tail.is_candidate[label_root_id] = 1; // SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; if (!SI_v_tail.is_candidate[label_root_id]) { if (CAS(SI_v_tail.is_candidate.data() + label_root_id, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) { PADO::TS_enqueue(SI_v_tail.candidates_que, SI_v_tail.end_candidates_que, label_root_id); } } // Add into got_candidates queue // if (!got_candidates[v_tail_local]) { // // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate) // got_candidates[v_tail_local] = 1; // got_candidates_queue[end_got_candidates_queue++] = v_tail_local; // } if (!got_candidates[v_tail_local]) { if (CAS(got_candidates.data() + v_tail_local, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) { tmp_got_candidates_queue[offset_tmp_queue + size_tmp_got_candidates_queue++] = v_tail_local; } } } } // { // assert(iter >= iter); // } } // Function: pushes v_head's labels to v_head's every (master) neighbor template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: local_push_labels_seq( VertexID v_head_global, EdgeID start_index, EdgeID bound_index, VertexID roots_start, const std::vector<VertexID> &labels_buffer, const DistGraph &G, std::vector<ShortIndex> &short_index, std::vector<VertexID> &got_candidates_queue, VertexID &end_got_candidates_queue, std::vector<uint8_t> &got_candidates, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated, const std::vector<BPLabelType> &bp_labels_table, const std::vector<uint8_t> &used_bp_roots, const UnweightedDist iter) { // Traverse v_head's every neighbor v_tail EdgeID e_i_start = G.vertices_idx[v_head_global]; EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global]; for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) { VertexID v_tail_global = G.out_edges[e_i]; if (used_bp_roots[v_tail_global]) { continue; } if (v_tail_global < roots_start) { // v_tail_global has higher rank than any roots, then no roots can push new labels to it. 
return; } // Traverse v_head's last inserted labels for (VertexID l_i = start_index; l_i < bound_index; ++l_i) { VertexID label_root_id = labels_buffer[l_i]; VertexID label_global_id = label_root_id + roots_start; if (v_tail_global <= label_global_id) { // v_tail_global has higher rank than the label continue; } VertexID v_tail_local = G.get_local_vertex_id(v_tail_global); const IndexType &L_tail = L[v_tail_local]; ShortIndex &SI_v_tail = short_index[v_tail_local]; if (SI_v_tail.indicator[label_root_id]) { // The label is already selected before continue; } // Record label_root_id as once selected by v_tail_global SI_v_tail.indicator[label_root_id] = 1; // SI_v_tail.indicator.set(label_root_id); // Add into once_candidated_queue if (!once_candidated[v_tail_local]) { // If v_tail_global is not in the once_candidated_queue yet, add it in once_candidated[v_tail_local] = 1; once_candidated_queue[end_once_candidated_queue++] = v_tail_local; } // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already // const IndexType &L_label = L[label_global_id]; // _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); // _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); const BPLabelType &L_label = bp_labels_table[label_root_id]; bool no_need_add = false; for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i]; if (td - 2 <= iter) { td += (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) ? -1 : 0; if (td <= iter) { no_need_add = true; break; } } } if (no_need_add) { continue; } if (SI_v_tail.is_candidate[label_root_id]) { continue; } SI_v_tail.is_candidate[label_root_id] = 1; SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; if (!got_candidates[v_tail_local]) { // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate) got_candidates[v_tail_local] = 1; got_candidates_queue[end_got_candidates_queue++] = v_tail_local; } } } // { // assert(iter >= iter); // } } //// Function: pushes v_head's labels to v_head's every (master) neighbor //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //local_push_labels( // VertexID v_head_local, // VertexID roots_start, // const DistGraph &G, // std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<bool> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<bool> &once_candidated, // const std::vector<BPLabelType> &bp_labels_table, // const std::vector<uint8_t> &used_bp_roots, // UnweightedDist iter) //{ // // The data structure of a message //// std::vector< LabelUnitType > buffer_recv; // const IndexType &Lv = L[v_head_local]; // // These 2 index are used for traversing v_head's last inserted labels // VertexID l_i_start = Lv.distances.rbegin() -> start_index; // VertexID l_i_bound = l_i_start + Lv.distances.rbegin() -> size; // // Traverse v_head's every neighbor v_tail // VertexID v_head_global = G.get_global_vertex_id(v_head_local); // EdgeID e_i_start = G.vertices_idx[v_head_global]; // EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global]; // for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) { // VertexID v_tail_global = G.out_edges[e_i]; // if (used_bp_roots[v_tail_global]) { // continue; // } // if (v_tail_global < 
roots_start) { // v_tail_global has higher rank than any roots, then no roots can push new labels to it. // return; // } // // // Traverse v_head's last inserted labels // for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) { // VertexID label_root_id = Lv.vertices[l_i]; // VertexID label_global_id = label_root_id + roots_start; // if (v_tail_global <= label_global_id) { // // v_tail_global has higher rank than the label // continue; // } // VertexID v_tail_local = G.get_local_vertex_id(v_tail_global); // const IndexType &L_tail = L[v_tail_local]; // ShortIndex &SI_v_tail = short_index[v_tail_local]; // if (SI_v_tail.indicator[label_root_id]) { // // The label is already selected before // continue; // } // // Record label_root_id as once selected by v_tail_global // SI_v_tail.indicator.set(label_root_id); // // Add into once_candidated_queue // // if (!once_candidated[v_tail_local]) { // // If v_tail_global is not in the once_candidated_queue yet, add it in // once_candidated[v_tail_local] = true; // once_candidated_queue[end_once_candidated_queue++] = v_tail_local; // } // // // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already // // ++total_check_count; //// const IndexType &L_label = L[label_global_id]; //// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); //// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); //// bp_checking_ins_count.measure_start(); // const BPLabelType &L_label = bp_labels_table[label_root_id]; // bool no_need_add = false; // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i]; // if (td - 2 <= iter) { // td += // (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : // ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | // (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) // ? -1 : 0; // if (td <= iter) { // no_need_add = true; //// ++bp_hit_count; // break; // } // } // } // if (no_need_add) { //// bp_checking_ins_count.measure_stop(); // continue; // } //// bp_checking_ins_count.measure_stop(); // if (SI_v_tail.is_candidate[label_root_id]) { // continue; // } // SI_v_tail.is_candidate[label_root_id] = true; // SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; // // if (!got_candidates[v_tail_local]) { // // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate) // got_candidates[v_tail_local] = true; // got_candidates_queue[end_got_candidates_queue++] = v_tail_local; // } // } // } // // { // assert(iter >= iter); // } //} //// DEPRECATED Function: in the scatter phase, synchronize local masters to mirrors on other hosts //// Has some mysterious problem: when I call this function, some hosts will receive wrong messages; when I copy all //// code of this function into the caller, all messages become right. 
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //sync_masters_2_mirrors( // const DistGraph &G, // const std::vector<VertexID> &active_queue, // VertexID end_active_queue, // std::vector< std::pair<VertexID, VertexID> > &buffer_send, // std::vector<MPI_Request> &requests_send //) //{ //// std::vector< std::pair<VertexID, VertexID> > buffer_send; // // pair.first: Owener vertex ID of the label // // pair.first: label vertex ID of the label // // Prepare masters' newly added labels for sending // for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) { // VertexID v_head_local = active_queue[i_q]; // VertexID v_head_global = G.get_global_vertex_id(v_head_local); // const IndexType &Lv = L[v_head_local]; // // These 2 index are used for traversing v_head's last inserted labels // VertexID l_i_start = Lv.distances.rbegin()->start_index; // VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size; // for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) { // VertexID label_root_id = Lv.vertices[l_i]; // buffer_send.emplace_back(v_head_global, label_root_id); //// {//test //// if (1 == host_id) { //// printf("@%u host_id: %u v_head_global: %u\n", __LINE__, host_id, v_head_global);// //// } //// } // } // } // { // if (!buffer_send.empty()) { // printf("@%u host_id: %u sync_masters_2_mirrors: buffer_send.size: %lu buffer_send[0]:(%u %u)\n", __LINE__, host_id, buffer_send.size(), buffer_send[0].first, buffer_send[0].second); // } // assert(!requests_send.empty()); // } // // // Send messages // for (int loc = 0; loc < num_hosts - 1; ++loc) { // int dest_host_id = G.buffer_send_list_loc_2_master_host_id(loc); // MPI_Isend(buffer_send.data(), // MPI_Instance::get_sending_size(buffer_send), // MPI_CHAR, // dest_host_id, // SENDING_MASTERS_TO_MIRRORS, // MPI_COMM_WORLD, // &requests_send[loc]); // { // if (!buffer_send.empty()) { // printf("@%u host_id: %u dest_host_id: %u buffer_send.size: %lu buffer_send[0]:(%u %u)\n", __LINE__, host_id, dest_host_id, buffer_send.size(), buffer_send[0].first, buffer_send[0].second); // } // } // } //} template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: schedule_label_inserting_para( const DistGraph &G, const VertexID roots_start, const VertexID roots_size, std::vector<ShortIndex> &short_index, const std::vector< std::vector<UnweightedDist> > &dist_table, const std::vector<VertexID> &got_candidates_queue, const VertexID start_got_candidates_queue, const VertexID size_got_candidates_queue, std::vector<uint8_t> &got_candidates, std::vector<VertexID> &active_queue, VertexID &end_active_queue, std::vector<uint8_t> &is_active, std::vector< std::pair<VertexID, VertexID> > &buffer_send, const VertexID iter) { const VertexID bound_got_candidates_queue = start_got_candidates_queue + size_got_candidates_queue; std::vector<VertexID> offsets_tmp_active_queue; std::vector<VertexID> tmp_active_queue; std::vector<VertexID> sizes_tmp_active_queue; std::vector<EdgeID> offsets_tmp_buffer_send; std::vector< std::pair<VertexID, VertexID> > tmp_buffer_send; std::vector<EdgeID> sizes_tmp_buffer_send; EdgeID total_send_labels; try { offsets_tmp_active_queue.resize(size_got_candidates_queue); #pragma omp parallel for for (VertexID i_q = 0; i_q < size_got_candidates_queue; ++i_q) { offsets_tmp_active_queue[i_q] = i_q; } tmp_active_queue.resize(size_got_candidates_queue); sizes_tmp_active_queue.resize(size_got_candidates_queue, 0); // Size will only be 0 or 1, but it will become offsets eventually. 
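            // Illustrative note (added, not part of the original code): the temporaries above follow
            // the usual sizes -> offsets -> scatter pattern. PADO::prefix_sum_for_offsets() is
            // assumed, from its uses in this file, to turn a per-slot size array into exclusive
            // prefix offsets in place and return the total, e.g.
            //     sizes = {2, 0, 3, 1}  ==>  offsets = {0, 2, 2, 5},  returned total = 6
            // so slot i can write its elements into tmp[offsets[i] .. offsets[i] + sizes[i]) without
            // synchronization, and PADO::collect_into_queue() can later compact the per-slot
            // segments into the shared queue using the same offsets.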
// Prepare for parallel buffer_send // std::vector<EdgeID> offsets_tmp_buffer_send(size_got_candidates_queue); offsets_tmp_buffer_send.resize(size_got_candidates_queue); #pragma omp parallel for for (VertexID i_q = start_got_candidates_queue; i_q < bound_got_candidates_queue; ++i_q) { VertexID v_id_local = got_candidates_queue[i_q]; VertexID v_global_id = G.get_global_vertex_id(v_id_local); VertexID tmp_i_q = i_q - start_got_candidates_queue; if (v_global_id >= roots_start && v_global_id < roots_start + roots_size) { // If v_global_id is root, its new labels should be put into buffer_send offsets_tmp_buffer_send[tmp_i_q] = short_index[v_id_local].end_candidates_que; } else { offsets_tmp_buffer_send[tmp_i_q] = 0; } } total_send_labels = PADO::prefix_sum_for_offsets(offsets_tmp_buffer_send); tmp_buffer_send.resize(total_send_labels); sizes_tmp_buffer_send.resize(size_got_candidates_queue, 0); } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("L%u_tmp_buffer_send: bad_alloc " "host_id: %d " "iter: %u " "size_got_candidates_queue: %u " "total_send_labels: %u " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", __LINE__, host_id, iter, size_got_candidates_queue, total_send_labels, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } #pragma omp parallel for for (VertexID i_queue = start_got_candidates_queue; i_queue < bound_got_candidates_queue; ++i_queue) { VertexID v_id_local = got_candidates_queue[i_queue]; VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates got_candidates[v_id_local] = 0; // reset got_candidates // Traverse v_id's all candidates VertexID tmp_i_queue = i_queue - start_got_candidates_queue; VertexID bound_cand_i = short_index[v_id_local].end_candidates_que; for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) { VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i]; short_index[v_id_local].is_candidate[cand_root_id] = 0; // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance if (distance_query( cand_root_id, v_id_local, roots_start, // L, dist_table, iter)) { if (!is_active[v_id_local]) { is_active[v_id_local] = 1; // active_queue[end_active_queue++] = v_id_local; tmp_active_queue[tmp_i_queue + sizes_tmp_active_queue[tmp_i_queue]++] = v_id_local; } ++inserted_count; // The candidate cand_root_id needs to be added into v_id's label insert_label_only_para( cand_root_id, v_id_local, roots_start, roots_size, G, tmp_buffer_send, sizes_tmp_buffer_send[tmp_i_queue], offsets_tmp_buffer_send[tmp_i_queue]); // buffer_send); } } short_index[v_id_local].end_candidates_que = 0; if (0 != inserted_count) { // Update other arrays in L[v_id] if new labels were inserted in this iteration update_label_indices( v_id_local, inserted_count, // L, short_index, // b_id, iter); } } {// Collect elements from tmp_active_queue to active_queue VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_active_queue); PADO::collect_into_queue( tmp_active_queue, offsets_tmp_active_queue, sizes_tmp_active_queue, total_new, active_queue, end_active_queue); } {// Collect elements from tmp_buffer_send to buffer_send EdgeID old_size_buffer_send = buffer_send.size(); EdgeID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_buffer_send); try { buffer_send.resize(total_new + old_size_buffer_send); } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; 
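            // (Added note: PADO::Utils::system_memory() appears to report totals in MB, hence the
            // division by 1024 when the values are printed as GB below; this is inferred from the
            // surrounding printf calls, not from the function's definition.)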
PADO::Utils::system_memory(memtotal, memfree); printf("L%u_buffer_send: bad_alloc " "iter: %u " "host_id: %d " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", __LINE__, iter, host_id, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } // EdgeID zero_size = 0; PADO::collect_into_queue( tmp_buffer_send, offsets_tmp_buffer_send, sizes_tmp_buffer_send, total_new, buffer_send, old_size_buffer_send); // zero_size); } } // Function for distance query; // traverse vertex v_id's labels; // return false if shorter distance exists already, return true if the cand_root_id can be added into v_id's label. template <VertexID BATCH_SIZE> inline bool DistBVCPLL<BATCH_SIZE>:: distance_query( VertexID cand_root_id, VertexID v_id_local, VertexID roots_start, // const std::vector<IndexType> &L, const std::vector< std::vector<UnweightedDist> > &dist_table, UnweightedDist iter) { VertexID cand_real_id = cand_root_id + roots_start; const IndexType &Lv = L[v_id_local]; // Traverse v_id's all existing labels VertexID b_i_bound = Lv.batches.size(); _mm_prefetch(&Lv.batches[0], _MM_HINT_T0); _mm_prefetch(&Lv.distances[0], _MM_HINT_T0); _mm_prefetch(&Lv.vertices[0], _MM_HINT_T0); //_mm_prefetch(&dist_table[cand_root_id][0], _MM_HINT_T0); for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE; VertexID dist_start_index = Lv.batches[b_i].start_index; VertexID dist_bound_index = dist_start_index + Lv.batches[b_i].size; // Traverse dist_table for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // VertexID dist_bound_index = Lv.distances.size(); // for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) { UnweightedDist dist = Lv.distances[dist_i].dist; // Cannot use this, because no batch_id any more, so distances are not all in order among batches. if (dist >= iter) { // In a batch, the labels' distances are increasingly ordered. // If the half path distance is already greater than their targeted distance, jump to next batch break; } VertexID v_start_index = Lv.distances[dist_i].start_index; VertexID v_bound_index = v_start_index + Lv.distances[dist_i].size; // _mm_prefetch(&dist_table[cand_root_id][0], _MM_HINT_T0); _mm_prefetch(reinterpret_cast<const char *>(dist_table[cand_root_id].data()), _MM_HINT_T0); for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // VertexID v = Lv.vertices[v_i] + id_offset; // v is a label hub of v_id VertexID v = Lv.vertices[v_i]; // v is a label hub of v_id if (v >= cand_real_id) { // Vertex cand_real_id cannot have labels whose ranks are lower than it, // in which case dist_table[cand_root_id][v] does not exist. 
continue; } VertexID d_tmp = dist + dist_table[cand_root_id][v]; if (d_tmp <= iter) { return false; } } } } return true; } //// Sequential version // Function inserts candidate cand_root_id into vertex v_id's labels; // update the distance buffer dist_table; // but it only update the v_id's labels' vertices array; template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: insert_label_only_seq( VertexID cand_root_id, VertexID v_id_local, VertexID roots_start, VertexID roots_size, const DistGraph &G, // std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::pair<VertexID, VertexID> > &buffer_send) // UnweightedDist iter) { try { VertexID cand_real_id = cand_root_id + roots_start; L[v_id_local].vertices.push_back(cand_real_id); // L[v_id_local].vertices.push_back(cand_root_id); // Update the distance buffer if v_id is a root VertexID v_id_global = G.get_global_vertex_id(v_id_local); VertexID v_root_id = v_id_global - roots_start; if (v_id_global >= roots_start && v_root_id < roots_size) { // VertexID cand_real_id = cand_root_id + roots_start; // dist_table[v_root_id][cand_real_id] = iter; // Put the update into the buffer_send for later sending buffer_send.emplace_back(v_root_id, cand_real_id); } } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("insert_label_only_seq: bad_alloc " "host_id: %d " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } } //// Parallel Version // Function inserts candidate cand_root_id into vertex v_id's labels; // update the distance buffer dist_table; // but it only update the v_id's labels' vertices array; template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: insert_label_only_para( VertexID cand_root_id, VertexID v_id_local, VertexID roots_start, VertexID roots_size, const DistGraph &G, // std::vector< std::pair<VertexID, VertexID> > &buffer_send) std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send, EdgeID &size_tmp_buffer_send, const EdgeID offset_tmp_buffer_send) { try { VertexID cand_real_id = cand_root_id + roots_start; L[v_id_local].vertices.push_back(cand_real_id); // L[v_id_local].vertices.push_back(cand_root_id); // Update the distance buffer if v_id is a root VertexID v_id_global = G.get_global_vertex_id(v_id_local); VertexID v_root_id = v_id_global - roots_start; if (v_id_global >= roots_start && v_root_id < roots_size) { // VertexID cand_real_id = cand_root_id + roots_start; // Put the update into the buffer_send for later sending tmp_buffer_send[offset_tmp_buffer_send + size_tmp_buffer_send++] = std::make_pair(v_root_id, cand_real_id); } } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("insert_label_only_para: bad_alloc " "host_id: %d " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } } // Function updates those index arrays in v_id's label only if v_id has been inserted new labels template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: update_label_indices( const VertexID v_id_local, const VertexID inserted_count, // std::vector<IndexType> &L, std::vector<ShortIndex> &short_index, // VertexID b_id, const UnweightedDist iter) { try { IndexType &Lv = L[v_id_local]; // indicator[BATCH_SIZE + 1] is true, means v got some labels 
already in this batch if (short_index[v_id_local].indicator[BATCH_SIZE]) { // Increase the batches' last element's size because a new distance element need to be added ++(Lv.batches.rbegin() -> size); } else { short_index[v_id_local].indicator[BATCH_SIZE] = 1; // short_index[v_id_local].indicator.set(BATCH_SIZE); // Insert a new Batch with batch_id, start_index, and size because a new distance element need to be added Lv.batches.emplace_back( // b_id, // batch id Lv.distances.size(), // start index 1); // size } // Insert a new distance element with start_index, size, and dist Lv.distances.emplace_back( Lv.vertices.size() - inserted_count, // start index inserted_count, // size iter); // distance } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("update_label_indices: bad_alloc " "host_id: %d " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } } // Function to reset dist_table the distance buffer to INF // Traverse every root's labels to reset its distance buffer elements to INF. // In this way to reduce the cost of initialization of the next batch. template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: reset_at_end( const DistGraph &G, // VertexID roots_start, // const std::vector<VertexID> &roots_master_local, std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::vector<VertexID> > &recved_dist_table, std::vector<BPLabelType> &bp_labels_table, const std::vector<VertexID> &once_candidated_queue, const VertexID end_once_candidated_queue) { // // Reset dist_table according to local masters' labels // for (VertexID r_local_id : roots_master_local) { // IndexType &Lr = L[r_local_id]; // VertexID r_root_id = G.get_global_vertex_id(r_local_id) - roots_start; // VertexID b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // VertexID dist_start_index = Lr.batches[b_i].start_index; // VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse dist_table // for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // VertexID v_start_index = Lr.distances[dist_i].start_index; // VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; // for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // dist_table[r_root_id][Lr.vertices[v_i] + id_offset] = MAX_UNWEIGHTED_DIST; // } // } // } // } // Reset dist_table according to received masters' labels from other hosts for (VertexID r_root_id = 0; r_root_id < BATCH_SIZE; ++r_root_id) { for (VertexID cand_real_id : recved_dist_table[r_root_id]) { dist_table[r_root_id][cand_real_id] = MAX_UNWEIGHTED_DIST; } recved_dist_table[r_root_id].clear(); } // Reset bit-parallel labels table for (VertexID r_root_id = 0; r_root_id < BATCH_SIZE; ++r_root_id) { memset(bp_labels_table[r_root_id].bp_dist, 0, sizeof(bp_labels_table[r_root_id].bp_dist)); memset(bp_labels_table[r_root_id].bp_sets, 0, sizeof(bp_labels_table[r_root_id].bp_sets)); } // Remove labels of local minimum set for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) { VertexID v_local_id = once_candidated_queue[v_i]; if (!G.is_local_minimum[v_local_id]) { continue; } 
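        // (Added note: clean_all_indices() is assumed to release v_local_id's entire label index;
        // only vertices flagged in G.is_local_minimum are treated this way, per the
        // "Remove labels of local minimum set" comment above.)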
L[v_local_id].clean_all_indices(); } } template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: batch_process( const DistGraph &G, // const VertexID b_id, const VertexID roots_start, // start id of roots const VertexID roots_size, // how many roots in the batch const std::vector<uint8_t> &used_bp_roots, std::vector<VertexID> &active_queue, VertexID &end_active_queue, std::vector<VertexID> &got_candidates_queue, VertexID &end_got_candidates_queue, std::vector<ShortIndex> &short_index, std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::vector<VertexID> > &recved_dist_table, std::vector<BPLabelType> &bp_labels_table, std::vector<uint8_t> &got_candidates, // std::vector<bool> &got_candidates, std::vector<uint8_t> &is_active, // std::vector<bool> &is_active, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated) // std::vector<bool> &once_candidated) { // At the beginning of a batch, initialize the labels L and distance buffer dist_table; // initializing_time -= WallTimer::get_time_mark(); // The Maximum of active vertices among hosts. VertexID global_num_actives = initialization(G, short_index, dist_table, recved_dist_table, bp_labels_table, active_queue, end_active_queue, once_candidated_queue, end_once_candidated_queue, once_candidated, // b_id, roots_start, roots_size, // roots_master_local, used_bp_roots); // initializing_time += WallTimer::get_time_mark(); UnweightedDist iter = 0; // The iterator, also the distance for current iteration // {//test // if (0 == host_id) { // printf("host_id: %u initialization finished.\n", host_id); // } // } while (global_num_actives) { ++iter; {// Limit the distance if (iter >7 ) { if (end_active_queue >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) { VertexID v_id_local = active_queue[i_q]; is_active[v_id_local] = 0; } } else { for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) { VertexID v_id_local = active_queue[i_q]; is_active[v_id_local] = 0; } } end_active_queue = 0; break; } } //#ifdef DEBUG_MESSAGES_ON // {//test //// if (0 == host_id) { // double memtotal = 0; // double memfree = 0; // PADO::Utils::system_memory(memtotal, memfree); // printf("iter: %u " // "host_id: %d " // "global_num_actives: %u " // "L.size(): %.2fGB " // "memtotal: %.2fGB " // "memfree: %.2fGB\n", // iter, // host_id, // global_num_actives, // get_index_size() * 1.0 / (1 << 30), // memtotal / 1024, // memfree / 1024); //// } // } //#endif // Traverse active vertices to push their labels as candidates // Send masters' newly added labels to other hosts try { // scatter_time -= WallTimer::get_time_mark(); // Divide the pushing into many-time runs, to reduce the peak memory footprint. 
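            // Illustrative note (added, based on the surrounding code rather than external docs):
            // every host walks the same number of chunks over global_num_actives (the MPI_Allreduce
            // MAX of end_active_queue over all hosts), so the collective steps inside
            // schedule_label_pushing_para stay matched on all ranks. Each host passes local_size =
            // the number of its own active vertices remaining at global_i, or 0 once its queue is
            // exhausted; e.g. with chunk_size = 4, global_num_actives = 10 and end_active_queue = 6,
            // the host passes local_size = 6, 2 and 0 for the three calls, and the callee appears
            // to clamp each call to the chunk size.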
const VertexID chunk_size = 1 << 20; VertexID remainder = global_num_actives % chunk_size; VertexID bound_global_i = global_num_actives - remainder; // VertexID remainder = end_active_queue % chunk_size; // VertexID bound_active_queue = end_active_queue - remainder; VertexID local_size; for (VertexID global_i = 0; global_i < bound_global_i; global_i += chunk_size) { if (global_i < end_active_queue) { local_size = end_active_queue - global_i; } else { local_size = 0; } // {//test // if (1024 == roots_start && 7 == host_id) { // printf("S0 host_id: %d global_i: %u bound_global_i: %u local_size: %u\n", // host_id, global_i, bound_global_i, local_size); // } // } schedule_label_pushing_para( G, roots_start, used_bp_roots, active_queue, global_i, chunk_size, local_size, got_candidates_queue, end_got_candidates_queue, short_index, bp_labels_table, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated, iter); } if (remainder) { if (bound_global_i < end_active_queue) { local_size = end_active_queue - bound_global_i; } else { local_size = 0; } schedule_label_pushing_para( G, roots_start, used_bp_roots, active_queue, bound_global_i, remainder, local_size, got_candidates_queue, end_got_candidates_queue, short_index, bp_labels_table, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated, iter); } // // schedule_label_pushing_para( // G, // roots_start, // used_bp_roots, // active_queue, // 0, // end_active_queue, // got_candidates_queue, // end_got_candidates_queue, // short_index, // bp_labels_table, // got_candidates, // is_active, // once_candidated_queue, // end_once_candidated_queue, // once_candidated, // iter); end_active_queue = 0; // scatter_time += WallTimer::get_time_mark(); } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("pushing: bad_alloc " "iter: %u " "host_id: %d " "global_num_actives: %u " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", iter, host_id, global_num_actives, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } // {//test // if (0 == host_id) { // printf("host_id: %u pushing finished...\n", host_id); // } // } // Traverse vertices in the got_candidates_queue to insert labels { // gather_time -= WallTimer::get_time_mark(); std::vector< std::pair<VertexID, VertexID> > buffer_send; // For sync elements in the dist_table // pair.first: root id // pair.second: label (global) id of the root if (end_got_candidates_queue >= THRESHOLD_PARALLEL) { const VertexID chunk_size = 1 << 16; VertexID remainder = end_got_candidates_queue % chunk_size; VertexID bound_i_q = end_got_candidates_queue - remainder; for (VertexID i_q = 0; i_q < bound_i_q; i_q += chunk_size) { schedule_label_inserting_para( G, roots_start, roots_size, short_index, dist_table, got_candidates_queue, i_q, chunk_size, got_candidates, active_queue, end_active_queue, is_active, buffer_send, iter); } if (remainder) { schedule_label_inserting_para( G, roots_start, roots_size, short_index, dist_table, got_candidates_queue, bound_i_q, remainder, got_candidates, active_queue, end_active_queue, is_active, buffer_send, iter); } ////// Backup // // Prepare for parallel active_queue // // Don't need offsets_tmp_active_queue here, because the index i_queue is the offset already. // // Actually we still need offsets_tmp_active_queue, because collect_into_queue() needs it. 
// std::vector<VertexID> offsets_tmp_active_queue; // std::vector<VertexID> tmp_active_queue; // std::vector<VertexID> sizes_tmp_active_queue; // std::vector<EdgeID> offsets_tmp_buffer_send; // std::vector< std::pair<VertexID, VertexID> > tmp_buffer_send; // std::vector<EdgeID> sizes_tmp_buffer_send; // EdgeID total_send_labels; // // try { // offsets_tmp_active_queue.resize(end_got_candidates_queue); //#pragma omp parallel for // for (VertexID i_q = 0; i_q < end_got_candidates_queue; ++i_q) { // offsets_tmp_active_queue[i_q] = i_q; // } // tmp_active_queue.resize(end_got_candidates_queue); // sizes_tmp_active_queue.resize(end_got_candidates_queue, // 0); // Size will only be 0 or 1, but it will become offsets eventually. // // // Prepare for parallel buffer_send //// std::vector<EdgeID> offsets_tmp_buffer_send(end_got_candidates_queue); // offsets_tmp_buffer_send.resize(end_got_candidates_queue); //#pragma omp parallel for // for (VertexID i_q = 0; i_q < end_got_candidates_queue; ++i_q) { // VertexID v_id_local = got_candidates_queue[i_q]; // VertexID v_global_id = G.get_global_vertex_id(v_id_local); // if (v_global_id >= roots_start && v_global_id < roots_start + roots_size) { // // If v_global_id is root, its new labels should be put into buffer_send // offsets_tmp_buffer_send[i_q] = short_index[v_id_local].end_candidates_que; // } else { // offsets_tmp_buffer_send[i_q] = 0; // } // } // total_send_labels = PADO::prefix_sum_for_offsets(offsets_tmp_buffer_send); // tmp_buffer_send.resize(total_send_labels); // sizes_tmp_buffer_send.resize(end_got_candidates_queue, 0); // } // catch (const std::bad_alloc &) { // double memtotal = 0; // double memfree = 0; // PADO::Utils::system_memory(memtotal, memfree); // printf("L%u_tmp_buffer_send: bad_alloc " // "host_id: %d " // "iter: %u " // "end_got_candidates_queue: %u " // "total_send_labels: %u " // "L.size(): %.2fGB " // "memtotal: %.2fGB " // "memfree: %.2fGB\n", // __LINE__, // host_id, // iter, // end_got_candidates_queue, // total_send_labels, // get_index_size() * 1.0 / (1 << 30), // memtotal / 1024, // memfree / 1024); // exit(1); // } // //#pragma omp parallel for // for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) { // VertexID v_id_local = got_candidates_queue[i_queue]; // VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates // got_candidates[v_id_local] = 0; // reset got_candidates // // Traverse v_id's all candidates // VertexID bound_cand_i = short_index[v_id_local].end_candidates_que; // for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) { // VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i]; // short_index[v_id_local].is_candidate[cand_root_id] = 0; // // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance // if (distance_query( // cand_root_id, // v_id_local, // roots_start, // // L, // dist_table, // iter)) { // if (!is_active[v_id_local]) { // is_active[v_id_local] = 1; //// active_queue[end_active_queue++] = v_id_local; // tmp_active_queue[i_queue + sizes_tmp_active_queue[i_queue]++] = v_id_local; // } // ++inserted_count; // // The candidate cand_root_id needs to be added into v_id's label // insert_label_only_para( // cand_root_id, // v_id_local, // roots_start, // roots_size, // G, // tmp_buffer_send, // sizes_tmp_buffer_send[i_queue], // offsets_tmp_buffer_send[i_queue]); //// buffer_send); // } // } // short_index[v_id_local].end_candidates_que = 0; // if (0 != inserted_count) { // // 
Update other arrays in L[v_id] if new labels were inserted in this iteration // update_label_indices( // v_id_local, // inserted_count, // // L, //// short_index, //// b_id, // iter); // } // } // // {// Collect elements from tmp_active_queue to active_queue // VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_active_queue); // PADO::collect_into_queue( // tmp_active_queue, // offsets_tmp_active_queue, // sizes_tmp_active_queue, // total_new, // active_queue, // end_active_queue); // } // {// Collect elements from tmp_buffer_send to buffer_send // EdgeID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_buffer_send); // try { // buffer_send.resize(total_new); // } // catch (const std::bad_alloc &) { // double memtotal = 0; // double memfree = 0; // PADO::Utils::system_memory(memtotal, memfree); // printf("L%u_buffer_send: bad_alloc " // "iter: %u " // "host_id: %d " // "L.size(): %.2fGB " // "memtotal: %.2fGB " // "memfree: %.2fGB\n", // __LINE__, // iter, // host_id, // get_index_size() * 1.0 / (1 << 30), // memtotal / 1024, // memfree / 1024); // exit(1); // } // EdgeID zero_size = 0; // PADO::collect_into_queue( // tmp_buffer_send, // offsets_tmp_buffer_send, // sizes_tmp_buffer_send, // total_new, // buffer_send, // zero_size); // } } else { for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) { VertexID v_id_local = got_candidates_queue[i_queue]; VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates got_candidates[v_id_local] = 0; // reset got_candidates // Traverse v_id's all candidates VertexID bound_cand_i = short_index[v_id_local].end_candidates_que; for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) { VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i]; short_index[v_id_local].is_candidate[cand_root_id] = 0; // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance if (distance_query( cand_root_id, v_id_local, roots_start, // L, dist_table, iter)) { if (!is_active[v_id_local]) { is_active[v_id_local] = 1; active_queue[end_active_queue++] = v_id_local; } ++inserted_count; // The candidate cand_root_id needs to be added into v_id's label insert_label_only_seq( cand_root_id, v_id_local, roots_start, roots_size, G, // dist_table, buffer_send); // iter); } } short_index[v_id_local].end_candidates_que = 0; if (0 != inserted_count) { // Update other arrays in L[v_id] if new labels were inserted in this iteration update_label_indices( v_id_local, inserted_count, // L, short_index, // b_id, iter); } } } // {//test // printf("host_id: %u gather: buffer_send.size(); %lu bytes: %lu\n", host_id, buffer_send.size(), MPI_Instance::get_sending_size(buffer_send)); // } end_got_candidates_queue = 0; // Set the got_candidates_queue empty // Sync the dist_table for (int root = 0; root < num_hosts; ++root) { std::vector<std::pair<VertexID, VertexID>> buffer_recv; one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv); if (buffer_recv.empty()) { continue; } EdgeID size_buffer_recv = buffer_recv.size(); try { if (size_buffer_recv >= THRESHOLD_PARALLEL) { // Get label number for every root std::vector<VertexID> sizes_recved_root_labels(roots_size, 0); #pragma omp parallel for for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) { const std::pair<VertexID, VertexID> &e = buffer_recv[i_l]; VertexID root_id = e.first; __atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST); } // Resize the recved_dist_table for every root #pragma omp 
parallel for for (VertexID root_id = 0; root_id < roots_size; ++root_id) { VertexID old_size = recved_dist_table[root_id].size(); VertexID tmp_size = sizes_recved_root_labels[root_id]; if (tmp_size) { recved_dist_table[root_id].resize(old_size + tmp_size); sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size } // If tmp_size == 0, root_id has no received labels. // sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size } // Recorde received labels in recved_dist_table #pragma omp parallel for for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) { const std::pair<VertexID, VertexID> &e = buffer_recv[i_l]; VertexID root_id = e.first; VertexID cand_real_id = e.second; dist_table[root_id][cand_real_id] = iter; PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id], cand_real_id); } } else { for (const std::pair<VertexID, VertexID> &e : buffer_recv) { VertexID root_id = e.first; VertexID cand_real_id = e.second; dist_table[root_id][cand_real_id] = iter; // Record the received element, for future reset recved_dist_table[root_id].push_back(cand_real_id); } } } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("recved_dist_table: bad_alloc " "host_id: %d " "iter: %u " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, iter, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } } // Sync the global_num_actives MPI_Allreduce(&end_active_queue, &global_num_actives, 1, V_ID_Type, MPI_MAX, // MPI_SUM, MPI_COMM_WORLD); // gather_time += WallTimer::get_time_mark(); } // {//test // if (0 == host_id) { // printf("iter: %u inserting labels finished.\n", iter); // } // } } // Reset the dist_table // clearup_time -= WallTimer::get_time_mark(); reset_at_end( G, // roots_start, // roots_master_local, dist_table, recved_dist_table, bp_labels_table, once_candidated_queue, end_once_candidated_queue); // clearup_time += WallTimer::get_time_mark(); // {//test // if (0 == host_id) { // printf("host_id: %u resetting finished.\n", host_id); // } // } } //// Sequential Version //template <VertexID BATCH_SIZE> //inline void DistBVCPLL<BATCH_SIZE>:: //batch_process( // const DistGraph &G, // VertexID b_id, // VertexID roots_start, // start id of roots // VertexID roots_size, // how many roots in the batch // const std::vector<uint8_t> &used_bp_roots, // std::vector<VertexID> &active_queue, // VertexID &end_active_queue, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<ShortIndex> &short_index, // std::vector< std::vector<UnweightedDist> > &dist_table, // std::vector< std::vector<VertexID> > &recved_dist_table, // std::vector<BPLabelType> &bp_labels_table, // std::vector<uint8_t> &got_candidates, //// std::vector<bool> &got_candidates, // std::vector<uint8_t> &is_active, //// std::vector<bool> &is_active, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<uint8_t> &once_candidated) //// std::vector<bool> &once_candidated) //{ // // At the beginning of a batch, initialize the labels L and distance buffer dist_table; // initializing_time -= WallTimer::get_time_mark(); // VertexID global_num_actives = initialization(G, // short_index, // dist_table, // recved_dist_table, // bp_labels_table, // active_queue, // end_active_queue, // once_candidated_queue, // end_once_candidated_queue, // 
once_candidated, // b_id, // roots_start, // roots_size, //// roots_master_local, // used_bp_roots); // initializing_time += WallTimer::get_time_mark(); // UnweightedDist iter = 0; // The iterator, also the distance for current iteration //// {//test //// printf("host_id: %u initialization finished.\n", host_id); //// } // // // while (global_num_actives) { ////#ifdef DEBUG_MESSAGES_ON //// {// //// if (0 == host_id) { //// printf("iter: %u global_num_actives: %u\n", iter, global_num_actives); //// } //// } ////#endif // ++iter; // // Traverse active vertices to push their labels as candidates // // Send masters' newly added labels to other hosts // { // scatter_time -= WallTimer::get_time_mark(); // std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(end_active_queue); // //.first: Vertex ID // //.second: size of labels // std::vector<VertexID> buffer_send_labels; // // Prepare masters' newly added labels for sending // for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) { // VertexID v_head_local = active_queue[i_q]; // is_active[v_head_local] = 0; // reset is_active // VertexID v_head_global = G.get_global_vertex_id(v_head_local); // const IndexType &Lv = L[v_head_local]; // // Prepare the buffer_send_indices // buffer_send_indices[i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size); // // These 2 index are used for traversing v_head's last inserted labels // VertexID l_i_start = Lv.distances.rbegin()->start_index; // VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size; // for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) { // VertexID label_root_id = Lv.vertices[l_i]; // buffer_send_labels.push_back(label_root_id); // } // } // end_active_queue = 0; // // for (int root = 0; root < num_hosts; ++root) { // // Get the indices // std::vector< std::pair<VertexID, VertexID> > indices_buffer; // one_host_bcasts_buffer_to_buffer(root, // buffer_send_indices, // indices_buffer); // if (indices_buffer.empty()) { // continue; // } // // Get the labels // std::vector<VertexID> labels_buffer; // one_host_bcasts_buffer_to_buffer(root, // buffer_send_labels, // labels_buffer); // // Push those labels // EdgeID start_index = 0; // for (const std::pair<VertexID, VertexID> e : indices_buffer) { // VertexID v_head_global = e.first; // EdgeID bound_index = start_index + e.second; // if (G.local_out_degrees[v_head_global]) { // local_push_labels( // v_head_global, // start_index, // bound_index, // roots_start, // labels_buffer, // G, // short_index, // got_candidates_queue, // end_got_candidates_queue, // got_candidates, // once_candidated_queue, // end_once_candidated_queue, // once_candidated, // bp_labels_table, // used_bp_roots, // iter); // } // start_index = bound_index; // } // } // scatter_time += WallTimer::get_time_mark(); // } // // // Traverse vertices in the got_candidates_queue to insert labels // { // gather_time -= WallTimer::get_time_mark(); // std::vector< std::pair<VertexID, VertexID> > buffer_send; // For sync elements in the dist_table // // pair.first: root id // // pair.second: label (global) id of the root // for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) { // VertexID v_id_local = got_candidates_queue[i_queue]; // VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates // got_candidates[v_id_local] = 0; // reset got_candidates // // Traverse v_id's all candidates // VertexID bound_cand_i = short_index[v_id_local].end_candidates_que; // for (VertexID cand_i = 0; cand_i < bound_cand_i; 
++cand_i) { // VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i]; // short_index[v_id_local].is_candidate[cand_root_id] = 0; // // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance // if ( distance_query( // cand_root_id, // v_id_local, // roots_start, // // L, // dist_table, // iter) ) { // if (!is_active[v_id_local]) { // is_active[v_id_local] = 1; // active_queue[end_active_queue++] = v_id_local; // } // ++inserted_count; // // The candidate cand_root_id needs to be added into v_id's label // insert_label_only( // cand_root_id, // v_id_local, // roots_start, // roots_size, // G, //// dist_table, // buffer_send); //// iter); // } // } // short_index[v_id_local].end_candidates_que = 0; // if (0 != inserted_count) { // // Update other arrays in L[v_id] if new labels were inserted in this iteration // update_label_indices( // v_id_local, // inserted_count, // // L, // short_index, // b_id, // iter); // } // } //// {//test //// printf("host_id: %u gather: buffer_send.size(); %lu bytes: %lu\n", host_id, buffer_send.size(), MPI_Instance::get_sending_size(buffer_send)); //// } // end_got_candidates_queue = 0; // Set the got_candidates_queue empty // // Sync the dist_table // for (int root = 0; root < num_hosts; ++root) { // std::vector<std::pair<VertexID, VertexID>> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const std::pair<VertexID, VertexID> &e : buffer_recv) { // VertexID root_id = e.first; // VertexID cand_real_id = e.second; // dist_table[root_id][cand_real_id] = iter; // // Record the received element, for future reset // recved_dist_table[root_id].push_back(cand_real_id); // } // } // // // Sync the global_num_actives // MPI_Allreduce(&end_active_queue, // &global_num_actives, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // gather_time += WallTimer::get_time_mark(); // } // } // // // Reset the dist_table // clearup_time -= WallTimer::get_time_mark(); // reset_at_end( //// G, //// roots_start, //// roots_master_local, // dist_table, // recved_dist_table, // bp_labels_table); // clearup_time += WallTimer::get_time_mark(); //} //// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // // Every host h_i broadcast to others // for (int root = 0; root < num_hosts; ++root) { // std::vector<E_T> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } //// uint64_t size_buffer_send = buffer_send.size(); //// // Sync the size_buffer_send. 
//// message_time -= WallTimer::get_time_mark(); //// MPI_Bcast(&size_buffer_send, //// 1, //// MPI_UINT64_T, //// root, //// MPI_COMM_WORLD); //// message_time += WallTimer::get_time_mark(); ////// {// test ////// printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send); ////// } //// if (!size_buffer_send) { //// continue; //// } //// message_time -= WallTimer::get_time_mark(); //// std::vector<E_T> buffer_recv(size_buffer_send); //// if (host_id == root) { //// buffer_recv.assign(buffer_send.begin(), buffer_send.end()); //// } //// uint64_t bytes_buffer_send = size_buffer_send * ETypeSize; //// if (bytes_buffer_send < static_cast<size_t>(INT_MAX)) { //// // Only need 1 broadcast //// //// MPI_Bcast(buffer_recv.data(), //// bytes_buffer_send, //// MPI_CHAR, //// root, //// MPI_COMM_WORLD); //// } else { //// const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1; //// const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1; //// size_t offset = 0; //// for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) { ////// size_t offset = b_i * unit_buffer_size; //// size_t size_unit_buffer = b_i == num_unit_buffers - 1 //// ? size_buffer_send - offset //// : unit_buffer_size; //// MPI_Bcast(buffer_recv.data() + offset, //// size_unit_buffer * ETypeSize, //// MPI_CHAR, //// root, //// MPI_COMM_WORLD); //// offset += unit_buffer_size; //// } //// } //// message_time += WallTimer::get_time_mark(); // for (const E_T &e : buffer_recv) { // fun(e); // } // } //} //// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // // Host processes locally. // for (const E_T &e : buffer_send) { // fun(e); // } // // // Every host sends to others // for (int src = 0; src < num_hosts; ++src) { // if (host_id == src) { // // Send from src // message_time -= WallTimer::get_time_mark(); // for (int hop = 1; hop < num_hosts; ++hop) { // int dst = hop_2_root_host_id(hop, host_id); // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // } // message_time += WallTimer::get_time_mark(); // } else { // // Receive from src // for (int hop = 1; hop < num_hosts; ++hop) { // int dst = hop_2_root_host_id(hop, src); // if (host_id == dst) { // message_time -= WallTimer::get_time_mark(); // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // message_time += WallTimer::get_time_mark(); // // Process // for (const E_T &e : buffer_recv) { // fun(e); // } // } // } // } // } //} //// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // // Host processes locally. 
// for (const E_T &e : buffer_send) { // fun(e); // } // // Every host sends (num_hosts - 1) times // for (int hop = 1; hop < num_hosts; ++hop) { // int src = hop_2_me_host_id(-hop); // int dst = hop_2_me_host_id(hop); // if (src != dst) { // Normal case // // When host_id is odd, first receive, then send. // if (static_cast<uint32_t>(host_id) & 1U) { // message_time -= WallTimer::get_time_mark(); // // Receive first. // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u recved_from: %u\n", host_id, src); // } // // Send then. // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u send_to: %u\n", host_id, dst); // } // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } else { // When host_id is even, first send, then receive. // // Send first. // message_time -= WallTimer::get_time_mark(); // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u send_to: %u\n", host_id, dst); // } // // Receive then. // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u recved_from: %u\n", host_id, src); // } // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } // } else { // If host_id is higher than dst, first send, then receive // // This is a special case. It only happens when the num_hosts is even and hop equals to num_hosts/2. // if (host_id < dst) { // // Send // message_time -= WallTimer::get_time_mark(); // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // // Receive // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } else { // Otherwise, if host_id is lower than dst, first receive, then send // // Receive // message_time -= WallTimer::get_time_mark(); // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // // Send // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } // } // } //} //// DEPRECATED version Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. 
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // const uint32_t UNIT_BUFFER_SIZE = 16U << 20U; // // Every host h_i broadcast to others // for (int h_i = 0; h_i < num_hosts; ++h_i) { // uint64_t size_buffer_send = buffer_send.size(); // // Sync the size_buffer_send. // message_time -= WallTimer::get_time_mark(); // MPI_Bcast(&size_buffer_send, // 1, // MPI_UINT64_T, // h_i, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); //// {// test //// printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send); //// } // if (!size_buffer_send) { // continue; // } // uint32_t num_unit_buffers = (size_buffer_send + UNIT_BUFFER_SIZE - 1) / UNIT_BUFFER_SIZE; // // // Broadcast the buffer_send // for (uint32_t b_i = 0; b_i < num_unit_buffers; ++b_i) { // // Prepare the unit buffer // message_time -= WallTimer::get_time_mark(); // size_t offset = b_i * UNIT_BUFFER_SIZE; // size_t size_unit_buffer = b_i == num_unit_buffers - 1 // ? size_buffer_send - offset // : UNIT_BUFFER_SIZE; // std::vector<E_T> unit_buffer(size_unit_buffer); // // Copy the messages from buffer_send to unit buffer. // if (host_id == h_i) { // unit_buffer.assign(buffer_send.begin() + offset, buffer_send.begin() + offset + size_unit_buffer); // } // // Broadcast the unit buffer // MPI_Bcast(unit_buffer.data(), // MPI_Instance::get_sending_size(unit_buffer), // MPI_CHAR, // h_i, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // // Process every element of unit_buffer // for (const E_T &e : unit_buffer) { // fun(e); // } // } // } //} // Function: Host root broadcasts its sending buffer to a receiving buffer. template <VertexID BATCH_SIZE> template <typename E_T> inline void DistBVCPLL<BATCH_SIZE>:: one_host_bcasts_buffer_to_buffer( int root, std::vector<E_T> &buffer_send, std::vector<E_T> &buffer_recv) { const size_t ETypeSize = sizeof(E_T); volatile uint64_t size_buffer_send = 0; if (host_id == root) { size_buffer_send = buffer_send.size(); } // Sync the size_buffer_send. 
// message_time -= WallTimer::get_time_mark(); // {//test // if (0 == root && size_buffer_send == 16 && 1024 == caller_line) { //// if (0 == root && size_buffer_send == 16 && 0 == host_id) { // printf("before: host_id: %d size_buffer_send: %lu\n", // host_id, // size_buffer_send); // } // } MPI_Bcast((void *) &size_buffer_send, 1, MPI_UINT64_T, root, MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // {//test //// if (0 == root && size_buffer_send == 16 && 0 == host_id) { // if (0 == root && size_buffer_send == 16 && 1024 == caller_line) { // printf("after: host_id: %d size_buffer_send: %lu\n", // host_id, // size_buffer_send); // } // } try { buffer_recv.resize(size_buffer_send); } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("one_host_bcasts_buffer_to_buffer: bad_alloc " "host_id: %d " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } if (!size_buffer_send) { return; } // Broadcast the buffer_send // message_time -= WallTimer::get_time_mark(); if (host_id == root) { // buffer_recv.assign(buffer_send.begin(), buffer_send.end()); buffer_recv.swap(buffer_send); } uint64_t bytes_buffer_send = size_buffer_send * ETypeSize; if (bytes_buffer_send <= static_cast<size_t>(INT_MAX)) { // Only need 1 broadcast MPI_Bcast(buffer_recv.data(), bytes_buffer_send, MPI_CHAR, root, MPI_COMM_WORLD); } else { const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1; const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1; size_t offset = 0; for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) { size_t size_unit_buffer = b_i == num_unit_buffers - 1 ? size_buffer_send - offset : unit_buffer_size; MPI_Bcast(buffer_recv.data() + offset, size_unit_buffer * ETypeSize, MPI_CHAR, root, MPI_COMM_WORLD); offset += unit_buffer_size; } } // message_time += WallTimer::get_time_mark(); } } #endif //PADO_DPADO_H
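The chunked broadcast above exists because MPI_Bcast takes its element count as an int, so a payload larger than INT_MAX bytes has to be sent in several calls rooted at the same host. Below is a minimal standalone sketch of that chunking idea; it is not part of DistBVCPLL, and the helper name bcast_bytes_in_chunks and the demo payload size are illustrative assumptions.

#include <mpi.h>
#include <climits>
#include <cstdint>
#include <vector>

// Broadcast `bytes` bytes from `root` in chunks that each fit in MPI_Bcast's int count.
static void bcast_bytes_in_chunks(char *data, uint64_t bytes, int root, MPI_Comm comm)
{
    const uint64_t max_chunk = static_cast<uint64_t>(INT_MAX);
    uint64_t offset = 0;
    while (offset < bytes) {
        uint64_t chunk = bytes - offset < max_chunk ? bytes - offset : max_chunk;
        MPI_Bcast(data + offset, static_cast<int>(chunk), MPI_CHAR, root, comm);
        offset += chunk;
    }
}

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    // Every rank must size its buffer identically before the broadcast; in the code
    // above that size is itself broadcast first as a uint64_t.
    std::vector<char> payload(1024, rank == 0 ? 'x' : '\0');
    bcast_bytes_in_chunks(payload.data(), payload.size(), /*root=*/0, MPI_COMM_WORLD);
    MPI_Finalize();
    return 0;
}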
private-clause.c
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif

int main(void)
{
    int i, n = 7;
    int a[n], suma = 50;

    for (i = 0; i < n; i++)
        a[i] = i;

    #pragma omp parallel private(suma)
    {
        // The initial value of a private variable is undefined, so it must always
        // be initialized inside the parallel region. Whatever is modified in the
        // parallel block does not affect the value the variable had before the block.
        suma = 0;
        #pragma omp for
        for (i = 0; i < n; i++) {
            suma = suma + a[i];
            printf("thread %d suma a[%d] / ", omp_get_thread_num(), i);
        }
        printf("\n* thread %d suma= %d", omp_get_thread_num(), suma);
    }
    printf("\n");
    printf("suma=%d \n", suma);

    return 0;
}
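The example above has to initialize its private copy of suma by hand and can only print per-thread partial sums, while the outer suma (initialized to 50) is left untouched. For contrast, the sketch below, a hypothetical companion file, uses the reduction clause, which gives each thread a zero-initialized private copy and combines the partial sums into the shared variable when the parallel region ends.

/* reduction-clause.c (hypothetical companion to private-clause.c) */
#include <stdio.h>

int main(void)
{
    int i, n = 7;
    int a[n], suma = 0;

    for (i = 0; i < n; i++)
        a[i] = i;

    /* Each thread accumulates into its own private suma; the copies are
     * summed into the shared suma at the end of the construct. */
    #pragma omp parallel for reduction(+:suma)
    for (i = 0; i < n; i++) {
        suma += a[i];
    }

    printf("suma=%d\n", suma);  /* prints 21 regardless of the thread count */
    return 0;
}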
elemwise_binary_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file elemwise_binary_op.h * \brief Function definition of elementwise binary operators */ #ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #include <mxnet/operator_util.h> #include <mxnet/op_attr_types.h> #include <vector> #include <string> #include <utility> #include <typeinfo> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../../engine/openmp.h" #include "elemwise_unary_op.h" #include "../../common/utils.h" #include "./init_op.h" namespace mxnet { namespace op { /*! Gather binary operator functions into ElemwiseBinaryOp class */ class ElemwiseBinaryOp : public OpBase { public: /*! \brief For sparse, assume missing rvalue is 0 */ template<typename OP, int Req> struct MissingRValueOp { typedef OP Operation; template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0))); } }; /*! \brief For sparse, assume missing lvalue is 0 */ template<typename OP, int Req> struct MissingLValueOp { typedef OP Operation; template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i])); } }; private: /*! * \brief CSR operation requires temp space */ enum ResourceRequestType { kTempSpace }; /*! * \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input * CPU-Only version */ template<typename DType, typename OP, typename xpu> static inline size_t FillDense(mshadow::Stream<xpu> *s, const size_t idx_l, const size_t idx_r, const OpReqType req, mshadow::Tensor<xpu, 2, DType> *out, const size_t iter_out) { const int index_out_min = static_cast<int>(std::min(idx_l, idx_r)); if (static_cast<size_t>(index_out_min) > iter_out) { const DType zero_input_val = OP::Map(DType(0), DType(0)); #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) { Fill<false>(s, (*out)[i], req, zero_input_val); } } return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int' } static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) { return a1.var() == a2.var(); } /*! \brief Minimum of three */ static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) { return a < b ? (a < c ? a : c) : (b < c ? 
b : c); } template<typename xpu, typename LOP, typename ROP, typename DType> static void BackwardUseNone_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; Stream<xpu> *s = ctx.get_stream<xpu>(); const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes); const DType *ograd_dptr = inputs[0].dptr<DType>(); if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>()); } else if (req[0] != kNullOp) { DType *lgrad_dptr = outputs[0].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr); }); } if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>()); } else if (req[1] != kNullOp) { DType *rgrad_dptr = outputs[1].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr); }); } } template<typename xpu, typename LOP, typename ROP, typename DType> static void BackwardUseIn_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { DCHECK_EQ(outputs.size(), 2U); DCHECK_EQ(inputs.size(), 3U); mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>(); const DType *ograd_dptr = inputs[0].dptr<DType>(); const DType *lhs_dptr = inputs[1].dptr<DType>(); const DType *rhs_dptr = inputs[2].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { const int size = static_cast<int>( (outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1) / mxnet_op::DataType<DType>::kLanes); DType * lgrad_dptr = outputs[0].dptr<DType>(); mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>, xpu>::Launch( s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);}); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { const int size = static_cast<int>( (outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1) / mxnet_op::DataType<DType>::kLanes); DType * rgrad_dptr = outputs[1].dptr<DType>(); mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>, xpu>::Launch( s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);}); } template< typename xpu, typename LOP, typename ROP, typename DType, bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false, typename BackupCompute> static inline void BackwardUseInEx_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs, BackupCompute backup_compute) { mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); // lhs grad if (req[0] != kNullOp) { // RspRspOp can handle dense outputs so long as OP(0, 0) == 0 RspRspOp<LOP>( s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0], false, false, false, false); // lhs in-place RspRspOp<op::mshadow_op::mul>( s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0], false, false, true, false); } // rhs grad if (req[1] != kNullOp) { RspRspOp<ROP>( s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1], false, false, false, false); // rhs in-place RspRspOp<op::mshadow_op::mul>( s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1], false, false, true, false); } } public: /*! 
\brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */ template<typename OP> static void RspRspOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output, bool lhs_may_be_dense, bool rhs_may_be_dense, bool allow_inplace, bool scatter); /*! \brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */ template<typename OP> static void RspRspOp(mshadow::Stream<gpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output, bool lhs_may_be_dense, bool rhs_may_be_dense, bool allow_inplace, bool scatter); /*! \brief CSR -op- CSR binary operator for non-canonical NDArray */ template<typename OP> static void CsrCsrOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output); /*! \brief CSR -op- CSR binary operator for non-canonical NDArray */ template<typename OP> static void CsrCsrOp(mshadow::Stream<gpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output); /*! \brief DNS -op- CSR binary operator for non-canonical NDArray */ template<typename xpu, typename OP> static void DnsCsrDnsOp(mshadow::Stream<xpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output, const bool reverse); /*! \brief DNS -op- RSP binary operator for non-canonical NDArray */ template<typename xpu, typename OP> static void DnsRspDnsOp(mshadow::Stream<xpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output, const bool reverse); public: /*! * \brief Rsp-op-Rsp operation which produces a dense result * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs); /*! * \brief Allow one of the binary inputs to be dense and still produce a sparse output. * Typically used for sparse * dense = sparse. * Note: for csr, it dispatches to fallback other than csr, csr -> csr * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool PreferSparseStorageType(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { using namespace common; CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name; CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name; const auto& lhs_stype = in_attrs->at(0); const auto& rhs_stype = in_attrs->at(1); auto& out_stype = out_attrs->at(0); bool dispatched = false; const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask; const auto dispatch_ex = invalid_ctx ? 
DispatchMode::kFComputeFallback : DispatchMode::kFComputeEx; if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) { // dns, dns -> dns dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute); } if (!dispatched && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) { // rsp, rsp -> rsp dispatched = storage_type_assign(&out_stype, kRowSparseStorage, dispatch_mode, dispatch_ex); } if (!dispatched && ContainsOnlyStorage(*in_attrs, kCSRStorage)) { // csr, csr -> csr dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, dispatch_ex); } if (!dispatched && ((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage))) { // rsp, dns -> rsp // dns, rsp -> rsp dispatched = storage_type_assign(&out_stype, kRowSparseStorage, dispatch_mode, dispatch_ex); } if (!dispatched) { dispatched = dispatch_fallback(out_attrs, dispatch_mode); } return dispatched; } /*! * \brief Allow one of the inputs to be dense and produce a dense output, * for rsp inputs only support when both inputs are rsp type. * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ template<bool cpu_only, bool rsp, bool csr> static bool PreferDenseStorageType(const nnvm::NodeAttrs& attrs, const int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { using namespace common; CHECK_EQ(in_attrs->size(), 2); CHECK_EQ(out_attrs->size(), 1); const auto lhs_stype = (*in_attrs)[0]; const auto rhs_stype = (*in_attrs)[1]; bool dispatched = false; const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask; const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback : DispatchMode::kFComputeEx; if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) { // dns, dns ... -> dns dispatched = storage_type_assign(out_attrs, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute); } if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) { // rsp, rsp, ... -> rsp dispatched = storage_type_assign(out_attrs, kRowSparseStorage, dispatch_mode, dispatch_ex); } if (!dispatched && csr && ContainsOnlyStorage(*in_attrs, kCSRStorage)) { // csr, csr, ... -> csr dispatched = storage_type_assign(out_attrs, kCSRStorage, dispatch_mode, dispatch_ex); } if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage) || (lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage))) { // dense, csr -> dense / csr, dense -> dense dispatched = storage_type_assign(out_attrs, kDefaultStorage, dispatch_mode, DispatchMode::kFComputeEx); } if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage) || (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage))) { // dense, rsp -> dense / rsp, dense -> dense dispatched = storage_type_assign(out_attrs, kDefaultStorage, dispatch_mode, DispatchMode::kFComputeEx); } if (!dispatched) { dispatch_fallback(out_attrs, dispatch_mode); } return true; } /*! 
* \brief Backward pass computing input gradient using forward inputs * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs); template<typename xpu, typename OP> static void Compute(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] != kNullOp) { Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); }); }); } } template<typename xpu, typename OP> static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] != kNullOp) { Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); }); }); } } template<typename xpu, typename OP> static void ComputeEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace common; CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if (req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto rhs_stype = inputs[1].storage_type(); const auto out_stype = outputs[0].storage_type(); mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); if ((ContainsOnlyStorage(inputs, kRowSparseStorage)) && (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) { // rsp, rsp -> rsp // rsp, rsp -> dns RspRspOp<OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false); } else if (ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) { // csr, csr -> csr CsrCsrOp<OP>(s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]); } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) && out_stype == kDefaultStorage) { const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1]; const NDArray& csr = (lhs_stype == kCSRStorage)? 
inputs[0] : inputs[1]; const bool reverse = (lhs_stype == kCSRStorage); DnsCsrDnsOp<xpu, OP>(s, attrs, ctx, dns, csr, req[0], outputs[0], reverse); } else if (((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) && out_stype == kDefaultStorage) { const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1]; const bool reverse = (lhs_stype == kRowSparseStorage); const NDArray& rsp = (reverse)? inputs[0] : inputs[1]; DnsRspDnsOp<xpu, OP>(s, attrs, ctx, dns, rsp, req[0], outputs[0], reverse); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } /*! \brief ComputeEx allowing dense lvalue and/or rvalue */ template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense> static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if (req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto rhs_stype = inputs[1].storage_type(); const auto out_stype = outputs[0].storage_type(); if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) && ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) || (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) && lhs_may_be_dense && rhs_may_be_dense) { // rsp, rsp -> rsp // rsp, rsp -> dns // rsp, dns -> rsp // dns, rsp -> rsp // More than once dense not allowed (this will be checked in RspRspOp): // rsp, dns -> dns <-- NOT ALLOWED // dns, rsp -> dns <-- NOT ALLOWED mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); RspRspOp<OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false); } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) { ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { CHECK_EQ(inputs.size(), 1U); // output grad CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad const auto in_stype = inputs[0].storage_type(); const auto lhs_stype = outputs[0].storage_type(); const auto rhs_stype = outputs[1].storage_type(); // lhs grad if (req[0] != kNullOp) { if (in_stype == lhs_stype && (in_stype == 
kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> rsp, _. op requires 0-input returns 0-output DCHECK_LT(fabs(static_cast<float>(LOP::Map(0))), 1e-5f); UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]}); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } // rhs grad if (req[1] != kNullOp) { if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> _, rsp. op requires 0-input returns 0-output DCHECK_LT(fabs(static_cast<float>(ROP::Map(0))), 1e-5f); UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]}); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template< typename xpu, typename LOP, typename ROP, bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false> static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace common; CHECK_EQ(inputs.size(), 3U); CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad const auto lhs_grad_stype = outputs[0].storage_type(); const auto rhs_grad_stype = outputs[1].storage_type(); if (ContainsOnlyStorage(inputs, kRowSparseStorage) && (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) && (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) { // rsp, rsp, rsp -> [dns, rsp], [dns, rsp] MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { BackwardUseInEx_<xpu, LOP, ROP, DType, in0_ok_dense, in1_ok_dense, in2_ok_dense>( attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>); }); } } }; // class ElemwiseBinaryOp /*! \brief Binary launch */ #define MXNET_OPERATOR_REGISTER_BINARY(name) \ NNVM_REGISTER_OP(name) \ .set_num_inputs(2) \ .set_num_outputs(1) \ .set_attr<nnvm::FListInputNames>("FListInputNames", \ [](const NodeAttrs& attrs) { \ return std::vector<std::string>{"lhs", "rhs"}; \ }) \ .set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \ .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \ .set_attr<nnvm::FInplaceOption>("FInplaceOption", \ [](const NodeAttrs& attrs){ \ return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \ }) \ .add_argument("lhs", "NDArray-or-Symbol", "first input") \ .add_argument("rhs", "NDArray-or-Symbol", "second input") /*! 
\brief Binary launch, with FComputeEx for csr and rsp available */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseStorageType<2, 1, true, true, true>) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \ .set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};}) /*! \brief Binary launch, with FComputeEx for csr and rsp available. when inputs contain both sparse and dense, sparse output is preferred. */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PS(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseBinaryOp::PreferSparseStorageType) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \ .set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};}) /*! \brief Binary launch, dense result * FInferStorageType attr is not set using this macro. * By default DefaultStorageType is used. */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseBinaryOp::SparseSparseWithDenseResult) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) /*! \brief Binary launch, with FComputeEx for prefer dense */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PD(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \ .set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};}) } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
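The registration macros above supply the NNVM boilerplate for a binary operator: input names, shape and type inference, the dense FCompute kernel, and the sparse FComputeEx path. A hedged sketch of how an operator .cc file might invoke one of them follows; the operator name _my_elemwise_add, its backward name, and the ElemwiseGradUseNone gradient wiring are illustrative assumptions, while op::mshadow_op::plus is the elementwise addition kernel from mshadow_op.h.

// Hypothetical registration in an operator .cc file (names are assumptions, not MXNet's own).
MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(_my_elemwise_add, op::mshadow_op::plus)
.describe("Hypothetical element-wise addition with CSR/RSP support via ComputeEx.")
// Assumed gradient wiring: the output gradient is passed through to both inputs.
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{"_backward_my_elemwise_add"});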
nanopore_hdp.c
// // nanopore_hdp.c // // // Created by Jordan Eizenga on 1/8/16. // // // in 0-based index #define ALIGNMENT_KMER_COL 9 #define ALIGNMENT_STRAND_COL 4 #define ALIGNMENT_SIGNAL_COL 13 #define NUM_ALIGNMENT_COLS 15 #define MODEL_ROW_HEADER_LENGTH 1 #define MODEL_MEAN_ENTRY 0 #define MODEL_NOISE_ENTRY 1 #define MODEL_ENTRY_LENGTH 5 #include <stdio.h> #include <stdbool.h> #include <stdlib.h> #include <string.h> #include <inttypes.h> #include "pairwiseAligner.h" #include "emissionMatrix.h" #include "hdp.h" #include "hdp_math_utils.h" #include "nanopore_hdp.h" #include "fastCMaths.h" #include "sonLib.h" NanoporeHDP* package_nanopore_hdp(HierarchicalDirichletProcess* hdp, const char* alphabet, int64_t alphabet_size, int64_t kmer_length) { NanoporeHDP* nhdp = (NanoporeHDP*) malloc(sizeof(NanoporeHDP)); // copy and sort alphabet char* internal_alphabet = (char*) malloc(sizeof(char) * (alphabet_size + 1)); for (int64_t i = 0; i < alphabet_size; i++) { internal_alphabet[i] = alphabet[i]; } int64_t min_idx; char temp; for (int64_t i = 0; i < alphabet_size; i++) { min_idx = i; for (int64_t j = i + 1; j < alphabet_size; j++) { if (internal_alphabet[j] < internal_alphabet[min_idx]) { min_idx = j; } } temp = internal_alphabet[i]; internal_alphabet[i] = internal_alphabet[min_idx]; internal_alphabet[min_idx] = temp; } for (int64_t i = 1; i < alphabet_size; i++) { if (alphabet[i - 1] == alphabet[i]) { fprintf(stderr, "Characters of alphabet must be distinct.\n"); exit(EXIT_FAILURE); } } internal_alphabet[alphabet_size] = '\0'; nhdp->hdp = hdp; nhdp->alphabet = internal_alphabet; nhdp->alphabet_size = alphabet_size; nhdp->kmer_length = kmer_length; // note: destroying the HDP housed in the NHDP will destroy the DistributionMetricMemo nhdp->distr_metric_memos = stSet_construct2(&free); return nhdp; } void destroy_nanopore_hdp(NanoporeHDP* nhdp) { destroy_hier_dir_proc(nhdp->hdp); stSet_destruct(nhdp->distr_metric_memos); free(nhdp->alphabet); free(nhdp); } int64_t get_nanopore_hdp_kmer_length(NanoporeHDP* nhdp) { return nhdp->kmer_length; } int64_t get_nanopore_hdp_alphabet_size(NanoporeHDP* nhdp) { return nhdp->alphabet_size; } char* get_nanopore_hdp_alphabet(NanoporeHDP* nhdp) { char* alphabet = nhdp->alphabet; int64_t alphabet_size = nhdp->alphabet_size; char* copy = (char*) malloc(sizeof(char) * (alphabet_size + 1)); for (int64_t i = 0; i < alphabet_size; i++) { copy[i] = alphabet[i]; } copy[alphabet_size] = '\0'; return copy; } // wrappers void execute_nhdp_gibbs_sampling(NanoporeHDP* nhdp, int64_t num_samples, int64_t burn_in, int64_t thinning, bool verbose) { execute_gibbs_sampling(nhdp->hdp, num_samples, burn_in, thinning, verbose); } void execute_nhdp_gibbs_sampling_with_snapshots(NanoporeHDP* nhdp, int64_t num_samples, int64_t burn_in, int64_t thinning, void (*snapshot_func)(HierarchicalDirichletProcess*, void*), void* snapshot_func_args, bool verbose) { execute_gibbs_sampling_with_snapshots(nhdp->hdp, num_samples, burn_in, thinning, snapshot_func, snapshot_func_args, verbose); } void finalize_nhdp_distributions(NanoporeHDP* nhdp) { finalize_distributions(nhdp->hdp); } void normal_inverse_gamma_params_from_minION(const char* model_filepath, double* mu_out, double* nu_out, double* alpha_out, double* beta_out) { FILE* model_file = fopen(model_filepath, "r"); char* line = stFile_getLineFromFile(model_file); stList* tokens = stString_split(line); int64_t table_length = (stList_length(tokens) - MODEL_ROW_HEADER_LENGTH) / MODEL_ENTRY_LENGTH; double* means = (double*) malloc(sizeof(double) * 
table_length);
    double* precisions = (double*) malloc(sizeof(double) * table_length);

    int64_t mean_offset = MODEL_ROW_HEADER_LENGTH + MODEL_MEAN_ENTRY;
    int64_t noise_offset = MODEL_ROW_HEADER_LENGTH + MODEL_NOISE_ENTRY;
    char* mean_str;
    char* noise_str;
    double noise;
    for (int i = 0; i < table_length; i++) {
        mean_str = (char*) stList_get(tokens, mean_offset + i * MODEL_ENTRY_LENGTH);
        sscanf(mean_str, "%lf", &(means[i]));
        noise_str = (char*) stList_get(tokens, noise_offset + i * MODEL_ENTRY_LENGTH);
        // parse the noise entry from noise_str (not mean_str) before converting it to a precision
        sscanf(noise_str, "%lf", &noise);
        precisions[i] = 1.0 / (noise * noise);
    }

    free(line);
    stList_destruct(tokens);

    mle_normal_inverse_gamma_params(means, precisions, table_length, mu_out, nu_out, alpha_out, beta_out);

    free(means);
    free(precisions);

    fclose(model_file);
}

// fixed concentration parameters 'gamma' for each depth
HierarchicalDirichletProcess* minION_hdp(int64_t num_dps, int64_t depth, double* gamma,
                                         double sampling_grid_start, double sampling_grid_stop,
                                         int64_t sampling_grid_length, const char* model_filepath) {
    double mu, nu, alpha, beta;
    normal_inverse_gamma_params_from_minION(model_filepath, &mu, &nu, &alpha, &beta);
    return new_hier_dir_proc(num_dps, depth, gamma, sampling_grid_start, sampling_grid_stop,
                             sampling_grid_length, mu, nu, alpha, beta);
}

// Gamma distribution prior on the concentration parameters 'gamma'
// must designate vector of 'alpha' and 'beta' parameters of distribution for each depth
HierarchicalDirichletProcess* minION_hdp_2(int64_t num_dps, int64_t depth, double* gamma_alpha,
                                           double* gamma_beta, double sampling_grid_start,
                                           double sampling_grid_stop, int64_t sampling_grid_length,
                                           const char* model_filepath) {
    double mu, nu, alpha, beta;
    normal_inverse_gamma_params_from_minION(model_filepath, &mu, &nu, &alpha, &beta);
    return new_hier_dir_proc_2(num_dps, depth, gamma_alpha, gamma_beta, sampling_grid_start,
                               sampling_grid_stop, sampling_grid_length, mu, nu, alpha, beta);
}

void update_nhdp_from_alignment(NanoporeHDP* nhdp, const char* alignment_filepath, bool has_header) {
    update_nhdp_from_alignment_with_filter(nhdp, alignment_filepath, has_header, NULL);
}

void update_nhdp_from_alignment_with_filter(NanoporeHDP* nhdp, const char* alignment_filepath,
                                            bool has_header, const char* strand_filter) {
    stList* signal_list = stList_construct3(0, &free);
    stList* dp_id_list = stList_construct3(0, &free);

    FILE* align_file = fopen(alignment_filepath, "r");
    if (align_file == NULL) {
        fprintf(stderr, "Alignment file %s does not exist.\n", alignment_filepath);
        exit(EXIT_FAILURE);
    }

    stList* tokens;
    int64_t line_length;
    char* kmer;
    char* strand;
    char* signal_str;
    int64_t* dp_id_ptr;
    double* signal_ptr;
    bool warned = false;
    int proceed = 0;

    char* line = stFile_getLineFromFile(align_file);
    if (has_header) {
        // discard the header row before reading data rows
        free(line);
        line = stFile_getLineFromFile(align_file);
    }
    while (line != NULL) {
        tokens = stString_split(line);
        line_length = stList_length(tokens);
        if (!warned) {
            if (line_length != NUM_ALIGNMENT_COLS) {
                fprintf(stderr, "Input format has changed from design period, HDP may receive incorrect data.\n");
                warned = true;
            }
        }
        strand = (char*) stList_get(tokens, ALIGNMENT_STRAND_COL);
        if (strand_filter != NULL) {
            proceed = strcmp(strand, strand_filter);
        }
        if (proceed == 0) {
            signal_str = (char*) stList_get(tokens, ALIGNMENT_SIGNAL_COL);
            kmer = (char*) stList_get(tokens, ALIGNMENT_KMER_COL);

            signal_ptr = (double*) malloc(sizeof(double));
            dp_id_ptr = (int64_t*) malloc(sizeof(int64_t));

            sscanf(signal_str, "%lf", signal_ptr);
            *dp_id_ptr = kmer_id(kmer, nhdp->alphabet, nhdp->alphabet_size, nhdp->kmer_length);
stList_append(signal_list, signal_ptr); stList_append(dp_id_list, dp_id_ptr); } stList_destruct(tokens); free(line); line = stFile_getLineFromFile(align_file); } fclose(align_file); int64_t data_length; double* signal = stList_toDoublePtr(signal_list, &data_length); int64_t* dp_ids = stList_toIntPtr(dp_id_list, &data_length); stList_destruct(signal_list); stList_destruct(dp_id_list); reset_hdp_data(nhdp->hdp); pass_data_to_hdp(nhdp->hdp, signal, dp_ids, data_length); } // n^k int64_t power(int64_t n, int64_t k) { int64_t num = 1; for (int64_t i = 0; i < k; i++) { num *= n; } return num; } // ((n k)) int64_t multiset_number(int64_t n, int64_t k) { int64_t num = 1; for (int64_t m = n + k - 1; m >= n; m--) { num *= m; } for (int64_t m = k; m >= 2; m--) { num /= m; } return num; } int64_t* get_word(int64_t word_id, int64_t alphabet_size, int64_t word_length) { int64_t* word = (int64_t*) malloc(sizeof(int64_t) * word_length); int64_t id_remainder = word_id; for (int64_t i = 0; i < word_length; i++) { word[word_length - i - 1] = id_remainder % alphabet_size; id_remainder /= alphabet_size; } return word; } int64_t* get_word_multiset(int64_t word_id, int64_t alphabet_size, int64_t word_length) { int64_t* multiset = get_word(word_id, alphabet_size, word_length); // selection sort 'cause whatever int64_t min_idx; int64_t temp; for (int64_t i = 0; i < word_length; i++) { min_idx = i; for (int64_t j = i + 1; j < word_length; j++) { if (multiset[j] < multiset[min_idx]) { min_idx = j; } } temp = multiset[i]; multiset[i] = multiset[min_idx]; multiset[min_idx] = temp; } return multiset; } int64_t multiset_id_internal(int64_t* tail, int64_t tail_length, int64_t alphabet_min, int64_t alphabet_size) { int64_t head = tail[0]; if (tail_length == 1) { return head - alphabet_min; } int64_t step = 0; for (int64_t i = alphabet_min; i < alphabet_size; i++) { if (head > i) { step += multiset_number(alphabet_size - i, tail_length - 1); } else { return step + multiset_id_internal(&(tail[1]), tail_length - 1, i, alphabet_size); } } fprintf(stderr, "Character outside alphabet included in multiset\n"); exit(EXIT_FAILURE); } int64_t multiset_id(int64_t* multiset, int64_t length, int64_t alphabet_size) { return multiset_id_internal(multiset, length, 0, alphabet_size); } int64_t word_id_to_multiset_id(int64_t word_id, int64_t alphabet_size, int64_t word_length) { int64_t* multiset = get_word_multiset(word_id, alphabet_size, word_length); int64_t id = multiset_id(multiset, word_length, alphabet_size); free(multiset); return id; } int64_t word_id(int64_t* word, int64_t alphabet_size, int64_t word_length) { int64_t id = 0; int64_t step = 1; for (int64_t i = word_length - 1; i >= 0; i--) { id += step * word[i]; step *= alphabet_size; } return id; } int64_t* kmer_to_word(char* kmer, char* alphabet, int64_t alphabet_size, int64_t kmer_length) { int64_t* word = (int64_t*) malloc(sizeof(int64_t) * kmer_length); for (int64_t i = 0; i < kmer_length; i++) { int64_t j = 0; while (kmer[i] != alphabet[j]) { j++; if (j == alphabet_size) { fprintf(stderr, "vanillaAlign - ERROR: K-mer contains character outside alphabet. " "Got offending kmer is: %s. 
alphabet is %s\n", kmer, alphabet); exit(EXIT_FAILURE); } } word[i] = j; } return word; } int64_t kmer_id(char* kmer, char* alphabet, int64_t alphabet_size, int64_t kmer_length) { int64_t* word = kmer_to_word(kmer, alphabet, alphabet_size, kmer_length); int64_t id = word_id(word, alphabet_size, kmer_length); free(word); return id; } int64_t standard_kmer_id(char* kmer, int64_t kmer_length) { return kmer_id(kmer, "ACGT", 4, kmer_length); } int64_t nhdp_kmer_id(NanoporeHDP* nhdp, char* kmer) { return kmer_id(kmer, nhdp->alphabet, nhdp->alphabet_size, nhdp->kmer_length); } double get_nanopore_kmer_density(NanoporeHDP* nhdp, void *kmer, void *x) { return dir_proc_density(nhdp->hdp, *(double *) x, nhdp_kmer_id(nhdp, (char *)kmer)); } double get_kmer_distr_distance(NanoporeDistributionMetricMemo* memo, char* kmer_1, char* kmer_2) { NanoporeHDP* nhdp = memo->nhdp; return get_dir_proc_distance(memo->memo, nhdp_kmer_id(nhdp, kmer_1), nhdp_kmer_id(nhdp, kmer_2)); } NanoporeDistributionMetricMemo* package_nanopore_metric_memo(NanoporeHDP* nhdp, DistributionMetricMemo* memo) { NanoporeDistributionMetricMemo* nanopore_memo = (NanoporeDistributionMetricMemo*) malloc(sizeof(NanoporeDistributionMetricMemo)); nanopore_memo->nhdp = nhdp; nanopore_memo->memo = memo; return nanopore_memo; } NanoporeDistributionMetricMemo* new_nhdp_kl_divergence_memo(NanoporeHDP* nhdp) { return package_nanopore_metric_memo(nhdp, new_kl_divergence_memo(nhdp->hdp)); } NanoporeDistributionMetricMemo* new_nhdp_hellinger_distance_memo(NanoporeHDP* nhdp) { return package_nanopore_metric_memo(nhdp, new_hellinger_distance_memo(nhdp->hdp)); } NanoporeDistributionMetricMemo* new_nhdp_l2_distance_memo(NanoporeHDP* nhdp) { return package_nanopore_metric_memo(nhdp, new_l2_distance_memo(nhdp->hdp)); } NanoporeDistributionMetricMemo* new_nhdp_shannon_jensen_distance_memo(NanoporeHDP* nhdp) { return package_nanopore_metric_memo(nhdp, new_shannon_jensen_distance_memo(nhdp->hdp)); } double compare_nhdp_distrs_kl_divergence(NanoporeHDP* nhdp_1, char* kmer_1, NanoporeHDP* nhdp_2, char* kmer_2) { return compare_hdp_distrs_kl_divergence(nhdp_1->hdp, nhdp_kmer_id(nhdp_1, kmer_1), nhdp_2->hdp, nhdp_kmer_id(nhdp_2, kmer_2)); } double compare_nhdp_distrs_l2_distance(NanoporeHDP* nhdp_1, char* kmer_1, NanoporeHDP* nhdp_2, char* kmer_2) { return compare_hdp_distrs_l2_distance(nhdp_1->hdp, nhdp_kmer_id(nhdp_1, kmer_1), nhdp_2->hdp, nhdp_kmer_id(nhdp_2, kmer_2)); } double compare_nhdp_distrs_shannon_jensen_distance(NanoporeHDP* nhdp_1, char* kmer_1, NanoporeHDP* nhdp_2, char* kmer_2) { return compare_hdp_distrs_shannon_jensen_distance(nhdp_1->hdp, nhdp_kmer_id(nhdp_1, kmer_1), nhdp_2->hdp, nhdp_kmer_id(nhdp_2, kmer_2)); } double compare_nhdp_distrs_hellinger_distance(NanoporeHDP* nhdp_1, char* kmer_1, NanoporeHDP* nhdp_2, char* kmer_2) { return compare_hdp_distrs_hellinger_distance(nhdp_1->hdp, nhdp_kmer_id(nhdp_1, kmer_1), nhdp_2->hdp, nhdp_kmer_id(nhdp_2, kmer_2)); } int64_t flat_hdp_num_dps(int64_t alphabet_size, int64_t kmer_length) { int64_t num_leaves = power(alphabet_size, kmer_length); return num_leaves + 1; } void flat_hdp_model_internal(HierarchicalDirichletProcess* hdp, int64_t alphabet_size, int64_t kmer_length) { int64_t last_dp_id = power(alphabet_size, kmer_length); for (int64_t id = 0; id < last_dp_id; id++) { set_dir_proc_parent(hdp, id, last_dp_id); } } NanoporeHDP* flat_hdp_model(const char* alphabet, int64_t alphabet_size, int64_t kmer_length, double base_gamma, double leaf_gamma, double sampling_grid_start, double sampling_grid_stop, 
int64_t sampling_grid_length, const char* model_filepath) { double* gamma_params = (double*) malloc(sizeof(double) * 2); gamma_params[0] = base_gamma; gamma_params[1] = leaf_gamma; int64_t num_dps = flat_hdp_num_dps(alphabet_size, kmer_length); HierarchicalDirichletProcess* hdp = minION_hdp(num_dps, 2, gamma_params, sampling_grid_start, sampling_grid_stop, sampling_grid_length, model_filepath); flat_hdp_model_internal(hdp, alphabet_size, kmer_length); finalize_hdp_structure(hdp); NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length); return nhdp; } NanoporeHDP* flat_hdp_model_2(const char* alphabet, int64_t alphabet_size, int64_t kmer_length, double base_gamma_alpha, double base_gamma_beta, double leaf_gamma_alpha, double leaf_gamma_beta, double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length, const char* model_filepath) { double* gamma_alpha = (double*) malloc(sizeof(double) * 2); gamma_alpha[0] = base_gamma_alpha; gamma_alpha[1] = leaf_gamma_alpha; double* gamma_beta = (double*) malloc(sizeof(double) * 2); gamma_beta[0] = base_gamma_beta; gamma_beta[1] = leaf_gamma_beta; int64_t num_dps = flat_hdp_num_dps(alphabet_size, kmer_length); HierarchicalDirichletProcess* hdp = minION_hdp_2(num_dps, 2, gamma_alpha, gamma_beta, sampling_grid_start, sampling_grid_stop, sampling_grid_length, model_filepath); flat_hdp_model_internal(hdp, alphabet_size, kmer_length); finalize_hdp_structure(hdp); NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length); return nhdp; } int64_t multiset_hdp_num_dps(int64_t alphabet_size, int64_t kmer_length) { int64_t num_leaves = power(alphabet_size, kmer_length); int64_t num_middle_dps = multiset_number(alphabet_size, kmer_length); return num_leaves + num_middle_dps + 1; } void multiset_hdp_model_internal(HierarchicalDirichletProcess* hdp, int64_t alphabet_size, int64_t kmer_length) { int64_t num_leaves = power(alphabet_size, kmer_length); int64_t num_middle_dps = multiset_number(alphabet_size, kmer_length); // set kmer parents to multisets int64_t multiset_id; for (int64_t kmer_id = 0; kmer_id < num_leaves; kmer_id++) { multiset_id = word_id_to_multiset_id(kmer_id, alphabet_size, kmer_length); set_dir_proc_parent(hdp, kmer_id, num_leaves + multiset_id); } // set multiset parents to base dp int64_t last_dp_id = num_leaves + num_middle_dps; for (int64_t middle_dp_id = num_leaves; middle_dp_id < last_dp_id; middle_dp_id++) { set_dir_proc_parent(hdp, middle_dp_id, last_dp_id); } } NanoporeHDP* multiset_hdp_model(const char* alphabet, int64_t alphabet_size, int64_t kmer_length, double base_gamma, double middle_gamma, double leaf_gamma, double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length, const char* model_filepath) { double* gamma_params = (double*) malloc(sizeof(double) * 3); gamma_params[0] = base_gamma; gamma_params[1] = middle_gamma; gamma_params[2] = leaf_gamma; int64_t num_dps = multiset_hdp_num_dps(alphabet_size, kmer_length); HierarchicalDirichletProcess* hdp = minION_hdp(num_dps, 3, gamma_params, sampling_grid_start, sampling_grid_stop, sampling_grid_length, model_filepath); multiset_hdp_model_internal(hdp, alphabet_size, kmer_length); finalize_hdp_structure(hdp); NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length); return nhdp; } NanoporeHDP* multiset_hdp_model_2(const char* alphabet, int64_t alphabet_size, int64_t kmer_length, double base_gamma_alpha, double base_gamma_beta, double middle_gamma_alpha, double 
middle_gamma_beta, double leaf_gamma_alpha, double leaf_gamma_beta, double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length, const char* model_filepath) { double* gamma_alpha = (double*) malloc(sizeof(double) * 3); gamma_alpha[0] = base_gamma_alpha; gamma_alpha[1] = middle_gamma_alpha; gamma_alpha[2] = leaf_gamma_alpha; double* gamma_beta = (double*) malloc(sizeof(double) * 3); gamma_beta[0] = base_gamma_beta; gamma_beta[1] = middle_gamma_beta; gamma_beta[2] = leaf_gamma_beta; int64_t num_dps = multiset_hdp_num_dps(alphabet_size, kmer_length); HierarchicalDirichletProcess* hdp = minION_hdp_2(num_dps, 3, gamma_alpha, gamma_beta, sampling_grid_start, sampling_grid_stop, sampling_grid_length, model_filepath); multiset_hdp_model_internal(hdp, alphabet_size, kmer_length); finalize_hdp_structure(hdp); NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length); return nhdp; } int64_t middle_2_nts_hdp_num_dps(int64_t alphabet_size, int64_t kmer_length) { if (kmer_length <= 2) { fprintf(stderr, "k-mer is not long enough for middle 2 nucleotides HDP\n"); exit(EXIT_FAILURE); } return power(alphabet_size, kmer_length) + power(alphabet_size, 2) + 1; } int64_t kmer_id_to_middle_nts_id(int64_t kmer_id, int64_t alphabet_size, int64_t kmer_length) { int64_t* kmer = get_word(kmer_id, alphabet_size, kmer_length); int64_t id = alphabet_size * kmer[kmer_length / 2 - 1] + kmer[kmer_length / 2]; free(kmer); return id; } void middle_2_nts_hdp_model_internal(HierarchicalDirichletProcess* hdp, int64_t alphabet_size, int64_t kmer_length) { int64_t num_leaves = power(alphabet_size, kmer_length); int64_t num_middle_dps = power(alphabet_size, 2); int64_t middle_dp_id; for (int64_t kmer_id = 0; kmer_id < num_leaves; kmer_id++) { middle_dp_id = kmer_id_to_middle_nts_id(kmer_id, alphabet_size, kmer_length); set_dir_proc_parent(hdp, kmer_id, middle_dp_id + num_leaves); } int64_t last_dp_id = num_leaves + num_middle_dps; for (int64_t id = num_leaves; id < last_dp_id; id++) { set_dir_proc_parent(hdp, id, last_dp_id); } } NanoporeHDP* middle_2_nts_hdp_model(const char* alphabet, int64_t alphabet_size, int64_t kmer_length, double base_gamma, double middle_gamma, double leaf_gamma, double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length, const char* model_filepath) { if (kmer_length % 2 != 0) { fprintf(stderr, "Warning: middle two nucleotides of odd length kmer is ambiguous. Resolving arbitrarily.\n"); } double* gamma_params = (double*) malloc(sizeof(double) * 3); gamma_params[0] = base_gamma; gamma_params[1] = middle_gamma; gamma_params[2] = leaf_gamma; int64_t num_dps = middle_2_nts_hdp_num_dps(alphabet_size, kmer_length); HierarchicalDirichletProcess* hdp = minION_hdp(num_dps, 3, gamma_params, sampling_grid_start, sampling_grid_stop, sampling_grid_length, model_filepath); middle_2_nts_hdp_model_internal(hdp, alphabet_size, kmer_length); finalize_hdp_structure(hdp); NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length); return nhdp; } NanoporeHDP* middle_2_nts_hdp_model_2(const char* alphabet, int64_t alphabet_size, int64_t kmer_length, double base_gamma_alpha, double base_gamma_beta, double middle_gamma_alpha, double middle_gamma_beta, double leaf_gamma_alpha, double leaf_gamma_beta, double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length, const char* model_filepath) { if (kmer_length % 2 != 0) { fprintf(stderr, "Warning: middle 2 nucleotides of odd length kmer is ambiguous. 
Resolving arbitrarily.\n");
    }

    double* gamma_alpha = (double*) malloc(sizeof(double) * 3);
    gamma_alpha[0] = base_gamma_alpha;
    gamma_alpha[1] = middle_gamma_alpha;
    gamma_alpha[2] = leaf_gamma_alpha;

    double* gamma_beta = (double*) malloc(sizeof(double) * 3);
    gamma_beta[0] = base_gamma_beta;
    gamma_beta[1] = middle_gamma_beta;
    gamma_beta[2] = leaf_gamma_beta;

    int64_t num_dps = middle_2_nts_hdp_num_dps(alphabet_size, kmer_length);

    HierarchicalDirichletProcess* hdp = minION_hdp_2(num_dps, 3, gamma_alpha, gamma_beta,
                                                     sampling_grid_start, sampling_grid_stop,
                                                     sampling_grid_length, model_filepath);

    middle_2_nts_hdp_model_internal(hdp, alphabet_size, kmer_length);

    finalize_hdp_structure(hdp);

    NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);

    return nhdp;
}

int64_t purine_composition_hdp_num_dps(int64_t num_purines, int64_t num_pyrimidines, int64_t kmer_length) {
    int64_t num_leaves = power(num_purines + num_pyrimidines, kmer_length);
    int64_t num_middle_dps = kmer_length + 1;
    return num_leaves + num_middle_dps + 1;
}

void purine_composition_hdp_model_internal(HierarchicalDirichletProcess* hdp, bool* purine_alphabet,
                                           int64_t alphabet_size, int64_t kmer_length) {
    int64_t num_leaves = power(alphabet_size, kmer_length);
    int64_t num_middle_dps = kmer_length + 1;

    // set kmer parents to purine multisets
    int64_t num_purines;
    int64_t* word;
    for (int64_t kmer_id = 0; kmer_id < num_leaves; kmer_id++) {
        word = get_word(kmer_id, alphabet_size, kmer_length);
        num_purines = 0;
        for (int64_t i = 0; i < kmer_length; i++) {
            if (purine_alphabet[word[i]]) {
                num_purines++;
            }
        }
        free(word);
        set_dir_proc_parent(hdp, kmer_id, num_leaves + num_purines);
    }

    // set purine set parents to base dp
    int64_t last_dp_id = num_leaves + num_middle_dps;
    for (int64_t middle_dp_id = num_leaves; middle_dp_id < last_dp_id; middle_dp_id++) {
        set_dir_proc_parent(hdp, middle_dp_id, last_dp_id);
    }
}

NanoporeHDP* purine_composition_hdp_model(char* purine_alphabet, int64_t num_purines,
                                          char* pyrimidine_alphabet, int64_t num_pyrimidines,
                                          int64_t kmer_length,
                                          double base_gamma, double middle_gamma, double leaf_gamma,
                                          double sampling_grid_start, double sampling_grid_stop,
                                          int64_t sampling_grid_length, const char* model_filepath) {
    double* gamma_params = (double*) malloc(sizeof(double) * 3);
    gamma_params[0] = base_gamma;
    gamma_params[1] = middle_gamma;
    gamma_params[2] = leaf_gamma;

    int64_t num_dps = purine_composition_hdp_num_dps(num_purines, num_pyrimidines, kmer_length);

    HierarchicalDirichletProcess* hdp = minION_hdp(num_dps, 3, gamma_params, sampling_grid_start,
                                                   sampling_grid_stop, sampling_grid_length, model_filepath);

    int64_t alphabet_size = num_purines + num_pyrimidines;
    char* alphabet = (char*) malloc(sizeof(char) * alphabet_size);
    for (int64_t i = 0; i < num_purines; i++) {
        alphabet[i] = purine_alphabet[i];
    }
    for (int64_t i = 0; i < num_pyrimidines; i++) {
        alphabet[i + num_purines] = pyrimidine_alphabet[i];
    }

    NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);

    // get back the alphabet in the internal ordering
    free(alphabet);
    alphabet = get_nanopore_hdp_alphabet(nhdp);

    bool* purines = (bool*) malloc(sizeof(bool) * alphabet_size);
    // mark every character of the (sorted) internal alphabet, not just the first
    // num_purines entries, so that no element of purines is left uninitialized
    for (int64_t i = 0; i < alphabet_size; i++) {
        purines[i] = false;
        for (int64_t j = 0; j < num_purines; j++) {
            if (alphabet[i] == purine_alphabet[j]) {
                purines[i] = true;
                break;
            }
        }
    }
    free(alphabet);

    purine_composition_hdp_model_internal(hdp, purines, alphabet_size, kmer_length);
    free(purines);

    finalize_hdp_structure(hdp);

    return nhdp;
}

NanoporeHDP*
purine_composition_hdp_model_2(char* purine_alphabet, int64_t num_purines, char* pyrimidine_alphabet, int64_t num_pyrimidines, int64_t kmer_length, double base_gamma_alpha, double base_gamma_beta, double middle_gamma_alpha, double middle_gamma_beta, double leaf_gamma_alpha, double leaf_gamma_beta, double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length, const char* model_filepath) { double* gamma_alpha = (double*) malloc(sizeof(double) * 3); gamma_alpha[0] = base_gamma_alpha; gamma_alpha[1] = middle_gamma_alpha; gamma_alpha[2] = leaf_gamma_alpha; double* gamma_beta = (double*) malloc(sizeof(double) * 3); gamma_beta[0] = base_gamma_beta; gamma_beta[1] = middle_gamma_beta; gamma_beta[2] = leaf_gamma_beta; int64_t num_dps = purine_composition_hdp_num_dps(num_purines, num_pyrimidines, kmer_length); HierarchicalDirichletProcess* hdp = minION_hdp_2(num_dps, 3, gamma_alpha, gamma_beta, sampling_grid_start, sampling_grid_stop, sampling_grid_length, model_filepath); int64_t alphabet_size = num_purines + num_pyrimidines; char* alphabet = (char*) malloc(sizeof(char) * alphabet_size); for (int64_t i = 0; i < num_purines; i++) { alphabet[i] = purine_alphabet[i]; } for (int64_t i = 0; i < num_pyrimidines; i++) { alphabet[i + num_purines] = pyrimidine_alphabet[i]; } NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length); // get back the alphabet in the internal ordering free(alphabet); alphabet = get_nanopore_hdp_alphabet(nhdp); bool* purines = (bool*) malloc(sizeof(bool) * alphabet_size); for (int64_t i = 0; i < alphabet_size; i++) { purines[i] = false; for (int64_t j = 0; j < num_purines; j++) { if (alphabet[i] == purine_alphabet[j]) { purines[i] = true; break; } } } free(alphabet); purine_composition_hdp_model_internal(hdp, purines, alphabet_size, kmer_length); free(purines); finalize_hdp_structure(hdp); return nhdp; } void serialize_nhdp(NanoporeHDP* nhdp, const char* filepath) { FILE* out = fopen(filepath, "w"); fprintf(out, "%"PRId64"\n", nhdp->alphabet_size); fprintf(out, "%s\n", nhdp->alphabet); fprintf(out, "%"PRId64"\n", nhdp->kmer_length); serialize_hdp(nhdp->hdp, out); fclose(out); } NanoporeHDP* deserialize_nhdp(const char* filepath) { //st_uglyf("SENTINAL - deserializing HDP from %s\n", filepath); FILE* in = fopen(filepath, "r"); char* line = stFile_getLineFromFile(in); int64_t alphabet_size; sscanf(line, "%"SCNd64, &alphabet_size); free(line); line = stFile_getLineFromFile(in); char* alphabet = (char*) malloc(sizeof(char) * alphabet_size); sscanf(line, "%s", alphabet); free(line); line = stFile_getLineFromFile(in); int64_t kmer_length; sscanf(line, "%"SCNd64, &kmer_length); free(line); HierarchicalDirichletProcess* hdp = deserialize_hdp(in); fclose(in); NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length); free(alphabet); return nhdp; } static NanoporeHDP *loadNanoporeHdpFromScratch(NanoporeHdpType nHdpType, const char *modelFile) { if (nHdpType == singleLevelFixed) { NanoporeHDP *nHdp = flat_hdp_model("ACEGOT", SYMBOL_NUMBER_EPIGENETIC_C, KMER_LENGTH, 5.0, 0.5, 30.0, 90.0, 1200, modelFile); return nHdp; } if (nHdpType == singleLevelPrior) { NanoporeHDP *nHdp = flat_hdp_model_2("ACEGOT", SYMBOL_NUMBER_EPIGENETIC_C, KMER_LENGTH, 5.0, 0.5, 5.0, 0.5, // base_alpha, base_beta, leaf_alpha, leaf_beta 30.0, 90, 1200, modelFile); return nHdp; } if (nHdpType == multisetFixed) { NanoporeHDP *nHdp = multiset_hdp_model("ACEGOT", SYMBOL_NUMBER_EPIGENETIC_C, KMER_LENGTH, 1.0, 1.0, 1.0, 30.0, 90.0, 1200, modelFile); 
return nHdp; } if (nHdpType == multisetPrior) { NanoporeHDP *nHdp = multiset_hdp_model_2("ACEGOT", SYMBOL_NUMBER_EPIGENETIC_C, KMER_LENGTH, 5.0, 0.5, 5.0, 0.5, 5.0, 0.5, 30.0, 90.0, 1200, modelFile); return nHdp; } else { fprintf(stderr, "vanillaAlign - error making HDP from scratch\n"); exit(EXIT_FAILURE); } } void nanoporeHdp_buildNanoporeHdpFromAlignment(NanoporeHdpType type, const char *templateModelFile, const char* complementModelFile, const char *alignments, const char *templateHDP, const char *complementHDP) { fprintf(stderr, "vanillaAlign - Building Nanopore HDP\n"); #pragma omp parallel sections { { fprintf(stderr, "vanillaAlign - Updating Template HDP from alignments...\n"); NanoporeHDP *nHdpT = loadNanoporeHdpFromScratch(type, templateModelFile); update_nhdp_from_alignment_with_filter(nHdpT, alignments, FALSE, "t"); fprintf(stderr, "vanillaAlign - Running Gibbs for template...\n"); execute_nhdp_gibbs_sampling(nHdpT, 10000, 100000, 100, FALSE); finalize_nhdp_distributions(nHdpT); fprintf(stderr, "vanillaAlign - Serializing template to %s...\n", templateHDP); serialize_nhdp(nHdpT, templateHDP); destroy_nanopore_hdp(nHdpT); } #pragma omp section { fprintf(stderr, "vanillaAlign - Updating Complement HDP from alignments...\n"); NanoporeHDP *nHdpC = loadNanoporeHdpFromScratch(type, complementModelFile); update_nhdp_from_alignment_with_filter(nHdpC, alignments, FALSE, "c"); fprintf(stderr, "vanillaAlign - Running Gibbs for complement...\n"); execute_nhdp_gibbs_sampling(nHdpC, 10000, 100000, 100, FALSE); finalize_nhdp_distributions(nHdpC); fprintf(stderr, "vanillaAlign - Serializing complement to %s...\n", complementHDP); serialize_nhdp(nHdpC, complementHDP); destroy_nanopore_hdp(nHdpC); } } }
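The three-level structure built by middle_2_nts_hdp_model_internal wires each k-mer leaf DP to one of alphabet_size^2 middle DPs keyed by the two central bases, and every middle DP to a single base DP. Below is a standalone sketch, not part of the file above, that reproduces the leaf-to-middle grouping for an illustrative DNA alphabet; it assumes get_word() returns base-|alphabet| digits with the last k-mer position as the least-significant digit, which is an assumption about that helper rather than a documented fact.

/* Standalone sketch (not part of the file above): reproduces the
 * leaf -> middle-DP grouping of middle_2_nts_hdp_model_internal() for an
 * illustrative alphabet and k-mer length.  Digit order of get_word() is an
 * assumption made for illustration only. */
#include <stdio.h>
#include <stdint.h>

int main(void) {
    const char *alphabet = "ACGT";   /* illustrative alphabet */
    const int64_t alphabet_size = 4;
    const int64_t k = 6;             /* illustrative even k-mer length */
    const int64_t kmer_id = 1234;    /* any id in [0, alphabet_size^k) */

    int64_t digits[6];
    int64_t rem = kmer_id;
    for (int64_t i = k - 1; i >= 0; i--) {  /* last position = least-significant digit */
        digits[i] = rem % alphabet_size;
        rem /= alphabet_size;
    }

    /* same grouping as kmer_id_to_middle_nts_id(): positions k/2-1 and k/2 */
    int64_t middle_dp = alphabet_size * digits[k / 2 - 1] + digits[k / 2];

    printf("k-mer ");
    for (int64_t i = 0; i < k; i++) putchar(alphabet[digits[i]]);
    printf(" -> middle DP %lld of %lld; its parent is the single base DP\n",
           (long long) middle_dp, (long long) (alphabet_size * alphabet_size));
    return 0;
}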
vc8.c
#define A(a, x, y, z) (a[(z) * ny * nx + (y) * nx + x]) static void inner_block(const float *restrict const f, float *restrict const fp, const int nx, const int ny, const int nz, const int nxi, const float *restrict const model_padded2_dt2, const float *restrict const fd_coeff, const int bx, const int by, const int bz, const int blocksize_x, const int blocksize_y, const int blocksize_z) { int x; int y; int z; float f_xx; const int x_start = bx * blocksize_x + 8; const int y_start = by * blocksize_y + 8; const int z_start = bz * blocksize_z + 8; const int x_end = x_start + blocksize_x <= nxi + 8 ? x_start + blocksize_x : nxi + 8; const int y_end = y_start + blocksize_y <= ny - 8 ? y_start + blocksize_y : ny - 8; const int z_end = z_start + blocksize_z <= nz - 8 ? z_start + blocksize_z : nz - 8; for (z = z_start; z < z_end; z++) { for (y = y_start; y < y_end; y++) { for (x = x_start; x < x_end; x++) { f_xx = 3 * fd_coeff[0] * A(f, x, y, z) + fd_coeff[1] * (A(f, x + 1, y, z) + A(f, x - 1, y, z) + A(f, x, y + 1, z) + A(f, x, y - 1, z) + A(f, x, y, z + 1) + A(f, x, y, z - 1)) + fd_coeff[2] * (A(f, x + 2, y, z) + A(f, x - 2, y, z) + A(f, x, y + 2, z) + A(f, x, y - 2, z) + A(f, x, y, z + 2) + A(f, x, y, z - 2)) + fd_coeff[3] * (A(f, x + 3, y, z) + A(f, x - 3, y, z) + A(f, x, y + 3, z) + A(f, x, y - 3, z) + A(f, x, y, z + 3) + A(f, x, y, z - 3)) + fd_coeff[4] * (A(f, x + 4, y, z) + A(f, x - 4, y, z) + A(f, x, y + 4, z) + A(f, x, y - 4, z) + A(f, x, y, z + 4) + A(f, x, y, z - 4)) + fd_coeff[5] * (A(f, x + 5, y, z) + A(f, x - 5, y, z) + A(f, x, y + 5, z) + A(f, x, y - 5, z) + A(f, x, y, z + 5) + A(f, x, y, z - 5)) + fd_coeff[6] * (A(f, x + 6, y, z) + A(f, x - 6, y, z) + A(f, x, y + 6, z) + A(f, x, y - 6, z) + A(f, x, y, z + 6) + A(f, x, y, z - 6)) + fd_coeff[7] * (A(f, x + 7, y, z) + A(f, x - 7, y, z) + A(f, x, y + 7, z) + A(f, x, y - 7, z) + A(f, x, y, z + 7) + A(f, x, y, z - 7)) + fd_coeff[8] * (A(f, x + 8, y, z) + A(f, x - 8, y, z) + A(f, x, y + 8, z) + A(f, x, y - 8, z) + A(f, x, y, z + 8) + A(f, x, y, z - 8)); A(fp, x, y, z) = A(model_padded2_dt2, x, y, z) * f_xx + 2 * A(f, x, y, z) - A(fp, x, y, z); } } } } static void inner(const float *restrict const f, float *restrict const fp, const int nx, const int ny, const int nz, const int nxi, const float *restrict const model_padded2_dt2, const float dt, const float *restrict const sources, const int *restrict const sources_x, const int *restrict const sources_y, const int *restrict const sources_z, const int num_sources, const int source_len, const float *restrict const fd_coeff, const int step, const int blocksize_x, const int blocksize_y, const int blocksize_z, const int nbx, const int nby, const int nbz) { int bx; int by; int bz; int i; int sx; int sy; int sz; #pragma omp parallel for default(none) private(by, bx) for (bz = 0; bz < nbz; bz++) { for (by = 0; by < nby; by++) { for (bx = 0; bx < nbx; bx++) { inner_block(f, fp, nx, ny, nz, nxi, model_padded2_dt2, fd_coeff, bx, by, bz, blocksize_x, blocksize_y, blocksize_z); } } } for (i = 0; i < num_sources; i++) { sx = sources_x[i] + 8; sy = sources_y[i] + 8; sz = sources_z[i] + 8; A(fp, sx, sy, sz) += A(model_padded2_dt2, sx, sy, sz) * sources[i * source_len + step] * dt; } } void step(float *restrict f, float *restrict fp, const int nx, const int ny, const int nz, const int nxi, const float *restrict const model_padded2_dt2, const float dx, const float dt, const float *restrict const sources, const int *restrict const sources_x, const int *restrict const sources_y, const int *restrict const 
sources_z, const int num_sources, const int source_len, const int num_steps) { int step; float *tmp; float fd_coeff[9] = { -924708642.0f / 302702400 / (dx * dx), 538137600.0f / 302702400 / (dx * dx), -94174080.0f / 302702400 / (dx * dx), 22830080.0f / 302702400 / (dx * dx), -5350800.0f / 302702400 / (dx * dx), 1053696.0f / 302702400 / (dx * dx), -156800.0f / 302702400 / (dx * dx), 15360.0f / 302702400 / (dx * dx), -735.0f / 302702400 / (dx * dx) }; const int blocksize_x = 128; const int blocksize_y = 8; const int blocksize_z = 8; const int nbx = (int)((float)(nxi) / blocksize_x) + (int)(((nxi) % blocksize_x) != 0); const int nby = (int)((float)(ny - 16) / blocksize_y) + (int)(((ny - 16) % blocksize_y) != 0); const int nbz = (int)((float)(nz - 16) / blocksize_z) + (int)(((nz - 16) % blocksize_z) != 0); for (step = 0; step < num_steps; step++) { inner(f, fp, nx, ny, nz, nxi, model_padded2_dt2, dt, sources, sources_x, sources_y, sources_z, num_sources, source_len, fd_coeff, step, blocksize_x, blocksize_y, blocksize_z, nbx, nby, nbz); tmp = f; f = fp; fp = tmp; } }
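The fd_coeff table in step() holds the radius-8 (16th-order) central-difference weights for the second derivative, with 1/(dx*dx) folded in; inner_block applies the centre weight once per axis, which is why it multiplies fd_coeff[0] by 3. A minimal standalone check of those weights, assuming nothing beyond the rationals copied from step(): applying the 1D stencil to f(x) = x*x must return f''(x) = 2 up to rounding.

/* Standalone check (illustrative, not part of vc8.c): same rationals as the
 * fd_coeff table in step(), with the 1/(dx*dx) factor applied at the end
 * instead of folded into the table. */
#include <stdio.h>

int main(void) {
    const double w[9] = {
        -924708642.0 / 302702400, 538137600.0 / 302702400, -94174080.0 / 302702400,
        22830080.0 / 302702400, -5350800.0 / 302702400, 1053696.0 / 302702400,
        -156800.0 / 302702400, 15360.0 / 302702400, -735.0 / 302702400
    };
    const double dx = 0.1;   /* illustrative grid spacing */
    const double x0 = 3.0;   /* evaluate the second derivative at x = 3 */

    double d2 = w[0] * (x0 * x0);           /* centre point */
    for (int k = 1; k <= 8; k++) {          /* symmetric neighbours at distance k */
        double xp = x0 + k * dx, xm = x0 - k * dx;
        d2 += w[k] * (xp * xp + xm * xm);
    }
    d2 /= dx * dx;

    printf("stencil d2/dx2 of x^2 at x=%.1f: %.12f (exact: 2)\n", x0, d2);
    return 0;
}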
GB_unop__identity_int32_int16.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_int32_int16 // op(A') function: GB_unop_tran__identity_int32_int16 // C type: int32_t // A type: int16_t // cast: int32_t cij = (int32_t) aij // unaryop: cij = aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int32_t z = (int32_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int32_t z = (int32_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_int32_int16 ( int32_t *Cx, // Cx and Ax may be aliased const int16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; int32_t z = (int32_t) aij ; Cx [p] = z ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_int32_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
RecordTable.h
/* * Souffle - A Datalog Compiler * Copyright (c) 2020, The Souffle Developers. All rights reserved. * Licensed under the Universal Permissive License v 1.0 as shown at: * - https://opensource.org/licenses/UPL * - <souffle root>/licenses/SOUFFLE-UPL.txt */ /************************************************************************ * * @file RecordTable.h * * Data container implementing a map between records and their references. * Records are separated by arity, i.e., stored in different RecordMaps. * ***********************************************************************/ #pragma once #include "souffle/RamTypes.h" #include "souffle/utility/span.h" #include <cassert> #include <cstddef> #include <limits> #include <memory> #include <unordered_map> #include <utility> #include <vector> namespace souffle { /** @brief Bidirectional mappping between records and record references */ class RecordMap { /** arity of record */ const std::size_t arity; /** hash function for unordered record map */ struct RecordHash { std::size_t operator()(std::vector<RamDomain> record) const { std::size_t seed = 0; std::hash<RamDomain> domainHash; for (RamDomain value : record) { seed ^= domainHash(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2); } return seed; } }; /** map from records to references */ // TODO (b-scholz): replace vector<RamDomain> with something more memory-frugal std::unordered_map<std::vector<RamDomain>, RamDomain, RecordHash> recordToIndex; /** array of records; index represents record reference */ // TODO (b-scholz): replace vector<RamDomain> with something more memory-frugal std::vector<std::vector<RamDomain>> indexToRecord; public: explicit RecordMap(std::size_t arity) : arity(arity), indexToRecord(1) {} // note: index 0 element left free /** @brief converts record to a record reference */ // TODO (b-scholz): replace vector<RamDomain> with something more memory-frugal RamDomain pack(std::vector<RamDomain> vector) { RamDomain index; #pragma omp critical(record_pack) { auto pos = recordToIndex.find(vector); if (pos != recordToIndex.end()) { index = pos->second; } else { #pragma omp critical(record_unpack) { assert(indexToRecord.size() <= std::numeric_limits<RamUnsigned>::max()); index = ramBitCast(RamUnsigned(indexToRecord.size())); recordToIndex[vector] = index; indexToRecord.push_back(std::move(vector)); } } } return index; } /** @brief convert record pointer to a record reference */ RamDomain pack(const RamDomain* tuple) { // TODO (b-scholz): data is unnecessarily copied // for a successful lookup. To avoid this, we should // compute a hash of the pointer-array and traverse through // the bucket list of the unordered map finding the record. // Note that in case of non-existence, the record still needs to be // copied for the newly created entry but this will be the less // frequent case. 
std::vector<RamDomain> tmp(arity); for (std::size_t i = 0; i < arity; i++) { tmp[i] = tuple[i]; } return pack(std::move(tmp)); } /** @brief convert record reference to a record pointer */ const RamDomain* unpack(RamDomain index) const { const RamDomain* res; #pragma omp critical(record_unpack) res = indexToRecord[index].data(); return res; } }; class RecordTable { public: RecordTable() = default; virtual ~RecordTable() = default; /** @brief convert record to record reference */ RamDomain pack(const RamDomain* tuple, std::size_t arity) { return lookupArity(arity).pack(tuple); } /** @brief convert record reference to a record */ const RamDomain* unpack(RamDomain ref, std::size_t arity) const { std::unordered_map<std::size_t, RecordMap>::const_iterator iter; #pragma omp critical(RecordTableGetForArity) { // Find a previously emplaced map iter = maps.find(arity); } assert(iter != maps.end() && "Attempting to unpack record for non-existing arity"); return (iter->second).unpack(ref); } private: /** @brief lookup RecordMap for a given arity; if it does not exist, create new RecordMap */ RecordMap& lookupArity(std::size_t arity) { std::unordered_map<std::size_t, RecordMap>::iterator mapsIterator; #pragma omp critical(RecordTableGetForArity) { // This will create a new map if it doesn't exist yet. mapsIterator = maps.emplace(arity, arity).first; } return mapsIterator->second; } /** Arity/RecordMap association */ std::unordered_map<std::size_t, RecordMap> maps; }; /** @brief helper to convert tuple to record reference for the synthesiser */ template <std::size_t Arity> RamDomain pack(RecordTable& recordTab, Tuple<RamDomain, Arity> const& tuple) { return recordTab.pack(tuple.data(), Arity); } /** @brief helper to convert tuple to record reference for the synthesiser */ template <std::size_t Arity> RamDomain pack(RecordTable& recordTab, span<const RamDomain, Arity> tuple) { return recordTab.pack(tuple.data(), Arity); } } // namespace souffle
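RecordHash folds each RamDomain of a record into the seed with the boost-style combine seed ^= h(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2). A standalone C sketch of that mixing is below; std::hash<RamDomain> is replaced by the identity function and RamDomain by int32_t purely for illustration.

/* Standalone sketch (not part of RecordTable.h): the seed-mixing used by
 * RecordHash, with the per-element hash replaced by the identity function. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static size_t hash_combine(size_t seed, size_t h) {
    /* same formula as RecordHash::operator() */
    return seed ^ (h + 0x9e3779b9u + (seed << 6) + (seed >> 2));
}

int main(void) {
    int32_t record[3] = {7, 42, -1};   /* an illustrative arity-3 record */
    size_t seed = 0;
    for (size_t i = 0; i < 3; i++) {
        seed = hash_combine(seed, (size_t)(uint32_t) record[i]);
    }
    printf("hash of (7, 42, -1) = %zu\n", seed);
    return 0;
}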
c-tree.h
/* Definitions for C parsing and type checking. Copyright (C) 1987, 1993, 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef GCC_C_TREE_H #define GCC_C_TREE_H #include "c-common.h" #include "toplev.h" #include "diagnostic.h" /* struct lang_identifier is private to c-decl.c, but langhooks.c needs to know how big it is. This is sanity-checked in c-decl.c. */ #define C_SIZEOF_STRUCT_LANG_IDENTIFIER \ (sizeof (struct c_common_identifier) + 3 * sizeof (void *)) /* Language-specific declaration information. */ struct lang_decl GTY(()) { char dummy; }; /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */ #define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */ #define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE) /* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE nonzero if the definition of the type has already started. */ #define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE) /* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable declarations whose type would be completed by completing that type. */ #define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE) /* In an IDENTIFIER_NODE, nonzero if this identifier is actually a keyword. C_RID_CODE (node) is then the RID_* value of the keyword, and C_RID_YYCODE is the token number wanted by Yacc. */ #define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID) struct lang_type GTY(()) { /* In a RECORD_TYPE, a sorted array of the fields of the type. */ struct sorted_fields_type * GTY ((reorder ("resort_sorted_fields"))) s; /* In an ENUMERAL_TYPE, the min and max values. */ tree enum_min; tree enum_max; /* In a RECORD_TYPE, information specific to Objective-C, such as a list of adopted protocols or a pointer to a corresponding @interface. See objc/objc-act.h for details. */ tree objc_info; }; /* Record whether a type or decl was written with nonconstant size. Note that TYPE_SIZE may have simplified to a constant. */ #define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE) #define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE) /* Record whether a typedef for type `int' was actually `signed int'. */ #define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP) /* For a FUNCTION_DECL, nonzero if it was defined without an explicit return type. */ #define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP) /* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */ #define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP) /* For FUNCTION_DECLs, evaluates true if the decl is built-in but has been declared. 
*/ #define C_DECL_DECLARED_BUILTIN(EXP) \ DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP)) /* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a built-in prototype and does not have a non-built-in prototype. */ #define C_DECL_BUILTIN_PROTOTYPE(EXP) \ DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP)) /* Record whether a decl was declared register. This is strictly a front-end flag, whereas DECL_REGISTER is used for code generation; they may differ for structures with volatile fields. */ #define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP) /* Record whether a decl was used in an expression anywhere except an unevaluated operand of sizeof / typeof / alignof. This is only used for functions declared static but not defined, though outside sizeof and typeof it is set for other function decls as well. */ #define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP)) /* Record whether a label was defined in a statement expression which has finished and so can no longer be jumped to. */ #define C_DECL_UNJUMPABLE_STMT_EXPR(EXP) \ DECL_LANG_FLAG_6 (LABEL_DECL_CHECK (EXP)) /* Record whether a label was the subject of a goto from outside the current level of statement expression nesting and so cannot be defined right now. */ #define C_DECL_UNDEFINABLE_STMT_EXPR(EXP) \ DECL_LANG_FLAG_7 (LABEL_DECL_CHECK (EXP)) /* Record whether a label was defined in the scope of an identifier with variably modified type which has finished and so can no longer be jumped to. */ #define C_DECL_UNJUMPABLE_VM(EXP) \ DECL_LANG_FLAG_3 (LABEL_DECL_CHECK (EXP)) /* Record whether a label was the subject of a goto from outside the current level of scopes of identifiers with variably modified type and so cannot be defined right now. */ #define C_DECL_UNDEFINABLE_VM(EXP) \ DECL_LANG_FLAG_5 (LABEL_DECL_CHECK (EXP)) /* Record whether a variable has been declared threadprivate by #pragma omp threadprivate. */ #define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL)) /* Nonzero for a decl which either doesn't exist or isn't a prototype. N.B. Could be simplified if all built-in decls had complete prototypes (but this is presently difficult because some of them need FILE*). */ #define C_DECL_ISNT_PROTOTYPE(EXP) \ (EXP == 0 \ || (TYPE_ARG_TYPES (TREE_TYPE (EXP)) == 0 \ && !DECL_BUILT_IN (EXP))) /* For FUNCTION_TYPE, a hidden list of types of arguments. The same as TYPE_ARG_TYPES for functions with prototypes, but created for functions without prototypes. */ #define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE) /* Record parser information about an expression that is irrelevant for code generation alongside a tree representing its value. */ struct c_expr { /* The value of the expression. */ tree value; /* Record the original binary operator of an expression, which may have been changed by fold, STRING_CST for unparenthesized string constants, or ERROR_MARK for other expressions (including parenthesized expressions). */ enum tree_code original_code; }; /* A kind of type specifier. Note that this information is currently only used to distinguish tag definitions, tag references and typeof uses. */ enum c_typespec_kind { /* A reserved keyword type specifier. */ ctsk_resword, /* A reference to a tag, previously declared, such as "struct foo". This includes where the previous declaration was as a different kind of tag, in which case this is only valid if shadowing that tag in an inner scope. */ ctsk_tagref, /* A reference to a tag, not previously declared in a visible scope. 
*/ ctsk_tagfirstref, /* A definition of a tag such as "struct foo { int a; }". */ ctsk_tagdef, /* A typedef name. */ ctsk_typedef, /* An ObjC-specific kind of type specifier. */ ctsk_objc, /* A typeof specifier. */ ctsk_typeof }; /* A type specifier: this structure is created in the parser and passed to declspecs_add_type only. */ struct c_typespec { /* What kind of type specifier this is. */ enum c_typespec_kind kind; /* The specifier itself. */ tree spec; }; /* A storage class specifier. */ enum c_storage_class { csc_none, csc_auto, csc_extern, csc_register, csc_static, csc_typedef }; /* A type specifier keyword "void", "_Bool", "char", "int", "float", "double", or none of these. */ enum c_typespec_keyword { cts_none, cts_void, cts_bool, cts_char, cts_int, cts_float, cts_double, cts_dfloat32, cts_dfloat64, cts_dfloat128 }; /* A sequence of declaration specifiers in C. */ struct c_declspecs { /* The type specified, if a single type specifier such as a struct, union or enum specifier, typedef name or typeof specifies the whole type, or NULL_TREE if none or a keyword such as "void" or "char" is used. Does not include qualifiers. */ tree type; /* The attributes from a typedef decl. */ tree decl_attr; /* When parsing, the attributes. Outside the parser, this will be NULL; attributes (possibly from multiple lists) will be passed separately. */ tree attrs; /* Any type specifier keyword used such as "int", not reflecting modifiers such as "short", or cts_none if none. */ enum c_typespec_keyword typespec_word; /* The storage class specifier, or csc_none if none. */ enum c_storage_class storage_class; /* Whether any declaration specifiers have been seen at all. */ BOOL_BITFIELD declspecs_seen_p : 1; /* Whether a type specifier has been seen. */ BOOL_BITFIELD type_seen_p : 1; /* Whether something other than a storage class specifier or attribute has been seen. This is used to warn for the obsolescent usage of storage class specifiers other than at the start of the list. (Doing this properly would require function specifiers to be handled separately from storage class specifiers.) */ BOOL_BITFIELD non_sc_seen_p : 1; /* Whether the type is specified by a typedef or typeof name. */ BOOL_BITFIELD typedef_p : 1; /* Whether a struct, union or enum type either had its content defined by a type specifier in the list or was the first visible declaration of its tag. */ BOOL_BITFIELD tag_defined_p : 1; /* Whether the type is explicitly "signed" or specified by a typedef whose type is explicitly "signed". */ BOOL_BITFIELD explicit_signed_p : 1; /* Whether the specifiers include a deprecated typedef. */ BOOL_BITFIELD deprecated_p : 1; /* Whether the type defaulted to "int" because there were no type specifiers. */ BOOL_BITFIELD default_int_p; /* Whether "long" was specified. */ BOOL_BITFIELD long_p : 1; /* Whether "long" was specified more than once. */ BOOL_BITFIELD long_long_p : 1; /* Whether "short" was specified. */ BOOL_BITFIELD short_p : 1; /* Whether "signed" was specified. */ BOOL_BITFIELD signed_p : 1; /* Whether "unsigned" was specified. */ BOOL_BITFIELD unsigned_p : 1; /* Whether "complex" was specified. */ BOOL_BITFIELD complex_p : 1; /* Whether "inline" was specified. */ BOOL_BITFIELD inline_p : 1; /* Whether "__thread" was specified. */ BOOL_BITFIELD thread_p : 1; /* Whether "const" was specified. */ BOOL_BITFIELD const_p : 1; /* Whether "volatile" was specified. */ BOOL_BITFIELD volatile_p : 1; /* Whether "restrict" was specified. 
*/ BOOL_BITFIELD restrict_p : 1; }; /* The various kinds of declarators in C. */ enum c_declarator_kind { /* An identifier. */ cdk_id, /* A function. */ cdk_function, /* An array. */ cdk_array, /* A pointer. */ cdk_pointer, /* Parenthesized declarator with nested attributes. */ cdk_attrs }; /* Information about the parameters in a function declarator. */ struct c_arg_info { /* A list of parameter decls. */ tree parms; /* A list of structure, union and enum tags defined. */ tree tags; /* A list of argument types to go in the FUNCTION_TYPE. */ tree types; /* A list of non-parameter decls (notably enumeration constants) defined with the parameters. */ tree others; /* A list of VLA sizes from the parameters. In a function definition, these are used to ensure that side-effects in sizes of arrays converted to pointers (such as a parameter int i[n++]) take place; otherwise, they are ignored. */ tree pending_sizes; /* True when these arguments had [*]. */ BOOL_BITFIELD had_vla_unspec : 1; }; /* A declarator. */ struct c_declarator { /* The kind of declarator. */ enum c_declarator_kind kind; /* Except for cdk_id, the contained declarator. For cdk_id, NULL. */ struct c_declarator *declarator; location_t id_loc; /* Currently only set for cdk_id. */ union { /* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract declarator. */ tree id; /* For functions. */ struct c_arg_info *arg_info; /* For arrays. */ struct { /* The array dimension, or NULL for [] and [*]. */ tree dimen; /* The qualifiers inside []. */ int quals; /* The attributes (currently ignored) inside []. */ tree attrs; /* Whether [static] was used. */ BOOL_BITFIELD static_p : 1; /* Whether [*] was used. */ BOOL_BITFIELD vla_unspec_p : 1; } array; /* For pointers, the qualifiers on the pointer type. */ int pointer_quals; /* For attributes. */ tree attrs; } u; }; /* A type name. */ struct c_type_name { /* The declaration specifiers. */ struct c_declspecs *specs; /* The declarator. */ struct c_declarator *declarator; }; /* A parameter. */ struct c_parm { /* The declaration specifiers, minus any prefix attributes. */ struct c_declspecs *specs; /* The attributes. */ tree attrs; /* The declarator. */ struct c_declarator *declarator; }; /* Save and restore the variables in this file and elsewhere that keep track of the progress of compilation of the current function. Used for nested functions. */ struct language_function GTY(()) { struct c_language_function base; tree x_break_label; tree x_cont_label; struct c_switch * GTY((skip)) x_switch_stack; struct c_arg_info * GTY((skip)) arg_info; int returns_value; int returns_null; int returns_abnormally; int warn_about_return_type; }; /* Save lists of labels used or defined in particular contexts. Allocated on the parser obstack. */ struct c_label_list { /* The label at the head of the list. */ tree label; /* The rest of the list. */ struct c_label_list *next; }; /* Statement expression context. */ struct c_label_context_se { /* The labels defined at this level of nesting. */ struct c_label_list *labels_def; /* The labels used at this level of nesting. */ struct c_label_list *labels_used; /* The next outermost context. */ struct c_label_context_se *next; }; /* Context of variably modified declarations. */ struct c_label_context_vm { /* The labels defined at this level of nesting. */ struct c_label_list *labels_def; /* The labels used at this level of nesting. */ struct c_label_list *labels_used; /* The scope of this context. 
Multiple contexts may be at the same numbered scope, since each variably modified declaration starts a new context. */ unsigned scope; /* The next outermost context. */ struct c_label_context_vm *next; }; /* in c-parser.c */ extern void c_parse_init (void); /* in c-aux-info.c */ extern void gen_aux_info_record (tree, int, int, int); /* in c-decl.c */ extern struct obstack parser_obstack; extern tree c_break_label; extern tree c_cont_label; extern int global_bindings_p (void); extern void push_scope (void); extern tree pop_scope (void); extern void insert_block (tree); extern void c_expand_body (tree); extern void c_init_decl_processing (void); extern void c_dup_lang_specific_decl (tree); extern void c_print_identifier (FILE *, tree, int); extern int quals_from_declspecs (const struct c_declspecs *); extern struct c_declarator *build_array_declarator (tree, struct c_declspecs *, bool, bool); extern tree build_enumerator (tree, tree); extern tree check_for_loop_decls (void); extern void mark_forward_parm_decls (void); extern void declare_parm_level (void); extern void undeclared_variable (tree, location_t); extern tree declare_label (tree); extern tree define_label (location_t, tree); extern void c_maybe_initialize_eh (void); extern void finish_decl (tree, tree, tree); extern tree finish_enum (tree, tree, tree); extern void finish_function (void); extern tree finish_struct (tree, tree, tree); extern struct c_arg_info *get_parm_info (bool); extern tree grokfield (struct c_declarator *, struct c_declspecs *, tree); extern tree groktypename (struct c_type_name *); extern tree grokparm (const struct c_parm *); extern tree implicitly_declare (tree); extern void keep_next_level (void); extern void pending_xref_error (void); extern void c_push_function_context (struct function *); extern void c_pop_function_context (struct function *); extern void push_parm_decl (const struct c_parm *); extern struct c_declarator *set_array_declarator_inner (struct c_declarator *, struct c_declarator *, bool); extern tree builtin_function (const char *, tree, int, enum built_in_class, const char *, tree); extern void shadow_tag (const struct c_declspecs *); extern void shadow_tag_warned (const struct c_declspecs *, int); extern tree start_enum (tree); extern int start_function (struct c_declspecs *, struct c_declarator *, tree); extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool, tree); extern tree start_struct (enum tree_code, tree); extern void store_parm_decls (void); extern void store_parm_decls_from (struct c_arg_info *); extern tree xref_tag (enum tree_code, tree); extern struct c_typespec parser_xref_tag (enum tree_code, tree); extern int c_expand_decl (tree); extern struct c_parm *build_c_parm (struct c_declspecs *, tree, struct c_declarator *); extern struct c_declarator *build_attrs_declarator (tree, struct c_declarator *); extern struct c_declarator *build_function_declarator (struct c_arg_info *, struct c_declarator *); extern struct c_declarator *build_id_declarator (tree); extern struct c_declarator *make_pointer_declarator (struct c_declspecs *, struct c_declarator *); extern struct c_declspecs *build_null_declspecs (void); extern struct c_declspecs *declspecs_add_qual (struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_type (struct c_declspecs *, struct c_typespec); extern struct c_declspecs *declspecs_add_scspec (struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_attrs (struct c_declspecs *, tree); extern struct c_declspecs 
*finish_declspecs (struct c_declspecs *); /* in c-objc-common.c */ extern int c_disregard_inline_limits (tree); extern int c_cannot_inline_tree_fn (tree *); extern bool c_objc_common_init (void); extern bool c_missing_noreturn_ok_p (tree); extern tree c_objc_common_truthvalue_conversion (tree expr); extern bool c_warn_unused_global_decl (tree); extern void c_initialize_diagnostics (diagnostic_context *); extern bool c_vla_unspec_p (tree x, tree fn); #define c_build_type_variant(TYPE, CONST_P, VOLATILE_P) \ c_build_qualified_type ((TYPE), \ ((CONST_P) ? TYPE_QUAL_CONST : 0) | \ ((VOLATILE_P) ? TYPE_QUAL_VOLATILE : 0)) /* in c-typeck.c */ extern int in_alignof; extern int in_sizeof; extern int in_typeof; extern struct c_switch *c_switch_stack; extern struct c_label_context_se *label_context_stack_se; extern struct c_label_context_vm *label_context_stack_vm; extern tree require_complete_type (tree); extern int same_translation_unit_p (tree, tree); extern int comptypes (tree, tree); extern bool c_vla_type_p (tree); extern bool c_mark_addressable (tree); extern void c_incomplete_type_error (tree, tree); extern tree c_type_promotes_to (tree); extern struct c_expr default_function_array_conversion (struct c_expr); extern tree composite_type (tree, tree); extern tree build_component_ref (tree, tree); extern tree build_array_ref (tree, tree); extern tree build_external_ref (tree, int, location_t); extern void pop_maybe_used (bool); extern struct c_expr c_expr_sizeof_expr (struct c_expr); extern struct c_expr c_expr_sizeof_type (struct c_type_name *); extern struct c_expr parser_build_unary_op (enum tree_code, struct c_expr); extern struct c_expr parser_build_binary_op (enum tree_code, struct c_expr, struct c_expr); extern tree build_conditional_expr (tree, tree, tree); extern tree build_compound_expr (tree, tree); extern tree c_cast_expr (struct c_type_name *, tree); extern tree build_c_cast (tree, tree); extern void store_init_value (tree, tree); extern void error_init (const char *); extern void pedwarn_init (const char *); extern void maybe_warn_string_init (tree, struct c_expr); extern void start_init (tree, tree, int); extern void finish_init (void); extern void really_start_incremental_init (tree); extern void push_init_level (int); extern struct c_expr pop_init_level (int); extern void set_init_index (tree, tree); extern void set_init_label (tree); extern void process_init_element (struct c_expr); extern tree build_compound_literal (tree, tree); extern tree c_start_case (tree); extern void c_finish_case (tree); extern tree build_asm_expr (tree, tree, tree, tree, bool); extern tree build_asm_stmt (tree, tree); extern tree c_convert_parm_for_inlining (tree, tree, tree, int); extern int c_types_compatible_p (tree, tree); extern tree c_begin_compound_stmt (bool); extern tree c_end_compound_stmt (tree, bool); extern void c_finish_if_stmt (location_t, tree, tree, tree, bool); extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, bool); extern tree c_begin_stmt_expr (void); extern tree c_finish_stmt_expr (tree); extern tree c_process_expr_stmt (tree); extern tree c_finish_expr_stmt (tree); extern tree c_finish_return (tree); extern tree c_finish_bc_stmt (tree *, bool); extern tree c_finish_goto_label (tree); extern tree c_finish_goto_ptr (tree); extern void c_begin_vm_scope (unsigned int); extern void c_end_vm_scope (unsigned int); extern tree c_expr_to_decl (tree, bool *, bool *, bool *); extern tree c_begin_omp_parallel (void); extern tree c_finish_omp_parallel (tree, tree); extern 
tree c_finish_omp_clauses (tree); /* Set to 0 at beginning of a function definition, set to 1 if a return statement that specifies a return value is seen. */ extern int current_function_returns_value; /* Set to 0 at beginning of a function definition, set to 1 if a return statement with no argument is seen. */ extern int current_function_returns_null; /* Set to 0 at beginning of a function definition, set to 1 if a call to a noreturn function is seen. */ extern int current_function_returns_abnormally; /* Nonzero means we are reading code that came from a system header file. */ extern int system_header_p; /* True means global_bindings_p should return false even if the scope stack says we are in file scope. */ extern bool c_override_global_bindings_to_false; /* True means we've initialized exception handling. */ extern bool c_eh_initialized_p; /* In c-decl.c */ extern void c_finish_incomplete_decl (tree); extern void c_write_global_declarations (void); /* In order for the format checking to accept the C frontend diagnostic framework extensions, you must include this file before toplev.h, not after. */ #if GCC_VERSION >= 4001 #define ATTRIBUTE_GCC_CDIAG(m, n) __attribute__ ((__format__ (GCC_DIAG_STYLE, m ,n))) ATTRIBUTE_NONNULL(m) #else #define ATTRIBUTE_GCC_CDIAG(m, n) ATTRIBUTE_NONNULL(m) #endif extern void pedwarn_c90 (const char *, ...) ATTRIBUTE_GCC_CDIAG(1,2); extern void pedwarn_c99 (const char *, ...) ATTRIBUTE_GCC_CDIAG(1,2); #endif /* ! GCC_C_TREE_H */
GB_binop__rdiv_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_uint64) // A.*B function (eWiseMult): GB (_AemultB_01__rdiv_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_uint64) // A.*B function (eWiseMult): GB (_AemultB_03__rdiv_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_uint64) // A*D function (colscale): GB (_AxD__rdiv_uint64) // D*A function (rowscale): GB (_DxB__rdiv_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_uint64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_uint64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_uint64) // C=scalar+B GB (_bind1st__rdiv_uint64) // C=scalar+B' GB (_bind1st_tran__rdiv_uint64) // C=A+scalar GB (_bind2nd__rdiv_uint64) // C=A'+scalar GB (_bind2nd_tran__rdiv_uint64) // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 64) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_UNSIGNED (y, x, 64) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_UINT64 || GxB_NO_RDIV_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rdiv_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rdiv_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" 
GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__rdiv_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rdiv_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rdiv_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_UNSIGNED (bij, x, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rdiv_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_UNSIGNED (y, aij, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 64) ; \ } GrB_Info GB (_bind1st_tran__rdiv_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t 
*restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 64) ; \ } GrB_Info GB (_bind2nd_tran__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
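As the header comment states, RDIV is DIV with its operands flipped: cij = GB_IDIV_UNSIGNED(bij, aij, 64), i.e. z = y / x. That is why _bind1st (scalar bound as x) divides each matrix entry by the scalar, while _bind2nd (scalar bound as y) divides the scalar by each entry. Below is a standalone sketch with plain C division and a placeholder zero guard; the real divide-by-zero behaviour is whatever GB_IDIV_UNSIGNED defines elsewhere in GraphBLAS.

/* Standalone sketch (not part of the generated kernel): rdiv(x, y) = y / x. */
#include <stdio.h>
#include <stdint.h>

static uint64_t rdiv_u64(uint64_t x, uint64_t y) {
    return (x == 0) ? 0 : y / x;   /* placeholder zero guard, not GraphBLAS semantics */
}

int main(void) {
    uint64_t scalar = 3, entry = 12;   /* illustrative scalar and matrix entry */
    /* bind1st (scalar bound as x): cij = rdiv(scalar, bij) = bij / scalar */
    printf("bind1st: %llu\n", (unsigned long long) rdiv_u64(scalar, entry));  /* 4 */
    /* bind2nd (scalar bound as y): cij = rdiv(aij, scalar) = scalar / aij */
    printf("bind2nd: %llu\n", (unsigned long long) rdiv_u64(entry, scalar));  /* 0 */
    return 0;
}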
jacobi-task.c
# include "poisson.h" /* #pragma omp task/taskwait version of SWEEP. */ void sweep_task (int nx, int ny, double dx, double dy, double *f_, int itold, int itnew, double *u_, double *unew_, int block_size) { int i; int it; int j; double (*f)[nx][ny] = (double (*)[nx][ny])f_; double (*u)[nx][ny] = (double (*)[nx][ny])u_; double (*unew)[nx][ny] = (double (*)[nx][ny])unew_; #pragma omp parallel shared (f, u, unew) private (i, it, j) firstprivate(nx, ny, dx, dy, itold, itnew) #pragma omp single { for (it = itold + 1; it <= itnew; it++) { // Save the current estimate. for (i = 0; i < nx; i++) { #pragma omp task firstprivate(i, ny) private(j) shared(u, unew) for (j = 0; j < ny; j++) { (*u)[i][j] = (*unew)[i][j]; } } #pragma omp taskwait // Compute a new estimate. for (i = 0; i < nx; i++) { #pragma omp task firstprivate(i, dx, dy, nx, ny) private(j) shared(u, unew, f) for (j = 0; j < ny; j++) { if (i == 0 || j == 0 || i == nx - 1 || j == ny - 1) { (*unew)[i][j] = (*f)[i][j]; } else { (*unew)[i][j] = 0.25 * ((*u)[i-1][j] + (*u)[i][j+1] + (*u)[i][j-1] + (*u)[i+1][j] + (*f)[i][j] * dx * dy); } } } #pragma omp taskwait } } }
ten_tusscher_2004_epi_S3_2.c
//Original Ten Tusscher #include <assert.h> #include <stdlib.h> #include "ten_tusscher_2004_epi_S3_2.h" GET_CELL_MODEL_DATA(init_cell_model_data) { assert(cell_model); if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } //TODO: this should be called only once for the whole mesh, like in the GPU code SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.5666381018619,0.00129051605124356,0.779668062709313,0.779476464261432,0.000174942983091856,0.485073219069535,0.00294064377784649,0.999998347537377,1.93402807596828e-08,1.89147579865266e-05,0.999766536540144,1.00702637601413,0.999994286442644,4.67868204274064e-05,0.302079209595859,10.4213056981946,139.366419036882}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = i; for (int j = 0; j < num_steps; ++j) { solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu(real dt, real *sv, real stim_current) { assert(sv); real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; ///#ifdef EPI real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL /// real Gks=0.062; ///#endif //Parameters for Ik1 real GK1=5.405; //Parameters for Ito //#ifdef EPI real Gto=0.294; //#endif // #ifdef ENDO // real Gto=0.073; //#endif //#ifdef MCELL // real Gto=0.294; ///#endif //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters 
[]={14.3793865384661,0.000186410749574289,0.000121113650924281,0.000506846995710814,0.225962091956109,0.140516980875224,0.109889354839696,4.57147213592430,0.0152630183984148,1.65236689053833,1099.64269823830,0.000374391607169453,0.513004079808466,0.0169401996097053,0.00333803821185753,6.30360246705429e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; 
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; 
rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
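Every gate update near the end of RHS_cpu has the form X_INF - (X_INF - x) * exp(-dt / TAU_X): the gate is relaxed exactly toward its voltage-dependent steady state over one step, assuming the steady state and time constant stay fixed during the step (the Rush-Larsen scheme), while the membrane voltage itself is advanced with a plain forward-Euler term dt * (-sItot). The toy program below is a self-contained sketch of that gate update rule only; the values y_inf = 0.8 and tau = 5 ms are invented for illustration and are not parameters of this model.

/* Minimal sketch of the Rush-Larsen update used for the gates above:
   a gate y relaxes toward its steady state y_inf with time constant tau,
   so one step of size dt is  y <- y_inf - (y_inf - y) * exp(-dt / tau).
   The gate here is made up; it only illustrates the update rule. */
#include <stdio.h>
#include <math.h>

int main(void) {
    double y     = 0.0;   /* gate value, dimensionless in [0,1]   */
    double y_inf = 0.8;   /* steady state at the current voltage   */
    double tau   = 5.0;   /* time constant in ms                   */
    double dt    = 0.02;  /* time step in ms, as used by the solver */

    for (int step = 0; step < 1000; step++)
        y = y_inf - (y_inf - y) * exp(-dt / tau);

    /* After 20 ms (four time constants) the gate is close to y_inf. */
    printf("y after 20 ms: %f (steady state %f)\n", y, y_inf);
    return 0;
}

Unlike forward Euler, this update is unconditionally stable for the gate equations, which is why the model can use the same dt for the stiff gating variables and the voltage.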
DRB058-jacobikernel-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: [email protected], [email protected], [email protected], [email protected], [email protected]) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Two parallel for loops within one single parallel region, combined with private() and reduction(). 
*/

#include <stdio.h>
#include <math.h>
#define MSIZE 200
#include <omp.h>

int n = 200;
int m = 200;
int mits = 1000;
double tol = 0.0000000001;
double relax = 1.0;
double alpha = 0.0543;
double u[200][200];
double f[200][200];
double uold[200][200];
double dx;
double dy;

void initialize()
{
  int i;
  int j;
  int xx;
  int yy;
  dx = 2.0 / (n - 1);
  dy = 2.0 / (m - 1);
/* Initialize initial condition and RHS */
#pragma omp parallel for private (xx,yy,i,j) firstprivate (n,m)
  for (i = 0; i <= n - 1; i += 1) {
#pragma omp parallel for private (xx,yy,j) firstprivate (alpha,dx,dy)
    for (j = 0; j <= m - 1; j += 1) {
      /* -1 < x < 1 */
      xx = ((int )(- 1.0 + dx * (i - 1)));
      /* -1 < y < 1 */
      yy = ((int )(- 1.0 + dy * (j - 1)));
      u[i][j] = 0.0;
      f[i][j] = - 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy))
                - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy));
    }
  }
}

void jacobi()
{
  double omega;
  int i;
  int j;
  int k;
  double error;
  double resid;
  double ax;
  double ay;
  double b;
  omega = relax;
/* Initialize coefficients */
  dx = 2.0 / (n - 1);
  dy = 2.0 / (m - 1);
/* X-direction coef */
  ax = 1.0 / (dx * dx);
/* Y-direction coef */
  ay = 1.0 / (dy * dy);
/* Central coeff */
  b = - 2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha;
  error = 10.0 * tol;
  k = 1;
  while(k <= mits){
    error = 0.0;
/* Copy new solution into old */
#pragma omp parallel for private (i,j)
    for (i = 0; i <= n - 1; i += 1) {
#pragma omp parallel for private (j)
      for (j = 0; j <= m - 1; j += 1) {
        uold[i][j] = u[i][j];
      }
    }
#pragma omp parallel for private (resid,i,j) reduction (+:error)
    for (i = 1; i <= n - 1 - 1; i += 1) {
#pragma omp parallel for private (resid,j) reduction (+:error) firstprivate (omega,ax,ay,b)
      for (j = 1; j <= m - 1 - 1; j += 1) {
        resid = (ax * (uold[i - 1][j] + uold[i + 1][j])
                 + ay * (uold[i][j - 1] + uold[i][j + 1])
                 + b * uold[i][j] - f[i][j]) / b;
        u[i][j] = uold[i][j] - omega * resid;
        error = error + resid * resid;
      }
    }
/* Error check */
    k = k + 1;
    error = sqrt(error) / (n * m);
/* End iteration loop */
  }
  printf("Total Number of Iterations:%d\n",k);
  printf("Residual:%E\n",error);
}

int main()
{
  initialize();
  jacobi();
  return 0;
}
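The residual loop in jacobi() above places a second "#pragma omp parallel for" inside the first one. Unless nested parallelism is explicitly enabled, that inner region runs with a team of one thread, so in practice only the outer loop is divided among threads. The function below is a sketch, not part of DataRaceBench, of the same residual accumulation written as a single collapsed worksharing loop; the name residual_sweep and the parameterized array sizes are made up for illustration.

/* Sketch (not part of the benchmark): the residual accumulation from
   jacobi() expressed as one worksharing loop with collapse(2) and a
   reduction, instead of a nested parallel region per outer iteration. */
static double residual_sweep(int n, int m, double ax, double ay, double b,
                             double omega,
                             double u[n][m], double uold[n][m],
                             double f[n][m])
{
  double error = 0.0;

#pragma omp parallel for collapse(2) reduction(+:error)
  for (int i = 1; i < n - 1; i++)
    for (int j = 1; j < m - 1; j++) {
      double resid = (ax * (uold[i - 1][j] + uold[i + 1][j])
                      + ay * (uold[i][j - 1] + uold[i][j + 1])
                      + b * uold[i][j] - f[i][j]) / b;
      u[i][j] = uold[i][j] - omega * resid;   /* each (i,j) is written once */
      error += resid * resid;                 /* accumulated via reduction  */
    }
  return error;
}

The caller would then normalize the returned sum as sqrt(error) / (n * m), exactly as the iteration loop in jacobi() does.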
psd.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/channel.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/registry.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* Define declaractions. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* Enumerated declaractions. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* Typedef declaractions. */ typedef struct _ChannelInfo { short type; size_t size; } ChannelInfo; typedef struct _MaskInfo { Image *image; RectangleInfo page; unsigned char background, flags; } MaskInfo; typedef struct _LayerInfo { ChannelInfo channel_info[MaxPSDChannels]; char blendkey[4]; Image *image; MaskInfo mask; Quantum opacity; RectangleInfo page; size_t offset_x, offset_y; unsigned char clipping, flags, name[257], visible; unsigned short channels; StringInfo *info; } LayerInfo; /* Forward declarations. 
*/ static MagickBooleanType WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPSD()() returns MagickTrue if the image format type, identified by the % magick string, is PSD. % % The format of the IsPSD method is: % % MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"8BPS",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSDImage() reads an Adobe Photoshop image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadPSDImage method is: % % Image *ReadPSDImage(image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static const char *CompositeOperatorToPSDBlendMode(Image *image) { switch (image->compose) { case ColorBurnCompositeOp: return(image->endian == LSBEndian ? "vidi" : "idiv"); case ColorDodgeCompositeOp: return(image->endian == LSBEndian ? " vid" : "div "); case ColorizeCompositeOp: return(image->endian == LSBEndian ? "rloc" : "colr"); case DarkenCompositeOp: return(image->endian == LSBEndian ? "krad" : "dark"); case DifferenceCompositeOp: return(image->endian == LSBEndian ? "ffid" : "diff"); case DissolveCompositeOp: return(image->endian == LSBEndian ? "ssid" : "diss"); case ExclusionCompositeOp: return(image->endian == LSBEndian ? "dums" : "smud"); case HardLightCompositeOp: return(image->endian == LSBEndian ? "tiLh" : "hLit"); case HardMixCompositeOp: return(image->endian == LSBEndian ? "xiMh" : "hMix"); case HueCompositeOp: return(image->endian == LSBEndian ? " euh" : "hue "); case LightenCompositeOp: return(image->endian == LSBEndian ? "etil" : "lite"); case LinearBurnCompositeOp: return(image->endian == LSBEndian ? "nrbl" : "lbrn"); case LinearDodgeCompositeOp: return(image->endian == LSBEndian ? "gddl" : "lddg"); case LinearLightCompositeOp: return(image->endian == LSBEndian ? "tiLl" : "lLit"); case LuminizeCompositeOp: return(image->endian == LSBEndian ? " mul" : "lum "); case MultiplyCompositeOp: return(image->endian == LSBEndian ? " lum" : "mul "); case OverlayCompositeOp: return(image->endian == LSBEndian ? "revo" : "over"); case PinLightCompositeOp: return(image->endian == LSBEndian ? "tiLp" : "pLit"); case SaturateCompositeOp: return(image->endian == LSBEndian ? " tas" : "sat "); case ScreenCompositeOp: return(image->endian == LSBEndian ? "nrcs" : "scrn"); case SoftLightCompositeOp: return(image->endian == LSBEndian ? "tiLs" : "sLit"); case VividLightCompositeOp: return(image->endian == LSBEndian ? "tiLv" : "vLit"); case OverCompositeOp: default: return(image->endian == LSBEndian ? 
"mron" : "norm"); } } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. */ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image,ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if ((image->alpha_trait != BlendPixelTrait) || (image->colorspace != sRGBColorspace)) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringFalse(option) != MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; ssize_t i; gamma=QuantumScale*GetPixelAlpha(image, q); if (gamma != 0.0 && gamma != 1.0) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); if (channel != AlphaPixelChannel) q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma); } } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying layer opacity %.20g", (double) opacity); if (opacity == OpaqueAlpha) return(MagickTrue); if (image->alpha_trait != BlendPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))* opacity),q); else if (opacity > 0) SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/ (MagickRealType) opacity)),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask, Quantum background,MagickBooleanType revert,ExceptionInfo *exception) { Image *complete_mask; MagickBooleanType status; PixelInfo color; ssize_t y; if (image->alpha_trait == UndefinedPixelTrait) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying opacity mask"); complete_mask=CloneImage(image,0,0,MagickTrue,exception); if (complete_mask == (Image *) NULL) 
return(MagickFalse); complete_mask->alpha_trait=BlendPixelTrait; GetPixelInfo(complete_mask,&color); color.red=(MagickRealType) background; (void) SetImageColor(complete_mask,&color,exception); status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue, mask->page.x-image->page.x,mask->page.y-image->page.y,exception); if (status == MagickFalse) { complete_mask=DestroyImage(complete_mask); return(status); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; Quantum *p; ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=(MagickRealType) GetPixelAlpha(image,q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q); else if (intensity > 0) SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q); q+=GetPixelChannels(image); p+=GetPixelChannels(complete_mask); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=(char) layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); (void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 
0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } compact_pixels++; } } return(i); } static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(const Image *image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return(2); } if (image->depth > 16) return(4); if (image->depth > 8) return(2); return(1); } static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image) { if (psd_info->version == 1) return((MagickSizeType) ReadBlobLong(image)); return((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image *image) { if (image->depth == 1) return(((image->columns+7)/8)*GetPSDPacketSize(image)); else return(image->columns*GetPSDPacketSize(image)); } static const char *ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception) { ChannelType channel_mask; MagickBooleanType status; channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~ AlphaChannel)); status=NegateImage(image,MagickFalse,exception); (void) SetImageChannelMask(image,channel_mask); return(status); } static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image, const unsigned char *blocks,size_t length) { const unsigned char *p; ssize_t offset; StringInfo *profile; unsigned char name_length; unsigned int count; 
unsigned short id, short_sans; if (length < 16) return((StringInfo *) NULL); profile=BlobToStringInfo((const unsigned char *) NULL,length); SetStringInfoDatum(profile,blocks); SetStringInfoName(profile,"8bim"); for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p+=4; p=PushShortPixel(MSBEndian,p,&id); p=PushCharPixel(p,&name_length); if ((name_length % 2) == 0) name_length++; p+=name_length; if (p > (blocks+length-4)) break; p=PushLongPixel(MSBEndian,p,&count); offset=(ssize_t) count; if (((p+offset) < blocks) || ((p+offset) > (blocks+length))) break; switch (id) { case 0x03ed: { unsigned short resolution; /* Resolution info. */ if (offset < 16) break; p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.x=(double) resolution; (void) FormatImageProperty(image,"tiff:XResolution","%*g", GetMagickPrecision(),image->resolution.x); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.y=(double) resolution; (void) FormatImageProperty(image,"tiff:YResolution","%*g", GetMagickPrecision(),image->resolution.y); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if ((offset > 4) && (*(p+4) == 0)) psd_info->has_merged_image=MagickFalse; p+=offset; break; } default: { p+=offset; break; } } if ((offset & 0x01) != 0) p++; } return(profile); } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *) NULL) return(OverCompositeOp); if (LocaleNCompare(mode,"norm",4) == 0) return(OverCompositeOp); if (LocaleNCompare(mode,"mul ",4) == 0) return(MultiplyCompositeOp); if (LocaleNCompare(mode,"diss",4) == 0) return(DissolveCompositeOp); if (LocaleNCompare(mode,"diff",4) == 0) return(DifferenceCompositeOp); if (LocaleNCompare(mode,"dark",4) == 0) return(DarkenCompositeOp); if (LocaleNCompare(mode,"lite",4) == 0) return(LightenCompositeOp); if (LocaleNCompare(mode,"hue ",4) == 0) return(HueCompositeOp); if (LocaleNCompare(mode,"sat ",4) == 0) return(SaturateCompositeOp); if (LocaleNCompare(mode,"colr",4) == 0) return(ColorizeCompositeOp); if (LocaleNCompare(mode,"lum ",4) == 0) return(LuminizeCompositeOp); if (LocaleNCompare(mode,"scrn",4) == 0) return(ScreenCompositeOp); if (LocaleNCompare(mode,"over",4) == 0) return(OverlayCompositeOp); if (LocaleNCompare(mode,"hLit",4) == 0) return(HardLightCompositeOp); if (LocaleNCompare(mode,"sLit",4) == 0) return(SoftLightCompositeOp); if (LocaleNCompare(mode,"smud",4) == 0) return(ExclusionCompositeOp); if (LocaleNCompare(mode,"div ",4) == 0) return(ColorDodgeCompositeOp); if (LocaleNCompare(mode,"idiv",4) == 0) return(ColorBurnCompositeOp); if (LocaleNCompare(mode,"lbrn",4) == 0) return(LinearBurnCompositeOp); if (LocaleNCompare(mode,"lddg",4) == 0) return(LinearDodgeCompositeOp); if (LocaleNCompare(mode,"lLit",4) == 0) return(LinearLightCompositeOp); if (LocaleNCompare(mode,"vLit",4) == 0) return(VividLightCompositeOp); if (LocaleNCompare(mode,"pLit",4) == 0) return(PinLightCompositeOp); if (LocaleNCompare(mode,"hMix",4) == 0) return(HardMixCompositeOp); return(OverCompositeOp); } static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length) { ssize_t count; count=ReadBlob(image,length,(unsigned char *) p); if ((count == (ssize_t) length) && (image->endian != 
MSBEndian)) { char *q; q=p+length; for(--q; p < q; ++p, --q) { *p = *p ^ *q, *q = *p ^ *q, *p = *p ^ *q; } } return(count); } static inline void SetPSDPixel(Image *image,const size_t channels, const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q, ExceptionInfo *exception) { if (image->storage_class == PseudoClass) { PixelInfo *color; Quantum index; index=pixel; if (packet_size == 1) index=(Quantum) ScaleQuantumToChar(index); index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index, exception); if (type == 0) SetPixelIndex(image,index,q); if ((type == 0) && (channels > 1)) return; color=image->colormap+(ssize_t) GetPixelIndex(image,q); if (type != 0) color->alpha=(MagickRealType) pixel; SetPixelViaPixelInfo(image,color,q); return; } switch (type) { case -1: { SetPixelAlpha(image,pixel,q); break; } case -2: case 0: { SetPixelRed(image,pixel,q); break; } case -3: case 1: { SetPixelGreen(image,pixel,q); break; } case -4: case 2: { SetPixelBlue(image,pixel,q); break; } case 3: { if (image->colorspace == CMYKColorspace) SetPixelBlack(image,pixel,q); else if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } case 4: { if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) && (channels > 3)) break; if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } } } static MagickBooleanType ReadPSDChannelPixels(Image *image, const size_t channels,const ssize_t row,const ssize_t type, const unsigned char *pixels,ExceptionInfo *exception) { Quantum pixel; const unsigned char *p; Quantum *q; ssize_t x; size_t packet_size; p=pixels; q=GetAuthenticPixels(image,0,row,image->columns,1,exception); if (q == (Quantum *) NULL) return MagickFalse; packet_size=GetPSDPacketSize(image); for (x=0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel=ScaleCharToQuantum(*p++); else if (packet_size == 2) { unsigned short nibble; p=PushShortPixel(MSBEndian,p,&nibble); pixel=ScaleShortToQuantum(nibble); } else { MagickFloatType nibble; p=PushFloatPixel(MSBEndian,p,&nibble); pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble)); } if (image->depth > 1) { SetPSDPixel(image,channels,type,packet_size,pixel,q,exception); q+=GetPixelChannels(image); } else { ssize_t bit, number_bits; number_bits=(ssize_t) image->columns-x; if (number_bits > 8) number_bits=8; for (bit = 0; bit < (ssize_t) number_bits; bit++) { SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel) & (0x01 << (7-bit))) != 0 ? 
0 : QuantumRange,q,exception); q+=GetPixelChannels(image); x++; } if (x != (ssize_t) image->columns) x--; continue; } } return(SyncAuthenticPixels(image,exception)); } static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels, const ssize_t type,ExceptionInfo *exception) { MagickBooleanType status; size_t row_size; ssize_t count, y; unsigned char *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RAW"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memset(pixels,0,row_size*sizeof(*pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,row_size,pixels); if (count != (ssize_t) row_size) break; status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception); if (status == MagickFalse) break; } pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } static inline MagickOffsetType *ReadPSDRLESizes(Image *image, const PSDInfo *psd_info,const size_t size) { MagickOffsetType *sizes; ssize_t y; sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes)); if(sizes != (MagickOffsetType *) NULL) { for (y=0; y < (ssize_t) size; y++) { if (psd_info->version == 1) sizes[y]=(MagickOffsetType) ReadBlobShort(image); else sizes[y]=(MagickOffsetType) ReadBlobLong(image); } } return sizes; } static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info, const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception) { MagickBooleanType status; size_t length, row_size; ssize_t count, y; unsigned char *compact_pixels, *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RLE compressed"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); length=0; for (y=0; y < (ssize_t) image->rows; y++) if ((MagickOffsetType) length < sizes[y]) length=(size_t) sizes[y]; if (length > (row_size+2048)) /* arbitrary number */ { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename); } compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels)); if (compact_pixels == (unsigned char *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(compact_pixels,0,length*sizeof(*compact_pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,(size_t) sizes[y],compact_pixels); if (count != (ssize_t) sizes[y]) break; count=DecodePSDPixels((size_t) sizes[y],compact_pixels, (ssize_t) (image->depth == 1 ? 
123456 : image->depth),row_size,pixels); if (count != (ssize_t) row_size) break; status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels, exception); if (status == MagickFalse) break; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #ifdef MAGICKCORE_ZLIB_DELEGATE static void Unpredict8Bit(const Image *image,unsigned char *pixels, const size_t count,const size_t row_size) { unsigned char *p; size_t length, remaining; p=pixels; remaining=count; while (remaining > 0) { length=image->columns; while (--length) { *(p+1)+=*p; p++; } p++; remaining-=row_size; } } static void Unpredict16Bit(const Image *image,unsigned char *pixels, const size_t count,const size_t row_size) { unsigned char *p; size_t length, remaining; p=pixels; remaining=count; while (remaining > 0) { length=image->columns; while (--length) { p[2]+=p[0]+((p[1]+p[3]) >> 8); p[3]+=p[1]; p+=2; } p+=2; remaining-=row_size; } } static void Unpredict32Bit(const Image *image,unsigned char *pixels, unsigned char *output_pixels,const size_t row_size) { unsigned char *p, *q; ssize_t y; size_t offset1, offset2, offset3, remaining; unsigned char *start; offset1=image->columns; offset2=2*offset1; offset3=3*offset1; p=pixels; q=output_pixels; for (y=0; y < (ssize_t) image->rows; y++) { start=p; remaining=row_size; while (--remaining) { *(p+1)+=*p; p++; } p=start; remaining=image->columns; while (remaining--) { *(q++)=*p; *(q++)=*(p+offset1); *(q++)=*(p+offset2); *(q++)=*(p+offset3); p++; } p=start+row_size; } } static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels, const ssize_t type,const PSDCompressionType compression, const size_t compact_size,ExceptionInfo *exception) { MagickBooleanType status; unsigned char *p; size_t count, packet_size, row_size; ssize_t y; unsigned char *compact_pixels, *pixels; z_stream stream; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is ZIP compressed"); if ((MagickSizeType) compact_size > GetBlobSize(image)) ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size, sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); packet_size=GetPSDPacketSize(image); row_size=image->columns*packet_size; count=image->rows*row_size; pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; stream.next_in=(Bytef *)compact_pixels; stream.avail_in=(uInt) compact_size; stream.next_out=(Bytef *)pixels; stream.avail_out=(uInt) count; if (inflateInit(&stream) == Z_OK) { int ret; while (stream.avail_out > 0) { ret=inflate(&stream,Z_SYNC_FLUSH); if ((ret != Z_OK) && (ret != Z_STREAM_END)) { (void) inflateEnd(&stream); compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char 
*) RelinquishMagickMemory(pixels); return(MagickFalse); } if (ret == Z_STREAM_END) break; } (void) inflateEnd(&stream); } if (compression == ZipWithPrediction) { if (packet_size == 1) Unpredict8Bit(image,pixels,count,row_size); else if (packet_size == 2) Unpredict16Bit(image,pixels,count,row_size); else if (packet_size == 4) { unsigned char *output_pixels; output_pixels=(unsigned char *) AcquireQuantumMemory(count, sizeof(*output_pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed",image->filename); } Unpredict32Bit(image,pixels,output_pixels,row_size); pixels=(unsigned char *) RelinquishMagickMemory(pixels); pixels=output_pixels; } } status=MagickTrue; p=pixels; for (y=0; y < (ssize_t) image->rows; y++) { status=ReadPSDChannelPixels(image,channels,y,type,p,exception); if (status == MagickFalse) break; p+=row_size; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #endif static MagickBooleanType ReadPSDChannel(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info, const size_t channel,const PSDCompressionType compression, ExceptionInfo *exception) { Image *channel_image, *mask; MagickOffsetType offset; MagickBooleanType status; channel_image=image; mask=(Image *) NULL; if ((layer_info->channel_info[channel].type < -1) && (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0)) { const char *option; /* Ignore mask that is not a user supplied layer mask, if the mask is disabled or if the flags have unsupported values. 
*/ option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if ((layer_info->channel_info[channel].type != -2) || (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse))) { (void) SeekBlob(image,(MagickOffsetType) layer_info->channel_info[channel].size-2,SEEK_CUR); return(MagickTrue); } mask=CloneImage(image,layer_info->mask.page.width, layer_info->mask.page.height,MagickFalse,exception); if (mask != (Image *) NULL) { (void) ResetImagePixels(mask,exception); (void) SetImageType(mask,GrayscaleType,exception); channel_image=mask; } } offset=TellBlob(image); status=MagickFalse; switch(compression) { case Raw: status=ReadPSDChannelRaw(channel_image,psd_info->channels, (ssize_t) layer_info->channel_info[channel].type,exception); break; case RLE: { MagickOffsetType *sizes; sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ReadPSDChannelRLE(channel_image,psd_info, (ssize_t) layer_info->channel_info[channel].type,sizes,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status=ReadPSDChannelZip(channel_image,layer_info->channels, (ssize_t) layer_info->channel_info[channel].type,compression, layer_info->channel_info[channel].size-2,exception); #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)",image->filename); #endif break; default: (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning, "CompressionNotSupported","'%.20g'",(double) compression); break; } (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2, SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) (void) DestroyImage(mask); ThrowBinaryException(CoderError,"UnableToDecompressImage", image->filename); } if (mask != (Image *) NULL) { if (layer_info->mask.image != (Image *) NULL) layer_info->mask.image=DestroyImage(layer_info->mask.image); layer_info->mask.image=mask; } return(status); } static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info, const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception) { char message[MagickPathExtent]; MagickBooleanType status; PSDCompressionType compression; ssize_t j; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " setting up new layer image"); if (psd_info->mode != IndexedMode) (void) SetImageBackgroundColor(layer_info->image,exception); layer_info->image->compose=PSDBlendModeToCompositeOperator( layer_info->blendkey); if (layer_info->visible == MagickFalse) layer_info->image->compose=NoCompositeOp; /* Set up some hidden attributes for folks that need them. 
*/ (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.x); (void) SetImageArtifact(layer_info->image,"psd:layer.x",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.y); (void) SetImageArtifact(layer_info->image,"psd:layer.y",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double) layer_info->opacity); (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message); (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name, exception); status=MagickTrue; for (j=0; j < (ssize_t) layer_info->channels; j++) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for channel %.20g",(double) j); compression=(PSDCompressionType) ReadBlobShort(layer_info->image); layer_info->image->compression=ConvertPSDCompression(compression); if (layer_info->channel_info[j].type == -1) layer_info->image->alpha_trait=BlendPixelTrait; status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info, (size_t) j,compression,exception); if (status == MagickFalse) break; } if (status != MagickFalse) status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity, MagickFalse,exception); if ((status != MagickFalse) && (layer_info->image->colorspace == CMYKColorspace)) status=NegateCMYK(layer_info->image,exception); if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL)) { const char *option; layer_info->mask.image->page.x=layer_info->mask.page.x; layer_info->mask.image->page.y=layer_info->mask.page.y; /* Do not composite the mask when it is disabled */ if ((layer_info->mask.flags & 0x02) == 0x02) layer_info->mask.image->compose=NoCompositeOp; else status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image, layer_info->mask.background == 0 ? 
0 : QuantumRange,MagickFalse, exception); option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if (IsStringTrue(option) != MagickFalse) PreservePSDOpacityMask(image,layer_info,exception); layer_info->mask.image=DestroyImage(layer_info->mask.image); } return(status); } static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info, LayerInfo *layer_info) { int channel_type; ssize_t i; if (layer_info->channels < psd_info->min_channels) return(MagickFalse); channel_type=RedChannel; if (psd_info->min_channels >= 3) channel_type|=(GreenChannel | BlueChannel); if (psd_info->min_channels >= 4) channel_type|=BlackChannel; for (i=0; i < (ssize_t) layer_info->channels; i++) { short type; type=layer_info->channel_info[i].type; if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0)) return(MagickFalse); if (type == -1) { channel_type|=AlphaChannel; continue; } if (type < -1) continue; if (type == 0) channel_type&=~RedChannel; else if (type == 1) channel_type&=~GreenChannel; else if (type == 2) channel_type&=~BlueChannel; else if (type == 3) channel_type&=~BlackChannel; } if (channel_type == 0) return(MagickTrue); if ((channel_type == AlphaChannel) && (layer_info->channels >= psd_info->min_channels + 1)) return(MagickTrue); return(MagickFalse); } static void AttachPSDLayers(Image *image,LayerInfo *layer_info, ssize_t number_layers) { ssize_t i; ssize_t j; for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers == 0) { layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); return; } for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); } static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info, const ImageInfo *image_info,const size_t index) { if (psd_info->has_merged_image == MagickFalse) return(MagickFalse); if (image_info->number_scenes == 0) return(MagickFalse); if (index < image_info->scene) return(MagickTrue); if (index > image_info->scene+image_info->number_scenes-1) return(MagickTrue); return(MagickFalse); } static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image) { /* The number of layers cannot be used to determine if the merged image contains an alpha channel. So we enable it when we think we should. 
*/ if (((psd_info->mode == GrayscaleMode) && (psd_info->channels > 1)) || ((psd_info->mode == RGBMode) && (psd_info->channels > 3)) || ((psd_info->mode == CMYKMode) && (psd_info->channels > 4))) image->alpha_trait=BlendPixelTrait; } static void ParseAdditionalInfo(LayerInfo *layer_info) { char key[5]; size_t remaining_length; unsigned char *p; unsigned int size; p=GetStringInfoDatum(layer_info->info); remaining_length=GetStringInfoLength(layer_info->info); while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(char) (*p++); key[1]=(char) (*p++); key[2]=(char) (*p++); key[3]=(char) (*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) break; if (LocaleNCompare(key,"luni",sizeof(key)) == 0) { unsigned char *name; unsigned int length; length=(unsigned int) (*p++) << 24; length|=(unsigned int) (*p++) << 16; length|=(unsigned int) (*p++) << 8; length|=(unsigned int) (*p++); if (length * 2 > size - 4) break; if (sizeof(layer_info->name) <= length) break; name=layer_info->name; while (length > 0) { /* Only ASCII strings are supported */ if (*p++ != '\0') break; *name++=*p++; length--; } if (length == 0) *name='\0'; break; } else p+=size; remaining_length-=(size_t) size; } } static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image) { char type[4]; MagickSizeType size; ssize_t count; size=GetPSDSize(psd_info,image); if (size != 0) return(size); (void) ReadBlobLong(image); count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) return(0); count=ReadPSDString(image,type,4); if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) || (LocaleNCompare(type,"Mt32",4) == 0) || (LocaleNCompare(type,"Mtrn",4) == 0))) { size=GetPSDSize(psd_info,image); if (size != 0) return(0); image->alpha_trait=BlendPixelTrait; count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) return(0); count=ReadPSDString(image,type,4); } if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) || (LocaleNCompare(type,"Lr32",4) == 0))) size=GetPSDSize(psd_info,image); return(size); } static MagickBooleanType ReadPSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; ssize_t i; ssize_t count, index, j, number_layers; size=GetLayerInfoSize(psd_info,image); if (size == 0) { CheckMergedImageAlpha(psd_info,image); return(MagickTrue); } layer_info=(LayerInfo *) NULL; number_layers=(ssize_t) ReadBlobSignedShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. 
*/ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->alpha_trait=BlendPixelTrait; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t top, left, bottom, right; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); top=(ssize_t) ReadBlobSignedLong(image); left=(ssize_t) ReadBlobSignedLong(image); bottom=(ssize_t) ReadBlobSignedLong(image); right=(ssize_t) ReadBlobSignedLong(image); if ((right < left) || (bottom < top)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } layer_info[i].page.y=top; layer_info[i].page.x=left; layer_info[i].page.width=(size_t) (right-left); layer_info[i].page.height=(size_t) (bottom-top); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); if ((layer_info[i].channel_info[j].type < -4) || (layer_info[i].channel_info[j].type > 4)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"NoSuchImageChannel", image->filename); } layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadPSDString(image,layer_info[i].blendkey,4); if (count != 4) { 
layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. */ layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) (ReadBlobSignedLong(image)-layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) ( ReadBlobSignedLong(image)-layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,(double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } /* Layer name. 
*/ length=(MagickSizeType) (unsigned char) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; if (length > GetBlobSize(image)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "InsufficientImageDataInFile",image->filename); } layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); ParseAdditionalInfo(&layer_info[i]); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image. */ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info,exception); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping != MagickFalse) { AttachPSDLayers(image,layer_info,number_layers); return(MagickTrue); } status=MagickTrue; index=0; for (i=0; i < number_layers; i++) { if ((layer_info[i].image == (Image *) NULL) || (PSDSkipImage(psd_info, image_info,++index) != MagickFalse)) { for (j=0; j < (ssize_t) layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, (MagickSizeType) number_layers); if (status == MagickFalse) break; } if (status != MagickFalse) AttachPSDLayers(image,layer_info,number_layers); else layer_info=DestroyLayerInfo(layer_info,number_layers); return(status); } ModuleExport MagickBooleanType ReadPSDLayers(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { PolicyDomain domain; PolicyRights rights; domain=CoderPolicyDomain; rights=ReadPolicyRights; if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse) return(MagickTrue); 
return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse, exception)); } static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info, Image *image,const PSDInfo *psd_info,ExceptionInfo *exception) { MagickOffsetType *sizes; MagickBooleanType status; PSDCompressionType compression; ssize_t i; if ((image_info->number_scenes != 0) && (image_info->scene != 0)) return(MagickTrue); compression=(PSDCompressionType) ReadBlobMSBShort(image); image->compression=ConvertPSDCompression(compression); if (compression != Raw && compression != RLE) { (void) ThrowMagickException(exception,GetMagickModule(), TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression); return(MagickFalse); } sizes=(MagickOffsetType *) NULL; if (compression == RLE) { sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } status=MagickTrue; for (i=0; i < (ssize_t) psd_info->channels; i++) { ssize_t type; type=i; if ((type == 1) && (psd_info->channels == 2)) type=-1; if (compression == RLE) status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows), exception); else status=ReadPSDChannelRaw(image,psd_info->channels,type,exception); if (status != MagickFalse) status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, psd_info->channels); if (status == MagickFalse) break; } if ((status != MagickFalse) && (image->colorspace == CMYKColorspace)) status=NegateCMYK(image,exception); if (status != MagickFalse) status=CorrectPSDAlphaBlend(image_info,image,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); return(status); } static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType skip_layers; MagickOffsetType offset; MagickSizeType length; MagickBooleanType status; PSDInfo psd_info; ssize_t i; size_t image_list_length; ssize_t count; StringInfo *profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read image header. 
*/ image->endian=MSBEndian; count=ReadBlob(image,4,(unsigned char *) psd_info.signature); psd_info.version=ReadBlobMSBShort(image); if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlob(image,6,psd_info.reserved); psd_info.channels=ReadBlobMSBShort(image); if (psd_info.channels < 1) ThrowReaderException(CorruptImageError,"MissingImageChannel"); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); psd_info.rows=ReadBlobMSBLong(image); psd_info.columns=ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.depth=ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16) && (psd_info.depth != 32)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.mode=ReadBlobMSBShort(image); if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s", (double) psd_info.columns,(double) psd_info.rows,(double) psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType) psd_info.mode)); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize image. */ image->depth=psd_info.depth; image->columns=psd_info.columns; image->rows=psd_info.rows; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); psd_info.min_channels=3; if (psd_info.mode == LabMode) (void) SetImageColorspace(image,LabColorspace,exception); if (psd_info.mode == CMYKMode) { psd_info.min_channels=4; (void) SetImageColorspace(image,CMYKColorspace,exception); } else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) || (psd_info.mode == DuotoneMode)) { if (psd_info.depth != 32) { status=AcquireImageColormap(image,MagickMin((size_t) (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image colormap allocated"); } psd_info.min_channels=1; (void) SetImageColorspace(image,GRAYColorspace,exception); } else if (psd_info.mode == IndexedMode) psd_info.min_channels=1; if (psd_info.channels < psd_info.min_channels) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Read PSD raster colormap only present for indexed and duotone images. */ length=ReadBlobMSBLong(image); if ((psd_info.mode == IndexedMode) && (length < 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (length != 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading colormap"); if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32)) { /* Duotone image data; the format of this data is undocumented. 32 bits per pixel; the colormap is ignored. 
*/ (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR); } else { size_t number_colors; /* Read PSD raster colormap. */ number_colors=(size_t) length/3; if (number_colors > 65536) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireImageColormap(image,number_colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].red=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].green=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); image->alpha_trait=UndefinedPixelTrait; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); psd_info.has_merged_image=MagickTrue; profile=(StringInfo *) NULL; length=ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* Image resources block. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading image resource blocks - %.20g bytes",(double) ((MagickOffsetType) length)); if (length > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); blocks=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,(size_t) length,blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *) blocks,"8BIM",4) != 0)) { blocks=(unsigned char *) RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length); blocks=(unsigned char *) RelinquishMagickMemory(blocks); } /* Layer and mask block. */ length=GetPSDSize(&psd_info,image); if (length == 8) { length=ReadBlobMSBLong(image); length=ReadBlobMSBLong(image); } offset=TellBlob(image); skip_layers=MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (psd_info.has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " read composite only"); skip_layers=MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers, exception) != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } /* Skip the rest of the layer and mask information. */ (void) SeekBlob(image,offset+length,SEEK_SET); } /* If we are only "pinging" the image, then we're done - so return. */ if (EOFBlob(image) != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); } if (image_info->ping != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* Read the precombined layer, present for PSD < 4 compatibility. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading the precombined layer"); image_list_length=GetImageListLength(image); if ((psd_info.has_merged_image != MagickFalse) || (image_list_length == 1)) psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage( image_info,image,&psd_info,exception); if ((psd_info.has_merged_image == MagickFalse) && (image_list_length == 1) && (length != 0)) { (void) SeekBlob(image,offset,SEEK_SET); status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse, exception); if (status != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } image_list_length=GetImageListLength(image); } if (psd_info.has_merged_image == MagickFalse) { Image *merged; if (image_list_length == 1) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); } image->background_color.alpha=(MagickRealType) TransparentAlpha; image->background_color.alpha_trait=BlendPixelTrait; (void) SetImageBackgroundColor(image,exception); merged=MergeImageLayers(image,FlattenLayer,exception); if (merged == (Image *) NULL) { (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } ReplaceImageInList(&image,merged); } if (profile != (StringInfo *) NULL) { Image *next; i=0; next=image; while (next != (Image *) NULL) { if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse) (void) SetImageProfile(next,GetStringInfoName(profile),profile, exception); next=next->next; } profile=DestroyStringInfo(profile); } (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPSDImage() adds properties for the PSD image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
% % The format of the RegisterPSDImage method is: % % size_t RegisterPSDImage(void) % */ ModuleExport size_t RegisterPSDImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPSDImage() removes format registrations made by the % PSD module from the list of supported formats. % % The format of the UnregisterPSDImage method is: % % UnregisterPSDImage(void) % */ ModuleExport void UnregisterPSDImage(void) { (void) UnregisterMagickInfo("PSB"); (void) UnregisterMagickInfo("PSD"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePSDImage() writes an image in the Adobe Photoshop encoded image format. % % The format of the WritePSDImage method is: % % MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % % o exception: return any errors or warnings in this structure. 
% */ static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image, const size_t offset) { if (psd_info->version == 1) return(WriteBlobMSBShort(image,(unsigned short) offset)); return(WriteBlobMSBLong(image,(unsigned int) offset)); } static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset=TellBlob(image); (void) SeekBlob(image,offset,SEEK_SET); if (psd_info->version == 1) result=WriteBlobMSBShort(image,(unsigned short) size); else result=WriteBlobMSBLong(image,(unsigned int) size); (void) SeekBlob(image,current_offset,SEEK_SET); return(result); } static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size) { if (psd_info->version == 1) return(WriteBlobLong(image,(unsigned int) size)); return(WriteBlobLongLong(image,size)); } static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset=TellBlob(image); (void) SeekBlob(image,offset,SEEK_SET); result=SetPSDSize(psd_info,image,size); (void) SeekBlob(image,current_offset,SEEK_SET); return(result); } static size_t PSDPackbitsEncodeImage(Image *image,const size_t length, const unsigned char *pixels,unsigned char *compact_pixels, ExceptionInfo *exception) { int count; ssize_t i, j; unsigned char *q; unsigned char *packbits; /* Compress pixels with Packbits encoding. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pixels != (unsigned char *) NULL); assert(compact_pixels != (unsigned char *) NULL); packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits)); if (packbits == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); q=compact_pixels; for (i=(ssize_t) length; i != 0; ) { switch (i) { case 1: { i--; *q++=(unsigned char) 0; *q++=(*pixels); break; } case 2: { i-=2; *q++=(unsigned char) 1; *q++=(*pixels); *q++=pixels[1]; break; } case 3: { i-=3; if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { *q++=(unsigned char) ((256-3)+1); *q++=(*pixels); break; } *q++=(unsigned char) 2; *q++=(*pixels); *q++=pixels[1]; *q++=pixels[2]; break; } default: { if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { /* Packed run. */ count=3; while (((ssize_t) count < i) && (*pixels == *(pixels+count))) { count++; if (count >= 127) break; } i-=count; *q++=(unsigned char) ((256-count)+1); *q++=(*pixels); pixels+=count; break; } /* Literal run. 
*/ count=0; while ((*(pixels+count) != *(pixels+count+1)) || (*(pixels+count+1) != *(pixels+count+2))) { packbits[count+1]=pixels[count]; count++; if (((ssize_t) count >= (i-3)) || (count >= 127)) break; } i-=count; *packbits=(unsigned char) (count-1); for (j=0; j <= (ssize_t) count; j++) *q++=packbits[j]; pixels+=count; break; } } } *q++=(unsigned char) 128; /* EOD marker */ packbits=(unsigned char *) RelinquishMagickMemory(packbits); return((size_t) (q-compact_pixels)); } static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image, const Image *next_image,const CompressionType compression, const ssize_t channels) { size_t length; ssize_t i, y; if (compression == RLECompression) { length=(size_t) WriteBlobShort(image,RLE); for (i=0; i < channels; i++) for (y=0; y < (ssize_t) next_image->rows; y++) length+=SetPSDOffset(psd_info,image,0); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) length=(size_t) WriteBlobShort(image,ZipWithoutPrediction); #endif else length=(size_t) WriteBlobShort(image,Raw); return(length); } static size_t WritePSDChannel(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, const QuantumType quantum_type, unsigned char *compact_pixels, MagickOffsetType size_offset,const MagickBooleanType separate, const CompressionType compression,ExceptionInfo *exception) { MagickBooleanType monochrome; QuantumInfo *quantum_info; const Quantum *p; ssize_t i; size_t count, length; ssize_t y; unsigned char *pixels; #ifdef MAGICKCORE_ZLIB_DELEGATE int flush, level; unsigned char *compressed_pixels; z_stream stream; compressed_pixels=(unsigned char *) NULL; flush=Z_NO_FLUSH; #endif count=0; if (separate != MagickFalse) { size_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,compression,1); } if (next_image->depth > 8) next_image->depth=16; monochrome=IsImageMonochrome(image) && (image->depth == 1) ? 
MagickTrue : MagickFalse; quantum_info=AcquireQuantumInfo(image_info,next_image); if (quantum_info == (QuantumInfo *) NULL) return(0); pixels=(unsigned char *) GetQuantumPixels(quantum_info); #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { compressed_pixels=(unsigned char *) AcquireQuantumMemory( MagickMinBufferExtent,sizeof(*compressed_pixels)); if (compressed_pixels == (unsigned char *) NULL) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; level=Z_DEFAULT_COMPRESSION; if ((image_info->quality > 0 && image_info->quality < 10)) level=(int) image_info->quality; if (deflateInit(&stream,level) != Z_OK) { quantum_info=DestroyQuantumInfo(quantum_info); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); return(0); } } #endif for (y=0; y < (ssize_t) next_image->rows; y++) { p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (monochrome != MagickFalse) for (i=0; i < (ssize_t) length; i++) pixels[i]=(~pixels[i]); if (compression == RLECompression) { length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels, exception); count+=WriteBlob(image,length,compact_pixels); size_offset+=WritePSDOffset(psd_info,image,length,size_offset); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) { stream.avail_in=(uInt) length; stream.next_in=(Bytef *) pixels; if (y == (ssize_t) next_image->rows-1) flush=Z_FINISH; do { stream.avail_out=(uInt) MagickMinBufferExtent; stream.next_out=(Bytef *) compressed_pixels; if (deflate(&stream,flush) == Z_STREAM_ERROR) break; length=(size_t) MagickMinBufferExtent-stream.avail_out; if (length > 0) count+=WriteBlob(image,length,compressed_pixels); } while (stream.avail_out == 0); } #endif else count+=WriteBlob(image,length,pixels); } #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { (void) deflateEnd(&stream); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); } #endif quantum_info=DestroyQuantumInfo(quantum_info); return(count); } static unsigned char *AcquireCompactPixels(const Image *image, ExceptionInfo *exception) { size_t packet_size; unsigned char *compact_pixels; packet_size=image->depth > 8UL ? 
2UL : 1UL; compact_pixels=(unsigned char *) AcquireQuantumMemory((9* image->columns)+1,packet_size*sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); } return(compact_pixels); } static size_t WritePSDChannels(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, MagickOffsetType size_offset,const MagickBooleanType separate, ExceptionInfo *exception) { CompressionType compression; Image *mask; MagickOffsetType rows_offset; size_t channels, count, length, offset_length; unsigned char *compact_pixels; count=0; offset_length=0; rows_offset=0; compact_pixels=(unsigned char *) NULL; compression=next_image->compression; if (image_info->compression != UndefinedCompression) compression=image_info->compression; if (compression == RLECompression) { compact_pixels=AcquireCompactPixels(next_image,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } channels=1; if (separate == MagickFalse) { if ((next_image->storage_class != PseudoClass) || (IsImageGray(next_image) != MagickFalse)) { if (IsImageGray(next_image) == MagickFalse) channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 : 3); if (next_image->alpha_trait != UndefinedPixelTrait) channels++; } rows_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,compression, (ssize_t) channels); offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4)); } size_offset+=2; if ((next_image->storage_class == PseudoClass) && (IsImageGray(next_image) == MagickFalse)) { length=WritePSDChannel(psd_info,image_info,image,next_image, IndexQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (IsImageGray(next_image) != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, GrayQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); length=WritePSDChannel(psd_info,image_info,image,next_image, RedQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, GreenQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, BlueQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; if (next_image->colorspace == CMYKColorspace) { length=WritePSDChannel(psd_info,image_info,image,next_image, BlackQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } if (next_image->alpha_trait != 
UndefinedPixelTrait) { length=WritePSDChannel(psd_info,image_info,image,next_image, AlphaQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); if (separate != MagickFalse) { const char *property; property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, exception); if (mask != (Image *) NULL) { if (compression == RLECompression) { compact_pixels=AcquireCompactPixels(mask,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } length=WritePSDChannel(psd_info,image_info,image,mask, RedQuantum,compact_pixels,rows_offset,MagickTrue,compression, exception); (void) WritePSDSize(psd_info,image,length,size_offset); count+=length; compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); } } } return(count); } static size_t WritePascalString(Image *image,const char *value,size_t padding) { size_t count, length; ssize_t i; /* Max length is 255. */ count=0; length=(strlen(value) > 255UL ) ? 255UL : strlen(value); if (length == 0) count+=WriteBlobByte(image,0); else { count+=WriteBlobByte(image,(unsigned char) length); count+=WriteBlob(image,length,(const unsigned char *) value); } length++; if ((length % padding) == 0) return(count); for (i=0; i < (ssize_t) (padding-(length % padding)); i++) count+=WriteBlobByte(image,0); return(count); } static void WriteResolutionResourceBlock(Image *image) { double x_resolution, y_resolution; unsigned short units; if (image->units == PixelsPerCentimeterResolution) { x_resolution=2.54*65536.0*image->resolution.x+0.5; y_resolution=2.54*65536.0*image->resolution.y+0.5; units=2; } else { x_resolution=65536.0*image->resolution.x+0.5; y_resolution=65536.0*image->resolution.y+0.5; units=1; } (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x03ED); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,16); /* resource size */ (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */ (void) WriteBlobMSBShort(image,units); /* width unit */ (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */ (void) WriteBlobMSBShort(image,units); /* height unit */ } static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image, const signed short channel) { size_t count; count=(size_t) WriteBlobShort(image,(const unsigned short) channel); count+=SetPSDSize(psd_info,image,0); return(count); } static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile) { const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { unsigned char *q; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); if (id == 0x0000040f) { 
ssize_t quantum; quantum=PSDQuantum(count)+12; if ((quantum >= 12) && (quantum < (ssize_t) length)) { if ((q+quantum < (datum+length-16))) (void) memmove(q,q+quantum,length-quantum-(q-datum)); SetStringInfoLength(bim_profile,length-quantum); } break; } p+=count; if ((count & 0x01) != 0) p++; } } static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile) { const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { unsigned char *q; ssize_t cnt; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) return; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); cnt=PSDQuantum(count); if (cnt < 0) return; if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) && ((ssize_t) length-(cnt+12)-(q-datum)) > 0) { (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum)); SetStringInfoLength(bim_profile,length-(cnt+12)); break; } p+=count; if ((count & 0x01) != 0) p++; } } static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { #define PSDKeySize 5 #define PSDAllowedLength 36 char key[PSDKeySize]; /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */ const char allowed[PSDAllowedLength][PSDKeySize] = { "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA" }, *option; const StringInfo *info; MagickBooleanType found; size_t i; size_t remaining_length, length; StringInfo *profile; unsigned char *p; unsigned int size; info=GetImageProfile(image,"psd:additional-info"); if (info == (const StringInfo *) NULL) return((const StringInfo *) NULL); option=GetImageOption(image_info,"psd:additional-info"); if (LocaleCompare(option,"all") == 0) return(info); if (LocaleCompare(option,"selective") != 0) { profile=RemoveImageProfile(image,"psd:additional-info"); return(DestroyStringInfo(profile)); } length=GetStringInfoLength(info); p=GetStringInfoDatum(info); remaining_length=length; length=0; while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(char) (*p++); key[1]=(char) (*p++); key[2]=(char) (*p++); key[3]=(char) (*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) return((const StringInfo *) NULL); found=MagickFalse; for (i=0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0) continue; found=MagickTrue; break; } remaining_length-=(size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p=(unsigned char *) memmove(p-12,p+size,remaining_length); continue; } length+=(size_t) size+12; p+=size; } profile=RemoveImageProfile(image,"psd:additional-info"); if (length == 0) return(DestroyStringInfo(profile)); SetStringInfoLength(profile,(const size_t) length); (void) SetImageProfile(image,"psd:additional-info",info,exception); return(profile); } static MagickBooleanType WritePSDLayersInternal(Image 
*image, const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size, ExceptionInfo *exception) { char layer_name[MagickPathExtent]; const char *property; const StringInfo *info; Image *base_image, *next_image; MagickBooleanType status; MagickOffsetType *layer_size_offsets, size_offset; ssize_t i; size_t layer_count, layer_index, length, name_length, rounded_size, size; status=MagickTrue; base_image=GetNextImageInList(image); if (base_image == (Image *) NULL) base_image=image; size=0; size_offset=TellBlob(image); (void) SetPSDSize(psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); } if (image->alpha_trait != UndefinedPixelTrait) size+=WriteBlobShort(image,-(unsigned short) layer_count); else size+=WriteBlobShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for (next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception); default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0); } size+=WriteBlobSignedLong(image,(signed int) next_image->page.y); size+=WriteBlobSignedLong(image,(signed int) next_image->page.x); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+ next_image->rows)); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+ next_image->columns)); channels=1; if ((next_image->storage_class != PseudoClass) && (IsImageGray(next_image) == MagickFalse)) channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 : 3); total_channels=channels; if (next_image->alpha_trait != UndefinedPixelTrait) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image); for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(psd_info,image,(signed short) i); if (next_image->alpha_trait != UndefinedPixelTrait) size+=WriteChannelSize(psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(psd_info,image,-2); size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM"); size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,(const unsigned char) (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. 
*/ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image,exception); property=(const char *) GetImageProperty(next_image,"label",exception); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g", (double) layer_index); property=layer_name; } name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobLong(image,20); size+=WriteBlobSignedLong(image,(const signed int) mask->page.y); size+=WriteBlobSignedLong(image,(const signed int) mask->page.x); size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+ mask->page.y)); size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+ mask->page.x)); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,(const unsigned char) (mask->compose == NoCompositeOp ? 2 : 0)); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info), GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! */ next_image=base_image; layer_index=0; while (next_image != NULL) { length=WritePSDChannels(psd_info,image_info,image,next_image, layer_size_offsets[layer_index++],MagickTrue,exception); if (length == 0) { status=MagickFalse; break; } size+=length; next_image=GetNextImageInList(next_image); } /* Write the total size */ if (layers_size != (size_t*) NULL) *layers_size=size; if ((size/2) != ((size+1)/2)) rounded_size=size+1; else rounded_size=size; (void) WritePSDSize(psd_info,image,rounded_size,size_offset); layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* Remove the opacity mask from the registry */ next_image=base_image; while (next_image != (Image *) NULL) { property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) (void) DeleteImageRegistry(property); next_image=GetNextImageInList(next_image); } return(status); } ModuleExport MagickBooleanType WritePSDLayers(Image * image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { PolicyDomain domain; PolicyRights rights; domain=CoderPolicyDomain; rights=WritePolicyRights; if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse) return(MagickTrue); return WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL, exception); } static MagickBooleanType WritePSDImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { const StringInfo *icc_profile; MagickBooleanType status; PSDInfo psd_info; ssize_t i; size_t length, num_channels, packet_size; StringInfo *bim_profile; /* Open image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); packet_size=(size_t) (image->depth > 8 ? 6 : 3); if (image->alpha_trait != UndefinedPixelTrait) packet_size+=image->depth > 8 ? 2 : 1; psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ /* When the image has a color profile it won't be converted to gray scale */ if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) && (SetImageGray(image,exception) != MagickFalse)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType) && (image->storage_class == PseudoClass)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass,exception); if (image->colorspace != CMYKColorspace) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL); else num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsImageGray(image) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsImageMonochrome(image) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace,exception); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace,exception); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsImageGray(image) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. 
*/ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].red))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].green))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].blue))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. */ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } if (status != MagickFalse) { MagickOffsetType size_offset; size_t size; size_offset=TellBlob(image); (void) SetPSDSize(&psd_info,image,0); status=WritePSDLayersInternal(image,image_info,&psd_info,&size, exception); size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 8 : 12),size_offset); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression=image->compression; if (image_info->compression != UndefinedCompression) image->compression=image_info->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse, exception) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
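/*
  Illustrative sketch (not part of the coder above): a minimal PackBits
  decoder, i.e. the inverse of the run-length scheme that
  PSDPackbitsEncodeImage() emits.  A header byte n in [0,127] is followed by
  n+1 literal bytes; a header byte n in [129,255] (a negative signed char) is
  followed by one byte that is repeated 257-n times; 128 is treated here as a
  no-op (the encoder above writes it as an end-of-data marker).  The function
  name and the buffer parameters are hypothetical.
*/
#include <stddef.h>

static size_t UnpackbitsSketch(const unsigned char *src,size_t src_length,
  unsigned char *dst,size_t dst_length)
{
  size_t
    i = 0,
    j = 0;

  while (i < src_length)
  {
    unsigned char header = src[i++];
    if (header == 128)
      continue;                              /* no-op / end-of-data marker */
    if (header < 128)
      {
        size_t count = (size_t) header+1;    /* copy count literal bytes */
        while ((count-- != 0) && (i < src_length) && (j < dst_length))
          dst[j++]=src[i++];
      }
    else
      {
        size_t count = 257-(size_t) header;  /* repeat the next byte */
        if (i >= src_length)
          break;
        while ((count-- != 0) && (j < dst_length))
          dst[j++]=src[i];
        i++;
      }
  }
  return(j);                                 /* number of bytes produced */
}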
gemm.c
#include "gemm.h" #include "utils.h" #include "cuda.h" #include <stdlib.h> #include <stdio.h> #include <math.h> void gemm_bin(int M, int N, int K, float ALPHA, char *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ char A_PART = A[i*lda+k]; if(A_PART){ for(j = 0; j < N; ++j){ C[i*ldc+j] += B[k*ldb+j]; } } else { for(j = 0; j < N; ++j){ C[i*ldc+j] -= B[k*ldb+j]; } } } } } float *random_matrix(int rows, int cols) { int i; float *m = calloc(rows*cols, sizeof(float)); for(i = 0; i < rows*cols; ++i){ m[i] = (float)rand()/RAND_MAX; } return m; } void time_random_matrix(int TA, int TB, int m, int k, int n) { float *a; if(!TA) a = random_matrix(m,k); else a = random_matrix(k,m); int lda = (!TA)?k:m; float *b; if(!TB) b = random_matrix(k,n); else b = random_matrix(n,k); int ldb = (!TB)?n:k; float *c = random_matrix(m,n); int i; clock_t start = clock(), end; for(i = 0; i<10; ++i){ gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); } end = clock(); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC); free(a); free(b); free(c); } void gemm(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc); } void gemm_nn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; #pragma omp parallel for for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ register float A_PART = ALPHA*A[i*lda+k]; for(j = 0; j < N; ++j){ C[i*ldc+j] += A_PART*B[k*ldb+j]; } } } } void gemm_nt(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; #pragma omp parallel for for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ register float sum = 0; for(k = 0; k < K; ++k){ sum += ALPHA*A[i*lda+k]*B[j*ldb + k]; } C[i*ldc+j] += sum; } } } void gemm_tn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; #pragma omp parallel for for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ register float A_PART = ALPHA*A[k*lda+i]; for(j = 0; j < N; ++j){ C[i*ldc+j] += A_PART*B[k*ldb+j]; } } } } void gemm_tt(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; #pragma omp parallel for for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ register float sum = 0; for(k = 0; k < K; ++k){ sum += ALPHA*A[i+k*lda]*B[k+j*ldb]; } C[i*ldc+j] += sum; } } } void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc); int i, j; for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ if(C[i*ldc + j] != 0.0f) printf("Vall C %f\n", C[i*ldc + j]); C[i*ldc + j] *= BETA; } } if(!TA && !TB) gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); else if(TA && !TB) gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); else if(!TA && TB) gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); else gemm_tt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); } #ifdef GPU extern void save_c_matrix_gpu(int M, int N, int K, float* C); #include <math.h> void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A_gpu, int lda, float *B_gpu, int ldb, float BETA, float *C_gpu, int ldc) { cublasHandle_t handle = blas_handle(); cudaError_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N), (TA ? 
CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc); check_error(status); // save_c_matrix_gpu(M, N, K, C_gpu); } #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> void time_gpu_random_matrix(int TA, int TB, int m, int k, int n) { float *a; if(!TA) a = random_matrix(m,k); else a = random_matrix(k,m); int lda = (!TA)?k:m; float *b; if(!TB) b = random_matrix(k,n); else b = random_matrix(n,k); int ldb = (!TB)?n:k; float *c = random_matrix(m,n); int i; clock_t start = clock(), end; for(i = 0; i<32; ++i){ gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); } end = clock(); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC); free(a); free(b); free(c); } void time_gpu(int TA, int TB, int m, int k, int n) { int iter = 10; float *a = random_matrix(m,k); float *b = random_matrix(k,n); int lda = (!TA)?k:m; int ldb = (!TB)?n:k; float *c = random_matrix(m,n); float *a_cl = cuda_make_array(a, m*k); float *b_cl = cuda_make_array(b, k*n); float *c_cl = cuda_make_array(c, m*n); int i; clock_t start = clock(), end; for(i = 0; i<iter; ++i){ gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n); cudaThreadSynchronize(); } double flop = ((double)m)*n*(2.*k + 2.)*iter; double gflop = flop/pow(10., 9); end = clock(); double seconds = sec(end-start); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds); cuda_free(a_cl); cuda_free(b_cl); cuda_free(c_cl); free(a); free(b); free(c); } void test_gpu_accuracy(int TA, int TB, int m, int k, int n) { srand(0); float *a; if(!TA) a = random_matrix(m,k); else a = random_matrix(k,m); int lda = (!TA)?k:m; float *b; if(!TB) b = random_matrix(k,n); else b = random_matrix(n,k); int ldb = (!TB)?n:k; float *c = random_matrix(m,n); float *c_gpu = random_matrix(m,n); memset(c, 0, m*n*sizeof(float)); memset(c_gpu, 0, m*n*sizeof(float)); int i; //pm(m,k,b); gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n); //printf("GPU\n"); //pm(m, n, c_gpu); gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); //printf("\n\nCPU\n"); //pm(m, n, c); double sse = 0; for(i = 0; i < m*n; ++i) { //printf("%f %f\n", c[i], c_gpu[i]); sse += pow(c[i]-c_gpu[i], 2); } printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n)); free(a); free(b); free(c); free(c_gpu); } int test_gpu_blas() { /* test_gpu_accuracy(0,0,10,576,75); test_gpu_accuracy(0,0,17,10,10); test_gpu_accuracy(1,0,17,10,10); test_gpu_accuracy(0,1,17,10,10); test_gpu_accuracy(1,1,17,10,10); test_gpu_accuracy(0,0,1000,10,100); test_gpu_accuracy(1,0,1000,10,100); test_gpu_accuracy(0,1,1000,10,100); test_gpu_accuracy(1,1,1000,10,100); test_gpu_accuracy(0,0,10,10,10); time_gpu(0,0,64,2916,363); time_gpu(0,0,64,2916,363); time_gpu(0,0,64,2916,363); time_gpu(0,0,192,729,1600); time_gpu(0,0,384,196,1728); time_gpu(0,0,256,196,3456); time_gpu(0,0,256,196,2304); time_gpu(0,0,128,4096,12544); time_gpu(0,0,128,4096,4096); */ time_gpu(0,0,64,75,12544); time_gpu(0,0,64,75,12544); time_gpu(0,0,64,75,12544); time_gpu(0,0,64,576,12544); time_gpu(0,0,256,2304,784); time_gpu(1,1,2304,256,784); time_gpu(0,0,512,4608,196); time_gpu(1,1,4608,512,196); return 0; } #endif
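/*
  Illustrative usage sketch (not part of the original gemm.c above): a small,
  self-contained example of the row-major calling convention used by gemm()
  and gemm_cpu().  M, N and K are the output rows, output columns and inner
  dimension; lda, ldb and ldc are the row strides, i.e. the number of columns
  of A, B and C when untransposed.  The helper name gemm_example is
  hypothetical, and in this instrumented copy gemm_cpu() also prints a debug
  trace when called.
*/
static void gemm_example(void)
{
    float A[2*3] = { 1, 2, 3,
                     4, 5, 6 };        /* 2x3 matrix, lda = 3 */
    float B[3*2] = { 7,  8,
                     9, 10,
                    11, 12 };          /* 3x2 matrix, ldb = 2 */
    float C[2*2] = { 0, 0, 0, 0 };     /* 2x2 result,  ldc = 2 */

    /* C = 1.0*A*B + 0.0*C; TA = TB = 0 means neither input is transposed. */
    gemm(0, 0, 2, 2, 3, 1, A, 3, B, 2, 0, C, 2);

    /* Expected result: 58 64 / 139 154 */
    printf("%g %g\n%g %g\n", C[0], C[1], C[2], C[3]);
}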
GB_binop__ne_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ne_uint32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__ne_uint32) // A.*B function (eWiseMult): GB (_AemultB_03__ne_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_uint32) // A*D function (colscale): GB (_AxD__ne_uint32) // D*A function (rowscale): GB (_DxB__ne_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__ne_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__ne_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_uint32) // C=scalar+B GB (_bind1st__ne_uint32) // C=scalar+B' GB (_bind1st_tran__ne_uint32) // C=A+scalar GB (_bind2nd__ne_uint32) // C=A'+scalar GB (_bind2nd_tran__ne_uint32) // C type: bool // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x != y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_NE || GxB_NO_UINT32 || GxB_NO_NE_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__ne_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ne_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ne_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ne_uint32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ne_uint32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ne_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) 
; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__ne_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ne_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__ne_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ne_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ne_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = Bx [p] ; Cx [p] = (x != bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ne_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = Ax [p] ; Cx [p] = (aij != y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (x != aij) ; \ } GrB_Info GB (_bind1st_tran__ne_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (aij != y) ; \ } GrB_Info GB (_bind2nd_tran__ne_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
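Each generated GB_binop file above pins the operand types and the GB_BINOP macro, then pulls in shared templates (GB_emult_02_template.c, GB_add_template.c, ...) that expand into type-specialized loops. The sketch below is a simplified, hypothetical stand-in for that pattern, not the actual SuiteSparse:GraphBLAS templates.

/* Hedged illustration of the code-generation pattern above: the per-operator
   file fixes the types and GB_BINOP, and a shared template expands into a
   specialized loop.  apply_binop is a toy stand-in for the real templates. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define GB_ATYPE uint32_t
#define GB_BTYPE uint32_t
#define GB_CTYPE bool
#define GB_BINOP(z, x, y) z = (x != y)

/* Apply the binary op elementwise over dense arrays. */
static void apply_binop (GB_CTYPE *Cx, const GB_ATYPE *Ax,
                         const GB_BTYPE *Bx, int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++)
    {
        GB_ATYPE aij = Ax [p] ;
        GB_BTYPE bij = Bx [p] ;
        GB_BINOP (Cx [p], aij, bij) ;
    }
}

int main (void)
{
    uint32_t Ax [4] = { 1, 2, 3, 4 } ;
    uint32_t Bx [4] = { 1, 0, 3, 7 } ;
    bool     Cx [4] ;
    apply_binop (Cx, Ax, Bx, 4) ;
    for (int p = 0 ; p < 4 ; p++)
        printf ("%d ", (int) Cx [p]) ;   /* prints: 0 1 0 1 */
    printf ("\n") ;
    return 0 ;
}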
GB_binop__le_bool.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__le_bool) // A.*B function (eWiseMult): GB (_AemultB_08__le_bool) // A.*B function (eWiseMult): GB (_AemultB_02__le_bool) // A.*B function (eWiseMult): GB (_AemultB_04__le_bool) // A.*B function (eWiseMult): GB (_AemultB_bitmap__le_bool) // A*D function (colscale): GB (_AxD__le_bool) // D*A function (rowscale): GB (_DxB__le_bool) // C+=B function (dense accum): GB (_Cdense_accumB__le_bool) // C+=b function (dense accum): GB (_Cdense_accumb__le_bool) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_bool) // C=scalar+B GB (_bind1st__le_bool) // C=scalar+B' GB (_bind1st_tran__le_bool) // C=A+scalar GB (_bind2nd__le_bool) // C=A'+scalar GB (_bind2nd_tran__le_bool) // C type: bool // A type: bool // B,b type: bool // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ bool #define GB_BTYPE \ bool #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ bool aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ bool bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LE || GxB_NO_BOOL || GxB_NO_LE_BOOL) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__le_bool) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__le_bool) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__le_bool) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type bool bool bwork = (*((bool *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__le_bool) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__le_bool) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__le_bool) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__le_bool) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__le_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__le_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__le_bool) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__le_bool) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; bool x = (*((bool *) x_input)) ; bool *Bx = (bool *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; bool bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__le_bool) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; bool *Ax = (bool *) Ax_input ; bool y = (*((bool *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; bool aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__le_bool) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // 
the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ bool #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool x = (*((const bool *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ bool } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__le_bool) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool y = (*((const bool *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
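For context, kernels such as GB (_AaddB__le_bool) above are what SuiteSparse:GraphBLAS can dispatch to when user code applies the built-in GrB_LE_BOOL operator element-wise. The example below is a hedged sketch of that user-level path; it assumes the standard GraphBLAS C API (polymorphic GrB_eWiseAdd, GrB_Matrix_setElement_BOOL), and exact names should be checked against GraphBLAS.h.

/* Hedged user-level sketch: an eWiseAdd with the built-in GrB_LE_BOOL
   operator, which SuiteSparse:GraphBLAS may service with a generated kernel
   such as GB (_AaddB__le_bool) above, unless GB_DISABLE compiled it out.
   Link with -lgraphblas. */
#include <stdio.h>
#include <stdbool.h>
#include "GraphBLAS.h"

int main (void)
{
    GrB_init (GrB_NONBLOCKING) ;

    GrB_Matrix A, B, C ;
    GrB_Matrix_new (&A, GrB_BOOL, 2, 2) ;
    GrB_Matrix_new (&B, GrB_BOOL, 2, 2) ;
    GrB_Matrix_new (&C, GrB_BOOL, 2, 2) ;

    GrB_Matrix_setElement_BOOL (A, true,  0, 0) ;
    GrB_Matrix_setElement_BOOL (A, false, 1, 1) ;
    GrB_Matrix_setElement_BOOL (B, false, 0, 0) ;
    GrB_Matrix_setElement_BOOL (B, true,  1, 1) ;

    /* C(i,j) = (A(i,j) <= B(i,j)) over the union of the two patterns */
    GrB_eWiseAdd (C, NULL, NULL, GrB_LE_BOOL, A, B, NULL) ;

    bool c00, c11 ;
    GrB_Matrix_extractElement_BOOL (&c00, C, 0, 0) ;
    GrB_Matrix_extractElement_BOOL (&c11, C, 1, 1) ;
    printf ("C(0,0)=%d C(1,1)=%d\n", c00, c11) ;   /* expect 0 and 1 */

    GrB_free (&A) ; GrB_free (&B) ; GrB_free (&C) ;
    GrB_finalize ( ) ;
    return 0 ;
}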
vector.h
/* Copyright 2015 The math21 Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #pragma once #include "inner.h" #include "arithmetic.h" namespace math21 { // B=A+k template<typename VecType1, typename VecType2> void math21_operator_container_add_A_k(const VecType1 &A, NumR k, VecType2 &B) { MATH21_ASSERT(A.size() == B.size(), "vector size doesn't match"); NumN i; NumN n = A.size(); //#pragma omp parallel for for (i = 1; i <= n; ++i) { B(i) = A(i) + k; } } // B=A-k template<typename VecType1, typename T, template<typename> class Container> void math21_operator_container_subtract_A_k(const VecType1 &A, NumR k, Container<T> &B) { MATH21_ASSERT(A.size() == B.size(), "vector size doesn't match"); NumN i; NumN n = A.size(); //#pragma omp parallel for for (i = 1; i <= n; ++i) { B(i) = static_cast<T>(A(i) - k); } } // A=A-k template<typename VecType> void math21_operator_container_subtract_A_k_to(VecType &A, NumR k) { math21_operator_container_subtract_A_k(A, k, A); } // B=k-A template<typename VecType1, typename VecType2> void math21_operator_container_subtract_k_A(NumR k, const VecType1 &A, VecType2 &B) { MATH21_ASSERT(A.size() == B.size(), "vector size doesn't match"); NumN i; NumN n = A.size(); //#pragma omp parallel for for (i = 1; i <= n; ++i) { B(i) = k - A(i); } } // B=k-A template<typename VecType> void math21_operator_container_subtract_k_A_to(NumR k, VecType &A) { math21_operator_container_subtract_k_A(k, A, A); } // C=A-B template<typename VecType1, typename VecType2, typename VecType3> void math21_operator_container_subtract_to_C(const VecType1 &A, const VecType2 &B, VecType3 &C) { MATH21_ASSERT(A.size() == B.size(), "vector size doesn't match"); MATH21_ASSERT(A.size() == C.size(), "vector size doesn't match"); NumN i; NumN n = A.size(); //#pragma omp parallel for for (i = 1; i <= n; ++i) { C(i) = A(i) - B(i); } MATH21_ASSERT_CHECK_VALUE_TMP(math21_operator_container_isfinite(C)) } // A=A-B template<typename VecType, typename VecType2> void math21_operator_container_subtract_to_A(VecType &A, const VecType2 &B) { math21_operator_container_subtract_to_C(A, B, A); } // B=A-B template<typename VecType, typename VecType2> void math21_operator_container_subtract_to_B(const VecType &A, VecType2 &B) { math21_operator_container_subtract_to_C(A, B, B); } template<typename VecType1, typename VecType2> void math21_operator_container_degree2radian(const VecType1 &A, VecType2 &B) { MATH21_ASSERT(A.size() == B.size(), "vector size doesn't match"); NumN i; NumN n = A.size(); //#pragma omp parallel for for (i = 1; i <= n; ++i) { B(i) = xjdegree2radian(A(i)); } } template<typename VecType1> void math21_operator_container_degree2radian_to(VecType1 &A) { math21_operator_container_degree2radian(A, A); } template<typename VecType1, typename VecType2> void math21_operator_container_radian2degree(const VecType1 &A, VecType2 &B) { MATH21_ASSERT(A.size() == B.size(), "vector size doesn't match"); NumN i; NumN n = A.size(); //#pragma omp 
parallel for for (i = 1; i <= n; ++i) { B(i) = xjradian2degree(A(i)); } } template<typename VecType1> void math21_operator_container_radian2degree_to(VecType1 &A) { math21_operator_container_radian2degree(A, A); } // B = power(A, p) template<typename VecType1, typename VecType2> void math21_operator_container_power(const VecType1 &A, VecType2 &B, NumR p) { MATH21_ASSERT(A.size() == B.size(), "vector size doesn't match"); NumN i; NumN n = A.size(); //#pragma omp parallel for for (i = 1; i <= n; ++i) { B(i) = xjpow(A(i), p); } } // B = power(A, p) template<typename VecType1> void math21_operator_container_power_to(VecType1 &A, NumR p) { math21_operator_container_power(A, A, p); } // C=A/B template<typename VecType1, typename VecType2, typename VecType3> void math21_operator_container_divide_to_C(const VecType1 &A, const VecType2 &B, VecType3 &C) { MATH21_ASSERT(A.size() == B.size(), "vector size doesn't match"); MATH21_ASSERT(A.size() == C.size(), "vector size doesn't match"); NumN i; NumN n = A.size(); //#pragma omp parallel for for (i = 1; i <= n; ++i) { MATH21_ASSERT(xjabs(B(i)) > MATH21_EPS, "divide zero!" << B(i)); C(i) = A(i) / B(i); } MATH21_ASSERT_CHECK_VALUE_TMP(math21_operator_container_isfinite(C)) } // A=A/B template<typename VecType, typename VecType2> void math21_operator_container_divide_to_A(VecType &A, const VecType2 &B) { math21_operator_container_divide_to_C(A, B, A); } // B=A/B template<typename VecType, typename VecType2> void math21_operator_container_divide_to_B(const VecType &A, VecType2 &B) { math21_operator_container_divide_to_C(A, B, B); } // C=A+B template<typename VecType1, typename VecType2, typename VecType3> void math21_operator_container_addToC(const VecType1 &A, const VecType2 &B, VecType3 &C) { MATH21_ASSERT(A.size() == B.size(), "vector size doesn't match"); MATH21_ASSERT(A.size() == C.size(), "vector size doesn't match"); NumN i; NumN n = A.size(); //#pragma omp parallel for for (i = 1; i <= n; ++i) { C(i) = A(i) + B(i); } MATH21_ASSERT_CHECK_VALUE_TMP(math21_operator_container_isfinite(C)) } // A=A+B template<typename VecType, typename VecType2> void math21_operator_container_addToA(VecType &A, const VecType2 &B) { math21_operator_container_addToC(A, B, A); } // B=A+B template<typename VecType, typename VecType2> void math21_operator_container_addToB(const VecType &A, VecType2 &B) { math21_operator_container_addToC(A, B, B); } // user should make sure that A(i) doesn't have type NumN when k<0. template<typename VecType> void math21_operator_container_letters(VecType &A, NumZ k = 1, NumN from = 0, NumN to = 0) { MATH21_ASSERT(!A.isEmpty()); NumN i; NumN n = A.size(); if (from == 0) { from = 1; } if (to == 0) { to = n; } MATH21_ASSERT(from >= 1 && from <= to && to <= n) for (i = from; i <= to; ++i) { A(i) = k; ++k; } } // user should make sure that A(i) doesn't have type NumN when k<0. template<typename VecType, typename T> void math21_operator_container_set_value(VecType &A, T k, T step, NumN from = 0, NumN to = 0) { MATH21_ASSERT(!A.isEmpty()); NumN i; NumN n = A.size(); if (from == 0) { from = 1; } if (to == 0) { to = n; } MATH21_ASSERT(from >= 1 && from <= to && to <= n) for (i = from; i <= to; ++i) { A(i) = k; k = k + step; } } // user should make sure that A(i) doesn't have type NumN when k<0. 
template<typename T, template<typename> class Container> void math21_operator_container_set_num(Container<T> &A, const T &k = 1, NumN from = 0, NumN to = 0) { MATH21_ASSERT(!A.isEmpty()); NumN i; NumN n = A.size(); if (from == 0) { from = 1; } if (to == 0) { to = n; } MATH21_ASSERT(from >= 1 && from <= to && to <= n) for (i = from; i <= to; ++i) { A(i) = k; } } // C=A*B template<typename VecType> void math21_operator_container_SchurProduct(const VecType &A, const VecType &B, VecType &C) { MATH21_ASSERT(A.size() == B.size(), "vector size doesn't match"); MATH21_ASSERT(A.size() == C.size(), "vector size doesn't match"); NumN i; NumN n = A.size(); //#pragma omp parallel for for (i = 1; i <= n; ++i) { C.at(i) = A(i) * B(i); } } // A=A*B template<typename VecType> void math21_operator_container_SchurProduct_to_A(VecType &A, const VecType &B) { math21_operator_container_SchurProduct(A, B, A); } // B=A*B template<typename VecType> void math21_operator_container_SchurProduct_to_B(const VecType &A, VecType &B) { math21_operator_container_SchurProduct(A, B, B); } // return first min value. template<typename T, template<typename> class Container> T math21_operator_container_min(const Container<T> &m) { NumN i; NumN n = m.size(); MATH21_ASSERT(n >= 1); NumN k = 1; for (i = 2; i <= n; ++i) { if (m(i) < m(k)) { k = i; } } return m(k); } template<typename T, template<typename> class Container> T math21_operator_container_max(const Container<T> &m) { NumN i; NumN n = m.size(); MATH21_ASSERT(n >= 1); NumN k = 1; for (i = 2; i <= n; ++i) { if (m(i) > m(k)) { k = i; } } return m(k); } template<typename VecType> NumN math21_operator_container_argmin(const VecType &m) { NumN i; NumN n = m.size(); MATH21_ASSERT(n >= 1); NumN k = 1; for (i = 2; i <= n; ++i) { if (m(i) < m(k)) { k = i; } } return k; } template<typename T, template<typename> class Container> NumN math21_operator_container_index(const Container<T> &m, const T &x) { NumN n = m.size(); for (NumN i = 1; i <= n; ++i) { if (m(i) == x) { return i; } } return 0; } template<typename T, template<typename> class Container> NumN math21_operator_container_arg(const Container<T> &m, const T &x) { return math21_operator_container_index(m, x); } // argmax from index k = 1 template<typename VecType> NumN math21_operator_container_argmax(const VecType &m, NumN k = 1) { NumN i; NumN n = m.size(); MATH21_ASSERT(k >= 1 && k <= n); for (i = k + 1; i <= n; ++i) { if (m(i) > m(k)) { k = i; } } return k; } //argmax every element of v. 
template<template<typename> class Container, typename VecType1, typename VecType2> void math21_operator_container_argmax(const Container<VecType1> &v, VecType2 &m) { MATH21_ASSERT(v.size() > 0, "v is empty"); MATH21_ASSERT(m.size() == v.size()); for (NumN i = 1; i <= v.size(); ++i) { m(i) = math21_operator_container_argmax(v(i)); } } template<typename T, template<typename> class Container> T math21_operator_container_multiply_some(const Container<T> &x, NumN n, NumN offset = 0) { if (n == 0) { return 0; } MATH21_ASSERT(offset + n <= x.size()); T sum = 1; //#pragma omp parallel for for (NumN i = 1; i <= n; ++i) { sum = sum * x(offset + i); } return sum; } template<typename T, template<typename> class Container> T math21_operator_container_multiply_all(const Container<T> &x) { return math21_operator_container_multiply_some(x, x.size()); } template<typename VecType> NumB math21_operator_container_isEqual(const VecType &x, const VecType &y, NumR epsilon = 0) { if (x.size() != y.size()) { return 0; } NumN n = x.size(); if (n == 0) { return 1; } for (NumN i = 1; i <= n; ++i) { if (!math21_point_isEqual(x(i), y(i), epsilon)) { return 0; } } return 1; } template<typename T, typename VecType> NumB math21_operator_container_isEqual_c_array(const VecType &x, const T *y, NumR epsilon = 0) { NumN n = x.size(); MATH21_ASSERT(n >= 1); NumR tmp; for (NumN i = 1; i <= n; ++i) { tmp = y[i - 1] - x(i); if (xjabs(tmp) > epsilon) { return 0; } } return 1; } template<typename VecType> NumB math21_operator_container_isEqual_num(const VecType &x, NumR k, NumR epsilon = 0) { NumN n = x.size(); MATH21_ASSERT(n >= 1); for (NumN i = 1; i <= n; ++i) { if (!math21_point_isEqual(x(i), k, epsilon)) { return 0; } } return 1; } template<typename VecType> NumB math21_operator_container_isEqualZero(const VecType &x, NumR epsilon = 0) { return math21_operator_container_isEqual_num(x, 0, epsilon); } template<typename VecType> NumB math21_operator_check_container_is_nan(const VecType &x) { NumN n = x.size(); for (NumN i = 1; i <= n; ++i) { if (x(i) != x(i)) { return 1; } } return 0; } template<typename VecType> NumR math21_operator_container_sum(const VecType &A, NumN n) { NumN i; NumR sum = 0; NumN size = A.size(); if (n == 1) { //#pragma omp parallel for for (i = 1; i <= size; ++i) sum += A(i); } else if (n == 2) { //#pragma omp parallel for for (i = 1; i <= size; ++i) sum += xjsquare(A(i)); sum = xjsqrt(sum); } else { MATH21_ASSERT(0, "norm other than 1, 2 not supported currently"); } MATH21_ASSERT_FINITE(math21_operator_isfinite(sum)) return sum; } template<typename VecType> NumR math21_operator_container_mean(const VecType &A) { MATH21_ASSERT(!A.isEmpty()); NumR sum = math21_operator_container_sum(A, 1); return sum / A.size(); } template<typename VecType> NumR math21_operator_container_norm(const VecType &A, NumN n) { NumN i; NumR sum = 0; NumN size = A.size(); if (n == 1) { //#pragma omp parallel for for (i = 1; i <= size; ++i) sum += xjabs(A(i)); } else if (n == 2) { //#pragma omp parallel for for (i = 1; i <= size; ++i) sum += xjsquare(A(i)); sum = xjsqrt(sum); } else { MATH21_ASSERT(0, "norm other than 1, 2 not supported currently"); } MATH21_ASSERT_FINITE(math21_operator_isfinite(sum)) return sum; } template<typename VecType1, typename VecType2> NumR math21_operator_container_distance(const VecType1 &A, const VecType2 &B, NumR n) { MATH21_ASSERT(A.size() == B.size(), "vector size doesn't match"); MATH21_ASSERT(n > 0); NumN i; NumR sum = 0; NumN size = A.size(); if (n == 1) { //#pragma omp parallel for for (i = 1; i <= 
size; ++i) sum += xjabs(A(i) - B(i)); } else if (n == 2) { //#pragma omp parallel for for (i = 1; i <= size; ++i) sum += xjsquare(A(i) - B(i)); sum = xjsqrt(sum); } else { for (i = 1; i <= size; ++i) sum += xjpow(xjabs(A(i) - B(i)), n); sum = xjpow(sum, 1 / n); } MATH21_ASSERT_FINITE(math21_operator_isfinite(sum)) return sum; } template<typename VecType> NumR math21_operator_container_InnerProduct(NumR k, const VecType &A, const VecType &B) { MATH21_ASSERT(A.size() == B.size()); if (k == 0) { return 0; } NumN i; NumN n = A.size(); NumR y = 0; //#pragma omp parallel for for (i = 1; i <= n; ++i) { y = y + (A(i) * B(i)); } y = y * k; MATH21_ASSERT_FINITE(math21_operator_isfinite(y)) return y; } template<typename VecType> void math21_operator_container_CrossProduct(const VecType &A, const VecType &B, VecType &C) { MATH21_ASSERT(A.size() == 3); MATH21_ASSERT(A.size() == B.size()); if (C.size() != 3) { C.setSize(3); } C(1) = A(2) * B(3) - A(3) * B(2); C(2) = A(3) * B(1) - A(1) * B(3); C(3) = A(1) * B(2) - A(2) * B(1); } // y = k*x + b template<typename VecType, typename VecType2> void math21_operator_container_linear_kx_b(NumR k, const VecType &x, NumR b, VecType2 &y) { MATH21_ASSERT(!x.isEmpty(), "empty matrix"); MATH21_ASSERT(x.size() == y.size()); NumN i; NumN n = x.size(); //#pragma omp parallel for for (i = 1; i <= n; ++i) { y(i) = k * x(i) + b; } } // x <- k*x + b template<typename VecType> void math21_operator_container_linear_kx_b_to(NumR k, VecType &x, NumR b) { math21_operator_container_linear_kx_b(k, x, b, x); } // C = k1*A template<typename VecType, typename T, template<typename> class Container> void math21_operator_container_linear(NumR k1, const VecType &A, Container<T> &C) { MATH21_ASSERT(!A.isEmpty(), "empty matrix"); MATH21_ASSERT(A.size() == C.size()); NumN i; NumN n = A.size(); //#pragma omp parallel for for (i = 1; i <= n; ++i) { C(i) = static_cast<T>(k1 * A(i)); } } // B = A/A.norm(n) template<typename VecType, typename VecType2> void math21_operator_container_normalize_to_B(const VecType &A, VecType2 &B, NumN n) { NumR k = math21_operator_container_norm(A, n); MATH21_ASSERT(!math21_operator_num_isEqual(k, 0)); math21_operator_container_linear(1 / k, A, B); } // A = A/A.norm(n) template<typename VecType> void math21_operator_container_normalize_to_A(VecType &A, NumN n) { math21_operator_container_normalize_to_B(A, A, n); } // A = k1*A template<typename VecType> void math21_operator_container_linear_to_A(NumR k1, VecType &A) { math21_operator_container_linear(k1, A, A); } // C = k1*A + k2*B template<typename VecType, typename VecType2, typename T, template<typename> class Container> void math21_operator_container_linear(NumR k1, const VecType &A, NumR k2, const VecType2 &B, Container<T> &C) { MATH21_ASSERT(!A.isEmpty(), "empty matrix"); MATH21_ASSERT(A.size() == B.size()); MATH21_ASSERT(A.size() == C.size()); NumN i; NumN n = A.size(); //#pragma omp parallel for for (i = 1; i <= n; ++i) { C(i) = static_cast<T>(k1 * A(i) + k2 * B(i)); } MATH21_ASSERT_CHECK_VALUE_TMP(math21_operator_container_isfinite(C)) } // E = k1*A + k2*B + k3*C + k4*D template<typename VecType, typename VecType2, typename VecType3, typename VecType4, typename VecType5> void math21_operator_container_linear_to_E( NumR k1, const VecType &A, NumR k2, const VecType2 &B, NumR k3, const VecType3 &C, NumR k4, const VecType4 &D, VecType5 &E) { MATH21_ASSERT(!A.isEmpty(), "empty matrix"); MATH21_ASSERT(A.size() == B.size()); MATH21_ASSERT(A.size() == C.size()); MATH21_ASSERT(A.size() == D.size()); 
MATH21_ASSERT(A.size() == E.size()); NumN i; NumN n = A.size(); //#pragma omp parallel for for (i = 1; i <= n; ++i) { E(i) = k1 * A(i) + k2 * B(i) + k3 * C(i) + k4 * D(i); } MATH21_ASSERT_CHECK_VALUE_TMP(math21_operator_container_isfinite(E)) } // A = k1*A + k2*B + k3*C + k4*D template<typename VecType, typename VecType2, typename VecType3, typename VecType4> void math21_operator_container_linear_to_A( NumR k1, VecType &A, NumR k2, const VecType2 &B, NumR k3, const VecType3 &C, NumR k4, const VecType4 &D) { math21_operator_container_linear_to_E(k1, A, k2, B, k3, C, k4, D, A); } // A = k1*A + k2*B template<typename VecType, typename VecType2> void math21_operator_container_linear_to_A(NumR k1, VecType &A, const NumR k2, const VecType2 &B) { math21_operator_container_linear(k1, A, k2, B, A); } // B = k1*A + k2*B template<typename VecType, typename VecType2> void math21_operator_container_linear_to_B(NumR k1, const VecType &A, NumR k2, VecType2 &B) { math21_operator_container_linear(k1, A, k2, B, B); } // B=|A| template<typename VecType1, typename VecType2> void math21_operator_container_abs(const VecType1 &A, VecType2 &B) { MATH21_ASSERT(A.size() == B.size(), "vector size doesn't match"); NumN i; NumN n = A.size(); //#pragma omp parallel for for (i = 1; i <= n; ++i) { B(i) = xjabs(A(i)); } } // B=|A| template<typename VecType1> void math21_operator_container_abs_to(VecType1 &A) { math21_operator_container_abs(A, A); } template<typename VecType, typename VecType2> void math21_operator_container_f_elementwise_unary(const VecType &x, VecType2 &y, NumR (*f)(const NumR &x)) { MATH21_ASSERT(!x.isEmpty(), "empty matrix"); MATH21_ASSERT(x.size() == y.size()); NumN i; NumN n = x.size(); //#pragma omp parallel for for (i = 1; i <= n; ++i) { y(i) = f(x(i)); } } template<typename VecType1, typename VecType2, typename VecType3, typename VecType4> void math21_operator_container_f_elementwise_binary(const VecType1 &x1, const VecType2 &x2, VecType3 &y, NumR (*f)(const NumR &x1, const NumR &x2), const VecType4 &mask) { MATH21_ASSERT(!x1.isEmpty(), "empty matrix"); MATH21_ASSERT(x1.size() == x2.size()); MATH21_ASSERT(x1.size() == y.size()); NumN i; NumN n = x1.size(); NumB isUseMask = 0; if (mask.size() == y.size()) { isUseMask = 1; } //#pragma omp parallel for for (i = 1; i <= n; ++i) { if (isUseMask) { if (mask(i)) { y(i) = f(x1(i), x2(i)); } } else { y(i) = f(x1(i), x2(i)); } } } template<typename VecType1, typename VecType2, typename VecType3, typename VecType> void math21_operator_container_f_elementwise_ternary(const VecType1 &x1, const VecType2 &x2, const VecType3 &x3, VecType &y, NumR (*f)(const NumR &x1, const NumR &x2, const NumR &x3)) { MATH21_ASSERT(!x1.isEmpty(), "empty matrix"); MATH21_ASSERT(x1.size() == x2.size()); MATH21_ASSERT(x1.size() == x3.size()); MATH21_ASSERT(x1.size() == y.size()); NumN i; NumN n = x1.size(); //#pragma omp parallel for for (i = 1; i <= n; ++i) { y(i) = f(x1(i), x2(i), x3(i)); } } template<typename VecType1, typename VecType2> void math21_operator_container_cdf_like(const VecType1 &x, VecType2 &y, NumB startZero) { MATH21_ASSERT(y.size() == x.size()); NumN n = x.size(); if (startZero) { for (NumN i = 1; i <= n; ++i) { if (i == 1) { y(i) = 0; } else { y(i) = y(i - 1) + x(i - 1); } } } else { for (NumN i = 1; i <= n; ++i) { if (i == 1) { y(i) = x(i); } else { y(i) = y(i - 1) + x(i); } } } } // v is like cdf in order template<typename VecType> NumN math21_operator_container_cdf_get(const VecType &v, NumR u, NumB includeFirst) { NumN n = v.size(); if (includeFirst) { for 
(NumN i = 1; i <= n; ++i) { if (u < v(i)) { return i; } } } else { MATH21_ASSERT(n >= 2); n -= 1; for (NumN i = 1; i <= n; ++i) { if (u < v(i + 1)) { return i; } } } return n; } // VecType is VecR template<typename VecType> NumN math21_operator_container_pdf_get(const VecType &v, NumR u) { NumN n = v.size(); // method one /*NumR s = 0; for (NumN i = 1; i <= n; ++i) { s = s + v(i); if (u < s) { return i; } } return n;*/ // method two for (NumN i = 1; i <= n; ++i) { u = u - v(i); if (u <= 0) { return i; } } return n; } // collapse => [-)|(-] template<typename T, template<typename> class Container, typename VecType> void math21_operator_container_dn_to_d3(const Container<T> &d_x, NumN axis_d2, VecType &d) { d.setSize(3); NumN p1; p1 = axis_d2; d(1) = math21_operator_container_multiply_some(d_x, p1 - 1, 0); d(2) = d_x(p1); d(3) = math21_operator_container_multiply_some(d_x, d_x.size() - p1, p1); math21_operator_container_replace_by_number(d, 1, 0); } // collapse => [-)|(-)|(-] template<typename T, template<typename> class Container, typename VecType> void math21_operator_container_dn_to_d5_fix_24(const Container<T> &d_x, NumN axis_d2, NumN axis_d4, VecType &d) { d.setSize(5); NumN p1, p2; p1 = axis_d2; p2 = axis_d4; MATH21_ASSERT(p1 < p2, "collapse axes"); d(1) = math21_operator_container_multiply_some(d_x, p1 - 1, 0); d(2) = d_x(p1); d(3) = math21_operator_container_multiply_some(d_x, p2 - p1 - 1, p1); d(4) = d_x(p2); d(5) = math21_operator_container_multiply_some(d_x, d_x.size() - p2, p2); math21_operator_container_replace_by_number(d, 1, 0); } // fix_2: collapse => [-)|(-](-] // fix_3: collapse => [-)[-)|(-] template<typename T, template<typename> class Container, typename VecType> void math21_operator_container_dn_to_d4_fix_2_or_3( const Container<T> &d_x, NumN axis_d2, NumN axis_d3, VecType &d, NumB fix2) { d.setSize(4); NumN p1, p2; p1 = axis_d2; p2 = axis_d3; MATH21_ASSERT(p1 < p2, "collapse axes"); d(1) = math21_operator_container_multiply_some(d_x, p1 - 1, 0); if (fix2) { d(2) = d_x(p1); d(3) = math21_operator_container_multiply_some(d_x, p2 - p1, p1); } else { // fix 3 d(2) = math21_operator_container_multiply_some(d_x, p2 - p1, p1 - 1); d(3) = d_x(p2); } d(4) = math21_operator_container_multiply_some(d_x, d_x.size() - p2, p2); math21_operator_container_replace_by_number(d, 1, 0); } template<typename T, template<typename> class Container, typename VecType> void math21_operator_container_dn_to_d4_fix_2or3( const Container<T> &d_x, NumN from, NumN to, VecType &d) { NumN p1, p2; p1 = xjmin(from, to); p2 = xjmax(from, to); math21_operator_container_dn_to_d4_fix_2_or_3(d_x, p1, p2, d, from<to); } }
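The math21 container operators above are 1-based C++ templates. As a language-neutral illustration of one of them, the plain-C sketch below reproduces the p-norm distance computed by math21_operator_container_distance (0-based indexing here, and without the MATH21_ASSERT machinery); it is an illustration, not part of the library.

/* Plain-C sketch of the p-norm distance computed by
   math21_operator_container_distance above: L1 sums absolute differences,
   L2 takes the Euclidean norm, and general p uses the p-th root. */
#include <math.h>
#include <stdio.h>
#include <stddef.h>

static double container_distance (const double *A, const double *B,
                                  size_t n, double p)
{
    double sum = 0.0;
    if (p == 1.0) {                      /* L1: sum of absolute differences */
        for (size_t i = 0; i < n; ++i)
            sum += fabs(A[i] - B[i]);
    } else if (p == 2.0) {               /* L2: Euclidean distance */
        for (size_t i = 0; i < n; ++i)
            sum += (A[i] - B[i]) * (A[i] - B[i]);
        sum = sqrt(sum);
    } else {                             /* general Lp */
        for (size_t i = 0; i < n; ++i)
            sum += pow(fabs(A[i] - B[i]), p);
        sum = pow(sum, 1.0 / p);
    }
    return sum;
}

int main (void)
{
    double a[3] = {1, 2, 3}, b[3] = {4, 6, 3};
    printf("L1 = %g, L2 = %g\n",
           container_distance(a, b, 3, 1),    /* 3 + 4 + 0 = 7 */
           container_distance(a, b, 3, 2));   /* sqrt(9 + 16) = 5 */
    return 0;
}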
jacobi.c
#include <stdio.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif // Add timing support #include <sys/time.h> double time_stamp() { struct timeval t; double time; gettimeofday(&t, NULL); time = t.tv_sec + 1.0e-6*t.tv_usec; return time; } double time1, time2; void driver(void); void initialize(void); void jacobi(void); void error_check(void); /************************************************************ * program to solve a finite difference * discretization of Helmholtz equation : * (d2/dx2)u + (d2/dy2)u - alpha u = f * using Jacobi iterative method. * * Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998 * Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998 * This c version program is translated by * Chunhua Liao, University of Houston, Jan, 2005 * * Directives are used in this code to achieve paralleism. * All do loops are parallized with default 'static' scheduling. * * Input : n - grid dimension in x direction * m - grid dimension in y direction * alpha - Helmholtz constant (always greater than 0.0) * tol - error tolerance for iterative solver * relax - Successice over relaxation parameter * mits - Maximum iterations for iterative solver * * On output * : u(n,m) - Dependent variable (solutions) * : f(n,m) - Right hand side function *************************************************************/ #define MSIZE 500 int n,m,mits; double tol,relax=1.0,alpha=0.0543; double u[MSIZE][MSIZE],f[MSIZE][MSIZE],uold[MSIZE][MSIZE]; double dx,dy; int main (void) { float toler; /* printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE); scanf ("%d",&n); scanf ("%d",&m); printf("Input tol - error tolerance for iterative solver\n"); scanf("%f",&toler); tol=(double)toler; printf("Input mits - Maximum iterations for solver\n"); scanf("%d",&mits); */ n=MSIZE; m=MSIZE; tol=0.0000000001; mits=5000; #ifdef _OPENMP #pragma omp parallel { #pragma omp single printf("Running using %d threads...\n",omp_get_num_threads()); } #endif driver ( ) ; return 0; } /************************************************************* * Subroutine driver () * This is where the arrays are allocated and initialzed. 
* * Working varaibles/arrays * dx - grid spacing in x direction * dy - grid spacing in y direction *************************************************************/ void driver( ) { initialize(); time1 = time_stamp(); /* Solve Helmholtz equation */ jacobi (); time2 = time_stamp(); printf("------------------------\n"); printf("Execution time = %f\n",time2-time1); /* error_check (n,m,alpha,dx,dy,u,f)*/ error_check ( ); } /* subroutine initialize (n,m,alpha,dx,dy,u,f) ****************************************************** * Initializes data * Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2) * ******************************************************/ void initialize( ) { int i,j, xx,yy; //double PI=3.1415926; dx = 2.0 / (n-1); dy = 2.0 / (m-1); /* Initialize initial condition and RHS */ #pragma omp parallel for private(xx,yy,j,i) for (i=0;i<n;i++) for (j=0;j<m;j++) { xx =(int)( -1.0 + dx * (i-1)); yy = (int)(-1.0 + dy * (j-1)) ; u[i][j] = 0.0; f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\ - 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy); } } /* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit) ****************************************************************** * Subroutine HelmholtzJ * Solves poisson equation on rectangular grid assuming : * (1) Uniform discretization in each direction, and * (2) Dirichlect boundary conditions * * Jacobi method is used in this routine * * Input : n,m Number of grid points in the X/Y directions * dx,dy Grid spacing in the X/Y directions * alpha Helmholtz eqn. coefficient * omega Relaxation factor * f(n,m) Right hand side function * u(n,m) Dependent variable/Solution * tol Tolerance for iterative solver * maxit Maximum number of iterations * * Output : u(n,m) - Solution *****************************************************************/ void jacobi( ) { double omega; int i,j,k; double error,resid,ax,ay,b; // double error_local; // float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2; // float te1,te2; // float second; omega=relax; /* * Initialize coefficients */ ax = 1.0/(dx*dx); /* X-direction coef */ ay = 1.0/(dy*dy); /* Y-direction coef */ b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */ error = 10.0 * tol; k = 1; while ((k<=mits)&&(error>tol)) { error = 0.0; /* Copy new solution into old */ #pragma omp parallel { #pragma omp for private(j,i) for(i=0;i<n;i++) for(j=0;j<m;j++) uold[i][j] = u[i][j]; #pragma omp for private(resid,j,i) reduction(+:error) nowait for (i=1;i<(n-1);i++) for (j=1;j<(m-1);j++) { resid = (ax*(uold[i-1][j] + uold[i+1][j])\ + ay*(uold[i][j-1] + uold[i][j+1])+ b * uold[i][j] - f[i][j])/b; u[i][j] = uold[i][j] - omega * resid; error = error + resid*resid ; } } /* omp end parallel */ /* Error check */ k = k + 1; if (k%500==0) printf("Finished %d iteration.\n",k); error = sqrt(error)/(n*m); } /* End iteration loop */ printf("Total Number of Iterations:%d\n",k); printf("Residual:%E\n", error); } /* subroutine error_check (n,m,alpha,dx,dy,u,f) implicit none ************************************************************ * Checks error between numerical and exact solution * ************************************************************/ void error_check ( ) { int i,j; double xx,yy,temp,error; dx = 2.0 / (n-1); dy = 2.0 / (m-1); error = 0.0 ; #pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error) for (i=0;i<n;i++) for (j=0;j<m;j++) { xx = -1.0 + dx * (i-1); yy = -1.0 + dy * (j-1); temp = u[i][j] - (1.0-xx*xx)*(1.0-yy*yy); error = error + temp*temp; } error = sqrt(error)/(n*m); printf("Solution Error :%E \n",error); }
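The header comment of jacobi() above documents the discretized Helmholtz operator and the relaxed Jacobi update. The sketch below isolates that per-point stencil update on a tiny fixed grid with zero Dirichlet boundaries and a made-up right-hand side; it illustrates the update formula and is not a replacement for the benchmark.

/* Minimal sketch of one Jacobi sweep with the same 5-point stencil and
   relaxation update as jacobi() above, on a tiny fixed grid.  Boundary
   values stay at zero (Dirichlet); f = 1 is an arbitrary right-hand side. */
#include <stdio.h>

#define N 6
int main (void)
{
    double u[N][N] = {{0}}, uold[N][N], f[N][N] = {{0}};
    double dx = 2.0 / (N - 1), dy = 2.0 / (N - 1);
    double alpha = 0.0543, omega = 1.0;
    double ax = 1.0 / (dx * dx);                       /* X-direction coef */
    double ay = 1.0 / (dy * dy);                       /* Y-direction coef */
    double b  = -2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha;
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            f[i][j] = 1.0;

    for (int sweep = 0; sweep < 100; sweep++) {
        double error = 0.0;
        for (int i = 0; i < N; i++)           /* copy new solution into old */
            for (int j = 0; j < N; j++)
                uold[i][j] = u[i][j];
        for (int i = 1; i < N - 1; i++)       /* relaxed Jacobi update */
            for (int j = 1; j < N - 1; j++) {
                double resid = (ax * (uold[i-1][j] + uold[i+1][j])
                              + ay * (uold[i][j-1] + uold[i][j+1])
                              + b * uold[i][j] - f[i][j]) / b;
                u[i][j] = uold[i][j] - omega * resid;
                error += resid * resid;
            }
        if (error < 1e-20) break;
    }
    printf("u[%d][%d] = %g\n", N/2, N/2, u[N/2][N/2]);
    return 0;
}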
memory-operations-1.c
#include <assert.h>

#define C 55

int i, j, k;

static void
test_bzero (unsigned size)
{
  unsigned bsize = size * sizeof (int);
  int *x = __builtin_malloc (bsize);
  __builtin_memset (x, C, bsize);

#pragma omp target map(tofrom: x[:size]) map(from: bsize)
  {
    __builtin_bzero (x, bsize);
  }

  char *buffer = (char *) x;
  for (unsigned i = 0; i < bsize; ++i)
    assert (buffer[i] == 0);
}

static void
test_memcpy (unsigned size)
{
  unsigned bsize = size * sizeof (int);
  int *x = __builtin_malloc (bsize);
  __builtin_memset (x, C, bsize);
  int *y = __builtin_malloc (bsize);

#pragma omp target map(tofrom: x[:size], y[:size]) map(from: bsize)
  {
    __builtin_memcpy (y, x, bsize);
  }

  char *buffer = (char *) y;
  for (unsigned i = 0; i < bsize; ++i)
    assert (buffer[i] == C);
}

static void
test_mempcpy (unsigned size)
{
  unsigned bsize = size * sizeof (int);
  int *x = __builtin_malloc (bsize);
  __builtin_memset (x, C, bsize);
  int *y = __builtin_malloc (bsize);
  int *ptr = 0;

#pragma omp target map(tofrom :x[:size], y[:size], ptr) map(from: bsize)
  {
    ptr = __builtin_mempcpy (y, x, bsize);
  }

  char *buffer = (char *) y;
  for (unsigned i = 0; i < bsize; ++i)
    assert (buffer[i] == C);
  assert (ptr == y + size);
}

static void
test_memset (unsigned size)
{
  unsigned bsize = size * sizeof (int);
  int *x = __builtin_malloc (bsize);
  __builtin_bzero (x, bsize);

#pragma omp target map(tofrom : x[:size]) map(from: bsize)
  {
    __builtin_memset (x, C, bsize);
  }

  char *buffer = (char *) x;
  for (unsigned i = 0; i < bsize; ++i)
    assert (buffer[i] == C);
}

int
main (void)
{
  unsigned tests[] = {1, 2, 3, 4, 5, 8, 15, 17, 23, 33, 0};

  for (unsigned i = 0; tests[i]; i++)
    {
      test_bzero (tests[i]);
      test_memset (tests[i]);
      test_memcpy (tests[i]);
      test_mempcpy (tests[i]);
    }
}
subteam.c
#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif /* _OPENMP */

#define NUMELEMENT 10000000

static double a[NUMELEMENT];

static void init(void)
{
  int i=0,j;
  i=i+5; /* assume on thread 1,3 */
  #pragma omp for
  for (i=0;i<NUMELEMENT;i++)
  {
    a[i]=(double)i/2.0;
    a[i]=(double)i/2.0;
    a[i]=(double)i/2.0;
    a[i]=(double)i/2.0;
  }
  /* default team, on all threads */
  #pragma omp single
  {
    j=omp_get_thread_num();
    printf("I am the single one: %d\n",j );
  }
}

int main(void)
{
  #pragma omp parallel
  {
    init();
  }
  return 0;
}
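subteam.c relies on orphaned OpenMP worksharing: the #pragma omp for and #pragma omp single inside init() bind to the team of the parallel region in main() that calls it. The sketch below shows the same orphaning pattern in a self-contained form (hypothetical helper work(); build with an OpenMP compiler, e.g. gcc -fopenmp).

/* Hedged sketch of the "orphaned directive" pattern used in subteam.c:
   worksharing constructs may appear in a function called from inside a
   parallel region, and they bind to the team of the enclosing parallel. */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
static int omp_get_thread_num (void) { return 0; }
#endif

#define N 8

static void work (double *a)
{
    /* Orphaned worksharing loop: iterations are split across the team
       of whichever parallel region called work(). */
    #pragma omp for
    for (int i = 0; i < N; i++)
        a[i] = i / 2.0;

    /* Orphaned single: executed by exactly one thread of that team. */
    #pragma omp single
    printf ("single executed by thread %d\n", omp_get_thread_num ());
}

int main (void)
{
    static double a[N];
    #pragma omp parallel
    {
        work (a);
    }
    for (int i = 0; i < N; i++)
        printf ("a[%d] = %.1f\n", i, a[i]);
    return 0;
}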
hsa-gen.c
/* A pass for lowering gimple to HSAIL Copyright (C) 2013-2020 Free Software Foundation, Inc. Contributed by Martin Jambor <[email protected]> and Martin Liska <[email protected]>. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "memmodel.h" #include "tm.h" #include "is-a.h" #include "hash-table.h" #include "vec.h" #include "tree.h" #include "tree-pass.h" #include "function.h" #include "basic-block.h" #include "cfg.h" #include "fold-const.h" #include "gimple.h" #include "gimple-iterator.h" #include "bitmap.h" #include "dumpfile.h" #include "gimple-pretty-print.h" #include "diagnostic-core.h" #include "gimple-ssa.h" #include "tree-phinodes.h" #include "stringpool.h" #include "tree-vrp.h" #include "tree-ssanames.h" #include "tree-dfa.h" #include "ssa-iterators.h" #include "cgraph.h" #include "print-tree.h" #include "alloc-pool.h" #include "symbol-summary.h" #include "hsa-common.h" #include "cfghooks.h" #include "tree-cfg.h" #include "cfgloop.h" #include "cfganal.h" #include "builtins.h" #include "gomp-constants.h" #include "internal-fn.h" #include "builtins.h" #include "stor-layout.h" #include "stringpool.h" #include "attribs.h" /* Print a warning message and set that we have seen an error. */ #define HSA_SORRY_ATV(location, message, ...) \ do \ { \ hsa_fail_cfun (); \ auto_diagnostic_group d; \ if (warning_at (EXPR_LOCATION (hsa_cfun->m_decl), OPT_Whsa, \ HSA_SORRY_MSG)) \ inform (location, message, __VA_ARGS__); \ } \ while (false) /* Same as previous, but highlight a location. */ #define HSA_SORRY_AT(location, message) \ do \ { \ hsa_fail_cfun (); \ auto_diagnostic_group d; \ if (warning_at (EXPR_LOCATION (hsa_cfun->m_decl), OPT_Whsa, \ HSA_SORRY_MSG)) \ inform (location, message); \ } \ while (false) /* Default number of threads used by kernel dispatch. */ #define HSA_DEFAULT_NUM_THREADS 64 /* Following structures are defined in the final version of HSA specification. */ /* HSA queue packet is shadow structure, originally provided by AMD. */ struct hsa_queue_packet { uint16_t header; uint16_t setup; uint16_t workgroup_size_x; uint16_t workgroup_size_y; uint16_t workgroup_size_z; uint16_t reserved0; uint32_t grid_size_x; uint32_t grid_size_y; uint32_t grid_size_z; uint32_t private_segment_size; uint32_t group_segment_size; uint64_t kernel_object; void *kernarg_address; uint64_t reserved2; uint64_t completion_signal; }; /* HSA queue is shadow structure, originally provided by AMD. */ struct hsa_queue { int type; uint32_t features; void *base_address; uint64_t doorbell_signal; uint32_t size; uint32_t reserved1; uint64_t id; }; static struct obstack hsa_obstack; /* List of pointers to all instructions that come from an object allocator. */ static vec <hsa_insn_basic *> hsa_instructions; /* List of pointers to all operands that come from an object allocator. 
*/ static vec <hsa_op_base *> hsa_operands; hsa_symbol::hsa_symbol () : m_decl (NULL_TREE), m_name (NULL), m_name_number (0), m_directive_offset (0), m_type (BRIG_TYPE_NONE), m_segment (BRIG_SEGMENT_NONE), m_linkage (BRIG_LINKAGE_NONE), m_dim (0), m_cst_value (NULL), m_global_scope_p (false), m_seen_error (false), m_allocation (BRIG_ALLOCATION_AUTOMATIC), m_emitted_to_brig (false) { } hsa_symbol::hsa_symbol (BrigType16_t type, BrigSegment8_t segment, BrigLinkage8_t linkage, bool global_scope_p, BrigAllocation allocation, BrigAlignment8_t align) : m_decl (NULL_TREE), m_name (NULL), m_name_number (0), m_directive_offset (0), m_type (type), m_segment (segment), m_linkage (linkage), m_dim (0), m_cst_value (NULL), m_global_scope_p (global_scope_p), m_seen_error (false), m_allocation (allocation), m_emitted_to_brig (false), m_align (align) { } unsigned HOST_WIDE_INT hsa_symbol::total_byte_size () { unsigned HOST_WIDE_INT s = hsa_type_bit_size (~BRIG_TYPE_ARRAY_MASK & m_type); gcc_assert (s % BITS_PER_UNIT == 0); s /= BITS_PER_UNIT; if (m_dim) s *= m_dim; return s; } /* Forward declaration. */ static BrigType16_t hsa_type_for_tree_type (const_tree type, unsigned HOST_WIDE_INT *dim_p, bool min32int); void hsa_symbol::fillup_for_decl (tree decl) { m_decl = decl; m_type = hsa_type_for_tree_type (TREE_TYPE (decl), &m_dim, false); if (hsa_seen_error ()) { m_seen_error = true; return; } m_align = MAX (m_align, hsa_natural_alignment (m_type)); } /* Constructor of class representing global HSA function/kernel information and state. FNDECL is function declaration, KERNEL_P is true if the function is going to become a HSA kernel. If the function has body, SSA_NAMES_COUNT should be set to number of SSA names used in the function. MODIFIED_CFG is set to true in case we modified control-flow graph of the function. */ hsa_function_representation::hsa_function_representation (tree fdecl, bool kernel_p, unsigned ssa_names_count, bool modified_cfg) : m_name (NULL), m_reg_count (0), m_input_args (vNULL), m_output_arg (NULL), m_spill_symbols (vNULL), m_global_symbols (vNULL), m_private_variables (vNULL), m_called_functions (vNULL), m_called_internal_fns (vNULL), m_hbb_count (0), m_in_ssa (true), m_kern_p (kernel_p), m_declaration_p (false), m_decl (fdecl), m_internal_fn (NULL), m_shadow_reg (NULL), m_kernel_dispatch_count (0), m_maximum_omp_data_size (0), m_seen_error (false), m_temp_symbol_count (0), m_ssa_map (), m_modified_cfg (modified_cfg) { int sym_init_len = (vec_safe_length (cfun->local_decls) / 2) + 1; m_local_symbols = new hash_table <hsa_noop_symbol_hasher> (sym_init_len); m_ssa_map.safe_grow_cleared (ssa_names_count); } /* Constructor of class representing HSA function information that is derived for an internal function. */ hsa_function_representation::hsa_function_representation (hsa_internal_fn *fn) : m_reg_count (0), m_input_args (vNULL), m_output_arg (NULL), m_local_symbols (NULL), m_spill_symbols (vNULL), m_global_symbols (vNULL), m_private_variables (vNULL), m_called_functions (vNULL), m_called_internal_fns (vNULL), m_hbb_count (0), m_in_ssa (true), m_kern_p (false), m_declaration_p (true), m_decl (NULL), m_internal_fn (fn), m_shadow_reg (NULL), m_kernel_dispatch_count (0), m_maximum_omp_data_size (0), m_seen_error (false), m_temp_symbol_count (0), m_ssa_map () {} /* Destructor of class holding function/kernel-wide information and state. 
*/ hsa_function_representation::~hsa_function_representation () { /* Kernel names are deallocated at the end of BRIG output when deallocating hsa_decl_kernel_mapping. */ if (!m_kern_p || m_seen_error) free (m_name); for (unsigned i = 0; i < m_input_args.length (); i++) delete m_input_args[i]; m_input_args.release (); delete m_output_arg; delete m_local_symbols; for (unsigned i = 0; i < m_spill_symbols.length (); i++) delete m_spill_symbols[i]; m_spill_symbols.release (); hsa_symbol *sym; for (unsigned i = 0; i < m_global_symbols.iterate (i, &sym); i++) if (sym->m_linkage != BRIG_ALLOCATION_PROGRAM) delete sym; m_global_symbols.release (); for (unsigned i = 0; i < m_private_variables.length (); i++) delete m_private_variables[i]; m_private_variables.release (); m_called_functions.release (); m_ssa_map.release (); for (unsigned i = 0; i < m_called_internal_fns.length (); i++) delete m_called_internal_fns[i]; } hsa_op_reg * hsa_function_representation::get_shadow_reg () { /* If we compile a function with kernel dispatch and does not set an optimization level, the function won't be inlined and we return NULL. */ if (!m_kern_p) return NULL; if (m_shadow_reg) return m_shadow_reg; /* Append the shadow argument. */ hsa_symbol *shadow = new hsa_symbol (BRIG_TYPE_U64, BRIG_SEGMENT_KERNARG, BRIG_LINKAGE_FUNCTION); m_input_args.safe_push (shadow); shadow->m_name = "hsa_runtime_shadow"; hsa_op_reg *r = new hsa_op_reg (BRIG_TYPE_U64); hsa_op_address *addr = new hsa_op_address (shadow); hsa_insn_mem *mem = new hsa_insn_mem (BRIG_OPCODE_LD, BRIG_TYPE_U64, r, addr); hsa_bb_for_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun))->append_insn (mem); m_shadow_reg = r; return r; } bool hsa_function_representation::has_shadow_reg_p () { return m_shadow_reg != NULL; } void hsa_function_representation::init_extra_bbs () { hsa_init_new_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun)); hsa_init_new_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)); } void hsa_function_representation::update_dominance () { if (m_modified_cfg) { free_dominance_info (CDI_DOMINATORS); calculate_dominance_info (CDI_DOMINATORS); } } hsa_symbol * hsa_function_representation::create_hsa_temporary (BrigType16_t type) { hsa_symbol *s = new hsa_symbol (type, BRIG_SEGMENT_PRIVATE, BRIG_LINKAGE_FUNCTION); s->m_name_number = m_temp_symbol_count++; hsa_cfun->m_private_variables.safe_push (s); return s; } BrigLinkage8_t hsa_function_representation::get_linkage () { if (m_internal_fn) return BRIG_LINKAGE_PROGRAM; return m_kern_p || TREE_PUBLIC (m_decl) ? BRIG_LINKAGE_PROGRAM : BRIG_LINKAGE_MODULE; } /* Hash map of simple OMP builtins. */ static hash_map <nofree_string_hash, omp_simple_builtin> *omp_simple_builtins = NULL; /* Warning messages for OMP builtins. */ #define HSA_WARN_LOCK_ROUTINE "support for HSA does not implement OpenMP " \ "lock routines" #define HSA_WARN_TIMING_ROUTINE "support for HSA does not implement OpenMP " \ "timing routines" #define HSA_WARN_MEMORY_ROUTINE "OpenMP device memory library routines have " \ "undefined semantics within target regions, support for HSA ignores them" #define HSA_WARN_AFFINITY "Support for HSA does not implement OpenMP " \ "affinity feateres" /* Initialize hash map with simple OMP builtins. 
*/ static void hsa_init_simple_builtins () { if (omp_simple_builtins != NULL) return; omp_simple_builtins = new hash_map <nofree_string_hash, omp_simple_builtin> (); omp_simple_builtin omp_builtins[] = { omp_simple_builtin ("omp_get_initial_device", NULL, false, new hsa_op_immed (GOMP_DEVICE_HOST, (BrigType16_t) BRIG_TYPE_S32)), omp_simple_builtin ("omp_is_initial_device", NULL, false, new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_S32)), omp_simple_builtin ("omp_get_dynamic", NULL, false, new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_S32)), omp_simple_builtin ("omp_set_dynamic", NULL, false, NULL), omp_simple_builtin ("omp_init_lock", HSA_WARN_LOCK_ROUTINE, true), omp_simple_builtin ("omp_init_lock_with_hint", HSA_WARN_LOCK_ROUTINE, true), omp_simple_builtin ("omp_init_nest_lock_with_hint", HSA_WARN_LOCK_ROUTINE, true), omp_simple_builtin ("omp_destroy_lock", HSA_WARN_LOCK_ROUTINE, true), omp_simple_builtin ("omp_set_lock", HSA_WARN_LOCK_ROUTINE, true), omp_simple_builtin ("omp_unset_lock", HSA_WARN_LOCK_ROUTINE, true), omp_simple_builtin ("omp_test_lock", HSA_WARN_LOCK_ROUTINE, true), omp_simple_builtin ("omp_get_wtime", HSA_WARN_TIMING_ROUTINE, true), omp_simple_builtin ("omp_get_wtick", HSA_WARN_TIMING_ROUTINE, true), omp_simple_builtin ("omp_target_alloc", HSA_WARN_MEMORY_ROUTINE, false, new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_U64)), omp_simple_builtin ("omp_target_free", HSA_WARN_MEMORY_ROUTINE, false), omp_simple_builtin ("omp_target_is_present", HSA_WARN_MEMORY_ROUTINE, false, new hsa_op_immed (-1, (BrigType16_t) BRIG_TYPE_S32)), omp_simple_builtin ("omp_target_memcpy", HSA_WARN_MEMORY_ROUTINE, false, new hsa_op_immed (-1, (BrigType16_t) BRIG_TYPE_S32)), omp_simple_builtin ("omp_target_memcpy_rect", HSA_WARN_MEMORY_ROUTINE, false, new hsa_op_immed (-1, (BrigType16_t) BRIG_TYPE_S32)), omp_simple_builtin ("omp_target_associate_ptr", HSA_WARN_MEMORY_ROUTINE, false, new hsa_op_immed (-1, (BrigType16_t) BRIG_TYPE_S32)), omp_simple_builtin ("omp_target_disassociate_ptr", HSA_WARN_MEMORY_ROUTINE, false, new hsa_op_immed (-1, (BrigType16_t) BRIG_TYPE_S32)), omp_simple_builtin ("omp_set_max_active_levels", "Support for HSA only allows only one active level, " "call to omp_set_max_active_levels will be ignored " "in the generated HSAIL", false, NULL), omp_simple_builtin ("omp_get_max_active_levels", NULL, false, new hsa_op_immed (1, (BrigType16_t) BRIG_TYPE_S32)), omp_simple_builtin ("omp_in_final", NULL, false, new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_S32)), omp_simple_builtin ("omp_get_proc_bind", HSA_WARN_AFFINITY, false, new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_S32)), omp_simple_builtin ("omp_get_num_places", HSA_WARN_AFFINITY, false, new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_S32)), omp_simple_builtin ("omp_get_place_num_procs", HSA_WARN_AFFINITY, false, new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_S32)), omp_simple_builtin ("omp_get_place_proc_ids", HSA_WARN_AFFINITY, false, NULL), omp_simple_builtin ("omp_get_place_num", HSA_WARN_AFFINITY, false, new hsa_op_immed (-1, (BrigType16_t) BRIG_TYPE_S32)), omp_simple_builtin ("omp_get_partition_num_places", HSA_WARN_AFFINITY, false, new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_S32)), omp_simple_builtin ("omp_get_partition_place_nums", HSA_WARN_AFFINITY, false, NULL), omp_simple_builtin ("omp_set_default_device", "omp_set_default_device has undefined semantics " "within target regions, support for HSA ignores it", false, NULL), omp_simple_builtin ("omp_get_default_device", "omp_get_default_device has undefined semantics " 
"within target regions, support for HSA ignores it", false, new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_S32)), omp_simple_builtin ("omp_get_num_devices", "omp_get_num_devices has undefined semantics " "within target regions, support for HSA ignores it", false, new hsa_op_immed (0, (BrigType16_t) BRIG_TYPE_S32)), omp_simple_builtin ("omp_get_num_procs", NULL, true, NULL), omp_simple_builtin ("omp_get_cancellation", NULL, true, NULL), omp_simple_builtin ("omp_set_nested", NULL, true, NULL), omp_simple_builtin ("omp_get_nested", NULL, true, NULL), omp_simple_builtin ("omp_set_schedule", NULL, true, NULL), omp_simple_builtin ("omp_get_schedule", NULL, true, NULL), omp_simple_builtin ("omp_get_thread_limit", NULL, true, NULL), omp_simple_builtin ("omp_get_team_size", NULL, true, NULL), omp_simple_builtin ("omp_get_ancestor_thread_num", NULL, true, NULL), omp_simple_builtin ("omp_get_max_task_priority", NULL, true, NULL) }; unsigned count = sizeof (omp_builtins) / sizeof (omp_simple_builtin); for (unsigned i = 0; i < count; i++) omp_simple_builtins->put (omp_builtins[i].m_name, omp_builtins[i]); } /* Allocate HSA structures that we need only while generating with this. */ static void hsa_init_data_for_cfun () { hsa_init_compilation_unit_data (); gcc_obstack_init (&hsa_obstack); } /* Deinitialize HSA subsystem and free all allocated memory. */ static void hsa_deinit_data_for_cfun (void) { basic_block bb; FOR_ALL_BB_FN (bb, cfun) if (bb->aux) { hsa_bb *hbb = hsa_bb_for_bb (bb); hbb->~hsa_bb (); bb->aux = NULL; } for (unsigned int i = 0; i < hsa_operands.length (); i++) hsa_destroy_operand (hsa_operands[i]); hsa_operands.release (); for (unsigned i = 0; i < hsa_instructions.length (); i++) hsa_destroy_insn (hsa_instructions[i]); hsa_instructions.release (); if (omp_simple_builtins != NULL) { delete omp_simple_builtins; omp_simple_builtins = NULL; } obstack_free (&hsa_obstack, NULL); delete hsa_cfun; } /* Return the type which holds addresses in the given SEGMENT. */ static BrigType16_t hsa_get_segment_addr_type (BrigSegment8_t segment) { switch (segment) { case BRIG_SEGMENT_NONE: gcc_unreachable (); case BRIG_SEGMENT_FLAT: case BRIG_SEGMENT_GLOBAL: case BRIG_SEGMENT_READONLY: case BRIG_SEGMENT_KERNARG: return hsa_machine_large_p () ? BRIG_TYPE_U64 : BRIG_TYPE_U32; case BRIG_SEGMENT_GROUP: case BRIG_SEGMENT_PRIVATE: case BRIG_SEGMENT_SPILL: case BRIG_SEGMENT_ARG: return BRIG_TYPE_U32; } gcc_unreachable (); } /* Return integer brig type according to provided SIZE in bytes. If SIGN is set to true, return signed integer type. */ static BrigType16_t get_integer_type_by_bytes (unsigned size, bool sign) { if (sign) switch (size) { case 1: return BRIG_TYPE_S8; case 2: return BRIG_TYPE_S16; case 4: return BRIG_TYPE_S32; case 8: return BRIG_TYPE_S64; default: break; } else switch (size) { case 1: return BRIG_TYPE_U8; case 2: return BRIG_TYPE_U16; case 4: return BRIG_TYPE_U32; case 8: return BRIG_TYPE_U64; default: break; } return 0; } /* If T points to an integral type smaller than 32 bits, change it to a 32bit equivalent and return the result. Otherwise just return the result. */ static BrigType16_t hsa_extend_inttype_to_32bit (BrigType16_t t) { if (t == BRIG_TYPE_U8 || t == BRIG_TYPE_U16) return BRIG_TYPE_U32; else if (t == BRIG_TYPE_S8 || t == BRIG_TYPE_S16) return BRIG_TYPE_S32; return t; } /* Return HSA type for tree TYPE, which has to fit into BrigType16_t. Pointers are assumed to use flat addressing. If min32int is true, always expand integer types to one that has at least 32 bits. 
*/ static BrigType16_t hsa_type_for_scalar_tree_type (const_tree type, bool min32int) { HOST_WIDE_INT bsize; const_tree base; BrigType16_t res = BRIG_TYPE_NONE; gcc_checking_assert (TYPE_P (type)); gcc_checking_assert (!AGGREGATE_TYPE_P (type)); if (POINTER_TYPE_P (type)) return hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT); if (TREE_CODE (type) == VECTOR_TYPE) base = TREE_TYPE (type); else if (TREE_CODE (type) == COMPLEX_TYPE) { base = TREE_TYPE (type); min32int = true; } else base = type; if (!tree_fits_uhwi_p (TYPE_SIZE (base))) { HSA_SORRY_ATV (EXPR_LOCATION (type), "support for HSA does not implement huge or " "variable-sized type %qT", type); return res; } bsize = tree_to_uhwi (TYPE_SIZE (base)); unsigned byte_size = bsize / BITS_PER_UNIT; if (INTEGRAL_TYPE_P (base)) res = get_integer_type_by_bytes (byte_size, !TYPE_UNSIGNED (base)); else if (SCALAR_FLOAT_TYPE_P (base)) { switch (bsize) { case 16: res = BRIG_TYPE_F16; break; case 32: res = BRIG_TYPE_F32; break; case 64: res = BRIG_TYPE_F64; break; default: break; } } if (res == BRIG_TYPE_NONE) { HSA_SORRY_ATV (EXPR_LOCATION (type), "support for HSA does not implement type %qT", type); return res; } if (TREE_CODE (type) == VECTOR_TYPE) { HOST_WIDE_INT tsize = tree_to_uhwi (TYPE_SIZE (type)); if (bsize == tsize) { HSA_SORRY_ATV (EXPR_LOCATION (type), "support for HSA does not implement a vector type " "where a type and unit size are equal: %qT", type); return res; } switch (tsize) { case 32: res |= BRIG_TYPE_PACK_32; break; case 64: res |= BRIG_TYPE_PACK_64; break; case 128: res |= BRIG_TYPE_PACK_128; break; default: HSA_SORRY_ATV (EXPR_LOCATION (type), "support for HSA does not implement type %qT", type); } } if (min32int) /* Registers/immediate operands can only be 32bit or more except for f16. */ res = hsa_extend_inttype_to_32bit (res); if (TREE_CODE (type) == COMPLEX_TYPE) { unsigned bsize = 2 * hsa_type_bit_size (res); res = hsa_bittype_for_bitsize (bsize); } return res; } /* Returns the BRIG type we need to load/store entities of TYPE. */ static BrigType16_t mem_type_for_type (BrigType16_t type) { /* HSA has non-intuitive constraints on load/store types. If it's a bit-type it _must_ be B128, if it's not a bit-type it must be 64bit max. So for loading entities of 128 bits (e.g. vectors) we have to use B128, while for loading the rest we have to use the input type (??? or maybe also flattened to a equally sized non-vector unsigned type?). */ if ((type & BRIG_TYPE_PACK_MASK) == BRIG_TYPE_PACK_128) return BRIG_TYPE_B128; else if (hsa_btype_p (type) || hsa_type_packed_p (type)) { unsigned bitsize = hsa_type_bit_size (type); if (bitsize < 128) return hsa_uint_for_bitsize (bitsize); else return hsa_bittype_for_bitsize (bitsize); } return type; } /* Return HSA type for tree TYPE. If it cannot fit into BrigType16_t, some kind of array will be generated, setting DIM appropriately. Otherwise, it will be set to zero. 
*/ static BrigType16_t hsa_type_for_tree_type (const_tree type, unsigned HOST_WIDE_INT *dim_p = NULL, bool min32int = false) { gcc_checking_assert (TYPE_P (type)); if (!tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) { HSA_SORRY_ATV (EXPR_LOCATION (type), "support for HSA does not " "implement huge or variable-sized type %qT", type); return BRIG_TYPE_NONE; } if (RECORD_OR_UNION_TYPE_P (type)) { if (dim_p) *dim_p = tree_to_uhwi (TYPE_SIZE_UNIT (type)); return BRIG_TYPE_U8 | BRIG_TYPE_ARRAY; } if (TREE_CODE (type) == ARRAY_TYPE) { /* We try to be nice and use the real base-type when this is an array of scalars and only resort to an array of bytes if the type is more complex. */ unsigned HOST_WIDE_INT dim = 1; while (TREE_CODE (type) == ARRAY_TYPE) { tree domain = TYPE_DOMAIN (type); if (!TYPE_MIN_VALUE (domain) || !TYPE_MAX_VALUE (domain) || !tree_fits_shwi_p (TYPE_MIN_VALUE (domain)) || !tree_fits_shwi_p (TYPE_MAX_VALUE (domain))) { HSA_SORRY_ATV (EXPR_LOCATION (type), "support for HSA does not implement array " "%qT with unknown bounds", type); return BRIG_TYPE_NONE; } HOST_WIDE_INT min = tree_to_shwi (TYPE_MIN_VALUE (domain)); HOST_WIDE_INT max = tree_to_shwi (TYPE_MAX_VALUE (domain)); dim = dim * (unsigned HOST_WIDE_INT) (max - min + 1); type = TREE_TYPE (type); } BrigType16_t res; if (RECORD_OR_UNION_TYPE_P (type)) { dim = dim * tree_to_uhwi (TYPE_SIZE_UNIT (type)); res = BRIG_TYPE_U8; } else res = hsa_type_for_scalar_tree_type (type, false); if (dim_p) *dim_p = dim; return res | BRIG_TYPE_ARRAY; } /* Scalar case: */ if (dim_p) *dim_p = 0; return hsa_type_for_scalar_tree_type (type, min32int); } /* Returns true if converting from STYPE into DTYPE needs the _CVT opcode. If false a normal _MOV is enough. */ static bool hsa_needs_cvt (BrigType16_t dtype, BrigType16_t stype) { if (hsa_btype_p (dtype)) return false; /* float <-> int conversions are real converts. */ if (hsa_type_float_p (dtype) != hsa_type_float_p (stype)) return true; /* When both types have different size, then we need CVT as well. */ if (hsa_type_bit_size (dtype) != hsa_type_bit_size (stype)) return true; return false; } /* Return declaration name if it exists or create one from UID if it does not. If DECL is a local variable, make UID part of its name. */ const char * hsa_get_declaration_name (tree decl) { if (!DECL_NAME (decl)) { char buf[64]; snprintf (buf, 64, "__hsa_anon_%u", DECL_UID (decl)); size_t len = strlen (buf); char *copy = (char *) obstack_alloc (&hsa_obstack, len + 1); memcpy (copy, buf, len + 1); return copy; } tree name_tree; if (TREE_CODE (decl) == FUNCTION_DECL || (TREE_CODE (decl) == VAR_DECL && is_global_var (decl))) name_tree = DECL_ASSEMBLER_NAME (decl); else name_tree = DECL_NAME (decl); const char *name = IDENTIFIER_POINTER (name_tree); /* User-defined assembly names have prepended asterisk symbol. */ if (name[0] == '*') name++; if ((TREE_CODE (decl) == VAR_DECL) && decl_function_context (decl)) { size_t len = strlen (name); char *buf = (char *) alloca (len + 32); snprintf (buf, len + 32, "%s_%u", name, DECL_UID (decl)); len = strlen (buf); char *copy = (char *) obstack_alloc (&hsa_obstack, len + 1); memcpy (copy, buf, len + 1); return copy; } else return name; } /* Lookup or create the associated hsa_symbol structure with a given VAR_DECL or lookup the hsa_structure corresponding to a PARM_DECL. 
*/ static hsa_symbol * get_symbol_for_decl (tree decl) { hsa_symbol **slot; hsa_symbol dummy (BRIG_TYPE_NONE, BRIG_SEGMENT_NONE, BRIG_LINKAGE_NONE); gcc_assert (TREE_CODE (decl) == PARM_DECL || TREE_CODE (decl) == RESULT_DECL || TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == CONST_DECL); dummy.m_decl = decl; bool is_in_global_vars = ((TREE_CODE (decl) == VAR_DECL) && !decl_function_context (decl)); if (is_in_global_vars) slot = hsa_global_variable_symbols->find_slot (&dummy, INSERT); else slot = hsa_cfun->m_local_symbols->find_slot (&dummy, INSERT); gcc_checking_assert (slot); if (*slot) { hsa_symbol *sym = (*slot); /* If the symbol is problematic, mark current function also as problematic. */ if (sym->m_seen_error) hsa_fail_cfun (); /* PR hsa/70234: If a global variable was marked to be emitted, but HSAIL generation of a function using the variable fails, we should retry to emit the variable in context of a different function. Iterate elements whether a symbol is already in m_global_symbols of not. */ if (is_in_global_vars && !sym->m_emitted_to_brig) { for (unsigned i = 0; i < hsa_cfun->m_global_symbols.length (); i++) if (hsa_cfun->m_global_symbols[i] == sym) return *slot; hsa_cfun->m_global_symbols.safe_push (sym); } return *slot; } else { hsa_symbol *sym; /* PARM_DECLs and RESULT_DECL should be already in m_local_symbols. */ gcc_assert (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == CONST_DECL); BrigAlignment8_t align = hsa_object_alignment (decl); if (is_in_global_vars) { gcc_checking_assert (TREE_CODE (decl) != CONST_DECL); sym = new hsa_symbol (BRIG_TYPE_NONE, BRIG_SEGMENT_GLOBAL, BRIG_LINKAGE_PROGRAM, true, BRIG_ALLOCATION_PROGRAM, align); hsa_cfun->m_global_symbols.safe_push (sym); sym->fillup_for_decl (decl); if (sym->m_align > align) { sym->m_seen_error = true; HSA_SORRY_ATV (EXPR_LOCATION (decl), "HSA specification requires that %E is at least " "naturally aligned", decl); } } else { /* As generation of efficient memory copy instructions relies on alignment greater or equal to 8 bytes, we need to increase alignment of all aggregate types.. */ if (AGGREGATE_TYPE_P (TREE_TYPE (decl))) align = MAX ((BrigAlignment8_t) BRIG_ALIGNMENT_8, align); BrigAllocation allocation = BRIG_ALLOCATION_AUTOMATIC; BrigSegment8_t segment; if (TREE_CODE (decl) == CONST_DECL) { segment = BRIG_SEGMENT_READONLY; allocation = BRIG_ALLOCATION_AGENT; } else if (lookup_attribute ("hsa_group_segment", DECL_ATTRIBUTES (decl))) segment = BRIG_SEGMENT_GROUP; else if (TREE_STATIC (decl)) { segment = BRIG_SEGMENT_GLOBAL; allocation = BRIG_ALLOCATION_PROGRAM; } else if (lookup_attribute ("hsa_global_segment", DECL_ATTRIBUTES (decl))) segment = BRIG_SEGMENT_GLOBAL; else segment = BRIG_SEGMENT_PRIVATE; sym = new hsa_symbol (BRIG_TYPE_NONE, segment, BRIG_LINKAGE_FUNCTION, false, allocation, align); sym->fillup_for_decl (decl); hsa_cfun->m_private_variables.safe_push (sym); } sym->m_name = hsa_get_declaration_name (decl); *slot = sym; return sym; } } /* For a given HSA function declaration, return a host function declaration. */ tree hsa_get_host_function (tree decl) { hsa_function_summary *s = hsa_summaries->get (cgraph_node::get_create (decl)); gcc_assert (s->m_gpu_implementation_p); return s->m_bound_function ? s->m_bound_function->decl : NULL; } /* Return true if function DECL has a host equivalent function. 
*/ static char * get_brig_function_name (tree decl) { tree d = decl; hsa_function_summary *s = hsa_summaries->get (cgraph_node::get_create (d)); if (s != NULL && s->m_gpu_implementation_p && s->m_bound_function) d = s->m_bound_function->decl; /* IPA split can create a function that has no host equivalent. */ if (d == NULL) d = decl; char *name = xstrdup (hsa_get_declaration_name (d)); hsa_sanitize_name (name); return name; } /* Create a spill symbol of type TYPE. */ hsa_symbol * hsa_get_spill_symbol (BrigType16_t type) { hsa_symbol *sym = new hsa_symbol (type, BRIG_SEGMENT_SPILL, BRIG_LINKAGE_FUNCTION); hsa_cfun->m_spill_symbols.safe_push (sym); return sym; } /* Create a symbol for a read-only string constant. */ hsa_symbol * hsa_get_string_cst_symbol (tree string_cst) { gcc_checking_assert (TREE_CODE (string_cst) == STRING_CST); hsa_symbol **slot = hsa_cfun->m_string_constants_map.get (string_cst); if (slot) return *slot; hsa_op_immed *cst = new hsa_op_immed (string_cst); hsa_symbol *sym = new hsa_symbol (cst->m_type, BRIG_SEGMENT_GLOBAL, BRIG_LINKAGE_MODULE, true, BRIG_ALLOCATION_AGENT); sym->m_cst_value = cst; sym->m_dim = TREE_STRING_LENGTH (string_cst); sym->m_name_number = hsa_cfun->m_global_symbols.length (); hsa_cfun->m_global_symbols.safe_push (sym); hsa_cfun->m_string_constants_map.put (string_cst, sym); return sym; } /* Make the type of a MOV instruction larger if mandated by HSAIL rules. */ static void hsa_fixup_mov_insn_type (hsa_insn_basic *insn) { insn->m_type = hsa_extend_inttype_to_32bit (insn->m_type); if (insn->m_type == BRIG_TYPE_B8 || insn->m_type == BRIG_TYPE_B16) insn->m_type = BRIG_TYPE_B32; } /* Constructor of the ancestor of all operands. K is BRIG kind that identified what the operator is. */ hsa_op_base::hsa_op_base (BrigKind16_t k) : m_next (NULL), m_brig_op_offset (0), m_kind (k) { hsa_operands.safe_push (this); } /* Constructor of ancestor of all operands which have a type. K is BRIG kind that identified what the operator is. T is the type of the operator. */ hsa_op_with_type::hsa_op_with_type (BrigKind16_t k, BrigType16_t t) : hsa_op_base (k), m_type (t) { } hsa_op_with_type * hsa_op_with_type::get_in_type (BrigType16_t dtype, hsa_bb *hbb) { if (m_type == dtype) return this; hsa_op_reg *dest; if (hsa_needs_cvt (dtype, m_type)) { dest = new hsa_op_reg (dtype); hbb->append_insn (new hsa_insn_cvt (dest, this)); } else if (is_a <hsa_op_reg *> (this)) { /* In the end, HSA registers do not really have types, only sizes, so if the sizes match, we can use the register directly. */ gcc_checking_assert (hsa_type_bit_size (dtype) == hsa_type_bit_size (m_type)); return this; } else { dest = new hsa_op_reg (m_type); hsa_insn_basic *mov = new hsa_insn_basic (2, BRIG_OPCODE_MOV, dest->m_type, dest, this); hsa_fixup_mov_insn_type (mov); hbb->append_insn (mov); /* We cannot simply for instance: 'mov_u32 $_3, 48 (s32)' because type of the operand must be same as type of the instruction. */ dest->m_type = dtype; } return dest; } /* If this operand has integer type smaller than 32 bits, extend it to 32 bits, adding instructions to HBB if needed. */ hsa_op_with_type * hsa_op_with_type::extend_int_to_32bit (hsa_bb *hbb) { if (m_type == BRIG_TYPE_U8 || m_type == BRIG_TYPE_U16) return get_in_type (BRIG_TYPE_U32, hbb); else if (m_type == BRIG_TYPE_S8 || m_type == BRIG_TYPE_S16) return get_in_type (BRIG_TYPE_S32, hbb); else return this; } /* Constructor of class representing HSA immediate values. TREE_VAL is the tree representation of the immediate value. 
If min32int is true, always expand integer types to one that has at least 32 bits. */ hsa_op_immed::hsa_op_immed (tree tree_val, bool min32int) : hsa_op_with_type (BRIG_KIND_OPERAND_CONSTANT_BYTES, hsa_type_for_tree_type (TREE_TYPE (tree_val), NULL, min32int)) { if (hsa_seen_error ()) return; gcc_checking_assert ((is_gimple_min_invariant (tree_val) && (!POINTER_TYPE_P (TREE_TYPE (tree_val)) || TREE_CODE (tree_val) == INTEGER_CST)) || TREE_CODE (tree_val) == CONSTRUCTOR); m_tree_value = tree_val; /* Verify that all elements of a constructor are constants. */ if (TREE_CODE (m_tree_value) == CONSTRUCTOR) for (unsigned i = 0; i < CONSTRUCTOR_NELTS (m_tree_value); i++) { tree v = CONSTRUCTOR_ELT (m_tree_value, i)->value; if (!CONSTANT_CLASS_P (v)) { HSA_SORRY_AT (EXPR_LOCATION (tree_val), "HSA ctor should have only constants"); return; } } } /* Constructor of class representing HSA immediate values. INTEGER_VALUE is the integer representation of the immediate value. TYPE is BRIG type. */ hsa_op_immed::hsa_op_immed (HOST_WIDE_INT integer_value, BrigType16_t type) : hsa_op_with_type (BRIG_KIND_OPERAND_CONSTANT_BYTES, type), m_tree_value (NULL) { gcc_assert (hsa_type_integer_p (type)); m_int_value = integer_value; } hsa_op_immed::hsa_op_immed () : hsa_op_with_type (BRIG_KIND_NONE, BRIG_TYPE_NONE) { } /* New operator to allocate immediate operands from obstack. */ void * hsa_op_immed::operator new (size_t size) { return obstack_alloc (&hsa_obstack, size); } /* Destructor. */ hsa_op_immed::~hsa_op_immed () { } /* Change type of the immediate value to T. */ void hsa_op_immed::set_type (BrigType16_t t) { m_type = t; } /* Constructor of class representing HSA registers and pseudo-registers. T is the BRIG type of the new register. */ hsa_op_reg::hsa_op_reg (BrigType16_t t) : hsa_op_with_type (BRIG_KIND_OPERAND_REGISTER, t), m_gimple_ssa (NULL_TREE), m_def_insn (NULL), m_spill_sym (NULL), m_order (hsa_cfun->m_reg_count++), m_lr_begin (0), m_lr_end (0), m_reg_class (0), m_hard_num (0) { } /* New operator to allocate a register from obstack. */ void * hsa_op_reg::operator new (size_t size) { return obstack_alloc (&hsa_obstack, size); } /* Verify register operand. */ void hsa_op_reg::verify_ssa () { /* Verify that each HSA register has a definition assigned. Exceptions are VAR_DECL and PARM_DECL that are a default definition. */ gcc_checking_assert (m_def_insn || (m_gimple_ssa != NULL && (!SSA_NAME_VAR (m_gimple_ssa) || (TREE_CODE (SSA_NAME_VAR (m_gimple_ssa)) != PARM_DECL)) && SSA_NAME_IS_DEFAULT_DEF (m_gimple_ssa))); /* Verify that every use of the register is really present in an instruction. 
*/ for (unsigned i = 0; i < m_uses.length (); i++) { hsa_insn_basic *use = m_uses[i]; bool is_visited = false; for (unsigned j = 0; j < use->operand_count (); j++) { hsa_op_base *u = use->get_op (j); hsa_op_address *addr; addr = dyn_cast <hsa_op_address *> (u); if (addr && addr->m_reg) u = addr->m_reg; if (u == this) { bool r = !addr && use->op_output_p (j); if (r) { error ("HSA SSA name defined by instruction that is supposed " "to be using it"); debug_hsa_operand (this); debug_hsa_insn (use); internal_error ("HSA SSA verification failed"); } is_visited = true; } } if (!is_visited) { error ("HSA SSA name not among operands of instruction that is " "supposed to use it"); debug_hsa_operand (this); debug_hsa_insn (use); internal_error ("HSA SSA verification failed"); } } } hsa_op_address::hsa_op_address (hsa_symbol *sym, hsa_op_reg *r, HOST_WIDE_INT offset) : hsa_op_base (BRIG_KIND_OPERAND_ADDRESS), m_symbol (sym), m_reg (r), m_imm_offset (offset) { } hsa_op_address::hsa_op_address (hsa_symbol *sym, HOST_WIDE_INT offset) : hsa_op_base (BRIG_KIND_OPERAND_ADDRESS), m_symbol (sym), m_reg (NULL), m_imm_offset (offset) { } hsa_op_address::hsa_op_address (hsa_op_reg *r, HOST_WIDE_INT offset) : hsa_op_base (BRIG_KIND_OPERAND_ADDRESS), m_symbol (NULL), m_reg (r), m_imm_offset (offset) { } /* New operator to allocate address operands from obstack. */ void * hsa_op_address::operator new (size_t size) { return obstack_alloc (&hsa_obstack, size); } /* Constructor of an operand referring to HSAIL code. */ hsa_op_code_ref::hsa_op_code_ref () : hsa_op_base (BRIG_KIND_OPERAND_CODE_REF), m_directive_offset (0) { } /* Constructor of an operand representing a code list. Set it up so that it can contain ELEMENTS number of elements. */ hsa_op_code_list::hsa_op_code_list (unsigned elements) : hsa_op_base (BRIG_KIND_OPERAND_CODE_LIST) { m_offsets.create (1); m_offsets.safe_grow_cleared (elements); } /* New operator to allocate code list operands from obstack. */ void * hsa_op_code_list::operator new (size_t size) { return obstack_alloc (&hsa_obstack, size); } /* Constructor of an operand representing an operand list. Set it up so that it can contain ELEMENTS number of elements. */ hsa_op_operand_list::hsa_op_operand_list (unsigned elements) : hsa_op_base (BRIG_KIND_OPERAND_OPERAND_LIST) { m_offsets.create (elements); m_offsets.safe_grow (elements); } /* New operator to allocate operand list operands from obstack. */ void * hsa_op_operand_list::operator new (size_t size) { return obstack_alloc (&hsa_obstack, size); } hsa_op_operand_list::~hsa_op_operand_list () { m_offsets.release (); } hsa_op_reg * hsa_function_representation::reg_for_gimple_ssa (tree ssa) { hsa_op_reg *hreg; gcc_checking_assert (TREE_CODE (ssa) == SSA_NAME); if (m_ssa_map[SSA_NAME_VERSION (ssa)]) return m_ssa_map[SSA_NAME_VERSION (ssa)]; hreg = new hsa_op_reg (hsa_type_for_scalar_tree_type (TREE_TYPE (ssa), false)); hreg->m_gimple_ssa = ssa; m_ssa_map[SSA_NAME_VERSION (ssa)] = hreg; return hreg; } void hsa_op_reg::set_definition (hsa_insn_basic *insn) { if (hsa_cfun->m_in_ssa) { gcc_checking_assert (!m_def_insn); m_def_insn = insn; } else m_def_insn = NULL; } /* Constructor of the class which is the bases of all instructions and directly represents the most basic ones. NOPS is the number of operands that the operand vector will contain (and which will be cleared). OP is the opcode of the instruction. This constructor does not set type. 
*/ hsa_insn_basic::hsa_insn_basic (unsigned nops, int opc) : m_prev (NULL), m_next (NULL), m_bb (NULL), m_opcode (opc), m_number (0), m_type (BRIG_TYPE_NONE), m_brig_offset (0) { if (nops > 0) m_operands.safe_grow_cleared (nops); hsa_instructions.safe_push (this); } /* Make OP the operand number INDEX of operands of this instruction. If OP is a register or an address containing a register, then either set the definition of the register to this instruction if it an output operand or add this instruction to the uses if it is an input one. */ void hsa_insn_basic::set_op (int index, hsa_op_base *op) { /* Each address operand is always use. */ hsa_op_address *addr = dyn_cast <hsa_op_address *> (op); if (addr && addr->m_reg) addr->m_reg->m_uses.safe_push (this); else { hsa_op_reg *reg = dyn_cast <hsa_op_reg *> (op); if (reg) { if (op_output_p (index)) reg->set_definition (this); else reg->m_uses.safe_push (this); } } m_operands[index] = op; } /* Get INDEX-th operand of the instruction. */ hsa_op_base * hsa_insn_basic::get_op (int index) { return m_operands[index]; } /* Get address of INDEX-th operand of the instruction. */ hsa_op_base ** hsa_insn_basic::get_op_addr (int index) { return &m_operands[index]; } /* Get number of operands of the instruction. */ unsigned int hsa_insn_basic::operand_count () { return m_operands.length (); } /* Constructor of the class which is the bases of all instructions and directly represents the most basic ones. NOPS is the number of operands that the operand vector will contain (and which will be cleared). OPC is the opcode of the instruction, T is the type of the instruction. */ hsa_insn_basic::hsa_insn_basic (unsigned nops, int opc, BrigType16_t t, hsa_op_base *arg0, hsa_op_base *arg1, hsa_op_base *arg2, hsa_op_base *arg3) : m_prev (NULL), m_next (NULL), m_bb (NULL), m_opcode (opc),m_number (0), m_type (t), m_brig_offset (0) { if (nops > 0) m_operands.safe_grow_cleared (nops); if (arg0 != NULL) { gcc_checking_assert (nops >= 1); set_op (0, arg0); } if (arg1 != NULL) { gcc_checking_assert (nops >= 2); set_op (1, arg1); } if (arg2 != NULL) { gcc_checking_assert (nops >= 3); set_op (2, arg2); } if (arg3 != NULL) { gcc_checking_assert (nops >= 4); set_op (3, arg3); } hsa_instructions.safe_push (this); } /* New operator to allocate basic instruction from obstack. */ void * hsa_insn_basic::operator new (size_t size) { return obstack_alloc (&hsa_obstack, size); } /* Verify the instruction. */ void hsa_insn_basic::verify () { hsa_op_address *addr; hsa_op_reg *reg; /* Iterate all register operands and verify that the instruction is set in uses of the register. */ for (unsigned i = 0; i < operand_count (); i++) { hsa_op_base *use = get_op (i); if ((addr = dyn_cast <hsa_op_address *> (use)) && addr->m_reg) { gcc_assert (addr->m_reg->m_def_insn != this); use = addr->m_reg; } if ((reg = dyn_cast <hsa_op_reg *> (use)) && !op_output_p (i)) { unsigned j; for (j = 0; j < reg->m_uses.length (); j++) { if (reg->m_uses[j] == this) break; } if (j == reg->m_uses.length ()) { error ("HSA instruction uses a register but is not among " "recorded register uses"); debug_hsa_operand (reg); debug_hsa_insn (this); internal_error ("HSA instruction verification failed"); } } } } /* Constructor of an instruction representing a PHI node. NOPS is the number of operands (equal to the number of predecessors). 
*/ hsa_insn_phi::hsa_insn_phi (unsigned nops, hsa_op_reg *dst) : hsa_insn_basic (nops, HSA_OPCODE_PHI), m_dest (dst) { dst->set_definition (this); } /* Constructor of class representing instructions for control flow and sychronization, */ hsa_insn_br::hsa_insn_br (unsigned nops, int opc, BrigType16_t t, BrigWidth8_t width, hsa_op_base *arg0, hsa_op_base *arg1, hsa_op_base *arg2, hsa_op_base *arg3) : hsa_insn_basic (nops, opc, t, arg0, arg1, arg2, arg3), m_width (width) { } /* Constructor of class representing instruction for conditional jump, CTRL is the control register determining whether the jump will be carried out, the new instruction is automatically added to its uses list. */ hsa_insn_cbr::hsa_insn_cbr (hsa_op_reg *ctrl) : hsa_insn_br (1, BRIG_OPCODE_CBR, BRIG_TYPE_B1, BRIG_WIDTH_1, ctrl) { } /* Constructor of class representing instruction for switch jump, CTRL is the index register. */ hsa_insn_sbr::hsa_insn_sbr (hsa_op_reg *index, unsigned jump_count) : hsa_insn_basic (1, BRIG_OPCODE_SBR, BRIG_TYPE_B1, index), m_width (BRIG_WIDTH_1), m_jump_table (vNULL), m_label_code_list (new hsa_op_code_list (jump_count)) { } /* Replace all occurrences of OLD_BB with NEW_BB in the statements jump table. */ void hsa_insn_sbr::replace_all_labels (basic_block old_bb, basic_block new_bb) { for (unsigned i = 0; i < m_jump_table.length (); i++) if (m_jump_table[i] == old_bb) m_jump_table[i] = new_bb; } hsa_insn_sbr::~hsa_insn_sbr () { m_jump_table.release (); } /* Constructor of comparison instruction. CMP is the comparison operation and T is the result type. */ hsa_insn_cmp::hsa_insn_cmp (BrigCompareOperation8_t cmp, BrigType16_t t, hsa_op_base *arg0, hsa_op_base *arg1, hsa_op_base *arg2) : hsa_insn_basic (3 , BRIG_OPCODE_CMP, t, arg0, arg1, arg2), m_compare (cmp) { } /* Constructor of classes representing memory accesses. OPC is the opcode (must be BRIG_OPCODE_ST or BRIG_OPCODE_LD) and T is the type. The instruction operands are provided as ARG0 and ARG1. */ hsa_insn_mem::hsa_insn_mem (int opc, BrigType16_t t, hsa_op_base *arg0, hsa_op_base *arg1) : hsa_insn_basic (2, opc, t, arg0, arg1), m_align (hsa_natural_alignment (t)), m_equiv_class (0) { gcc_checking_assert (opc == BRIG_OPCODE_LD || opc == BRIG_OPCODE_ST); } /* Constructor for descendants allowing different opcodes and number of operands, it passes its arguments directly to hsa_insn_basic constructor. The instruction operands are provided as ARG[0-3]. */ hsa_insn_mem::hsa_insn_mem (unsigned nops, int opc, BrigType16_t t, hsa_op_base *arg0, hsa_op_base *arg1, hsa_op_base *arg2, hsa_op_base *arg3) : hsa_insn_basic (nops, opc, t, arg0, arg1, arg2, arg3), m_align (hsa_natural_alignment (t)), m_equiv_class (0) { } /* Constructor of class representing atomic instructions. OPC is the principal opcode, AOP is the specific atomic operation opcode. T is the type of the instruction. The instruction operands are provided as ARG[0-3]. */ hsa_insn_atomic::hsa_insn_atomic (int nops, int opc, enum BrigAtomicOperation aop, BrigType16_t t, BrigMemoryOrder memorder, hsa_op_base *arg0, hsa_op_base *arg1, hsa_op_base *arg2, hsa_op_base *arg3) : hsa_insn_mem (nops, opc, t, arg0, arg1, arg2, arg3), m_atomicop (aop), m_memoryorder (memorder), m_memoryscope (BRIG_MEMORY_SCOPE_SYSTEM) { gcc_checking_assert (opc == BRIG_OPCODE_ATOMICNORET || opc == BRIG_OPCODE_ATOMIC || opc == BRIG_OPCODE_SIGNAL || opc == BRIG_OPCODE_SIGNALNORET); } /* Constructor of class representing signal instructions. OPC is the prinicpal opcode, SOP is the specific signal operation opcode. 
T is the type of the instruction. The instruction operands are provided as ARG[0-3]. */ hsa_insn_signal::hsa_insn_signal (int nops, int opc, enum BrigAtomicOperation sop, BrigType16_t t, BrigMemoryOrder memorder, hsa_op_base *arg0, hsa_op_base *arg1, hsa_op_base *arg2, hsa_op_base *arg3) : hsa_insn_basic (nops, opc, t, arg0, arg1, arg2, arg3), m_memory_order (memorder), m_signalop (sop) { } /* Constructor of class representing segment conversion instructions. OPC is the opcode which must be either BRIG_OPCODE_STOF or BRIG_OPCODE_FTOS. DEST and SRCT are destination and source types respectively, SEG is the segment we are converting to or from. The instruction operands are provided as ARG0 and ARG1. */ hsa_insn_seg::hsa_insn_seg (int opc, BrigType16_t dest, BrigType16_t srct, BrigSegment8_t seg, hsa_op_base *arg0, hsa_op_base *arg1) : hsa_insn_basic (2, opc, dest, arg0, arg1), m_src_type (srct), m_segment (seg) { gcc_checking_assert (opc == BRIG_OPCODE_STOF || opc == BRIG_OPCODE_FTOS); } /* Constructor of class representing a call instruction. CALLEE is the tree representation of the function being called. */ hsa_insn_call::hsa_insn_call (tree callee) : hsa_insn_basic (0, BRIG_OPCODE_CALL), m_called_function (callee), m_output_arg (NULL), m_args_code_list (NULL), m_result_code_list (NULL) { } hsa_insn_call::hsa_insn_call (hsa_internal_fn *fn) : hsa_insn_basic (0, BRIG_OPCODE_CALL), m_called_function (NULL), m_called_internal_fn (fn), m_output_arg (NULL), m_args_code_list (NULL), m_result_code_list (NULL) { } hsa_insn_call::~hsa_insn_call () { for (unsigned i = 0; i < m_input_args.length (); i++) delete m_input_args[i]; delete m_output_arg; m_input_args.release (); m_input_arg_insns.release (); } /* Constructor of class representing the argument block required to invoke a call in HSAIL. */ hsa_insn_arg_block::hsa_insn_arg_block (BrigKind brig_kind, hsa_insn_call * call) : hsa_insn_basic (0, HSA_OPCODE_ARG_BLOCK), m_kind (brig_kind), m_call_insn (call) { } hsa_insn_comment::hsa_insn_comment (const char *s) : hsa_insn_basic (0, BRIG_KIND_DIRECTIVE_COMMENT) { unsigned l = strlen (s); /* Append '// ' to the string. */ char *buf = XNEWVEC (char, l + 4); sprintf (buf, "// %s", s); m_comment = buf; } hsa_insn_comment::~hsa_insn_comment () { gcc_checking_assert (m_comment); free (m_comment); m_comment = NULL; } /* Constructor of class representing the queue instruction in HSAIL. */ hsa_insn_queue::hsa_insn_queue (int nops, int opcode, BrigSegment segment, BrigMemoryOrder memory_order, hsa_op_base *arg0, hsa_op_base *arg1, hsa_op_base *arg2, hsa_op_base *arg3) : hsa_insn_basic (nops, opcode, BRIG_TYPE_U64, arg0, arg1, arg2, arg3), m_segment (segment), m_memory_order (memory_order) { } /* Constructor of class representing the source type instruction in HSAIL. */ hsa_insn_srctype::hsa_insn_srctype (int nops, BrigOpcode opcode, BrigType16_t destt, BrigType16_t srct, hsa_op_base *arg0, hsa_op_base *arg1, hsa_op_base *arg2 = NULL) : hsa_insn_basic (nops, opcode, destt, arg0, arg1, arg2), m_source_type (srct) {} /* Constructor of class representing the packed instruction in HSAIL. */ hsa_insn_packed::hsa_insn_packed (int nops, BrigOpcode opcode, BrigType16_t destt, BrigType16_t srct, hsa_op_base *arg0, hsa_op_base *arg1, hsa_op_base *arg2) : hsa_insn_srctype (nops, opcode, destt, srct, arg0, arg1, arg2) { m_operand_list = new hsa_op_operand_list (nops - 1); } /* Constructor of class representing the convert instruction in HSAIL. 
*/ hsa_insn_cvt::hsa_insn_cvt (hsa_op_with_type *dest, hsa_op_with_type *src) : hsa_insn_basic (2, BRIG_OPCODE_CVT, dest->m_type, dest, src) { } /* Constructor of class representing the alloca in HSAIL. */ hsa_insn_alloca::hsa_insn_alloca (hsa_op_with_type *dest, hsa_op_with_type *size, unsigned alignment) : hsa_insn_basic (2, BRIG_OPCODE_ALLOCA, dest->m_type, dest, size), m_align (BRIG_ALIGNMENT_8) { gcc_assert (dest->m_type == BRIG_TYPE_U32); if (alignment) m_align = hsa_alignment_encoding (alignment); } /* Append an instruction INSN into the basic block. */ void hsa_bb::append_insn (hsa_insn_basic *insn) { gcc_assert (insn->m_opcode != 0 || insn->operand_count () == 0); gcc_assert (!insn->m_bb); insn->m_bb = m_bb; insn->m_prev = m_last_insn; insn->m_next = NULL; if (m_last_insn) m_last_insn->m_next = insn; m_last_insn = insn; if (!m_first_insn) m_first_insn = insn; } void hsa_bb::append_phi (hsa_insn_phi *hphi) { hphi->m_bb = m_bb; hphi->m_prev = m_last_phi; hphi->m_next = NULL; if (m_last_phi) m_last_phi->m_next = hphi; m_last_phi = hphi; if (!m_first_phi) m_first_phi = hphi; } /* Insert HSA instruction NEW_INSN immediately before an existing instruction OLD_INSN. */ static void hsa_insert_insn_before (hsa_insn_basic *new_insn, hsa_insn_basic *old_insn) { hsa_bb *hbb = hsa_bb_for_bb (old_insn->m_bb); if (hbb->m_first_insn == old_insn) hbb->m_first_insn = new_insn; new_insn->m_prev = old_insn->m_prev; new_insn->m_next = old_insn; if (old_insn->m_prev) old_insn->m_prev->m_next = new_insn; old_insn->m_prev = new_insn; } /* Append HSA instruction NEW_INSN immediately after an existing instruction OLD_INSN. */ static void hsa_append_insn_after (hsa_insn_basic *new_insn, hsa_insn_basic *old_insn) { hsa_bb *hbb = hsa_bb_for_bb (old_insn->m_bb); if (hbb->m_last_insn == old_insn) hbb->m_last_insn = new_insn; new_insn->m_prev = old_insn; new_insn->m_next = old_insn->m_next; if (old_insn->m_next) old_insn->m_next->m_prev = new_insn; old_insn->m_next = new_insn; } /* Return a register containing the calculated value of EXP which must be an expression consisting of PLUS_EXPRs, MULT_EXPRs, NOP_EXPRs, SSA_NAMEs and integer constants as returned by get_inner_reference. Newly generated HSA instructions will be appended to HBB. Perform all calculations in ADDRTYPE. */ static hsa_op_with_type * gen_address_calculation (tree exp, hsa_bb *hbb, BrigType16_t addrtype) { int opcode; if (TREE_CODE (exp) == NOP_EXPR) exp = TREE_OPERAND (exp, 0); switch (TREE_CODE (exp)) { case SSA_NAME: return hsa_cfun->reg_for_gimple_ssa (exp)->get_in_type (addrtype, hbb); case INTEGER_CST: { hsa_op_immed *imm = new hsa_op_immed (exp); if (addrtype != imm->m_type) imm->m_type = addrtype; return imm; } case PLUS_EXPR: opcode = BRIG_OPCODE_ADD; break; case MULT_EXPR: opcode = BRIG_OPCODE_MUL; break; default: gcc_unreachable (); } hsa_op_reg *res = new hsa_op_reg (addrtype); hsa_insn_basic *insn = new hsa_insn_basic (3, opcode, addrtype); insn->set_op (0, res); hsa_op_with_type *op1 = gen_address_calculation (TREE_OPERAND (exp, 0), hbb, addrtype); hsa_op_with_type *op2 = gen_address_calculation (TREE_OPERAND (exp, 1), hbb, addrtype); insn->set_op (1, op1); insn->set_op (2, op2); hbb->append_insn (insn); return res; } /* If R1 is NULL, just return R2, otherwise append an instruction adding them to HBB and return the register holding the result. 
*/ static hsa_op_reg * add_addr_regs_if_needed (hsa_op_reg *r1, hsa_op_reg *r2, hsa_bb *hbb) { gcc_checking_assert (r2); if (!r1) return r2; hsa_op_reg *res = new hsa_op_reg (r1->m_type); gcc_assert (!hsa_needs_cvt (r1->m_type, r2->m_type)); hsa_insn_basic *insn = new hsa_insn_basic (3, BRIG_OPCODE_ADD, res->m_type); insn->set_op (0, res); insn->set_op (1, r1); insn->set_op (2, r2); hbb->append_insn (insn); return res; } /* Helper of gen_hsa_addr. Update *SYMBOL, *ADDRTYPE, *REG and *OFFSET to reflect BASE which is the first operand of a MEM_REF or a TARGET_MEM_REF. */ static void process_mem_base (tree base, hsa_symbol **symbol, BrigType16_t *addrtype, hsa_op_reg **reg, offset_int *offset, hsa_bb *hbb) { if (TREE_CODE (base) == SSA_NAME) { gcc_assert (!*reg); hsa_op_with_type *ssa = hsa_cfun->reg_for_gimple_ssa (base)->get_in_type (*addrtype, hbb); *reg = dyn_cast <hsa_op_reg *> (ssa); } else if (TREE_CODE (base) == ADDR_EXPR) { tree decl = TREE_OPERAND (base, 0); if (!DECL_P (decl) || TREE_CODE (decl) == FUNCTION_DECL) { HSA_SORRY_AT (EXPR_LOCATION (base), "support for HSA does not implement a memory reference " "to a non-declaration type"); return; } gcc_assert (!*symbol); *symbol = get_symbol_for_decl (decl); *addrtype = hsa_get_segment_addr_type ((*symbol)->m_segment); } else if (TREE_CODE (base) == INTEGER_CST) *offset += wi::to_offset (base); else gcc_unreachable (); } /* Forward declaration of a function. */ static void gen_hsa_addr_insns (tree val, hsa_op_reg *dest, hsa_bb *hbb); /* Generate HSA address operand for a given tree memory reference REF. If instructions need to be created to calculate the address, they will be added to the end of HBB. If a caller provider OUTPUT_BITSIZE and OUTPUT_BITPOS, the function assumes that the caller will handle possible bit-field references. Otherwise if we reference a bit-field, sorry message is displayed. */ static hsa_op_address * gen_hsa_addr (tree ref, hsa_bb *hbb, HOST_WIDE_INT *output_bitsize = NULL, HOST_WIDE_INT *output_bitpos = NULL) { hsa_symbol *symbol = NULL; hsa_op_reg *reg = NULL; offset_int offset = 0; tree origref = ref; tree varoffset = NULL_TREE; BrigType16_t addrtype = hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT); HOST_WIDE_INT bitsize = 0, bitpos = 0; BrigType16_t flat_addrtype = hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT); if (TREE_CODE (ref) == STRING_CST) { symbol = hsa_get_string_cst_symbol (ref); goto out; } else if (TREE_CODE (ref) == BIT_FIELD_REF && (!multiple_p (bit_field_size (ref), BITS_PER_UNIT) || !multiple_p (bit_field_offset (ref), BITS_PER_UNIT))) { HSA_SORRY_ATV (EXPR_LOCATION (origref), "support for HSA does not implement " "bit field references such as %E", ref); goto out; } if (handled_component_p (ref)) { machine_mode mode; int unsignedp, volatilep, preversep; poly_int64 pbitsize, pbitpos; tree new_ref; new_ref = get_inner_reference (ref, &pbitsize, &pbitpos, &varoffset, &mode, &unsignedp, &preversep, &volatilep); /* When this isn't true, the switch below will report an appropriate error. 
*/ if (pbitsize.is_constant () && pbitpos.is_constant ()) { bitsize = pbitsize.to_constant (); bitpos = pbitpos.to_constant (); ref = new_ref; offset = bitpos; offset = wi::rshift (offset, LOG2_BITS_PER_UNIT, SIGNED); } } switch (TREE_CODE (ref)) { case ADDR_EXPR: { addrtype = hsa_get_segment_addr_type (BRIG_SEGMENT_PRIVATE); symbol = hsa_cfun->create_hsa_temporary (flat_addrtype); hsa_op_reg *r = new hsa_op_reg (flat_addrtype); gen_hsa_addr_insns (ref, r, hbb); hbb->append_insn (new hsa_insn_mem (BRIG_OPCODE_ST, r->m_type, r, new hsa_op_address (symbol))); break; } case SSA_NAME: { addrtype = hsa_get_segment_addr_type (BRIG_SEGMENT_PRIVATE); hsa_op_with_type *r = hsa_cfun->reg_for_gimple_ssa (ref); if (r->m_type == BRIG_TYPE_B1) r = r->get_in_type (BRIG_TYPE_U32, hbb); symbol = hsa_cfun->create_hsa_temporary (r->m_type); hbb->append_insn (new hsa_insn_mem (BRIG_OPCODE_ST, r->m_type, r, new hsa_op_address (symbol))); break; } case PARM_DECL: case VAR_DECL: case RESULT_DECL: case CONST_DECL: gcc_assert (!symbol); symbol = get_symbol_for_decl (ref); addrtype = hsa_get_segment_addr_type (symbol->m_segment); break; case MEM_REF: process_mem_base (TREE_OPERAND (ref, 0), &symbol, &addrtype, &reg, &offset, hbb); if (!integer_zerop (TREE_OPERAND (ref, 1))) offset += wi::to_offset (TREE_OPERAND (ref, 1)); break; case TARGET_MEM_REF: process_mem_base (TMR_BASE (ref), &symbol, &addrtype, &reg, &offset, hbb); if (TMR_INDEX (ref)) { hsa_op_reg *disp1; hsa_op_base *idx = hsa_cfun->reg_for_gimple_ssa (TMR_INDEX (ref))->get_in_type (addrtype, hbb); if (TMR_STEP (ref) && !integer_onep (TMR_STEP (ref))) { disp1 = new hsa_op_reg (addrtype); hsa_insn_basic *insn = new hsa_insn_basic (3, BRIG_OPCODE_MUL, addrtype); /* As step must respect addrtype, we overwrite the type of an immediate value. */ hsa_op_immed *step = new hsa_op_immed (TMR_STEP (ref)); step->m_type = addrtype; insn->set_op (0, disp1); insn->set_op (1, idx); insn->set_op (2, step); hbb->append_insn (insn); } else disp1 = as_a <hsa_op_reg *> (idx); reg = add_addr_regs_if_needed (reg, disp1, hbb); } if (TMR_INDEX2 (ref)) { if (TREE_CODE (TMR_INDEX2 (ref)) == SSA_NAME) { hsa_op_base *disp2 = hsa_cfun->reg_for_gimple_ssa (TMR_INDEX2 (ref))->get_in_type (addrtype, hbb); reg = add_addr_regs_if_needed (reg, as_a <hsa_op_reg *> (disp2), hbb); } else if (TREE_CODE (TMR_INDEX2 (ref)) == INTEGER_CST) offset += wi::to_offset (TMR_INDEX2 (ref)); else gcc_unreachable (); } offset += wi::to_offset (TMR_OFFSET (ref)); break; case FUNCTION_DECL: HSA_SORRY_AT (EXPR_LOCATION (origref), "support for HSA does not implement function pointers"); goto out; default: HSA_SORRY_ATV (EXPR_LOCATION (origref), "support for HSA does " "not implement memory access to %E", origref); goto out; } if (varoffset) { if (TREE_CODE (varoffset) == INTEGER_CST) offset += wi::to_offset (varoffset); else { hsa_op_base *off_op = gen_address_calculation (varoffset, hbb, addrtype); reg = add_addr_regs_if_needed (reg, as_a <hsa_op_reg *> (off_op), hbb); } } gcc_checking_assert ((symbol && addrtype == hsa_get_segment_addr_type (symbol->m_segment)) || (!symbol && addrtype == hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT))); out: HOST_WIDE_INT hwi_offset = offset.to_shwi (); /* Calculate remaining bitsize offset (if presented). */ bitpos %= BITS_PER_UNIT; /* If bitsize is a power of two that is greater or equal to BITS_PER_UNIT, it is not a reason to think this is a bit-field access. 
*/ if (bitpos == 0 && (bitsize >= BITS_PER_UNIT) && !(bitsize & (bitsize - 1))) bitsize = 0; if ((bitpos || bitsize) && (output_bitpos == NULL || output_bitsize == NULL)) HSA_SORRY_ATV (EXPR_LOCATION (origref), "support for HSA does not " "implement unhandled bit field reference such as %E", ref); if (output_bitsize != NULL && output_bitpos != NULL) { *output_bitsize = bitsize; *output_bitpos = bitpos; } return new hsa_op_address (symbol, reg, hwi_offset); } /* Generate HSA address operand for a given tree memory reference REF. If instructions need to be created to calculate the address, they will be added to the end of HBB. OUTPUT_ALIGN is alignment of the created address. */ static hsa_op_address * gen_hsa_addr_with_align (tree ref, hsa_bb *hbb, BrigAlignment8_t *output_align) { hsa_op_address *addr = gen_hsa_addr (ref, hbb); if (addr->m_reg || !addr->m_symbol) *output_align = hsa_object_alignment (ref); else { /* If the address consists only of a symbol and an offset, we compute the alignment ourselves to take into account any alignment promotions we might have done for the HSA symbol representation. */ unsigned align = hsa_byte_alignment (addr->m_symbol->m_align); unsigned misalign = addr->m_imm_offset & (align - 1); if (misalign) align = least_bit_hwi (misalign); *output_align = hsa_alignment_encoding (BITS_PER_UNIT * align); } return addr; } /* Generate HSA address for a function call argument of given TYPE. INDEX is used to generate corresponding name of the arguments. Special value -1 represents fact that result value is created. */ static hsa_op_address * gen_hsa_addr_for_arg (tree tree_type, int index) { hsa_symbol *sym = new hsa_symbol (BRIG_TYPE_NONE, BRIG_SEGMENT_ARG, BRIG_LINKAGE_ARG); sym->m_type = hsa_type_for_tree_type (tree_type, &sym->m_dim); if (index == -1) /* Function result. */ sym->m_name = "res"; else /* Function call arguments. */ { sym->m_name = NULL; sym->m_name_number = index; } return new hsa_op_address (sym); } /* Generate HSA instructions that process all necessary conversions of an ADDR to flat addressing and place the result into DEST. Instructions are appended to HBB. */ static void convert_addr_to_flat_segment (hsa_op_address *addr, hsa_op_reg *dest, hsa_bb *hbb) { hsa_insn_basic *insn = new hsa_insn_basic (2, BRIG_OPCODE_LDA); insn->set_op (1, addr); if (addr->m_symbol && addr->m_symbol->m_segment != BRIG_SEGMENT_GLOBAL) { /* LDA produces segment-relative address, we need to convert it to the flat one. */ hsa_op_reg *tmp; tmp = new hsa_op_reg (hsa_get_segment_addr_type (addr->m_symbol->m_segment)); hsa_insn_seg *seg; seg = new hsa_insn_seg (BRIG_OPCODE_STOF, hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT), tmp->m_type, addr->m_symbol->m_segment, dest, tmp); insn->set_op (0, tmp); insn->m_type = tmp->m_type; hbb->append_insn (insn); hbb->append_insn (seg); } else { insn->set_op (0, dest); insn->m_type = hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT); hbb->append_insn (insn); } } /* Generate HSA instructions that calculate address of VAL including all necessary conversions to flat addressing and place the result into DEST. Instructions are appended to HBB. */ static void gen_hsa_addr_insns (tree val, hsa_op_reg *dest, hsa_bb *hbb) { /* Handle cases like tmp = NULL, where we just emit a move instruction to a register. 
*/ if (TREE_CODE (val) == INTEGER_CST) { hsa_op_immed *c = new hsa_op_immed (val); hsa_insn_basic *insn = new hsa_insn_basic (2, BRIG_OPCODE_MOV, dest->m_type, dest, c); hbb->append_insn (insn); return; } hsa_op_address *addr; gcc_assert (dest->m_type == hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT)); if (TREE_CODE (val) == ADDR_EXPR) val = TREE_OPERAND (val, 0); addr = gen_hsa_addr (val, hbb); if (TREE_CODE (val) == CONST_DECL && is_gimple_reg_type (TREE_TYPE (val))) { gcc_assert (addr->m_symbol && addr->m_symbol->m_segment == BRIG_SEGMENT_READONLY); /* CONST_DECLs are in readonly segment which however does not have addresses convertible to flat segments. So copy it to a private one and take address of that. */ BrigType16_t csttype = mem_type_for_type (hsa_type_for_scalar_tree_type (TREE_TYPE (val), false)); hsa_op_reg *r = new hsa_op_reg (csttype); hbb->append_insn (new hsa_insn_mem (BRIG_OPCODE_LD, csttype, r, new hsa_op_address (addr->m_symbol))); hsa_symbol *copysym = hsa_cfun->create_hsa_temporary (csttype); hbb->append_insn (new hsa_insn_mem (BRIG_OPCODE_ST, csttype, r, new hsa_op_address (copysym))); addr->m_symbol = copysym; } else if (addr->m_symbol && addr->m_symbol->m_segment == BRIG_SEGMENT_READONLY) { HSA_SORRY_ATV (EXPR_LOCATION (val), "support for HSA does " "not implement taking addresses of complex " "%<CONST_DECL%> such as %E", val); return; } convert_addr_to_flat_segment (addr, dest, hbb); } /* Return an HSA register or HSA immediate value operand corresponding to gimple operand OP. */ static hsa_op_with_type * hsa_reg_or_immed_for_gimple_op (tree op, hsa_bb *hbb) { hsa_op_reg *tmp; if (TREE_CODE (op) == SSA_NAME) tmp = hsa_cfun->reg_for_gimple_ssa (op); else if (!POINTER_TYPE_P (TREE_TYPE (op))) return new hsa_op_immed (op); else { tmp = new hsa_op_reg (hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT)); gen_hsa_addr_insns (op, tmp, hbb); } return tmp; } /* Create a simple movement instruction with register destination DEST and register or immediate source SRC and append it to the end of HBB. */ void hsa_build_append_simple_mov (hsa_op_reg *dest, hsa_op_base *src, hsa_bb *hbb) { /* Moves of packed data between registers need to adhere to the same type rules like when dealing with memory. */ BrigType16_t tp = mem_type_for_type (dest->m_type); hsa_insn_basic *insn = new hsa_insn_basic (2, BRIG_OPCODE_MOV, tp, dest, src); hsa_fixup_mov_insn_type (insn); unsigned dest_size = hsa_type_bit_size (dest->m_type); if (hsa_op_reg *sreg = dyn_cast <hsa_op_reg *> (src)) gcc_assert (dest_size == hsa_type_bit_size (sreg->m_type)); else { unsigned imm_size = hsa_type_bit_size (as_a <hsa_op_immed *> (src)->m_type); gcc_assert ((dest_size == imm_size) /* Eventually < 32bit registers will be promoted to 32bit. */ || (dest_size < 32 && imm_size == 32)); } hbb->append_insn (insn); } /* Generate HSAIL instructions loading a bit field into register DEST. VALUE_REG is a register of a SSA name that is used in the bit field reference. To identify a bit field BITPOS is offset to the loaded memory and BITSIZE is number of bits of the bit field. Add instructions to HBB. 
*/ static void gen_hsa_insns_for_bitfield (hsa_op_reg *dest, hsa_op_reg *value_reg, HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos, hsa_bb *hbb) { unsigned type_bitsize = hsa_type_bit_size (hsa_extend_inttype_to_32bit (dest->m_type)); unsigned left_shift = type_bitsize - (bitsize + bitpos); unsigned right_shift = left_shift + bitpos; if (left_shift) { hsa_op_reg *value_reg_2 = new hsa_op_reg (hsa_extend_inttype_to_32bit (dest->m_type)); hsa_op_immed *c = new hsa_op_immed (left_shift, BRIG_TYPE_U32); hsa_insn_basic *lshift = new hsa_insn_basic (3, BRIG_OPCODE_SHL, value_reg_2->m_type, value_reg_2, value_reg, c); hbb->append_insn (lshift); value_reg = value_reg_2; } if (right_shift) { hsa_op_reg *value_reg_2 = new hsa_op_reg (hsa_extend_inttype_to_32bit (dest->m_type)); hsa_op_immed *c = new hsa_op_immed (right_shift, BRIG_TYPE_U32); hsa_insn_basic *rshift = new hsa_insn_basic (3, BRIG_OPCODE_SHR, value_reg_2->m_type, value_reg_2, value_reg, c); hbb->append_insn (rshift); value_reg = value_reg_2; } hsa_insn_basic *assignment = new hsa_insn_basic (2, BRIG_OPCODE_MOV, dest->m_type, NULL, value_reg); hsa_fixup_mov_insn_type (assignment); hbb->append_insn (assignment); assignment->set_output_in_type (dest, 0, hbb); } /* Generate HSAIL instructions loading a bit field into register DEST. ADDR is prepared memory address which is used to load the bit field. To identify a bit field BITPOS is offset to the loaded memory and BITSIZE is number of bits of the bit field. Add instructions to HBB. Load must be performed in alignment ALIGN. */ static void gen_hsa_insns_for_bitfield_load (hsa_op_reg *dest, hsa_op_address *addr, HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos, hsa_bb *hbb, BrigAlignment8_t align) { hsa_op_reg *value_reg = new hsa_op_reg (dest->m_type); hsa_insn_mem *mem = new hsa_insn_mem (BRIG_OPCODE_LD, hsa_extend_inttype_to_32bit (dest->m_type), value_reg, addr); mem->set_align (align); hbb->append_insn (mem); gen_hsa_insns_for_bitfield (dest, value_reg, bitsize, bitpos, hbb); } /* Return the alignment of base memory accesses we issue to perform bit-field memory access REF. */ static BrigAlignment8_t hsa_bitmemref_alignment (tree ref) { unsigned HOST_WIDE_INT bit_offset = 0; while (true) { if (TREE_CODE (ref) == BIT_FIELD_REF) { if (!tree_fits_uhwi_p (TREE_OPERAND (ref, 2))) return BRIG_ALIGNMENT_1; bit_offset += tree_to_uhwi (TREE_OPERAND (ref, 2)); } else if (TREE_CODE (ref) == COMPONENT_REF && DECL_BIT_FIELD (TREE_OPERAND (ref, 1))) bit_offset += int_bit_position (TREE_OPERAND (ref, 1)); else break; ref = TREE_OPERAND (ref, 0); } unsigned HOST_WIDE_INT bits = bit_offset % BITS_PER_UNIT; unsigned HOST_WIDE_INT byte_bits = bit_offset - bits; BrigAlignment8_t base = hsa_object_alignment (ref); if (byte_bits == 0) return base; return MIN (base, hsa_alignment_encoding (least_bit_hwi (byte_bits))); } /* Generate HSAIL instructions loading something into register DEST. RHS is tree representation of the loaded data, which are loaded as type TYPE. Add instructions to HBB. */ static void gen_hsa_insns_for_load (hsa_op_reg *dest, tree rhs, tree type, hsa_bb *hbb) { /* The destination SSA name will give us the type. 
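A VIEW_CONVERT_EXPR wrapper is stripped first because it merely reinterprets the bits of its operand.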
*/ if (TREE_CODE (rhs) == VIEW_CONVERT_EXPR) rhs = TREE_OPERAND (rhs, 0); if (TREE_CODE (rhs) == SSA_NAME) { hsa_op_reg *src = hsa_cfun->reg_for_gimple_ssa (rhs); hsa_build_append_simple_mov (dest, src, hbb); } else if (is_gimple_min_invariant (rhs) || TREE_CODE (rhs) == ADDR_EXPR) { if (POINTER_TYPE_P (TREE_TYPE (rhs))) { if (dest->m_type != hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT)) { HSA_SORRY_ATV (EXPR_LOCATION (rhs), "support for HSA does not implement conversion " "of %E to the requested non-pointer type", rhs); return; } gen_hsa_addr_insns (rhs, dest, hbb); } else if (TREE_CODE (rhs) == COMPLEX_CST) { hsa_op_immed *real_part = new hsa_op_immed (TREE_REALPART (rhs)); hsa_op_immed *imag_part = new hsa_op_immed (TREE_IMAGPART (rhs)); hsa_op_reg *real_part_reg = new hsa_op_reg (hsa_type_for_scalar_tree_type (TREE_TYPE (type), true)); hsa_op_reg *imag_part_reg = new hsa_op_reg (hsa_type_for_scalar_tree_type (TREE_TYPE (type), true)); hsa_build_append_simple_mov (real_part_reg, real_part, hbb); hsa_build_append_simple_mov (imag_part_reg, imag_part, hbb); BrigType16_t src_type = hsa_bittype_for_type (real_part_reg->m_type); hsa_insn_packed *insn = new hsa_insn_packed (3, BRIG_OPCODE_COMBINE, dest->m_type, src_type, dest, real_part_reg, imag_part_reg); hbb->append_insn (insn); } else { hsa_op_immed *imm = new hsa_op_immed (rhs); hsa_build_append_simple_mov (dest, imm, hbb); } } else if (TREE_CODE (rhs) == REALPART_EXPR || TREE_CODE (rhs) == IMAGPART_EXPR) { tree pack_type = TREE_TYPE (TREE_OPERAND (rhs, 0)); hsa_op_reg *packed_reg = new hsa_op_reg (hsa_type_for_scalar_tree_type (pack_type, true)); tree complex_rhs = TREE_OPERAND (rhs, 0); gen_hsa_insns_for_load (packed_reg, complex_rhs, TREE_TYPE (complex_rhs), hbb); hsa_op_reg *real_reg = new hsa_op_reg (hsa_type_for_scalar_tree_type (type, true)); hsa_op_reg *imag_reg = new hsa_op_reg (hsa_type_for_scalar_tree_type (type, true)); BrigKind16_t brig_type = packed_reg->m_type; hsa_insn_packed *packed = new hsa_insn_packed (3, BRIG_OPCODE_EXPAND, hsa_bittype_for_type (real_reg->m_type), brig_type, real_reg, imag_reg, packed_reg); hbb->append_insn (packed); hsa_op_reg *source = TREE_CODE (rhs) == REALPART_EXPR ? real_reg : imag_reg; hsa_insn_basic *insn = new hsa_insn_basic (2, BRIG_OPCODE_MOV, dest->m_type, NULL, source); hsa_fixup_mov_insn_type (insn); hbb->append_insn (insn); insn->set_output_in_type (dest, 0, hbb); } else if (TREE_CODE (rhs) == BIT_FIELD_REF && TREE_CODE (TREE_OPERAND (rhs, 0)) == SSA_NAME) { tree ssa_name = TREE_OPERAND (rhs, 0); HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs, 1)); HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs, 2)); hsa_op_reg *imm_value = hsa_cfun->reg_for_gimple_ssa (ssa_name); gen_hsa_insns_for_bitfield (dest, imm_value, bitsize, bitpos, hbb); } else if (DECL_P (rhs) || TREE_CODE (rhs) == MEM_REF || TREE_CODE (rhs) == TARGET_MEM_REF || handled_component_p (rhs)) { HOST_WIDE_INT bitsize, bitpos; /* Load from memory. */ hsa_op_address *addr; addr = gen_hsa_addr (rhs, hbb, &bitsize, &bitpos); /* Handle load of a bit field. */ if (bitsize > 64) { HSA_SORRY_AT (EXPR_LOCATION (rhs), "support for HSA does not implement load from a bit " "field bigger than 64 bits"); return; } if (bitsize || bitpos) gen_hsa_insns_for_bitfield_load (dest, addr, bitsize, bitpos, hbb, hsa_bitmemref_alignment (rhs)); else { BrigType16_t mtype; /* Not dest->m_type, that's possibly extended. 
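The memory access has to use the width of the original scalar TYPE; small integer types are promoted to 32 bits only while they live in registers.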
*/ mtype = mem_type_for_type (hsa_type_for_scalar_tree_type (type, false)); hsa_insn_mem *mem = new hsa_insn_mem (BRIG_OPCODE_LD, mtype, dest, addr); mem->set_align (hsa_object_alignment (rhs)); hbb->append_insn (mem); } } else HSA_SORRY_ATV (EXPR_LOCATION (rhs), "support for HSA does not implement loading " "of expression %E", rhs); } /* Return number of bits necessary for representation of a bit field, starting at BITPOS with size of BITSIZE. */ static unsigned get_bitfield_size (unsigned bitpos, unsigned bitsize) { unsigned s = bitpos + bitsize; unsigned sizes[] = {8, 16, 32, 64}; for (unsigned i = 0; i < 4; i++) if (s <= sizes[i]) return sizes[i]; gcc_unreachable (); return 0; } /* Generate HSAIL instructions storing into memory. LHS is the destination of the store, SRC is the source operand. Add instructions to HBB. */ static void gen_hsa_insns_for_store (tree lhs, hsa_op_base *src, hsa_bb *hbb) { HOST_WIDE_INT bitsize = 0, bitpos = 0; BrigAlignment8_t req_align; BrigType16_t mtype; mtype = mem_type_for_type (hsa_type_for_scalar_tree_type (TREE_TYPE (lhs), false)); hsa_op_address *addr; addr = gen_hsa_addr (lhs, hbb, &bitsize, &bitpos); /* Handle store to a bit field. */ if (bitsize > 64) { HSA_SORRY_AT (EXPR_LOCATION (lhs), "support for HSA does not implement store to a bit field " "bigger than 64 bits"); return; } unsigned type_bitsize = get_bitfield_size (bitpos, bitsize); /* HSAIL does not support MOV insn with 16-bits integers. */ if (type_bitsize < 32) type_bitsize = 32; if (bitpos || (bitsize && type_bitsize != bitsize)) { unsigned HOST_WIDE_INT mask = 0; BrigType16_t mem_type = get_integer_type_by_bytes (type_bitsize / BITS_PER_UNIT, !TYPE_UNSIGNED (TREE_TYPE (lhs))); for (unsigned i = 0; i < type_bitsize; i++) if (i < bitpos || i >= bitpos + bitsize) mask |= ((unsigned HOST_WIDE_INT)1 << i); hsa_op_reg *value_reg = new hsa_op_reg (mem_type); req_align = hsa_bitmemref_alignment (lhs); /* Load value from memory. */ hsa_insn_mem *mem = new hsa_insn_mem (BRIG_OPCODE_LD, mem_type, value_reg, addr); mem->set_align (req_align); hbb->append_insn (mem); /* AND the loaded value with prepared mask. */ hsa_op_reg *cleared_reg = new hsa_op_reg (mem_type); BrigType16_t t = get_integer_type_by_bytes (type_bitsize / BITS_PER_UNIT, false); hsa_op_immed *c = new hsa_op_immed (mask, t); hsa_insn_basic *clearing = new hsa_insn_basic (3, BRIG_OPCODE_AND, mem_type, cleared_reg, value_reg, c); hbb->append_insn (clearing); /* Shift to left a value that is going to be stored. */ hsa_op_reg *new_value_reg = new hsa_op_reg (mem_type); hsa_insn_basic *basic = new hsa_insn_basic (2, BRIG_OPCODE_MOV, mem_type, new_value_reg, src); hsa_fixup_mov_insn_type (basic); hbb->append_insn (basic); if (bitpos) { hsa_op_reg *shifted_value_reg = new hsa_op_reg (mem_type); c = new hsa_op_immed (bitpos, BRIG_TYPE_U32); hsa_insn_basic *basic = new hsa_insn_basic (3, BRIG_OPCODE_SHL, mem_type, shifted_value_reg, new_value_reg, c); hbb->append_insn (basic); new_value_reg = shifted_value_reg; } /* OR the prepared value with prepared chunk loaded from memory. 
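This completes the read-modify-write sequence for a bit-field store: the containing word has been loaded, the bits of the field have been cleared with the mask, the new value has been shifted into position, and the OR below merges it before the word is written back.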
*/ hsa_op_reg *prepared_reg= new hsa_op_reg (mem_type); basic = new hsa_insn_basic (3, BRIG_OPCODE_OR, mem_type, prepared_reg, new_value_reg, cleared_reg); hbb->append_insn (basic); src = prepared_reg; mtype = mem_type; } else req_align = hsa_object_alignment (lhs); hsa_insn_mem *mem = new hsa_insn_mem (BRIG_OPCODE_ST, mtype, src, addr); mem->set_align (req_align); /* The HSAIL verifier has another constraint: if the source is an immediate then it must match the destination type. If it's a register the low bits will be used for sub-word stores. We're always allocating new operands so we can modify the above in place. */ if (hsa_op_immed *imm = dyn_cast <hsa_op_immed *> (src)) { if (!hsa_type_packed_p (imm->m_type)) imm->m_type = mem->m_type; else { /* ...and all vector immediates apparently need to be vectors of unsigned bytes. */ unsigned bs = hsa_type_bit_size (imm->m_type); gcc_assert (bs == hsa_type_bit_size (mem->m_type)); switch (bs) { case 32: imm->m_type = BRIG_TYPE_U8X4; break; case 64: imm->m_type = BRIG_TYPE_U8X8; break; case 128: imm->m_type = BRIG_TYPE_U8X16; break; default: gcc_unreachable (); } } } hbb->append_insn (mem); } /* Generate memory copy instructions that are going to be used for copying a SRC memory to TARGET memory, represented by pointer in a register. MIN_ALIGN is minimal alignment of provided HSA addresses. */ static void gen_hsa_memory_copy (hsa_bb *hbb, hsa_op_address *target, hsa_op_address *src, unsigned size, BrigAlignment8_t min_align) { hsa_op_address *addr; hsa_insn_mem *mem; unsigned offset = 0; unsigned min_byte_align = hsa_byte_alignment (min_align); while (size) { unsigned s; if (size >= 8) s = 8; else if (size >= 4) s = 4; else if (size >= 2) s = 2; else s = 1; if (s > min_byte_align) s = min_byte_align; BrigType16_t t = get_integer_type_by_bytes (s, false); hsa_op_reg *tmp = new hsa_op_reg (t); addr = new hsa_op_address (src->m_symbol, src->m_reg, src->m_imm_offset + offset); mem = new hsa_insn_mem (BRIG_OPCODE_LD, t, tmp, addr); hbb->append_insn (mem); addr = new hsa_op_address (target->m_symbol, target->m_reg, target->m_imm_offset + offset); mem = new hsa_insn_mem (BRIG_OPCODE_ST, t, tmp, addr); hbb->append_insn (mem); offset += s; size -= s; } } /* Create a memset mask that is created by copying a CONSTANT byte value to an integer of BYTE_SIZE bytes. */ static unsigned HOST_WIDE_INT build_memset_value (unsigned HOST_WIDE_INT constant, unsigned byte_size) { if (constant == 0) return 0; HOST_WIDE_INT v = constant; for (unsigned i = 1; i < byte_size; i++) v |= constant << (8 * i); return v; } /* Generate memory set instructions that are going to be used for setting a CONSTANT byte value to TARGET memory of SIZE bytes. MIN_ALIGN is minimal alignment of provided HSA addresses. 
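As in gen_hsa_memory_copy above, the loop emits the widest stores that the remaining SIZE and MIN_ALIGN allow, working in chunks of 8, 4, 2 or 1 bytes.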
*/ static void gen_hsa_memory_set (hsa_bb *hbb, hsa_op_address *target, unsigned HOST_WIDE_INT constant, unsigned size, BrigAlignment8_t min_align) { hsa_op_address *addr; hsa_insn_mem *mem; unsigned offset = 0; unsigned min_byte_align = hsa_byte_alignment (min_align); while (size) { unsigned s; if (size >= 8) s = 8; else if (size >= 4) s = 4; else if (size >= 2) s = 2; else s = 1; if (s > min_byte_align) s = min_byte_align; addr = new hsa_op_address (target->m_symbol, target->m_reg, target->m_imm_offset + offset); BrigType16_t t = get_integer_type_by_bytes (s, false); HOST_WIDE_INT c = build_memset_value (constant, s); mem = new hsa_insn_mem (BRIG_OPCODE_ST, t, new hsa_op_immed (c, t), addr); hbb->append_insn (mem); offset += s; size -= s; } } /* Generate HSAIL instructions for a single assignment of an empty constructor to an ADDR_LHS. Constructor is passed as a tree RHS and all instructions are appended to HBB. ALIGN is alignment of the address. */ void gen_hsa_ctor_assignment (hsa_op_address *addr_lhs, tree rhs, hsa_bb *hbb, BrigAlignment8_t align) { if (CONSTRUCTOR_NELTS (rhs)) { HSA_SORRY_AT (EXPR_LOCATION (rhs), "support for HSA does not implement load from constructor"); return; } unsigned size = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (rhs))); gen_hsa_memory_set (hbb, addr_lhs, 0, size, align); } /* Generate HSA instructions for a single assignment of RHS to LHS. HBB is the basic block they will be appended to. */ static void gen_hsa_insns_for_single_assignment (tree lhs, tree rhs, hsa_bb *hbb) { if (TREE_CODE (lhs) == SSA_NAME) { hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs); if (hsa_seen_error ()) return; gen_hsa_insns_for_load (dest, rhs, TREE_TYPE (lhs), hbb); } else if (TREE_CODE (rhs) == SSA_NAME || (is_gimple_min_invariant (rhs) && TREE_CODE (rhs) != STRING_CST)) { /* Store to memory. */ hsa_op_base *src = hsa_reg_or_immed_for_gimple_op (rhs, hbb); if (hsa_seen_error ()) return; gen_hsa_insns_for_store (lhs, src, hbb); } else { BrigAlignment8_t lhs_align; hsa_op_address *addr_lhs = gen_hsa_addr_with_align (lhs, hbb, &lhs_align); if (TREE_CODE (rhs) == CONSTRUCTOR) gen_hsa_ctor_assignment (addr_lhs, rhs, hbb, lhs_align); else { BrigAlignment8_t rhs_align; hsa_op_address *addr_rhs = gen_hsa_addr_with_align (rhs, hbb, &rhs_align); unsigned size = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (rhs))); gen_hsa_memory_copy (hbb, addr_lhs, addr_rhs, size, MIN (lhs_align, rhs_align)); } } } /* Prepend before INSN a load from spill symbol of SPILL_REG. Return the register into which we loaded. If this required another register to convert from a B1 type, return it in *PTMP2, otherwise store NULL into it. We assume we are out of SSA so the returned register does not have its definition set. */ hsa_op_reg * hsa_spill_in (hsa_insn_basic *insn, hsa_op_reg *spill_reg, hsa_op_reg **ptmp2) { hsa_symbol *spill_sym = spill_reg->m_spill_sym; hsa_op_reg *reg = new hsa_op_reg (spill_sym->m_type); hsa_op_address *addr = new hsa_op_address (spill_sym); hsa_insn_mem *mem = new hsa_insn_mem (BRIG_OPCODE_LD, spill_sym->m_type, reg, addr); hsa_insert_insn_before (mem, insn); *ptmp2 = NULL; if (spill_reg->m_type == BRIG_TYPE_B1) { hsa_insn_basic *cvtinsn; *ptmp2 = reg; reg = new hsa_op_reg (spill_reg->m_type); cvtinsn = new hsa_insn_cvt (reg, *ptmp2); hsa_insert_insn_before (cvtinsn, insn); } return reg; } /* Append after INSN a store to spill symbol of SPILL_REG. Return the register from which we stored. 
If this required another register to convert to a B1 type, return it in *PTMP2, otherwise store NULL into it. We assume we are out of SSA so the returned register does not have its use updated. */ hsa_op_reg * hsa_spill_out (hsa_insn_basic *insn, hsa_op_reg *spill_reg, hsa_op_reg **ptmp2) { hsa_symbol *spill_sym = spill_reg->m_spill_sym; hsa_op_reg *reg = new hsa_op_reg (spill_sym->m_type); hsa_op_address *addr = new hsa_op_address (spill_sym); hsa_op_reg *returnreg; *ptmp2 = NULL; returnreg = reg; if (spill_reg->m_type == BRIG_TYPE_B1) { hsa_insn_basic *cvtinsn; *ptmp2 = new hsa_op_reg (spill_sym->m_type); reg->m_type = spill_reg->m_type; cvtinsn = new hsa_insn_cvt (*ptmp2, returnreg); hsa_append_insn_after (cvtinsn, insn); insn = cvtinsn; reg = *ptmp2; } hsa_insn_mem *mem = new hsa_insn_mem (BRIG_OPCODE_ST, spill_sym->m_type, reg, addr); hsa_append_insn_after (mem, insn); return returnreg; } /* Generate a comparison instruction that will compare LHS and RHS with comparison specified by CODE and put result into register DEST. DEST has to have its type set already but must not have its definition set yet. Generated instructions will be added to HBB. */ static void gen_hsa_cmp_insn_from_gimple (enum tree_code code, tree lhs, tree rhs, hsa_op_reg *dest, hsa_bb *hbb) { BrigCompareOperation8_t compare; switch (code) { case LT_EXPR: compare = BRIG_COMPARE_LT; break; case LE_EXPR: compare = BRIG_COMPARE_LE; break; case GT_EXPR: compare = BRIG_COMPARE_GT; break; case GE_EXPR: compare = BRIG_COMPARE_GE; break; case EQ_EXPR: compare = BRIG_COMPARE_EQ; break; case NE_EXPR: compare = BRIG_COMPARE_NE; break; case UNORDERED_EXPR: compare = BRIG_COMPARE_NAN; break; case ORDERED_EXPR: compare = BRIG_COMPARE_NUM; break; case UNLT_EXPR: compare = BRIG_COMPARE_LTU; break; case UNLE_EXPR: compare = BRIG_COMPARE_LEU; break; case UNGT_EXPR: compare = BRIG_COMPARE_GTU; break; case UNGE_EXPR: compare = BRIG_COMPARE_GEU; break; case UNEQ_EXPR: compare = BRIG_COMPARE_EQU; break; case LTGT_EXPR: compare = BRIG_COMPARE_NEU; break; default: HSA_SORRY_ATV (EXPR_LOCATION (lhs), "support for HSA does not implement comparison tree " "code %s", get_tree_code_name (code)); return; } /* CMP instruction returns e.g. 0xffffffff (for a 32-bit with integer) as a result of comparison. */ BrigType16_t dest_type = hsa_type_integer_p (dest->m_type) ? (BrigType16_t) BRIG_TYPE_B1 : dest->m_type; hsa_insn_cmp *cmp = new hsa_insn_cmp (compare, dest_type); hsa_op_with_type *op1 = hsa_reg_or_immed_for_gimple_op (lhs, hbb); cmp->set_op (1, op1->extend_int_to_32bit (hbb)); hsa_op_with_type *op2 = hsa_reg_or_immed_for_gimple_op (rhs, hbb); cmp->set_op (2, op2->extend_int_to_32bit (hbb)); hbb->append_insn (cmp); cmp->set_output_in_type (dest, 0, hbb); } /* Generate an unary instruction with OPCODE and append it to a basic block HBB. The instruction uses DEST as a destination and OP1 as a single operand. */ static void gen_hsa_unary_operation (BrigOpcode opcode, hsa_op_reg *dest, hsa_op_with_type *op1, hsa_bb *hbb) { gcc_checking_assert (dest); hsa_insn_basic *insn; if (opcode == BRIG_OPCODE_MOV && hsa_needs_cvt (dest->m_type, op1->m_type)) { insn = new hsa_insn_cvt (dest, op1); hbb->append_insn (insn); return; } op1 = op1->extend_int_to_32bit (hbb); if (opcode == BRIG_OPCODE_FIRSTBIT || opcode == BRIG_OPCODE_LASTBIT) { BrigType16_t srctype = hsa_type_integer_p (op1->m_type) ? 
op1->m_type : hsa_unsigned_type_for_type (op1->m_type); insn = new hsa_insn_srctype (2, opcode, BRIG_TYPE_U32, srctype, NULL, op1); } else { BrigType16_t optype = hsa_extend_inttype_to_32bit (dest->m_type); insn = new hsa_insn_basic (2, opcode, optype, NULL, op1); if (opcode == BRIG_OPCODE_MOV) hsa_fixup_mov_insn_type (insn); else if (opcode == BRIG_OPCODE_ABS || opcode == BRIG_OPCODE_NEG) { /* ABS and NEG only exist in _s form :-/ */ if (insn->m_type == BRIG_TYPE_U32) insn->m_type = BRIG_TYPE_S32; else if (insn->m_type == BRIG_TYPE_U64) insn->m_type = BRIG_TYPE_S64; } } hbb->append_insn (insn); insn->set_output_in_type (dest, 0, hbb); } /* Generate a binary instruction with OPCODE and append it to a basic block HBB. The instruction uses DEST as a destination and operands OP1 and OP2. */ static void gen_hsa_binary_operation (int opcode, hsa_op_reg *dest, hsa_op_with_type *op1, hsa_op_with_type *op2, hsa_bb *hbb) { gcc_checking_assert (dest); BrigType16_t optype = hsa_extend_inttype_to_32bit (dest->m_type); op1 = op1->extend_int_to_32bit (hbb); op2 = op2->extend_int_to_32bit (hbb); if ((opcode == BRIG_OPCODE_SHL || opcode == BRIG_OPCODE_SHR) && is_a <hsa_op_immed *> (op2)) { hsa_op_immed *i = dyn_cast <hsa_op_immed *> (op2); i->set_type (BRIG_TYPE_U32); } if ((opcode == BRIG_OPCODE_OR || opcode == BRIG_OPCODE_XOR || opcode == BRIG_OPCODE_AND) && is_a <hsa_op_immed *> (op2)) { hsa_op_immed *i = dyn_cast <hsa_op_immed *> (op2); i->set_type (hsa_unsigned_type_for_type (i->m_type)); } hsa_insn_basic *insn = new hsa_insn_basic (3, opcode, optype, NULL, op1, op2); hbb->append_insn (insn); insn->set_output_in_type (dest, 0, hbb); } /* Generate HSA instructions for a single assignment. HBB is the basic block they will be appended to. */ static void gen_hsa_insns_for_operation_assignment (gimple *assign, hsa_bb *hbb) { tree_code code = gimple_assign_rhs_code (assign); gimple_rhs_class rhs_class = get_gimple_rhs_class (gimple_expr_code (assign)); tree lhs = gimple_assign_lhs (assign); tree rhs1 = gimple_assign_rhs1 (assign); tree rhs2 = gimple_assign_rhs2 (assign); tree rhs3 = gimple_assign_rhs3 (assign); BrigOpcode opcode; switch (code) { CASE_CONVERT: case FLOAT_EXPR: /* The opcode is changed to BRIG_OPCODE_CVT if BRIG types needs a conversion. */ opcode = BRIG_OPCODE_MOV; break; case PLUS_EXPR: case POINTER_PLUS_EXPR: opcode = BRIG_OPCODE_ADD; break; case MINUS_EXPR: opcode = BRIG_OPCODE_SUB; break; case MULT_EXPR: opcode = BRIG_OPCODE_MUL; break; case MULT_HIGHPART_EXPR: opcode = BRIG_OPCODE_MULHI; break; case RDIV_EXPR: case TRUNC_DIV_EXPR: case EXACT_DIV_EXPR: opcode = BRIG_OPCODE_DIV; break; case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: HSA_SORRY_AT (gimple_location (assign), "support for HSA does not implement %<CEIL_DIV_EXPR%>, " "%<FLOOR_DIV_EXPR%> or %<ROUND_DIV_EXPR%>"); return; case TRUNC_MOD_EXPR: opcode = BRIG_OPCODE_REM; break; case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: HSA_SORRY_AT (gimple_location (assign), "support for HSA does not implement %<CEIL_MOD_EXPR%>, " "%<FLOOR_MOD_EXPR%> or %<ROUND_MOD_EXPR%>"); return; case NEGATE_EXPR: opcode = BRIG_OPCODE_NEG; break; case MIN_EXPR: opcode = BRIG_OPCODE_MIN; break; case MAX_EXPR: opcode = BRIG_OPCODE_MAX; break; case ABS_EXPR: opcode = BRIG_OPCODE_ABS; break; case LSHIFT_EXPR: opcode = BRIG_OPCODE_SHL; break; case RSHIFT_EXPR: opcode = BRIG_OPCODE_SHR; break; case LROTATE_EXPR: case RROTATE_EXPR: { hsa_insn_basic *insn = NULL; int code1 = code == LROTATE_EXPR ? 
BRIG_OPCODE_SHL : BRIG_OPCODE_SHR; int code2 = code != LROTATE_EXPR ? BRIG_OPCODE_SHL : BRIG_OPCODE_SHR; BrigType16_t btype = hsa_type_for_scalar_tree_type (TREE_TYPE (lhs), true); hsa_op_with_type *src = hsa_reg_or_immed_for_gimple_op (rhs1, hbb); hsa_op_reg *op1 = new hsa_op_reg (btype); hsa_op_reg *op2 = new hsa_op_reg (btype); hsa_op_with_type *shift1 = hsa_reg_or_immed_for_gimple_op (rhs2, hbb); tree type = TREE_TYPE (rhs2); unsigned HOST_WIDE_INT bitsize = TREE_INT_CST_LOW (TYPE_SIZE (type)); hsa_op_with_type *shift2 = NULL; if (TREE_CODE (rhs2) == INTEGER_CST) shift2 = new hsa_op_immed (bitsize - tree_to_uhwi (rhs2), BRIG_TYPE_U32); else if (TREE_CODE (rhs2) == SSA_NAME) { hsa_op_reg *s = hsa_cfun->reg_for_gimple_ssa (rhs2); s = as_a <hsa_op_reg *> (s->extend_int_to_32bit (hbb)); hsa_op_reg *d = new hsa_op_reg (s->m_type); hsa_op_immed *size_imm = new hsa_op_immed (bitsize, BRIG_TYPE_U32); insn = new hsa_insn_basic (3, BRIG_OPCODE_SUB, d->m_type, d, s, size_imm); hbb->append_insn (insn); shift2 = d; } else gcc_unreachable (); hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs); gen_hsa_binary_operation (code1, op1, src, shift1, hbb); gen_hsa_binary_operation (code2, op2, src, shift2, hbb); gen_hsa_binary_operation (BRIG_OPCODE_OR, dest, op1, op2, hbb); return; } case BIT_IOR_EXPR: opcode = BRIG_OPCODE_OR; break; case BIT_XOR_EXPR: opcode = BRIG_OPCODE_XOR; break; case BIT_AND_EXPR: opcode = BRIG_OPCODE_AND; break; case BIT_NOT_EXPR: opcode = BRIG_OPCODE_NOT; break; case FIX_TRUNC_EXPR: { hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs); hsa_op_with_type *v = hsa_reg_or_immed_for_gimple_op (rhs1, hbb); if (hsa_needs_cvt (dest->m_type, v->m_type)) { hsa_op_reg *tmp = new hsa_op_reg (v->m_type); hsa_insn_basic *insn = new hsa_insn_basic (2, BRIG_OPCODE_TRUNC, tmp->m_type, tmp, v); hbb->append_insn (insn); hsa_insn_basic *cvtinsn = new hsa_insn_cvt (dest, tmp); hbb->append_insn (cvtinsn); } else { hsa_insn_basic *insn = new hsa_insn_basic (2, BRIG_OPCODE_TRUNC, dest->m_type, dest, v); hbb->append_insn (insn); } return; } opcode = BRIG_OPCODE_TRUNC; break; case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: case EQ_EXPR: case NE_EXPR: case UNORDERED_EXPR: case ORDERED_EXPR: case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: { hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (gimple_assign_lhs (assign)); gen_hsa_cmp_insn_from_gimple (code, rhs1, rhs2, dest, hbb); return; } case COND_EXPR: { hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (gimple_assign_lhs (assign)); hsa_op_with_type *ctrl = NULL; tree cond = rhs1; if (CONSTANT_CLASS_P (cond) || TREE_CODE (cond) == SSA_NAME) ctrl = hsa_reg_or_immed_for_gimple_op (cond, hbb); else { hsa_op_reg *r = new hsa_op_reg (BRIG_TYPE_B1); gen_hsa_cmp_insn_from_gimple (TREE_CODE (cond), TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1), r, hbb); ctrl = r; } hsa_op_with_type *op2 = hsa_reg_or_immed_for_gimple_op (rhs2, hbb); hsa_op_with_type *op3 = hsa_reg_or_immed_for_gimple_op (rhs3, hbb); op2 = op2->extend_int_to_32bit (hbb); op3 = op3->extend_int_to_32bit (hbb); BrigType16_t type = hsa_extend_inttype_to_32bit (dest->m_type); BrigType16_t utype = hsa_unsigned_type_for_type (type); if (is_a <hsa_op_immed *> (op2)) op2->m_type = utype; if (is_a <hsa_op_immed *> (op3)) op3->m_type = utype; hsa_insn_basic *insn = new hsa_insn_basic (4, BRIG_OPCODE_CMOV, hsa_bittype_for_type (type), NULL, ctrl, op2, op3); hbb->append_insn (insn); insn->set_output_in_type (dest, 0, hbb); return; } case COMPLEX_EXPR: { 
hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (gimple_assign_lhs (assign)); hsa_op_with_type *rhs1_reg = hsa_reg_or_immed_for_gimple_op (rhs1, hbb); rhs1_reg = rhs1_reg->extend_int_to_32bit (hbb); hsa_op_with_type *rhs2_reg = hsa_reg_or_immed_for_gimple_op (rhs2, hbb); rhs2_reg = rhs2_reg->extend_int_to_32bit (hbb); if (hsa_seen_error ()) return; BrigType16_t src_type = hsa_bittype_for_type (rhs1_reg->m_type); rhs1_reg = rhs1_reg->get_in_type (src_type, hbb); rhs2_reg = rhs2_reg->get_in_type (src_type, hbb); hsa_insn_packed *insn = new hsa_insn_packed (3, BRIG_OPCODE_COMBINE, dest->m_type, src_type, dest, rhs1_reg, rhs2_reg); hbb->append_insn (insn); return; } default: /* Implement others as we come across them. */ HSA_SORRY_ATV (gimple_location (assign), "support for HSA does not implement operation %s", get_tree_code_name (code)); return; } hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs); hsa_op_with_type *op1 = hsa_reg_or_immed_for_gimple_op (rhs1, hbb); hsa_op_with_type *op2 = rhs2 ? hsa_reg_or_immed_for_gimple_op (rhs2, hbb) : NULL; if (hsa_seen_error ()) return; switch (rhs_class) { case GIMPLE_TERNARY_RHS: { hsa_op_with_type *op3 = hsa_reg_or_immed_for_gimple_op (rhs3, hbb); op3 = op3->extend_int_to_32bit (hbb); hsa_insn_basic *insn = new hsa_insn_basic (4, opcode, dest->m_type, dest, op1, op2, op3); hbb->append_insn (insn); } return; case GIMPLE_BINARY_RHS: gen_hsa_binary_operation (opcode, dest, op1, op2, hbb); break; case GIMPLE_UNARY_RHS: gen_hsa_unary_operation (opcode, dest, op1, hbb); break; default: gcc_unreachable (); } } /* Generate HSA instructions for a given gimple condition statement COND. Instructions will be appended to HBB, which also needs to be the corresponding structure to the basic_block of COND. */ static void gen_hsa_insns_for_cond_stmt (gimple *cond, hsa_bb *hbb) { hsa_op_reg *ctrl = new hsa_op_reg (BRIG_TYPE_B1); hsa_insn_cbr *cbr; gen_hsa_cmp_insn_from_gimple (gimple_cond_code (cond), gimple_cond_lhs (cond), gimple_cond_rhs (cond), ctrl, hbb); cbr = new hsa_insn_cbr (ctrl); hbb->append_insn (cbr); } /* Maximum number of elements in a jump table for an HSA SBR instruction. */ #define HSA_MAXIMUM_SBR_LABELS 16 /* Return lowest value of a switch S that is handled in a non-default label. */ static tree get_switch_low (gswitch *s) { unsigned labels = gimple_switch_num_labels (s); gcc_checking_assert (labels >= 1); return CASE_LOW (gimple_switch_label (s, 1)); } /* Return highest value of a switch S that is handled in a non-default label. */ static tree get_switch_high (gswitch *s) { unsigned labels = gimple_switch_num_labels (s); /* Compare last label to maximum number of labels. */ tree label = gimple_switch_label (s, labels - 1); tree low = CASE_LOW (label); tree high = CASE_HIGH (label); return high != NULL_TREE ? high : low; } /* Return the difference between the highest and lowest case value handled by switch S. */ static tree get_switch_size (gswitch *s) { return int_const_binop (MINUS_EXPR, get_switch_high (s), get_switch_low (s)); } /* Generate HSA instructions for a given gimple switch. Instructions will be appended to HBB. */ static void gen_hsa_insns_for_switch_stmt (gswitch *s, hsa_bb *hbb) { gimple_stmt_iterator it = gsi_for_stmt (s); gsi_prev (&it); /* Create preamble that verifies that index - lowest_label >= 0.
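The switch is lowered into a range check followed by an SBR instruction: an index outside [lowest, highest] branches to the default label, while an in-range index selects its target from a jump table with highest - lowest + 1 entries.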
*/ edge e = split_block (hbb->m_bb, gsi_stmt (it)); e->flags &= ~EDGE_FALLTHRU; e->flags |= EDGE_TRUE_VALUE; tree index_tree = gimple_switch_index (s); tree lowest = get_switch_low (s); tree highest = get_switch_high (s); hsa_op_reg *index = hsa_cfun->reg_for_gimple_ssa (index_tree); index = as_a <hsa_op_reg *> (index->extend_int_to_32bit (hbb)); hsa_op_reg *cmp1_reg = new hsa_op_reg (BRIG_TYPE_B1); hsa_op_immed *cmp1_immed = new hsa_op_immed (lowest, true); hbb->append_insn (new hsa_insn_cmp (BRIG_COMPARE_GE, cmp1_reg->m_type, cmp1_reg, index, cmp1_immed)); hsa_op_reg *cmp2_reg = new hsa_op_reg (BRIG_TYPE_B1); hsa_op_immed *cmp2_immed = new hsa_op_immed (highest, true); hbb->append_insn (new hsa_insn_cmp (BRIG_COMPARE_LE, cmp2_reg->m_type, cmp2_reg, index, cmp2_immed)); hsa_op_reg *cmp_reg = new hsa_op_reg (BRIG_TYPE_B1); hbb->append_insn (new hsa_insn_basic (3, BRIG_OPCODE_AND, cmp_reg->m_type, cmp_reg, cmp1_reg, cmp2_reg)); hbb->append_insn (new hsa_insn_cbr (cmp_reg)); basic_block default_label_bb = gimple_switch_default_bb (cfun, s); if (!gimple_seq_empty_p (phi_nodes (default_label_bb))) { default_label_bb = split_edge (find_edge (e->dest, default_label_bb)); hsa_init_new_bb (default_label_bb); } make_edge (e->src, default_label_bb, EDGE_FALSE_VALUE); hsa_cfun->m_modified_cfg = true; /* Basic block with the SBR instruction. */ hbb = hsa_init_new_bb (e->dest); hsa_op_reg *sub_index = new hsa_op_reg (index->m_type); hbb->append_insn (new hsa_insn_basic (3, BRIG_OPCODE_SUB, sub_index->m_type, sub_index, index, new hsa_op_immed (lowest, true))); hsa_op_base *tmp = sub_index->get_in_type (BRIG_TYPE_U64, hbb); sub_index = as_a <hsa_op_reg *> (tmp); unsigned labels = gimple_switch_num_labels (s); unsigned HOST_WIDE_INT size = tree_to_uhwi (get_switch_size (s)); hsa_insn_sbr *sbr = new hsa_insn_sbr (sub_index, size + 1); /* Prepare array with default label destination. */ for (unsigned HOST_WIDE_INT i = 0; i <= size; i++) sbr->m_jump_table.safe_push (default_label_bb); /* Iterate all labels and fill up the jump table. */ for (unsigned i = 1; i < labels; i++) { tree label = gimple_switch_label (s, i); basic_block bb = label_to_block (cfun, CASE_LABEL (label)); unsigned HOST_WIDE_INT sub_low = tree_to_uhwi (int_const_binop (MINUS_EXPR, CASE_LOW (label), lowest)); unsigned HOST_WIDE_INT sub_high = sub_low; tree high = CASE_HIGH (label); if (high != NULL) sub_high = tree_to_uhwi (int_const_binop (MINUS_EXPR, high, lowest)); for (unsigned HOST_WIDE_INT j = sub_low; j <= sub_high; j++) sbr->m_jump_table[j] = bb; } hbb->append_insn (sbr); } /* Verify that the function DECL can be handled by HSA. */ static void verify_function_arguments (tree decl) { tree type = TREE_TYPE (decl); if (DECL_STATIC_CHAIN (decl)) { HSA_SORRY_ATV (EXPR_LOCATION (decl), "HSA does not support nested functions: %qD", decl); return; } else if (!TYPE_ARG_TYPES (type) || stdarg_p (type)) { HSA_SORRY_ATV (EXPR_LOCATION (decl), "HSA does not support functions with variadic arguments " "(or unknown return type): %qD", decl); return; } } /* Return BRIG type for FORMAL_ARG_TYPE. If the formal argument type is NULL, return ACTUAL_ARG_TYPE. */ static BrigType16_t get_format_argument_type (tree formal_arg_type, BrigType16_t actual_arg_type) { if (formal_arg_type == NULL) return actual_arg_type; BrigType16_t decl_type = hsa_type_for_scalar_tree_type (formal_arg_type, false); return mem_type_for_type (decl_type); } /* Generate HSA instructions for a direct call instruction. 
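Arguments are passed through an HSAIL argument scope: an ARG_BLOCK_START directive opens the scope, each actual argument is stored into an arg-segment variable, the call instruction is emitted, and ARG_BLOCK_END closes the scope again.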
Instructions will be appended to HBB, which also needs to be the corresponding structure to the basic_block of STMT. If ASSIGN_LHS is false, do not copy HSA function result argument into the corresponding HSA representation of the gimple statement LHS. */ static void gen_hsa_insns_for_direct_call (gimple *stmt, hsa_bb *hbb, bool assign_lhs = true) { tree decl = gimple_call_fndecl (stmt); verify_function_arguments (decl); if (hsa_seen_error ()) return; hsa_insn_call *call_insn = new hsa_insn_call (decl); hsa_cfun->m_called_functions.safe_push (call_insn->m_called_function); /* Argument block start. */ hsa_insn_arg_block *arg_start = new hsa_insn_arg_block (BRIG_KIND_DIRECTIVE_ARG_BLOCK_START, call_insn); hbb->append_insn (arg_start); tree parm_type_chain = TYPE_ARG_TYPES (gimple_call_fntype (stmt)); /* Preparation of arguments that will be passed to function. */ const unsigned args = gimple_call_num_args (stmt); for (unsigned i = 0; i < args; ++i) { tree parm = gimple_call_arg (stmt, (int)i); tree parm_decl_type = parm_type_chain != NULL_TREE ? TREE_VALUE (parm_type_chain) : NULL_TREE; hsa_op_address *addr; if (AGGREGATE_TYPE_P (TREE_TYPE (parm))) { addr = gen_hsa_addr_for_arg (TREE_TYPE (parm), i); BrigAlignment8_t align; hsa_op_address *src = gen_hsa_addr_with_align (parm, hbb, &align); gen_hsa_memory_copy (hbb, addr, src, addr->m_symbol->total_byte_size (), align); } else { hsa_op_with_type *src = hsa_reg_or_immed_for_gimple_op (parm, hbb); if (parm_decl_type != NULL && AGGREGATE_TYPE_P (parm_decl_type)) { HSA_SORRY_AT (gimple_location (stmt), "support for HSA does not implement an aggregate " "formal argument in a function call, while actual " "argument is not an aggregate"); return; } BrigType16_t formal_arg_type = get_format_argument_type (parm_decl_type, src->m_type); if (hsa_seen_error ()) return; if (src->m_type != formal_arg_type) src = src->get_in_type (formal_arg_type, hbb); addr = gen_hsa_addr_for_arg (parm_decl_type != NULL_TREE ? parm_decl_type: TREE_TYPE (parm), i); hsa_insn_mem *mem = new hsa_insn_mem (BRIG_OPCODE_ST, formal_arg_type, src, addr); hbb->append_insn (mem); } call_insn->m_input_args.safe_push (addr->m_symbol); if (parm_type_chain) parm_type_chain = TREE_CHAIN (parm_type_chain); } call_insn->m_args_code_list = new hsa_op_code_list (args); hbb->append_insn (call_insn); tree result_type = TREE_TYPE (TREE_TYPE (decl)); tree result = gimple_call_lhs (stmt); hsa_insn_mem *result_insn = NULL; if (!VOID_TYPE_P (result_type)) { hsa_op_address *addr = gen_hsa_addr_for_arg (result_type, -1); /* Even if result of a function call is unused, we have to emit declaration for the result. 
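The arg-segment symbol for the result is therefore created unconditionally; only loading the returned value into the register of the LHS (or copying an aggregate result) is skipped when the result is unused or ASSIGN_LHS is false.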
*/ if (result && assign_lhs) { tree lhs_type = TREE_TYPE (result); if (hsa_seen_error ()) return; if (AGGREGATE_TYPE_P (lhs_type)) { BrigAlignment8_t align; hsa_op_address *result_addr = gen_hsa_addr_with_align (result, hbb, &align); gen_hsa_memory_copy (hbb, result_addr, addr, addr->m_symbol->total_byte_size (), align); } else { BrigType16_t mtype = mem_type_for_type (hsa_type_for_scalar_tree_type (lhs_type, false)); hsa_op_reg *dst = hsa_cfun->reg_for_gimple_ssa (result); result_insn = new hsa_insn_mem (BRIG_OPCODE_LD, mtype, dst, addr); hbb->append_insn (result_insn); } } call_insn->m_output_arg = addr->m_symbol; call_insn->m_result_code_list = new hsa_op_code_list (1); } else { if (result) { HSA_SORRY_AT (gimple_location (stmt), "support for HSA does not implement an assignment of " "return value from a void function"); return; } call_insn->m_result_code_list = new hsa_op_code_list (0); } /* Argument block end. */ hsa_insn_arg_block *arg_end = new hsa_insn_arg_block (BRIG_KIND_DIRECTIVE_ARG_BLOCK_END, call_insn); hbb->append_insn (arg_end); } /* Generate HSA instructions for a direct call of an internal fn. Instructions will be appended to HBB, which also needs to be the corresponding structure to the basic_block of STMT. */ static void gen_hsa_insns_for_call_of_internal_fn (gimple *stmt, hsa_bb *hbb) { tree lhs = gimple_call_lhs (stmt); if (!lhs) return; tree lhs_type = TREE_TYPE (lhs); tree rhs1 = gimple_call_arg (stmt, 0); tree rhs1_type = TREE_TYPE (rhs1); enum internal_fn fn = gimple_call_internal_fn (stmt); hsa_internal_fn *ifn = new hsa_internal_fn (fn, tree_to_uhwi (TYPE_SIZE (rhs1_type))); hsa_insn_call *call_insn = new hsa_insn_call (ifn); gcc_checking_assert (FLOAT_TYPE_P (rhs1_type)); if (!hsa_emitted_internal_decls->find (call_insn->m_called_internal_fn)) hsa_cfun->m_called_internal_fns.safe_push (call_insn->m_called_internal_fn); hsa_insn_arg_block *arg_start = new hsa_insn_arg_block (BRIG_KIND_DIRECTIVE_ARG_BLOCK_START, call_insn); hbb->append_insn (arg_start); unsigned num_args = gimple_call_num_args (stmt); /* Function arguments. */ for (unsigned i = 0; i < num_args; i++) { tree parm = gimple_call_arg (stmt, (int)i); hsa_op_with_type *src = hsa_reg_or_immed_for_gimple_op (parm, hbb); hsa_op_address *addr = gen_hsa_addr_for_arg (TREE_TYPE (parm), i); hsa_insn_mem *mem = new hsa_insn_mem (BRIG_OPCODE_ST, src->m_type, src, addr); call_insn->m_input_args.safe_push (addr->m_symbol); hbb->append_insn (mem); } call_insn->m_args_code_list = new hsa_op_code_list (num_args); hbb->append_insn (call_insn); /* Assign returned value. */ hsa_op_address *addr = gen_hsa_addr_for_arg (lhs_type, -1); call_insn->m_output_arg = addr->m_symbol; call_insn->m_result_code_list = new hsa_op_code_list (1); /* Argument block end. */ hsa_insn_arg_block *arg_end = new hsa_insn_arg_block (BRIG_KIND_DIRECTIVE_ARG_BLOCK_END, call_insn); hbb->append_insn (arg_end); } /* Generate HSA instructions for a return value instruction. Instructions will be appended to HBB, which also needs to be the corresponding structure to the basic_block of STMT. 
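An aggregate return value is copied into the function's output-argument symbol, a scalar value is stored into it directly, and a RET instruction is emitted in both cases (also when there is no return value at all).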
*/ static void gen_hsa_insns_for_return (greturn *stmt, hsa_bb *hbb) { tree retval = gimple_return_retval (stmt); if (retval) { hsa_op_address *addr = new hsa_op_address (hsa_cfun->m_output_arg); if (AGGREGATE_TYPE_P (TREE_TYPE (retval))) { BrigAlignment8_t align; hsa_op_address *retval_addr = gen_hsa_addr_with_align (retval, hbb, &align); gen_hsa_memory_copy (hbb, addr, retval_addr, hsa_cfun->m_output_arg->total_byte_size (), align); } else { BrigType16_t t = hsa_type_for_scalar_tree_type (TREE_TYPE (retval), false); BrigType16_t mtype = mem_type_for_type (t); /* Store of return value. */ hsa_op_with_type *src = hsa_reg_or_immed_for_gimple_op (retval, hbb); src = src->get_in_type (mtype, hbb); hsa_insn_mem *mem = new hsa_insn_mem (BRIG_OPCODE_ST, mtype, src, addr); hbb->append_insn (mem); } } /* HSAIL return instruction emission. */ hsa_insn_basic *ret = new hsa_insn_basic (0, BRIG_OPCODE_RET); hbb->append_insn (ret); } /* Set OP_INDEX-th operand of the instruction to DEST, as the DEST can have a different type, conversion instructions are possibly appended to HBB. */ void hsa_insn_basic::set_output_in_type (hsa_op_reg *dest, unsigned op_index, hsa_bb *hbb) { gcc_checking_assert (op_output_p (op_index)); if (dest->m_type == m_type) { set_op (op_index, dest); return; } hsa_insn_basic *insn; hsa_op_reg *tmp; if (hsa_needs_cvt (dest->m_type, m_type)) { tmp = new hsa_op_reg (m_type); insn = new hsa_insn_cvt (dest, tmp); } else if (hsa_type_bit_size (dest->m_type) == hsa_type_bit_size (m_type)) { /* When output, HSA registers do not really have types, only sizes, so if the sizes match, we can use the register directly. */ set_op (op_index, dest); return; } else { tmp = new hsa_op_reg (m_type); insn = new hsa_insn_basic (2, BRIG_OPCODE_MOV, dest->m_type, dest, tmp->get_in_type (dest->m_type, hbb)); hsa_fixup_mov_insn_type (insn); } set_op (op_index, tmp); hbb->append_insn (insn); } /* Generate instruction OPCODE to query a property of HSA grid along the given DIMENSION. Store result into DEST and append the instruction to HBB. */ static void query_hsa_grid_dim (hsa_op_reg *dest, int opcode, hsa_op_immed *dimension, hsa_bb *hbb) { hsa_insn_basic *insn = new hsa_insn_basic (2, opcode, BRIG_TYPE_U32, NULL, dimension); hbb->append_insn (insn); insn->set_output_in_type (dest, 0, hbb); } /* Generate instruction OPCODE to query a property of HSA grid along the given dimension which is an immediate in first argument of STMT. Store result into the register corresponding to LHS of STMT and append the instruction to HBB. */ static void query_hsa_grid_dim (gimple *stmt, int opcode, hsa_bb *hbb) { tree lhs = gimple_call_lhs (dyn_cast <gcall *> (stmt)); if (lhs == NULL_TREE) return; tree arg = gimple_call_arg (stmt, 0); unsigned HOST_WIDE_INT dim = 5; if (tree_fits_uhwi_p (arg)) dim = tree_to_uhwi (arg); if (dim > 2) { HSA_SORRY_AT (gimple_location (stmt), "HSA grid query dimension must be immediate constant 0, 1 " "or 2"); return; } hsa_op_immed *hdim = new hsa_op_immed (dim, (BrigKind16_t) BRIG_TYPE_U32); hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs); query_hsa_grid_dim (dest, opcode, hdim, hbb); } /* Generate instruction OPCODE to query a property of HSA grid that is independent of any dimension. Store result into the register corresponding to LHS of STMT and append the instruction to HBB. 
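This is used, for example, to implement omp_get_thread_num via the WORKITEMFLATABSID query.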
*/ static void query_hsa_grid_nodim (gimple *stmt, BrigOpcode16_t opcode, hsa_bb *hbb) { tree lhs = gimple_call_lhs (dyn_cast <gcall *> (stmt)); if (lhs == NULL_TREE) return; hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs); BrigType16_t brig_type = hsa_unsigned_type_for_type (dest->m_type); hsa_insn_basic *insn = new hsa_insn_basic (1, opcode, brig_type, dest); hbb->append_insn (insn); } /* Emit instructions that set hsa_num_threads according to provided VALUE. Instructions are appended to basic block HBB. */ static void gen_set_num_threads (tree value, hsa_bb *hbb) { hbb->append_insn (new hsa_insn_comment ("omp_set_num_threads")); hsa_op_with_type *src = hsa_reg_or_immed_for_gimple_op (value, hbb); src = src->get_in_type (hsa_num_threads->m_type, hbb); hsa_op_address *addr = new hsa_op_address (hsa_num_threads); hsa_insn_basic *basic = new hsa_insn_mem (BRIG_OPCODE_ST, hsa_num_threads->m_type, src, addr); hbb->append_insn (basic); } /* Return byte offset of a FIELD_NAME in GOMP_hsa_kernel_dispatch which is defined in plugin-hsa.c. */ static HOST_WIDE_INT get_hsa_kernel_dispatch_offset (const char *field_name) { tree *hsa_kernel_dispatch_type = hsa_get_kernel_dispatch_type (); if (*hsa_kernel_dispatch_type == NULL) { /* Collection of information needed for a dispatch of a kernel from a kernel. Keep in sync with libgomp's plugin-hsa.c. */ *hsa_kernel_dispatch_type = make_node (RECORD_TYPE); tree id_f1 = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("queue"), ptr_type_node); DECL_CHAIN (id_f1) = NULL_TREE; tree id_f2 = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("omp_data_memory"), ptr_type_node); DECL_CHAIN (id_f2) = id_f1; tree id_f3 = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("kernarg_address"), ptr_type_node); DECL_CHAIN (id_f3) = id_f2; tree id_f4 = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("object"), uint64_type_node); DECL_CHAIN (id_f4) = id_f3; tree id_f5 = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("signal"), uint64_type_node); DECL_CHAIN (id_f5) = id_f4; tree id_f6 = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("private_segment_size"), uint32_type_node); DECL_CHAIN (id_f6) = id_f5; tree id_f7 = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("group_segment_size"), uint32_type_node); DECL_CHAIN (id_f7) = id_f6; tree id_f8 = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("kernel_dispatch_count"), uint64_type_node); DECL_CHAIN (id_f8) = id_f7; tree id_f9 = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("debug"), uint64_type_node); DECL_CHAIN (id_f9) = id_f8; tree id_f10 = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("omp_level"), uint64_type_node); DECL_CHAIN (id_f10) = id_f9; tree id_f11 = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("children_dispatches"), ptr_type_node); DECL_CHAIN (id_f11) = id_f10; tree id_f12 = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("omp_num_threads"), uint32_type_node); DECL_CHAIN (id_f12) = id_f11; finish_builtin_struct (*hsa_kernel_dispatch_type, "__hsa_kernel_dispatch", id_f12, NULL_TREE); TYPE_ARTIFICIAL (*hsa_kernel_dispatch_type) = 1; } for (tree chain = TYPE_FIELDS (*hsa_kernel_dispatch_type); chain != NULL_TREE; chain = TREE_CHAIN (chain)) if (id_equal (DECL_NAME (chain), field_name)) return int_byte_position (chain); gcc_unreachable (); } /* Return an HSA register that will contain number of threads for a future dispatched kernel. Instructions are added to HBB. 
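The value is MIN (HSA_DEFAULT_NUM_THREADS, hsa_num_threads); when that minimum is zero, the omp_num_threads field of the kernel dispatch shadow structure is used instead, and the result is finally converted to a 16-bit unsigned register.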
*/ static hsa_op_reg * gen_num_threads_for_dispatch (hsa_bb *hbb) { /* Step 1) Assign to number of threads: MIN (HSA_DEFAULT_NUM_THREADS, hsa_num_threads). */ hsa_op_reg *threads = new hsa_op_reg (hsa_num_threads->m_type); hsa_op_address *addr = new hsa_op_address (hsa_num_threads); hbb->append_insn (new hsa_insn_mem (BRIG_OPCODE_LD, threads->m_type, threads, addr)); hsa_op_immed *limit = new hsa_op_immed (HSA_DEFAULT_NUM_THREADS, BRIG_TYPE_U32); hsa_op_reg *r = new hsa_op_reg (BRIG_TYPE_B1); hsa_insn_cmp * cmp = new hsa_insn_cmp (BRIG_COMPARE_LT, r->m_type, r, threads, limit); hbb->append_insn (cmp); BrigType16_t btype = hsa_bittype_for_type (threads->m_type); hsa_op_reg *tmp = new hsa_op_reg (threads->m_type); hbb->append_insn (new hsa_insn_basic (4, BRIG_OPCODE_CMOV, btype, tmp, r, threads, limit)); /* Step 2) If the number is equal to zero, return shadow->omp_num_threads. */ hsa_op_reg *shadow_reg_ptr = hsa_cfun->get_shadow_reg (); hsa_op_reg *shadow_thread_count = new hsa_op_reg (BRIG_TYPE_U32); addr = new hsa_op_address (shadow_reg_ptr, get_hsa_kernel_dispatch_offset ("omp_num_threads")); hsa_insn_basic *basic = new hsa_insn_mem (BRIG_OPCODE_LD, shadow_thread_count->m_type, shadow_thread_count, addr); hbb->append_insn (basic); hsa_op_reg *tmp2 = new hsa_op_reg (threads->m_type); r = new hsa_op_reg (BRIG_TYPE_B1); hsa_op_immed *imm = new hsa_op_immed (0, shadow_thread_count->m_type); hbb->append_insn (new hsa_insn_cmp (BRIG_COMPARE_EQ, r->m_type, r, tmp, imm)); hbb->append_insn (new hsa_insn_basic (4, BRIG_OPCODE_CMOV, btype, tmp2, r, shadow_thread_count, tmp)); hsa_op_base *dest = tmp2->get_in_type (BRIG_TYPE_U16, hbb); return as_a <hsa_op_reg *> (dest); } /* Build OPCODE query for all three hsa dimensions, multiply them and store the result into DEST. */ static void multiply_grid_dim_characteristics (hsa_op_reg *dest, int opcode, hsa_bb *hbb) { hsa_op_reg *dimx = new hsa_op_reg (BRIG_TYPE_U32); query_hsa_grid_dim (dimx, opcode, new hsa_op_immed (0, (BrigKind16_t) BRIG_TYPE_U32), hbb); hsa_op_reg *dimy = new hsa_op_reg (BRIG_TYPE_U32); query_hsa_grid_dim (dimy, opcode, new hsa_op_immed (1, (BrigKind16_t) BRIG_TYPE_U32), hbb); hsa_op_reg *dimz = new hsa_op_reg (BRIG_TYPE_U32); query_hsa_grid_dim (dimz, opcode, new hsa_op_immed (2, (BrigKind16_t) BRIG_TYPE_U32), hbb); hsa_op_reg *tmp = new hsa_op_reg (dest->m_type); gen_hsa_binary_operation (BRIG_OPCODE_MUL, tmp, dimx->get_in_type (dest->m_type, hbb), dimy->get_in_type (dest->m_type, hbb), hbb); gen_hsa_binary_operation (BRIG_OPCODE_MUL, dest, tmp, dimz->get_in_type (dest->m_type, hbb), hbb); } /* Emit instructions that assign number of threads to lhs of gimple STMT. Instructions are appended to basic block HBB. */ static void gen_get_num_threads (gimple *stmt, hsa_bb *hbb) { if (gimple_call_lhs (stmt) == NULL_TREE) return; hbb->append_insn (new hsa_insn_comment ("omp_get_num_threads")); tree lhs = gimple_call_lhs (stmt); hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs); multiply_grid_dim_characteristics (dest, BRIG_OPCODE_CURRENTWORKGROUPSIZE, hbb); } /* Emit instructions that assign number of teams to lhs of gimple STMT. Instructions are appended to basic block HBB. 
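The team count is computed as the product of the GRIDGROUPS query over all three grid dimensions.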
*/ static void gen_get_num_teams (gimple *stmt, hsa_bb *hbb) { if (gimple_call_lhs (stmt) == NULL_TREE) return; hbb->append_insn (new hsa_insn_comment ("omp_get_num_teams")); tree lhs = gimple_call_lhs (stmt); hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs); multiply_grid_dim_characteristics (dest, BRIG_OPCODE_GRIDGROUPS, hbb); } /* Emit instructions that assign a team number to lhs of gimple STMT. Instructions are appended to basic block HBB. */ static void gen_get_team_num (gimple *stmt, hsa_bb *hbb) { if (gimple_call_lhs (stmt) == NULL_TREE) return; hbb->append_insn (new hsa_insn_comment ("omp_get_team_num")); tree lhs = gimple_call_lhs (stmt); hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs); hsa_op_reg *gnum_x = new hsa_op_reg (BRIG_TYPE_U32); query_hsa_grid_dim (gnum_x, BRIG_OPCODE_GRIDGROUPS, new hsa_op_immed (0, (BrigKind16_t) BRIG_TYPE_U32), hbb); hsa_op_reg *gnum_y = new hsa_op_reg (BRIG_TYPE_U32); query_hsa_grid_dim (gnum_y, BRIG_OPCODE_GRIDGROUPS, new hsa_op_immed (1, (BrigKind16_t) BRIG_TYPE_U32), hbb); hsa_op_reg *gno_z = new hsa_op_reg (BRIG_TYPE_U32); query_hsa_grid_dim (gno_z, BRIG_OPCODE_WORKGROUPID, new hsa_op_immed (2, (BrigKind16_t) BRIG_TYPE_U32), hbb); hsa_op_reg *tmp1 = new hsa_op_reg (dest->m_type); gen_hsa_binary_operation (BRIG_OPCODE_MUL, tmp1, gnum_x->get_in_type (dest->m_type, hbb), gnum_y->get_in_type (dest->m_type, hbb), hbb); hsa_op_reg *tmp2 = new hsa_op_reg (dest->m_type); gen_hsa_binary_operation (BRIG_OPCODE_MUL, tmp2, tmp1, gno_z->get_in_type (dest->m_type, hbb), hbb); hsa_op_reg *gno_y = new hsa_op_reg (BRIG_TYPE_U32); query_hsa_grid_dim (gno_y, BRIG_OPCODE_WORKGROUPID, new hsa_op_immed (1, (BrigKind16_t) BRIG_TYPE_U32), hbb); hsa_op_reg *tmp3 = new hsa_op_reg (dest->m_type); gen_hsa_binary_operation (BRIG_OPCODE_MUL, tmp3, gnum_x->get_in_type (dest->m_type, hbb), gno_y->get_in_type (dest->m_type, hbb), hbb); hsa_op_reg *tmp4 = new hsa_op_reg (dest->m_type); gen_hsa_binary_operation (BRIG_OPCODE_ADD, tmp4, tmp3, tmp2, hbb); hsa_op_reg *gno_x = new hsa_op_reg (BRIG_TYPE_U32); query_hsa_grid_dim (gno_x, BRIG_OPCODE_WORKGROUPID, new hsa_op_immed (0, (BrigKind16_t) BRIG_TYPE_U32), hbb); gen_hsa_binary_operation (BRIG_OPCODE_ADD, dest, tmp4, gno_x->get_in_type (dest->m_type, hbb), hbb); } /* Emit instructions that get levels-var ICV to lhs of gimple STMT. Instructions are appended to basic block HBB. */ static void gen_get_level (gimple *stmt, hsa_bb *hbb) { if (gimple_call_lhs (stmt) == NULL_TREE) return; hbb->append_insn (new hsa_insn_comment ("omp_get_level")); tree lhs = gimple_call_lhs (stmt); hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs); hsa_op_reg *shadow_reg_ptr = hsa_cfun->get_shadow_reg (); if (shadow_reg_ptr == NULL) { HSA_SORRY_AT (gimple_location (stmt), "support for HSA does not implement %<omp_get_level%> " "called from a function not being inlined within a kernel"); return; } hsa_op_address *addr = new hsa_op_address (shadow_reg_ptr, get_hsa_kernel_dispatch_offset ("omp_level")); hsa_insn_mem *mem = new hsa_insn_mem (BRIG_OPCODE_LD, BRIG_TYPE_U64, (hsa_op_base *) NULL, addr); hbb->append_insn (mem); mem->set_output_in_type (dest, 0, hbb); } /* Emit instruction that implement omp_get_max_threads of gimple STMT. 
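The returned value is the same thread count that gen_num_threads_for_dispatch computes for an actual kernel dispatch.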
*/ static void gen_get_max_threads (gimple *stmt, hsa_bb *hbb) { tree lhs = gimple_call_lhs (stmt); if (!lhs) return; hbb->append_insn (new hsa_insn_comment ("omp_get_max_threads")); hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs); hsa_op_with_type *num_threads_reg = gen_num_threads_for_dispatch (hbb) ->get_in_type (dest->m_type, hbb); hsa_build_append_simple_mov (dest, num_threads_reg, hbb); } /* Emit instructions that implement alloca builtin gimple STMT. Instructions are appended to basic block HBB. */ static void gen_hsa_alloca (gcall *call, hsa_bb *hbb) { tree lhs = gimple_call_lhs (call); if (lhs == NULL_TREE) return; tree fndecl = gimple_call_fndecl (call); built_in_function fn = DECL_FUNCTION_CODE (fndecl); gcc_checking_assert (ALLOCA_FUNCTION_CODE_P (fn)); unsigned bit_alignment = 0; if (fn != BUILT_IN_ALLOCA) { tree alignment_tree = gimple_call_arg (call, 1); if (TREE_CODE (alignment_tree) != INTEGER_CST) { HSA_SORRY_ATV (gimple_location (call), "support for HSA does not implement " "%qD with a non-constant alignment %E", fndecl, alignment_tree); } bit_alignment = tree_to_uhwi (alignment_tree); } tree rhs1 = gimple_call_arg (call, 0); hsa_op_with_type *size = hsa_reg_or_immed_for_gimple_op (rhs1, hbb) ->get_in_type (BRIG_TYPE_U32, hbb); hsa_op_with_type *dest = hsa_cfun->reg_for_gimple_ssa (lhs); hsa_op_reg *tmp = new hsa_op_reg (hsa_get_segment_addr_type (BRIG_SEGMENT_PRIVATE)); hsa_insn_alloca *a = new hsa_insn_alloca (tmp, size, bit_alignment); hbb->append_insn (a); hsa_insn_seg *seg = new hsa_insn_seg (BRIG_OPCODE_STOF, hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT), tmp->m_type, BRIG_SEGMENT_PRIVATE, dest, tmp); hbb->append_insn (seg); } /* Emit instructions that implement clrsb builtin STMT: Returns the number of leading redundant sign bits in x, i.e. the number of bits following the most significant bit that are identical to it. There are no special cases for 0 or other values. Instructions are appended to basic block HBB. */ static void gen_hsa_clrsb (gcall *call, hsa_bb *hbb) { tree lhs = gimple_call_lhs (call); if (lhs == NULL_TREE) return; hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs); tree rhs1 = gimple_call_arg (call, 0); hsa_op_with_type *arg = hsa_reg_or_immed_for_gimple_op (rhs1, hbb); arg->extend_int_to_32bit (hbb); BrigType16_t bittype = hsa_bittype_for_type (arg->m_type); unsigned bitsize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (rhs1))); /* FIRSTBIT instruction is defined only for 32- and 64-bit wide integers. */ gcc_checking_assert (bitsize == 32 || bitsize == 64); /* Set MOST_SIG to true if the most significant bit is set to one. */ hsa_op_immed *c = new hsa_op_immed (1ul << (bitsize - 1), hsa_uint_for_bitsize (bitsize)); hsa_op_reg *and_reg = new hsa_op_reg (bittype); gen_hsa_binary_operation (BRIG_OPCODE_AND, and_reg, arg, c, hbb); hsa_op_reg *most_sign = new hsa_op_reg (BRIG_TYPE_B1); hsa_insn_cmp *cmp = new hsa_insn_cmp (BRIG_COMPARE_EQ, most_sign->m_type, most_sign, and_reg, c); hbb->append_insn (cmp); /* If the most significant bit is one, negate the input. Otherwise shift the input value to the left by one bit. */ hsa_op_reg *arg_neg = new hsa_op_reg (arg->m_type); gen_hsa_unary_operation (BRIG_OPCODE_NEG, arg_neg, arg, hbb); hsa_op_reg *shifted_arg = new hsa_op_reg (arg->m_type); gen_hsa_binary_operation (BRIG_OPCODE_SHL, shifted_arg, arg, new hsa_op_immed (1, BRIG_TYPE_U64), hbb); /* Assign the value that can be used for FIRSTBIT instruction according to the most significant bit.
*/ hsa_op_reg *tmp = new hsa_op_reg (bittype); hsa_insn_basic *cmov = new hsa_insn_basic (4, BRIG_OPCODE_CMOV, bittype, tmp, most_sign, arg_neg, shifted_arg); hbb->append_insn (cmov); hsa_op_reg *leading_bits = new hsa_op_reg (BRIG_TYPE_S32); gen_hsa_unary_operation (BRIG_OPCODE_FIRSTBIT, leading_bits, tmp->get_in_type (hsa_uint_for_bitsize (bitsize), hbb), hbb); /* Set flag if the input value is equal to zero. */ hsa_op_reg *is_zero = new hsa_op_reg (BRIG_TYPE_B1); cmp = new hsa_insn_cmp (BRIG_COMPARE_EQ, is_zero->m_type, is_zero, arg, new hsa_op_immed (0, arg->m_type)); hbb->append_insn (cmp); /* Return the number of leading bits, or (bitsize - 1) if the input value is zero. */ cmov = new hsa_insn_basic (4, BRIG_OPCODE_CMOV, BRIG_TYPE_B32, NULL, is_zero, new hsa_op_immed (bitsize - 1, BRIG_TYPE_U32), leading_bits->get_in_type (BRIG_TYPE_B32, hbb)); hbb->append_insn (cmov); cmov->set_output_in_type (dest, 0, hbb); } /* Emit instructions that implement ffs builtin STMT: Returns one plus the index of the least significant 1-bit of x, or if x is zero, returns zero. Instructions are appended to basic block HBB. */ static void gen_hsa_ffs (gcall *call, hsa_bb *hbb) { tree lhs = gimple_call_lhs (call); if (lhs == NULL_TREE) return; hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs); tree rhs1 = gimple_call_arg (call, 0); hsa_op_with_type *arg = hsa_reg_or_immed_for_gimple_op (rhs1, hbb); arg = arg->extend_int_to_32bit (hbb); hsa_op_reg *tmp = new hsa_op_reg (BRIG_TYPE_U32); hsa_insn_srctype *insn = new hsa_insn_srctype (2, BRIG_OPCODE_LASTBIT, tmp->m_type, arg->m_type, tmp, arg); hbb->append_insn (insn); hsa_insn_basic *addition = new hsa_insn_basic (3, BRIG_OPCODE_ADD, tmp->m_type, NULL, tmp, new hsa_op_immed (1, tmp->m_type)); hbb->append_insn (addition); addition->set_output_in_type (dest, 0, hbb); } static void gen_hsa_popcount_to_dest (hsa_op_reg *dest, hsa_op_with_type *arg, hsa_bb *hbb) { gcc_checking_assert (hsa_type_integer_p (arg->m_type)); if (hsa_type_bit_size (arg->m_type) < 32) arg = arg->get_in_type (BRIG_TYPE_B32, hbb); BrigType16_t srctype = hsa_bittype_for_type (arg->m_type); if (!hsa_btype_p (arg->m_type)) arg = arg->get_in_type (srctype, hbb); hsa_insn_srctype *popcount = new hsa_insn_srctype (2, BRIG_OPCODE_POPCOUNT, BRIG_TYPE_U32, srctype, NULL, arg); hbb->append_insn (popcount); popcount->set_output_in_type (dest, 0, hbb); } /* Emit instructions that implement parity builtin STMT: Returns the parity of x, i.e. the number of 1-bits in x modulo 2. Instructions are appended to basic block HBB. */ static void gen_hsa_parity (gcall *call, hsa_bb *hbb) { tree lhs = gimple_call_lhs (call); if (lhs == NULL_TREE) return; hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs); tree rhs1 = gimple_call_arg (call, 0); hsa_op_with_type *arg = hsa_reg_or_immed_for_gimple_op (rhs1, hbb); hsa_op_reg *popcount = new hsa_op_reg (BRIG_TYPE_U32); gen_hsa_popcount_to_dest (popcount, arg, hbb); hsa_insn_basic *insn = new hsa_insn_basic (3, BRIG_OPCODE_REM, popcount->m_type, NULL, popcount, new hsa_op_immed (2, popcount->m_type)); hbb->append_insn (insn); insn->set_output_in_type (dest, 0, hbb); } /* Emit instructions that implement popcount builtin STMT. Instructions are appended to basic block HBB. 
*/ static void gen_hsa_popcount (gcall *call, hsa_bb *hbb) { tree lhs = gimple_call_lhs (call); if (lhs == NULL_TREE) return; hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs); tree rhs1 = gimple_call_arg (call, 0); hsa_op_with_type *arg = hsa_reg_or_immed_for_gimple_op (rhs1, hbb); gen_hsa_popcount_to_dest (dest, arg, hbb); } /* Emit instructions that implement DIVMOD builtin STMT. Instructions are appended to basic block HBB. */ static void gen_hsa_divmod (gcall *call, hsa_bb *hbb) { tree lhs = gimple_call_lhs (call); if (lhs == NULL_TREE) return; tree rhs0 = gimple_call_arg (call, 0); tree rhs1 = gimple_call_arg (call, 1); hsa_op_with_type *arg0 = hsa_reg_or_immed_for_gimple_op (rhs0, hbb); arg0 = arg0->extend_int_to_32bit (hbb); hsa_op_with_type *arg1 = hsa_reg_or_immed_for_gimple_op (rhs1, hbb); arg1 = arg1->extend_int_to_32bit (hbb); hsa_op_reg *dest0 = new hsa_op_reg (arg0->m_type); hsa_op_reg *dest1 = new hsa_op_reg (arg1->m_type); hsa_insn_basic *insn = new hsa_insn_basic (3, BRIG_OPCODE_DIV, dest0->m_type, dest0, arg0, arg1); hbb->append_insn (insn); insn = new hsa_insn_basic (3, BRIG_OPCODE_REM, dest1->m_type, dest1, arg0, arg1); hbb->append_insn (insn); hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs); BrigType16_t dst_type = hsa_extend_inttype_to_32bit (dest->m_type); BrigType16_t src_type = hsa_bittype_for_type (dest0->m_type); insn = new hsa_insn_packed (3, BRIG_OPCODE_COMBINE, dst_type, src_type, NULL, dest0, dest1); hbb->append_insn (insn); insn->set_output_in_type (dest, 0, hbb); } /* Emit instructions that implement FMA, FMS, FNMA or FNMS call STMT. Instructions are appended to basic block HBB. NEGATE1 is true for FNMA and FNMS. NEGATE3 is true for FMS and FNMS. */ static void gen_hsa_fma (gcall *call, hsa_bb *hbb, bool negate1, bool negate3) { tree lhs = gimple_call_lhs (call); if (lhs == NULL_TREE) return; tree rhs1 = gimple_call_arg (call, 0); tree rhs2 = gimple_call_arg (call, 1); tree rhs3 = gimple_call_arg (call, 2); hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs); hsa_op_with_type *op1 = hsa_reg_or_immed_for_gimple_op (rhs1, hbb); hsa_op_with_type *op2 = hsa_reg_or_immed_for_gimple_op (rhs2, hbb); hsa_op_with_type *op3 = hsa_reg_or_immed_for_gimple_op (rhs3, hbb); if (negate1) { hsa_op_reg *tmp = new hsa_op_reg (dest->m_type); gen_hsa_unary_operation (BRIG_OPCODE_NEG, tmp, op1, hbb); op1 = tmp; } /* There is a native HSA instruction for scalar FMAs but not for vector ones. */ if (TREE_CODE (TREE_TYPE (lhs)) == VECTOR_TYPE) { hsa_op_reg *tmp = new hsa_op_reg (dest->m_type); gen_hsa_binary_operation (BRIG_OPCODE_MUL, tmp, op1, op2, hbb); gen_hsa_binary_operation (negate3 ? BRIG_OPCODE_SUB : BRIG_OPCODE_ADD, dest, tmp, op3, hbb); } else { if (negate3) { hsa_op_reg *tmp = new hsa_op_reg (dest->m_type); gen_hsa_unary_operation (BRIG_OPCODE_NEG, tmp, op3, hbb); op3 = tmp; } hsa_insn_basic *insn = new hsa_insn_basic (4, BRIG_OPCODE_MAD, dest->m_type, dest, op1, op2, op3); hbb->append_insn (insn); } } /* Set VALUE to a shadow kernel debug argument and append a new instruction to HBB basic block. 
*/ static void set_debug_value (hsa_bb *hbb, hsa_op_with_type *value) { hsa_op_reg *shadow_reg_ptr = hsa_cfun->get_shadow_reg (); if (shadow_reg_ptr == NULL) return; hsa_op_address *addr = new hsa_op_address (shadow_reg_ptr, get_hsa_kernel_dispatch_offset ("debug")); hsa_insn_mem *mem = new hsa_insn_mem (BRIG_OPCODE_ST, BRIG_TYPE_U64, value, addr); hbb->append_insn (mem); } void omp_simple_builtin::generate (gimple *stmt, hsa_bb *hbb) { if (m_sorry) { if (m_warning_message) HSA_SORRY_AT (gimple_location (stmt), m_warning_message); else HSA_SORRY_ATV (gimple_location (stmt), "support for HSA does not implement calls to %qs", m_name); } else if (m_warning_message != NULL) warning_at (gimple_location (stmt), OPT_Whsa, m_warning_message); if (m_return_value != NULL) { tree lhs = gimple_call_lhs (stmt); if (!lhs) return; hbb->append_insn (new hsa_insn_comment (m_name)); hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs); hsa_op_with_type *op = m_return_value->get_in_type (dest->m_type, hbb); hsa_build_append_simple_mov (dest, op, hbb); } } /* If STMT is a call of a known library function, generate code to perform it and return true. */ static bool gen_hsa_insns_for_known_library_call (gimple *stmt, hsa_bb *hbb) { bool handled = false; const char *name = hsa_get_declaration_name (gimple_call_fndecl (stmt)); char *copy = NULL; size_t len = strlen (name); if (len > 0 && name[len - 1] == '_') { copy = XNEWVEC (char, len + 1); strcpy (copy, name); copy[len - 1] = '\0'; name = copy; } /* Handle omp_* routines. */ if (strstr (name, "omp_") == name) { hsa_init_simple_builtins (); omp_simple_builtin *builtin = omp_simple_builtins->get (name); if (builtin) { builtin->generate (stmt, hbb); return true; } handled = true; if (strcmp (name, "omp_set_num_threads") == 0) gen_set_num_threads (gimple_call_arg (stmt, 0), hbb); else if (strcmp (name, "omp_get_thread_num") == 0) { hbb->append_insn (new hsa_insn_comment (name)); query_hsa_grid_nodim (stmt, BRIG_OPCODE_WORKITEMFLATABSID, hbb); } else if (strcmp (name, "omp_get_num_threads") == 0) { hbb->append_insn (new hsa_insn_comment (name)); gen_get_num_threads (stmt, hbb); } else if (strcmp (name, "omp_get_num_teams") == 0) gen_get_num_teams (stmt, hbb); else if (strcmp (name, "omp_get_team_num") == 0) gen_get_team_num (stmt, hbb); else if (strcmp (name, "omp_get_level") == 0) gen_get_level (stmt, hbb); else if (strcmp (name, "omp_get_active_level") == 0) gen_get_level (stmt, hbb); else if (strcmp (name, "omp_in_parallel") == 0) gen_get_level (stmt, hbb); else if (strcmp (name, "omp_get_max_threads") == 0) gen_get_max_threads (stmt, hbb); else handled = false; if (handled) { if (copy) free (copy); return true; } } if (strcmp (name, "__hsa_set_debug_value") == 0) { handled = true; if (hsa_cfun->has_shadow_reg_p ()) { tree rhs1 = gimple_call_arg (stmt, 0); hsa_op_with_type *src = hsa_reg_or_immed_for_gimple_op (rhs1, hbb); src = src->get_in_type (BRIG_TYPE_U64, hbb); set_debug_value (hbb, src); } } if (copy) free (copy); return handled; } /* Helper functions to create a single unary HSA operations out of calls to builtins. OPCODE is the HSA operation to be generated. STMT is a gimple call to a builtin. HBB is the HSA BB to which the instruction should be added. Note that nothing will be created if STMT does not have a LHS. 
*/ static void gen_hsa_unaryop_for_builtin (BrigOpcode opcode, gimple *stmt, hsa_bb *hbb) { tree lhs = gimple_call_lhs (stmt); if (!lhs) return; hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (lhs); hsa_op_with_type *op = hsa_reg_or_immed_for_gimple_op (gimple_call_arg (stmt, 0), hbb); gen_hsa_unary_operation (opcode, dest, op, hbb); } /* Helper functions to create a call to standard library if LHS of the STMT is used. HBB is the HSA BB to which the instruction should be added. */ static void gen_hsa_unaryop_builtin_call (gimple *stmt, hsa_bb *hbb) { tree lhs = gimple_call_lhs (stmt); if (!lhs) return; if (gimple_call_internal_p (stmt)) gen_hsa_insns_for_call_of_internal_fn (stmt, hbb); else gen_hsa_insns_for_direct_call (stmt, hbb); } /* Helper functions to create a single unary HSA operations out of calls to builtins (if unsafe math optimizations are enable). Otherwise, create a call to standard library function. OPCODE is the HSA operation to be generated. STMT is a gimple call to a builtin. HBB is the HSA BB to which the instruction should be added. Note that nothing will be created if STMT does not have a LHS. */ static void gen_hsa_unaryop_or_call_for_builtin (BrigOpcode opcode, gimple *stmt, hsa_bb *hbb) { if (flag_unsafe_math_optimizations) gen_hsa_unaryop_for_builtin (opcode, stmt, hbb); else gen_hsa_unaryop_builtin_call (stmt, hbb); } /* Generate HSA address corresponding to a value VAL (as opposed to a memory reference tree), for example an SSA_NAME or an ADDR_EXPR. HBB is the HSA BB to which the instruction should be added. */ static hsa_op_address * get_address_from_value (tree val, hsa_bb *hbb) { switch (TREE_CODE (val)) { case SSA_NAME: { BrigType16_t addrtype = hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT); hsa_op_base *reg = hsa_cfun->reg_for_gimple_ssa (val)->get_in_type (addrtype, hbb); return new hsa_op_address (NULL, as_a <hsa_op_reg *> (reg), 0); } case ADDR_EXPR: return gen_hsa_addr (TREE_OPERAND (val, 0), hbb); case INTEGER_CST: if (tree_fits_shwi_p (val)) return new hsa_op_address (NULL, NULL, tree_to_shwi (val)); /* fall-through */ default: HSA_SORRY_ATV (EXPR_LOCATION (val), "support for HSA does not implement memory access to %E", val); return new hsa_op_address (NULL, NULL, 0); } } /* Expand assignment of a result of a string BUILTIN to DST. Size of the operation is N bytes, where instructions will be append to HBB. */ static void expand_lhs_of_string_op (gimple *stmt, unsigned HOST_WIDE_INT n, hsa_bb *hbb, enum built_in_function builtin) { /* If LHS is expected, we need to emit a PHI instruction. */ tree lhs = gimple_call_lhs (stmt); if (!lhs) return; hsa_op_reg *lhs_reg = hsa_cfun->reg_for_gimple_ssa (lhs); hsa_op_with_type *dst_reg = hsa_reg_or_immed_for_gimple_op (gimple_call_arg (stmt, 0), hbb); hsa_op_with_type *tmp; switch (builtin) { case BUILT_IN_MEMPCPY: { tmp = new hsa_op_reg (dst_reg->m_type); hsa_insn_basic *add = new hsa_insn_basic (3, BRIG_OPCODE_ADD, tmp->m_type, tmp, dst_reg, new hsa_op_immed (n, dst_reg->m_type)); hbb->append_insn (add); break; } case BUILT_IN_MEMCPY: case BUILT_IN_MEMSET: tmp = dst_reg; break; default: gcc_unreachable (); } hbb->append_insn (new hsa_insn_basic (2, BRIG_OPCODE_MOV, lhs_reg->m_type, lhs_reg, tmp)); } #define HSA_MEMORY_BUILTINS_LIMIT 128 /* Expand a string builtin (from a gimple STMT) in a way that according to MISALIGNED_FLAG we process either direct emission (a bunch of memory load and store instructions), or we emit a function call of a library function (for instance 'memcpy'). 
Actually, a basic block for direct emission is just prepared, where caller is responsible for emission of corresponding instructions. All instruction are appended to HBB. */ hsa_bb * expand_string_operation_builtin (gimple *stmt, hsa_bb *hbb, hsa_op_reg *misaligned_flag) { edge e = split_block (hbb->m_bb, stmt); basic_block condition_bb = e->src; hbb->append_insn (new hsa_insn_cbr (misaligned_flag)); /* Prepare the control flow. */ edge condition_edge = EDGE_SUCC (condition_bb, 0); basic_block call_bb = split_edge (condition_edge); basic_block expanded_bb = split_edge (EDGE_SUCC (call_bb, 0)); basic_block cont_bb = EDGE_SUCC (expanded_bb, 0)->dest; basic_block merge_bb = split_edge (EDGE_PRED (cont_bb, 0)); condition_edge->flags &= ~EDGE_FALLTHRU; condition_edge->flags |= EDGE_TRUE_VALUE; make_edge (condition_bb, expanded_bb, EDGE_FALSE_VALUE); redirect_edge_succ (EDGE_SUCC (call_bb, 0), merge_bb); hsa_cfun->m_modified_cfg = true; hsa_init_new_bb (expanded_bb); /* Slow path: function call. */ gen_hsa_insns_for_direct_call (stmt, hsa_init_new_bb (call_bb), false); return hsa_bb_for_bb (expanded_bb); } /* Expand a memory copy BUILTIN (BUILT_IN_MEMCPY, BUILT_IN_MEMPCPY) from a gimple STMT and store all necessary instruction to HBB basic block. */ static void expand_memory_copy (gimple *stmt, hsa_bb *hbb, enum built_in_function builtin) { tree byte_size = gimple_call_arg (stmt, 2); if (!tree_fits_uhwi_p (byte_size)) { gen_hsa_insns_for_direct_call (stmt, hbb); return; } unsigned HOST_WIDE_INT n = tree_to_uhwi (byte_size); if (n > HSA_MEMORY_BUILTINS_LIMIT) { gen_hsa_insns_for_direct_call (stmt, hbb); return; } tree dst = gimple_call_arg (stmt, 0); tree src = gimple_call_arg (stmt, 1); hsa_op_address *dst_addr = get_address_from_value (dst, hbb); hsa_op_address *src_addr = get_address_from_value (src, hbb); /* As gen_hsa_memory_copy relies on memory alignment greater or equal to 8 bytes, we need to verify the alignment. */ BrigType16_t addrtype = hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT); hsa_op_reg *src_addr_reg = new hsa_op_reg (addrtype); hsa_op_reg *dst_addr_reg = new hsa_op_reg (addrtype); convert_addr_to_flat_segment (src_addr, src_addr_reg, hbb); convert_addr_to_flat_segment (dst_addr, dst_addr_reg, hbb); /* Process BIT OR for source and destination addresses. */ hsa_op_reg *or_reg = new hsa_op_reg (addrtype); gen_hsa_binary_operation (BRIG_OPCODE_OR, or_reg, src_addr_reg, dst_addr_reg, hbb); /* Process BIT AND with 0x7 to identify the desired alignment of 8 bytes. */ hsa_op_reg *masked = new hsa_op_reg (addrtype); gen_hsa_binary_operation (BRIG_OPCODE_AND, masked, or_reg, new hsa_op_immed (7, addrtype), hbb); hsa_op_reg *misaligned = new hsa_op_reg (BRIG_TYPE_B1); hbb->append_insn (new hsa_insn_cmp (BRIG_COMPARE_NE, misaligned->m_type, misaligned, masked, new hsa_op_immed (0, masked->m_type))); hsa_bb *native_impl_bb = expand_string_operation_builtin (stmt, hbb, misaligned); gen_hsa_memory_copy (native_impl_bb, dst_addr, src_addr, n, BRIG_ALIGNMENT_8); hsa_bb *merge_bb = hsa_init_new_bb (EDGE_SUCC (native_impl_bb->m_bb, 0)->dest); expand_lhs_of_string_op (stmt, n, merge_bb, builtin); } /* Expand a memory set BUILTIN (BUILT_IN_MEMSET, BUILT_IN_BZERO) from a gimple STMT and store all necessary instruction to HBB basic block. The operation set N bytes with a CONSTANT value. 
*/ static void expand_memory_set (gimple *stmt, unsigned HOST_WIDE_INT n, unsigned HOST_WIDE_INT constant, hsa_bb *hbb, enum built_in_function builtin) { tree dst = gimple_call_arg (stmt, 0); hsa_op_address *dst_addr = get_address_from_value (dst, hbb); /* As gen_hsa_memory_set relies on memory alignment greater or equal to 8 bytes, we need to verify the alignment. */ BrigType16_t addrtype = hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT); hsa_op_reg *dst_addr_reg = new hsa_op_reg (addrtype); convert_addr_to_flat_segment (dst_addr, dst_addr_reg, hbb); /* Process BIT AND with 0x7 to identify the desired alignment of 8 bytes. */ hsa_op_reg *masked = new hsa_op_reg (addrtype); gen_hsa_binary_operation (BRIG_OPCODE_AND, masked, dst_addr_reg, new hsa_op_immed (7, addrtype), hbb); hsa_op_reg *misaligned = new hsa_op_reg (BRIG_TYPE_B1); hbb->append_insn (new hsa_insn_cmp (BRIG_COMPARE_NE, misaligned->m_type, misaligned, masked, new hsa_op_immed (0, masked->m_type))); hsa_bb *native_impl_bb = expand_string_operation_builtin (stmt, hbb, misaligned); gen_hsa_memory_set (native_impl_bb, dst_addr, constant, n, BRIG_ALIGNMENT_8); hsa_bb *merge_bb = hsa_init_new_bb (EDGE_SUCC (native_impl_bb->m_bb, 0)->dest); expand_lhs_of_string_op (stmt, n, merge_bb, builtin); } /* Store into MEMORDER the memory order specified by tree T, which must be an integer constant representing a C++ memory order. If it isn't, issue an HSA sorry message using LOC and return true, otherwise return false and store the name of the requested order to *MNAME. */ static bool hsa_memorder_from_tree (tree t, BrigMemoryOrder *memorder, const char **mname, location_t loc) { if (!tree_fits_uhwi_p (t)) { HSA_SORRY_ATV (loc, "support for HSA does not implement memory model %E", t); return true; } unsigned HOST_WIDE_INT mm = tree_to_uhwi (t); switch (mm & MEMMODEL_BASE_MASK) { case MEMMODEL_RELAXED: *memorder = BRIG_MEMORY_ORDER_RELAXED; *mname = "relaxed"; break; case MEMMODEL_CONSUME: /* HSA does not have an equivalent, but we can use the slightly stronger ACQUIRE. */ *memorder = BRIG_MEMORY_ORDER_SC_ACQUIRE; *mname = "consume"; break; case MEMMODEL_ACQUIRE: *memorder = BRIG_MEMORY_ORDER_SC_ACQUIRE; *mname = "acquire"; break; case MEMMODEL_RELEASE: *memorder = BRIG_MEMORY_ORDER_SC_RELEASE; *mname = "release"; break; case MEMMODEL_ACQ_REL: *memorder = BRIG_MEMORY_ORDER_SC_ACQUIRE_RELEASE; *mname = "acq_rel"; break; case MEMMODEL_SEQ_CST: /* Callers implementing a simple load or store need to remove the release or acquire part respectively. */ *memorder = BRIG_MEMORY_ORDER_SC_ACQUIRE_RELEASE; *mname = "seq_cst"; break; default: { HSA_SORRY_AT (loc, "support for HSA does not implement the specified " "memory model"); return true; } } return false; } /* Helper function to create an HSA atomic operation instruction out of calls to atomic builtins. RET_ORIG is true if the built-in is the variant that return s the value before applying operation, and false if it should return the value after applying the operation (if it returns value at all). ACODE is the atomic operation code, STMT is a gimple call to a builtin. HBB is the HSA BB to which the instruction should be added. If SIGNAL is true, the created operation will work on HSA signals rather than atomic variables. 
*/ static void gen_hsa_atomic_for_builtin (bool ret_orig, enum BrigAtomicOperation acode, gimple *stmt, hsa_bb *hbb, bool signal) { tree lhs = gimple_call_lhs (stmt); tree type = TREE_TYPE (gimple_call_arg (stmt, 1)); BrigType16_t hsa_type = hsa_type_for_scalar_tree_type (type, false); BrigType16_t mtype = mem_type_for_type (hsa_type); BrigMemoryOrder memorder; const char *mmname; if (hsa_memorder_from_tree (gimple_call_arg (stmt, 2), &memorder, &mmname, gimple_location (stmt))) return; /* Certain atomic insns must have Bx memory types. */ switch (acode) { case BRIG_ATOMIC_LD: case BRIG_ATOMIC_ST: case BRIG_ATOMIC_AND: case BRIG_ATOMIC_OR: case BRIG_ATOMIC_XOR: case BRIG_ATOMIC_EXCH: mtype = hsa_bittype_for_type (mtype); break; default: break; } hsa_op_reg *dest; int nops, opcode; if (lhs) { if (ret_orig) dest = hsa_cfun->reg_for_gimple_ssa (lhs); else dest = new hsa_op_reg (hsa_type); opcode = signal ? BRIG_OPCODE_SIGNAL : BRIG_OPCODE_ATOMIC; nops = 3; } else { dest = NULL; opcode = signal ? BRIG_OPCODE_SIGNALNORET : BRIG_OPCODE_ATOMICNORET; nops = 2; } if (acode == BRIG_ATOMIC_ST) { if (memorder == BRIG_MEMORY_ORDER_SC_ACQUIRE_RELEASE) memorder = BRIG_MEMORY_ORDER_SC_RELEASE; if (memorder != BRIG_MEMORY_ORDER_RELAXED && memorder != BRIG_MEMORY_ORDER_SC_RELEASE && memorder != BRIG_MEMORY_ORDER_NONE) { HSA_SORRY_ATV (gimple_location (stmt), "support for HSA does not implement memory model for " "%<ATOMIC_ST%>: %s", mmname); return; } } hsa_insn_basic *atominsn; hsa_op_base *tgt; if (signal) { atominsn = new hsa_insn_signal (nops, opcode, acode, mtype, memorder); tgt = hsa_reg_or_immed_for_gimple_op (gimple_call_arg (stmt, 0), hbb); } else { atominsn = new hsa_insn_atomic (nops, opcode, acode, mtype, memorder); hsa_op_address *addr; addr = get_address_from_value (gimple_call_arg (stmt, 0), hbb); if (addr->m_symbol && addr->m_symbol->m_segment == BRIG_SEGMENT_PRIVATE) { HSA_SORRY_AT (gimple_location (stmt), "HSA does not implement atomic operations in private " "segment"); return; } tgt = addr; } hsa_op_with_type *op = hsa_reg_or_immed_for_gimple_op (gimple_call_arg (stmt, 1), hbb); if (lhs) { atominsn->set_op (0, dest); atominsn->set_op (1, tgt); atominsn->set_op (2, op); } else { atominsn->set_op (0, tgt); atominsn->set_op (1, op); } hbb->append_insn (atominsn); /* HSA does not natively support the variants that return the modified value, so re-do the operation again non-atomically if that is what was requested. */ if (lhs && !ret_orig) { int arith; switch (acode) { case BRIG_ATOMIC_ADD: arith = BRIG_OPCODE_ADD; break; case BRIG_ATOMIC_AND: arith = BRIG_OPCODE_AND; break; case BRIG_ATOMIC_OR: arith = BRIG_OPCODE_OR; break; case BRIG_ATOMIC_SUB: arith = BRIG_OPCODE_SUB; break; case BRIG_ATOMIC_XOR: arith = BRIG_OPCODE_XOR; break; default: gcc_unreachable (); } hsa_op_reg *real_dest = hsa_cfun->reg_for_gimple_ssa (lhs); gen_hsa_binary_operation (arith, real_dest, dest, op, hbb); } } /* Generate HSA instructions for an internal fn. Instructions will be appended to HBB, which also needs to be the corresponding structure to the basic_block of STMT. 
*/ static void gen_hsa_insn_for_internal_fn_call (gcall *stmt, hsa_bb *hbb) { gcc_checking_assert (gimple_call_internal_fn (stmt)); internal_fn fn = gimple_call_internal_fn (stmt); bool is_float_type_p = false; if (gimple_call_lhs (stmt) != NULL && TREE_TYPE (gimple_call_lhs (stmt)) == float_type_node) is_float_type_p = true; switch (fn) { case IFN_CEIL: gen_hsa_unaryop_for_builtin (BRIG_OPCODE_CEIL, stmt, hbb); break; case IFN_FLOOR: gen_hsa_unaryop_for_builtin (BRIG_OPCODE_FLOOR, stmt, hbb); break; case IFN_RINT: gen_hsa_unaryop_for_builtin (BRIG_OPCODE_RINT, stmt, hbb); break; case IFN_SQRT: gen_hsa_unaryop_for_builtin (BRIG_OPCODE_SQRT, stmt, hbb); break; case IFN_RSQRT: gen_hsa_unaryop_for_builtin (BRIG_OPCODE_NRSQRT, stmt, hbb); break; case IFN_TRUNC: gen_hsa_unaryop_for_builtin (BRIG_OPCODE_TRUNC, stmt, hbb); break; case IFN_COS: { if (is_float_type_p) gen_hsa_unaryop_or_call_for_builtin (BRIG_OPCODE_NCOS, stmt, hbb); else gen_hsa_unaryop_builtin_call (stmt, hbb); break; } case IFN_EXP2: { if (is_float_type_p) gen_hsa_unaryop_or_call_for_builtin (BRIG_OPCODE_NEXP2, stmt, hbb); else gen_hsa_unaryop_builtin_call (stmt, hbb); break; } case IFN_LOG2: { if (is_float_type_p) gen_hsa_unaryop_or_call_for_builtin (BRIG_OPCODE_NLOG2, stmt, hbb); else gen_hsa_unaryop_builtin_call (stmt, hbb); break; } case IFN_SIN: { if (is_float_type_p) gen_hsa_unaryop_or_call_for_builtin (BRIG_OPCODE_NSIN, stmt, hbb); else gen_hsa_unaryop_builtin_call (stmt, hbb); break; } case IFN_CLRSB: gen_hsa_clrsb (stmt, hbb); break; case IFN_CLZ: gen_hsa_unaryop_for_builtin (BRIG_OPCODE_FIRSTBIT, stmt, hbb); break; case IFN_CTZ: gen_hsa_unaryop_for_builtin (BRIG_OPCODE_LASTBIT, stmt, hbb); break; case IFN_FFS: gen_hsa_ffs (stmt, hbb); break; case IFN_PARITY: gen_hsa_parity (stmt, hbb); break; case IFN_POPCOUNT: gen_hsa_popcount (stmt, hbb); break; case IFN_DIVMOD: gen_hsa_divmod (stmt, hbb); break; case IFN_ACOS: case IFN_ASIN: case IFN_ATAN: case IFN_EXP: case IFN_EXP10: case IFN_EXPM1: case IFN_LOG: case IFN_LOG10: case IFN_LOG1P: case IFN_LOGB: case IFN_SIGNIFICAND: case IFN_TAN: case IFN_NEARBYINT: case IFN_ROUND: case IFN_ATAN2: case IFN_COPYSIGN: case IFN_FMOD: case IFN_POW: case IFN_REMAINDER: case IFN_SCALB: case IFN_FMIN: case IFN_FMAX: gen_hsa_insns_for_call_of_internal_fn (stmt, hbb); break; case IFN_FMA: gen_hsa_fma (stmt, hbb, false, false); break; case IFN_FMS: gen_hsa_fma (stmt, hbb, false, true); break; case IFN_FNMA: gen_hsa_fma (stmt, hbb, true, false); break; case IFN_FNMS: gen_hsa_fma (stmt, hbb, true, true); break; default: HSA_SORRY_ATV (gimple_location (stmt), "support for HSA does not implement internal function: %s", internal_fn_name (fn)); break; } } /* Generate HSA instructions for the given call statement STMT. Instructions will be appended to HBB. */ static void gen_hsa_insns_for_call (gimple *stmt, hsa_bb *hbb) { gcall *call = as_a <gcall *> (stmt); tree lhs = gimple_call_lhs (stmt); hsa_op_reg *dest; if (gimple_call_internal_p (stmt)) { gen_hsa_insn_for_internal_fn_call (call, hbb); return; } if (!gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)) { tree function_decl = gimple_call_fndecl (stmt); if (function_decl == NULL_TREE) { HSA_SORRY_AT (gimple_location (stmt), "support for HSA does not implement indirect calls"); return; } /* Prefetch pass can create type-mismatching prefetch builtin calls which fail the gimple_call_builtin_p test above. Handle them here. 
*/ if (fndecl_built_in_p (function_decl, BUILT_IN_PREFETCH)) return; if (hsa_callable_function_p (function_decl)) gen_hsa_insns_for_direct_call (stmt, hbb); else if (!gen_hsa_insns_for_known_library_call (stmt, hbb)) HSA_SORRY_AT (gimple_location (stmt), "HSA supports only calls of functions marked with " "%<#pragma omp declare target%>"); return; } tree fndecl = gimple_call_fndecl (stmt); enum built_in_function builtin = DECL_FUNCTION_CODE (fndecl); switch (builtin) { case BUILT_IN_FABS: case BUILT_IN_FABSF: gen_hsa_unaryop_for_builtin (BRIG_OPCODE_ABS, stmt, hbb); break; case BUILT_IN_CEIL: case BUILT_IN_CEILF: gen_hsa_unaryop_for_builtin (BRIG_OPCODE_CEIL, stmt, hbb); break; case BUILT_IN_FLOOR: case BUILT_IN_FLOORF: gen_hsa_unaryop_for_builtin (BRIG_OPCODE_FLOOR, stmt, hbb); break; case BUILT_IN_RINT: case BUILT_IN_RINTF: gen_hsa_unaryop_for_builtin (BRIG_OPCODE_RINT, stmt, hbb); break; case BUILT_IN_SQRT: case BUILT_IN_SQRTF: gen_hsa_unaryop_for_builtin (BRIG_OPCODE_SQRT, stmt, hbb); break; case BUILT_IN_TRUNC: case BUILT_IN_TRUNCF: gen_hsa_unaryop_for_builtin (BRIG_OPCODE_TRUNC, stmt, hbb); break; case BUILT_IN_COS: case BUILT_IN_SIN: case BUILT_IN_EXP2: case BUILT_IN_LOG2: /* HSAIL does not provide an instruction for double argument type. */ gen_hsa_unaryop_builtin_call (stmt, hbb); break; case BUILT_IN_COSF: gen_hsa_unaryop_or_call_for_builtin (BRIG_OPCODE_NCOS, stmt, hbb); break; case BUILT_IN_EXP2F: gen_hsa_unaryop_or_call_for_builtin (BRIG_OPCODE_NEXP2, stmt, hbb); break; case BUILT_IN_LOG2F: gen_hsa_unaryop_or_call_for_builtin (BRIG_OPCODE_NLOG2, stmt, hbb); break; case BUILT_IN_SINF: gen_hsa_unaryop_or_call_for_builtin (BRIG_OPCODE_NSIN, stmt, hbb); break; case BUILT_IN_CLRSB: case BUILT_IN_CLRSBL: case BUILT_IN_CLRSBLL: gen_hsa_clrsb (call, hbb); break; case BUILT_IN_CLZ: case BUILT_IN_CLZL: case BUILT_IN_CLZLL: gen_hsa_unaryop_for_builtin (BRIG_OPCODE_FIRSTBIT, stmt, hbb); break; case BUILT_IN_CTZ: case BUILT_IN_CTZL: case BUILT_IN_CTZLL: gen_hsa_unaryop_for_builtin (BRIG_OPCODE_LASTBIT, stmt, hbb); break; case BUILT_IN_FFS: case BUILT_IN_FFSL: case BUILT_IN_FFSLL: gen_hsa_ffs (call, hbb); break; case BUILT_IN_PARITY: case BUILT_IN_PARITYL: case BUILT_IN_PARITYLL: gen_hsa_parity (call, hbb); break; case BUILT_IN_POPCOUNT: case BUILT_IN_POPCOUNTL: case BUILT_IN_POPCOUNTLL: gen_hsa_popcount (call, hbb); break; case BUILT_IN_ATOMIC_LOAD_1: case BUILT_IN_ATOMIC_LOAD_2: case BUILT_IN_ATOMIC_LOAD_4: case BUILT_IN_ATOMIC_LOAD_8: case BUILT_IN_ATOMIC_LOAD_16: { BrigType16_t mtype; hsa_op_base *src; src = get_address_from_value (gimple_call_arg (stmt, 0), hbb); BrigMemoryOrder memorder; const char *mmname; if (hsa_memorder_from_tree (gimple_call_arg (stmt, 1), &memorder, &mmname, gimple_location (stmt))) return; if (memorder == BRIG_MEMORY_ORDER_SC_ACQUIRE_RELEASE) memorder = BRIG_MEMORY_ORDER_SC_ACQUIRE; if (memorder != BRIG_MEMORY_ORDER_RELAXED && memorder != BRIG_MEMORY_ORDER_SC_ACQUIRE && memorder != BRIG_MEMORY_ORDER_NONE) { HSA_SORRY_ATV (gimple_location (stmt), "support for HSA does not implement " "memory model for atomic loads: %s", mmname); return; } if (lhs) { BrigType16_t t = hsa_type_for_scalar_tree_type (TREE_TYPE (lhs), false); mtype = mem_type_for_type (t); mtype = hsa_bittype_for_type (mtype); dest = hsa_cfun->reg_for_gimple_ssa (lhs); } else { mtype = BRIG_TYPE_B64; dest = new hsa_op_reg (mtype); } hsa_insn_basic *atominsn; atominsn = new hsa_insn_atomic (2, BRIG_OPCODE_ATOMIC, BRIG_ATOMIC_LD, mtype, memorder, dest, src); hbb->append_insn (atominsn); break; } case 
BUILT_IN_ATOMIC_EXCHANGE_1: case BUILT_IN_ATOMIC_EXCHANGE_2: case BUILT_IN_ATOMIC_EXCHANGE_4: case BUILT_IN_ATOMIC_EXCHANGE_8: case BUILT_IN_ATOMIC_EXCHANGE_16: gen_hsa_atomic_for_builtin (true, BRIG_ATOMIC_EXCH, stmt, hbb, false); break; break; case BUILT_IN_ATOMIC_FETCH_ADD_1: case BUILT_IN_ATOMIC_FETCH_ADD_2: case BUILT_IN_ATOMIC_FETCH_ADD_4: case BUILT_IN_ATOMIC_FETCH_ADD_8: case BUILT_IN_ATOMIC_FETCH_ADD_16: gen_hsa_atomic_for_builtin (true, BRIG_ATOMIC_ADD, stmt, hbb, false); break; break; case BUILT_IN_ATOMIC_FETCH_SUB_1: case BUILT_IN_ATOMIC_FETCH_SUB_2: case BUILT_IN_ATOMIC_FETCH_SUB_4: case BUILT_IN_ATOMIC_FETCH_SUB_8: case BUILT_IN_ATOMIC_FETCH_SUB_16: gen_hsa_atomic_for_builtin (true, BRIG_ATOMIC_SUB, stmt, hbb, false); break; break; case BUILT_IN_ATOMIC_FETCH_AND_1: case BUILT_IN_ATOMIC_FETCH_AND_2: case BUILT_IN_ATOMIC_FETCH_AND_4: case BUILT_IN_ATOMIC_FETCH_AND_8: case BUILT_IN_ATOMIC_FETCH_AND_16: gen_hsa_atomic_for_builtin (true, BRIG_ATOMIC_AND, stmt, hbb, false); break; break; case BUILT_IN_ATOMIC_FETCH_XOR_1: case BUILT_IN_ATOMIC_FETCH_XOR_2: case BUILT_IN_ATOMIC_FETCH_XOR_4: case BUILT_IN_ATOMIC_FETCH_XOR_8: case BUILT_IN_ATOMIC_FETCH_XOR_16: gen_hsa_atomic_for_builtin (true, BRIG_ATOMIC_XOR, stmt, hbb, false); break; break; case BUILT_IN_ATOMIC_FETCH_OR_1: case BUILT_IN_ATOMIC_FETCH_OR_2: case BUILT_IN_ATOMIC_FETCH_OR_4: case BUILT_IN_ATOMIC_FETCH_OR_8: case BUILT_IN_ATOMIC_FETCH_OR_16: gen_hsa_atomic_for_builtin (true, BRIG_ATOMIC_OR, stmt, hbb, false); break; break; case BUILT_IN_ATOMIC_STORE_1: case BUILT_IN_ATOMIC_STORE_2: case BUILT_IN_ATOMIC_STORE_4: case BUILT_IN_ATOMIC_STORE_8: case BUILT_IN_ATOMIC_STORE_16: /* Since there cannot be any LHS, the first parameter is meaningless. */ gen_hsa_atomic_for_builtin (true, BRIG_ATOMIC_ST, stmt, hbb, false); break; break; case BUILT_IN_ATOMIC_ADD_FETCH_1: case BUILT_IN_ATOMIC_ADD_FETCH_2: case BUILT_IN_ATOMIC_ADD_FETCH_4: case BUILT_IN_ATOMIC_ADD_FETCH_8: case BUILT_IN_ATOMIC_ADD_FETCH_16: gen_hsa_atomic_for_builtin (false, BRIG_ATOMIC_ADD, stmt, hbb, false); break; case BUILT_IN_ATOMIC_SUB_FETCH_1: case BUILT_IN_ATOMIC_SUB_FETCH_2: case BUILT_IN_ATOMIC_SUB_FETCH_4: case BUILT_IN_ATOMIC_SUB_FETCH_8: case BUILT_IN_ATOMIC_SUB_FETCH_16: gen_hsa_atomic_for_builtin (false, BRIG_ATOMIC_SUB, stmt, hbb, false); break; case BUILT_IN_ATOMIC_AND_FETCH_1: case BUILT_IN_ATOMIC_AND_FETCH_2: case BUILT_IN_ATOMIC_AND_FETCH_4: case BUILT_IN_ATOMIC_AND_FETCH_8: case BUILT_IN_ATOMIC_AND_FETCH_16: gen_hsa_atomic_for_builtin (false, BRIG_ATOMIC_AND, stmt, hbb, false); break; case BUILT_IN_ATOMIC_XOR_FETCH_1: case BUILT_IN_ATOMIC_XOR_FETCH_2: case BUILT_IN_ATOMIC_XOR_FETCH_4: case BUILT_IN_ATOMIC_XOR_FETCH_8: case BUILT_IN_ATOMIC_XOR_FETCH_16: gen_hsa_atomic_for_builtin (false, BRIG_ATOMIC_XOR, stmt, hbb, false); break; case BUILT_IN_ATOMIC_OR_FETCH_1: case BUILT_IN_ATOMIC_OR_FETCH_2: case BUILT_IN_ATOMIC_OR_FETCH_4: case BUILT_IN_ATOMIC_OR_FETCH_8: case BUILT_IN_ATOMIC_OR_FETCH_16: gen_hsa_atomic_for_builtin (false, BRIG_ATOMIC_OR, stmt, hbb, false); break; case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_1: case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_2: case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_4: case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_8: case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_16: { tree type = TREE_TYPE (gimple_call_arg (stmt, 1)); BrigType16_t atype = hsa_bittype_for_type (hsa_type_for_scalar_tree_type (type, false)); BrigMemoryOrder memorder = BRIG_MEMORY_ORDER_SC_ACQUIRE_RELEASE; hsa_insn_basic *atominsn; hsa_op_base *tgt; atominsn = new 
hsa_insn_atomic (4, BRIG_OPCODE_ATOMIC, BRIG_ATOMIC_CAS, atype, memorder); tgt = get_address_from_value (gimple_call_arg (stmt, 0), hbb); if (lhs != NULL) dest = hsa_cfun->reg_for_gimple_ssa (lhs); else dest = new hsa_op_reg (atype); atominsn->set_op (0, dest); atominsn->set_op (1, tgt); hsa_op_with_type *op = hsa_reg_or_immed_for_gimple_op (gimple_call_arg (stmt, 1), hbb); atominsn->set_op (2, op); op = hsa_reg_or_immed_for_gimple_op (gimple_call_arg (stmt, 2), hbb); atominsn->set_op (3, op); hbb->append_insn (atominsn); break; } case BUILT_IN_HSA_WORKGROUPID: query_hsa_grid_dim (stmt, BRIG_OPCODE_WORKGROUPID, hbb); break; case BUILT_IN_HSA_WORKITEMID: query_hsa_grid_dim (stmt, BRIG_OPCODE_WORKITEMID, hbb); break; case BUILT_IN_HSA_WORKITEMABSID: query_hsa_grid_dim (stmt, BRIG_OPCODE_WORKITEMABSID, hbb); break; case BUILT_IN_HSA_GRIDSIZE: query_hsa_grid_dim (stmt, BRIG_OPCODE_GRIDSIZE, hbb); break; case BUILT_IN_HSA_CURRENTWORKGROUPSIZE: query_hsa_grid_dim (stmt, BRIG_OPCODE_CURRENTWORKGROUPSIZE, hbb); break; case BUILT_IN_GOMP_BARRIER: hbb->append_insn (new hsa_insn_br (0, BRIG_OPCODE_BARRIER, BRIG_TYPE_NONE, BRIG_WIDTH_ALL)); break; case BUILT_IN_GOMP_PARALLEL: HSA_SORRY_AT (gimple_location (stmt), "support for HSA does not implement non-gridified " "OpenMP parallel constructs"); break; case BUILT_IN_OMP_GET_THREAD_NUM: { query_hsa_grid_nodim (stmt, BRIG_OPCODE_WORKITEMFLATABSID, hbb); break; } case BUILT_IN_OMP_GET_NUM_THREADS: { gen_get_num_threads (stmt, hbb); break; } case BUILT_IN_GOMP_TEAMS: { gen_set_num_threads (gimple_call_arg (stmt, 1), hbb); break; } case BUILT_IN_OMP_GET_NUM_TEAMS: { gen_get_num_teams (stmt, hbb); break; } case BUILT_IN_OMP_GET_TEAM_NUM: { gen_get_team_num (stmt, hbb); break; } case BUILT_IN_MEMCPY: case BUILT_IN_MEMPCPY: { expand_memory_copy (stmt, hbb, builtin); break; } case BUILT_IN_MEMSET: { tree c = gimple_call_arg (stmt, 1); if (TREE_CODE (c) != INTEGER_CST) { gen_hsa_insns_for_direct_call (stmt, hbb); return; } tree byte_size = gimple_call_arg (stmt, 2); if (!tree_fits_uhwi_p (byte_size)) { gen_hsa_insns_for_direct_call (stmt, hbb); return; } unsigned HOST_WIDE_INT n = tree_to_uhwi (byte_size); if (n > HSA_MEMORY_BUILTINS_LIMIT) { gen_hsa_insns_for_direct_call (stmt, hbb); return; } unsigned HOST_WIDE_INT constant = tree_to_uhwi (fold_convert (unsigned_char_type_node, c)); expand_memory_set (stmt, n, constant, hbb, builtin); break; } case BUILT_IN_BZERO: { tree byte_size = gimple_call_arg (stmt, 1); if (!tree_fits_uhwi_p (byte_size)) { gen_hsa_insns_for_direct_call (stmt, hbb); return; } unsigned HOST_WIDE_INT n = tree_to_uhwi (byte_size); if (n > HSA_MEMORY_BUILTINS_LIMIT) { gen_hsa_insns_for_direct_call (stmt, hbb); return; } expand_memory_set (stmt, n, 0, hbb, builtin); break; } CASE_BUILT_IN_ALLOCA: { gen_hsa_alloca (call, hbb); break; } case BUILT_IN_PREFETCH: break; default: { tree name_tree = DECL_NAME (fndecl); const char *s = IDENTIFIER_POINTER (name_tree); size_t len = strlen (s); if (len > 4 && (strncmp (s, "__builtin_GOMP_", 15) == 0)) HSA_SORRY_ATV (gimple_location (stmt), "support for HSA does not implement GOMP function %s", s); else gen_hsa_insns_for_direct_call (stmt, hbb); return; } } } /* Generate HSA instructions for a given gimple statement. Instructions will be appended to HBB. 
*/ static void gen_hsa_insns_for_gimple_stmt (gimple *stmt, hsa_bb *hbb) { switch (gimple_code (stmt)) { case GIMPLE_ASSIGN: if (gimple_clobber_p (stmt)) break; if (gimple_assign_single_p (stmt)) { tree lhs = gimple_assign_lhs (stmt); tree rhs = gimple_assign_rhs1 (stmt); gen_hsa_insns_for_single_assignment (lhs, rhs, hbb); } else gen_hsa_insns_for_operation_assignment (stmt, hbb); break; case GIMPLE_RETURN: gen_hsa_insns_for_return (as_a <greturn *> (stmt), hbb); break; case GIMPLE_COND: gen_hsa_insns_for_cond_stmt (stmt, hbb); break; case GIMPLE_CALL: gen_hsa_insns_for_call (stmt, hbb); break; case GIMPLE_DEBUG: /* ??? HSA supports some debug facilities. */ break; case GIMPLE_LABEL: { tree label = gimple_label_label (as_a <glabel *> (stmt)); if (FORCED_LABEL (label)) HSA_SORRY_AT (gimple_location (stmt), "support for HSA does not implement gimple label with " "address taken"); break; } case GIMPLE_NOP: { hbb->append_insn (new hsa_insn_basic (0, BRIG_OPCODE_NOP)); break; } case GIMPLE_SWITCH: { gen_hsa_insns_for_switch_stmt (as_a <gswitch *> (stmt), hbb); break; } default: HSA_SORRY_ATV (gimple_location (stmt), "support for HSA does not implement gimple statement %s", gimple_code_name[(int) gimple_code (stmt)]); } } /* Generate a HSA PHI from a gimple PHI. */ static void gen_hsa_phi_from_gimple_phi (gimple *phi_stmt, hsa_bb *hbb) { hsa_insn_phi *hphi; unsigned count = gimple_phi_num_args (phi_stmt); hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (gimple_phi_result (phi_stmt)); hphi = new hsa_insn_phi (count, dest); hphi->m_bb = hbb->m_bb; auto_vec <tree, 8> aexprs; auto_vec <hsa_op_reg *, 8> aregs; /* Calling split_edge when processing a PHI node messes up with the order of gimple phi node arguments (it moves the one associated with the edge to the end). We need to keep the order of edges and arguments of HSA phi node arguments consistent, so we do all required splitting as the first step, and in reverse order as to not be affected by the re-orderings. */ for (unsigned j = count; j != 0; j--) { unsigned i = j - 1; tree op = gimple_phi_arg_def (phi_stmt, i); if (TREE_CODE (op) != ADDR_EXPR) continue; edge e = gimple_phi_arg_edge (as_a <gphi *> (phi_stmt), i); hsa_bb *hbb_src = hsa_init_new_bb (split_edge (e)); hsa_op_address *addr = gen_hsa_addr (TREE_OPERAND (op, 0), hbb_src); hsa_op_reg *dest = new hsa_op_reg (hsa_get_segment_addr_type (BRIG_SEGMENT_FLAT)); hsa_insn_basic *insn = new hsa_insn_basic (2, BRIG_OPCODE_LDA, BRIG_TYPE_U64, dest, addr); hbb_src->append_insn (insn); aexprs.safe_push (op); aregs.safe_push (dest); } tree lhs = gimple_phi_result (phi_stmt); for (unsigned i = 0; i < count; i++) { tree op = gimple_phi_arg_def (phi_stmt, i); if (TREE_CODE (op) == SSA_NAME) { hsa_op_reg *hreg = hsa_cfun->reg_for_gimple_ssa (op); hphi->set_op (i, hreg); } else { gcc_assert (is_gimple_min_invariant (op)); tree t = TREE_TYPE (op); if (!POINTER_TYPE_P (t) || (TREE_CODE (op) == STRING_CST && TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE)) hphi->set_op (i, new hsa_op_immed (op)); else if (POINTER_TYPE_P (TREE_TYPE (lhs)) && TREE_CODE (op) == INTEGER_CST) { /* Handle assignment of NULL value to a pointer type. 
*/ hphi->set_op (i, new hsa_op_immed (op)); } else if (TREE_CODE (op) == ADDR_EXPR) { hsa_op_reg *dest = NULL; for (unsigned a_idx = 0; a_idx < aexprs.length (); a_idx++) if (aexprs[a_idx] == op) { dest = aregs[a_idx]; break; } gcc_assert (dest); hphi->set_op (i, dest); } else { HSA_SORRY_AT (gimple_location (phi_stmt), "support for HSA does not handle PHI nodes with " "constant address operands"); return; } } } hbb->append_phi (hphi); } /* Constructor of class containing HSA-specific information about a basic block. CFG_BB is the CFG BB this HSA BB is associated with. IDX is the new index of this BB (so that the constructor does not attempt to use hsa_cfun during its construction). */ hsa_bb::hsa_bb (basic_block cfg_bb, int idx) : m_bb (cfg_bb), m_first_insn (NULL), m_last_insn (NULL), m_first_phi (NULL), m_last_phi (NULL), m_index (idx) { gcc_assert (!cfg_bb->aux); cfg_bb->aux = this; } /* Constructor of class containing HSA-specific information about a basic block. CFG_BB is the CFG BB this HSA BB is associated with. */ hsa_bb::hsa_bb (basic_block cfg_bb) : m_bb (cfg_bb), m_first_insn (NULL), m_last_insn (NULL), m_first_phi (NULL), m_last_phi (NULL), m_index (hsa_cfun->m_hbb_count++) { gcc_assert (!cfg_bb->aux); cfg_bb->aux = this; } /* Create and initialize and return a new hsa_bb structure for a given CFG basic block BB. */ hsa_bb * hsa_init_new_bb (basic_block bb) { void *m = obstack_alloc (&hsa_obstack, sizeof (hsa_bb)); return new (m) hsa_bb (bb); } /* Initialize OMP in an HSA basic block PROLOGUE. */ static void init_prologue (void) { if (!hsa_cfun->m_kern_p) return; hsa_bb *prologue = hsa_bb_for_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun)); /* Create a magic number that is going to be printed by libgomp. */ unsigned index = hsa_get_number_decl_kernel_mappings (); /* Emit store to debug argument. */ if (param_hsa_gen_debug_stores > 0) set_debug_value (prologue, new hsa_op_immed (1000 + index, BRIG_TYPE_U64)); } /* Initialize hsa_num_threads to a default value. */ static void init_hsa_num_threads (void) { hsa_bb *prologue = hsa_bb_for_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun)); /* Save the default value to private variable hsa_num_threads. */ hsa_insn_basic *basic = new hsa_insn_mem (BRIG_OPCODE_ST, hsa_num_threads->m_type, new hsa_op_immed (0, hsa_num_threads->m_type), new hsa_op_address (hsa_num_threads)); prologue->append_insn (basic); } /* Go over gimple representation and generate our internal HSA one. */ static void gen_body_from_gimple () { basic_block bb; /* Verify CFG for complex edges we are unable to handle. */ edge_iterator ei; edge e; FOR_EACH_BB_FN (bb, cfun) { FOR_EACH_EDGE (e, ei, bb->succs) { /* Verify all unsupported flags for edges that point to the same basic block. 
*/ if (e->flags & EDGE_EH) { HSA_SORRY_AT (UNKNOWN_LOCATION, "support for HSA does not implement exception " "handling"); return; } } } FOR_EACH_BB_FN (bb, cfun) { gimple_stmt_iterator gsi; hsa_bb *hbb = hsa_bb_for_bb (bb); if (hbb) continue; hbb = hsa_init_new_bb (bb); for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gen_hsa_insns_for_gimple_stmt (gsi_stmt (gsi), hbb); if (hsa_seen_error ()) return; } } FOR_EACH_BB_FN (bb, cfun) { gimple_stmt_iterator gsi; hsa_bb *hbb = hsa_bb_for_bb (bb); gcc_assert (hbb != NULL); for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) if (!virtual_operand_p (gimple_phi_result (gsi_stmt (gsi)))) gen_hsa_phi_from_gimple_phi (gsi_stmt (gsi), hbb); } if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "------- Generated SSA form -------\n"); dump_hsa_cfun (dump_file); } } static void gen_function_decl_parameters (hsa_function_representation *f, tree decl) { tree parm; unsigned i; for (parm = TYPE_ARG_TYPES (TREE_TYPE (decl)), i = 0; parm; parm = TREE_CHAIN (parm), i++) { /* Result type if last in the tree list. */ if (TREE_CHAIN (parm) == NULL) break; tree v = TREE_VALUE (parm); hsa_symbol *arg = new hsa_symbol (BRIG_TYPE_NONE, BRIG_SEGMENT_ARG, BRIG_LINKAGE_NONE); arg->m_type = hsa_type_for_tree_type (v, &arg->m_dim); arg->m_name_number = i; f->m_input_args.safe_push (arg); } tree result_type = TREE_TYPE (TREE_TYPE (decl)); if (!VOID_TYPE_P (result_type)) { f->m_output_arg = new hsa_symbol (BRIG_TYPE_NONE, BRIG_SEGMENT_ARG, BRIG_LINKAGE_NONE); f->m_output_arg->m_type = hsa_type_for_tree_type (result_type, &f->m_output_arg->m_dim); f->m_output_arg->m_name = "res"; } } /* Generate the vector of parameters of the HSA representation of the current function. This also includes the output parameter representing the result. */ static void gen_function_def_parameters () { tree parm; hsa_bb *prologue = hsa_bb_for_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun)); for (parm = DECL_ARGUMENTS (cfun->decl); parm; parm = DECL_CHAIN (parm)) { class hsa_symbol **slot; hsa_symbol *arg = new hsa_symbol (BRIG_TYPE_NONE, hsa_cfun->m_kern_p ? BRIG_SEGMENT_KERNARG : BRIG_SEGMENT_ARG, BRIG_LINKAGE_FUNCTION); arg->fillup_for_decl (parm); hsa_cfun->m_input_args.safe_push (arg); if (hsa_seen_error ()) return; arg->m_name = hsa_get_declaration_name (parm); /* Copy all input arguments and create corresponding private symbols for them. 
*/ hsa_symbol *private_arg; hsa_op_address *parm_addr = new hsa_op_address (arg); if (TREE_ADDRESSABLE (parm) || (!is_gimple_reg (parm) && !TREE_READONLY (parm))) { private_arg = hsa_cfun->create_hsa_temporary (arg->m_type); private_arg->fillup_for_decl (parm); BrigAlignment8_t align = MIN (arg->m_align, private_arg->m_align); hsa_op_address *private_arg_addr = new hsa_op_address (private_arg); gen_hsa_memory_copy (prologue, private_arg_addr, parm_addr, arg->total_byte_size (), align); } else private_arg = arg; slot = hsa_cfun->m_local_symbols->find_slot (private_arg, INSERT); gcc_assert (!*slot); *slot = private_arg; if (is_gimple_reg (parm)) { tree ddef = ssa_default_def (cfun, parm); if (ddef && !has_zero_uses (ddef)) { BrigType16_t t = hsa_type_for_scalar_tree_type (TREE_TYPE (ddef), false); BrigType16_t mtype = mem_type_for_type (t); hsa_op_reg *dest = hsa_cfun->reg_for_gimple_ssa (ddef); hsa_insn_mem *mem = new hsa_insn_mem (BRIG_OPCODE_LD, mtype, dest, parm_addr); gcc_assert (!parm_addr->m_reg); prologue->append_insn (mem); } } } if (!VOID_TYPE_P (TREE_TYPE (TREE_TYPE (cfun->decl)))) { class hsa_symbol **slot; hsa_cfun->m_output_arg = new hsa_symbol (BRIG_TYPE_NONE, BRIG_SEGMENT_ARG, BRIG_LINKAGE_FUNCTION); hsa_cfun->m_output_arg->fillup_for_decl (DECL_RESULT (cfun->decl)); if (hsa_seen_error ()) return; hsa_cfun->m_output_arg->m_name = "res"; slot = hsa_cfun->m_local_symbols->find_slot (hsa_cfun->m_output_arg, INSERT); gcc_assert (!*slot); *slot = hsa_cfun->m_output_arg; } } /* Generate function representation that corresponds to a function declaration. */ hsa_function_representation * hsa_generate_function_declaration (tree decl) { hsa_function_representation *fun = new hsa_function_representation (decl, false, 0); fun->m_declaration_p = true; fun->m_name = get_brig_function_name (decl); gen_function_decl_parameters (fun, decl); return fun; } /* Generate function representation that corresponds to an internal FN. */ hsa_function_representation * hsa_generate_internal_fn_decl (hsa_internal_fn *fn) { hsa_function_representation *fun = new hsa_function_representation (fn); fun->m_name = fn->name (); for (unsigned i = 0; i < fn->get_arity (); i++) { hsa_symbol *arg = new hsa_symbol (fn->get_argument_type (i), BRIG_SEGMENT_ARG, BRIG_LINKAGE_NONE); arg->m_name_number = i; fun->m_input_args.safe_push (arg); } fun->m_output_arg = new hsa_symbol (fn->get_argument_type (-1), BRIG_SEGMENT_ARG, BRIG_LINKAGE_NONE); fun->m_output_arg->m_name = "res"; return fun; } /* Return true if switch statement S can be transformed to a SBR instruction in HSAIL. */ static bool transformable_switch_to_sbr_p (gswitch *s) { /* Identify if a switch statement can be transformed to SBR instruction, like: sbr_u32 $s1 [@label1, @label2, @label3]; */ tree size = get_switch_size (s); if (!tree_fits_uhwi_p (size)) return false; if (tree_to_uhwi (size) > HSA_MAXIMUM_SBR_LABELS) return false; return true; } /* Structure hold connection between PHI nodes and immediate values hold by there nodes. */ class phi_definition { public: phi_definition (unsigned phi_i, unsigned label_i, tree imm): phi_index (phi_i), label_index (label_i), phi_value (imm) {} unsigned phi_index; unsigned label_index; tree phi_value; }; /* Sum slice of a vector V, starting from index START and ending at the index END - 1. 
*/ template <typename T> static T sum_slice (const auto_vec <T> &v, unsigned start, unsigned end, T zero) { T s = zero; for (unsigned i = start; i < end; i++) s += v[i]; return s; } /* Function transforms GIMPLE SWITCH statements to a series of IF statements. Let's assume following example: L0: switch (index) case C1: L1: hard_work_1 (); break; case C2..C3: L2: hard_work_2 (); break; default: LD: hard_work_3 (); break; The transformation encompasses following steps: 1) all immediate values used by edges coming from the switch basic block are saved 2) all these edges are removed 3) the switch statement (in L0) is replaced by: if (index == C1) goto L1; else goto L1'; 4) newly created basic block Lx' is used for generation of a next condition 5) else branch of the last condition goes to LD 6) fix all immediate values in PHI nodes that were propagated though edges that were removed in step 2 Note: if a case is made by a range C1..C2, then process following transformation: switch_cond_op1 = C1 <= index; switch_cond_op2 = index <= C2; switch_cond_and = switch_cond_op1 & switch_cond_op2; if (switch_cond_and != 0) goto Lx; else goto Ly; */ static bool convert_switch_statements (void) { basic_block bb; bool modified_cfg = false; FOR_EACH_BB_FN (bb, cfun) { gimple_stmt_iterator gsi = gsi_last_bb (bb); if (gsi_end_p (gsi)) continue; gimple *stmt = gsi_stmt (gsi); if (gimple_code (stmt) == GIMPLE_SWITCH) { gswitch *s = as_a <gswitch *> (stmt); /* If the switch can utilize SBR insn, skip the statement. */ if (transformable_switch_to_sbr_p (s)) continue; modified_cfg = true; unsigned labels = gimple_switch_num_labels (s); tree index = gimple_switch_index (s); tree index_type = TREE_TYPE (index); tree default_label = gimple_switch_default_label (s); basic_block default_label_bb = label_to_block (cfun, CASE_LABEL (default_label)); basic_block cur_bb = bb; auto_vec <edge> new_edges; auto_vec <phi_definition *> phi_todo_list; auto_vec <profile_count> edge_counts; auto_vec <profile_probability> edge_probabilities; /* Investigate all labels that and PHI nodes in these edges which should be fixed after we add new collection of edges. */ for (unsigned i = 0; i < labels; i++) { basic_block label_bb = gimple_switch_label_bb (cfun, s, i); edge e = find_edge (bb, label_bb); edge_counts.safe_push (e->count ()); edge_probabilities.safe_push (e->probability); gphi_iterator phi_gsi; /* Save PHI definitions that will be destroyed because of an edge is going to be removed. */ unsigned phi_index = 0; for (phi_gsi = gsi_start_phis (e->dest); !gsi_end_p (phi_gsi); gsi_next (&phi_gsi)) { gphi *phi = phi_gsi.phi (); for (unsigned j = 0; j < gimple_phi_num_args (phi); j++) { if (gimple_phi_arg_edge (phi, j) == e) { tree imm = gimple_phi_arg_def (phi, j); phi_definition *p = new phi_definition (phi_index, i, imm); phi_todo_list.safe_push (p); break; } } phi_index++; } } /* Remove all edges for the current basic block. */ for (int i = EDGE_COUNT (bb->succs) - 1; i >= 0; i--) { edge e = EDGE_SUCC (bb, i); remove_edge (e); } /* Iterate all non-default labels. 
*/ for (unsigned i = 1; i < labels; i++) { tree label = gimple_switch_label (s, i); tree low = CASE_LOW (label); tree high = CASE_HIGH (label); if (!useless_type_conversion_p (TREE_TYPE (low), index_type)) low = fold_convert (index_type, low); gimple_stmt_iterator cond_gsi = gsi_last_bb (cur_bb); gimple *c = NULL; if (high) { tree tmp1 = make_temp_ssa_name (boolean_type_node, NULL, "switch_cond_op1"); gimple *assign1 = gimple_build_assign (tmp1, LE_EXPR, low, index); tree tmp2 = make_temp_ssa_name (boolean_type_node, NULL, "switch_cond_op2"); if (!useless_type_conversion_p (TREE_TYPE (high), index_type)) high = fold_convert (index_type, high); gimple *assign2 = gimple_build_assign (tmp2, LE_EXPR, index, high); tree tmp3 = make_temp_ssa_name (boolean_type_node, NULL, "switch_cond_and"); gimple *assign3 = gimple_build_assign (tmp3, BIT_AND_EXPR, tmp1, tmp2); gsi_insert_before (&cond_gsi, assign1, GSI_SAME_STMT); gsi_insert_before (&cond_gsi, assign2, GSI_SAME_STMT); gsi_insert_before (&cond_gsi, assign3, GSI_SAME_STMT); tree b = constant_boolean_node (false, boolean_type_node); c = gimple_build_cond (NE_EXPR, tmp3, b, NULL, NULL); } else c = gimple_build_cond (EQ_EXPR, index, low, NULL, NULL); gimple_set_location (c, gimple_location (stmt)); gsi_insert_before (&cond_gsi, c, GSI_SAME_STMT); basic_block label_bb = label_to_block (cfun, CASE_LABEL (label)); edge new_edge = make_edge (cur_bb, label_bb, EDGE_TRUE_VALUE); profile_probability prob_sum = sum_slice <profile_probability> (edge_probabilities, i, labels, profile_probability::never ()) + edge_probabilities[0]; if (prob_sum.initialized_p ()) new_edge->probability = edge_probabilities[i] / prob_sum; new_edges.safe_push (new_edge); if (i < labels - 1) { /* Prepare another basic block that will contain next condition. */ basic_block next_bb = create_empty_bb (cur_bb); if (current_loops) { add_bb_to_loop (next_bb, cur_bb->loop_father); loops_state_set (LOOPS_NEED_FIXUP); } edge next_edge = make_edge (cur_bb, next_bb, EDGE_FALSE_VALUE); next_edge->probability = new_edge->probability.invert (); next_bb->count = next_edge->count (); cur_bb = next_bb; } else /* Link last IF statement and default label of the switch. */ { edge e = make_edge (cur_bb, default_label_bb, EDGE_FALSE_VALUE); e->probability = new_edge->probability.invert (); new_edges.safe_insert (0, e); } } /* Restore original PHI immediate value. */ for (unsigned i = 0; i < phi_todo_list.length (); i++) { phi_definition *phi_def = phi_todo_list[i]; edge new_edge = new_edges[phi_def->label_index]; gphi_iterator it = gsi_start_phis (new_edge->dest); for (unsigned i = 0; i < phi_def->phi_index; i++) gsi_next (&it); gphi *phi = it.phi (); add_phi_arg (phi, phi_def->phi_value, new_edge, UNKNOWN_LOCATION); delete phi_def; } /* Remove the original GIMPLE switch statement. */ gsi_remove (&gsi, true); } } if (dump_file) dump_function_to_file (current_function_decl, dump_file, TDF_DETAILS); return modified_cfg; } /* Expand builtins that can't be handled by HSA back-end. 
*/ static void expand_builtins () { basic_block bb; FOR_EACH_BB_FN (bb, cfun) { for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *stmt = gsi_stmt (gsi); if (gimple_code (stmt) != GIMPLE_CALL) continue; gcall *call = as_a <gcall *> (stmt); if (!gimple_call_builtin_p (call, BUILT_IN_NORMAL)) continue; tree fndecl = gimple_call_fndecl (stmt); enum built_in_function fn = DECL_FUNCTION_CODE (fndecl); switch (fn) { case BUILT_IN_CEXPF: case BUILT_IN_CEXPIF: case BUILT_IN_CEXPI: { /* Similar to builtins.c (expand_builtin_cexpi), the builtin can be transformed to: cexp(I * z) = ccos(z) + I * csin(z). */ tree lhs = gimple_call_lhs (stmt); tree rhs = gimple_call_arg (stmt, 0); tree rhs_type = TREE_TYPE (rhs); bool float_type_p = rhs_type == float_type_node; tree real_part = make_temp_ssa_name (rhs_type, NULL, "cexp_real_part"); tree imag_part = make_temp_ssa_name (rhs_type, NULL, "cexp_imag_part"); tree cos_fndecl = mathfn_built_in (rhs_type, fn == float_type_p ? BUILT_IN_COSF : BUILT_IN_COS); gcall *cos = gimple_build_call (cos_fndecl, 1, rhs); gimple_call_set_lhs (cos, real_part); gsi_insert_before (&gsi, cos, GSI_SAME_STMT); tree sin_fndecl = mathfn_built_in (rhs_type, fn == float_type_p ? BUILT_IN_SINF : BUILT_IN_SIN); gcall *sin = gimple_build_call (sin_fndecl, 1, rhs); gimple_call_set_lhs (sin, imag_part); gsi_insert_before (&gsi, sin, GSI_SAME_STMT); gassign *assign = gimple_build_assign (lhs, COMPLEX_EXPR, real_part, imag_part); gsi_insert_before (&gsi, assign, GSI_SAME_STMT); gsi_remove (&gsi, true); break; } default: break; } } } } /* Emit HSA module variables that are global for the entire module. */ static void emit_hsa_module_variables (void) { hsa_num_threads = new hsa_symbol (BRIG_TYPE_U32, BRIG_SEGMENT_PRIVATE, BRIG_LINKAGE_MODULE, true); hsa_num_threads->m_name = "hsa_num_threads"; hsa_brig_emit_omp_symbols (); } /* Generate HSAIL representation of the current function and write into a special section of the output file. If KERNEL is set, the function will be considered an HSA kernel callable from the host, otherwise it will be compiled as an HSA function callable from other HSA code. */ static void generate_hsa (bool kernel) { hsa_init_data_for_cfun (); if (hsa_num_threads == NULL) emit_hsa_module_variables (); bool modified_cfg = convert_switch_statements (); /* Initialize hsa_cfun. 
*/ hsa_cfun = new hsa_function_representation (cfun->decl, kernel, SSANAMES (cfun)->length (), modified_cfg); hsa_cfun->init_extra_bbs (); if (flag_tm) { HSA_SORRY_AT (UNKNOWN_LOCATION, "support for HSA does not implement transactional memory"); goto fail; } verify_function_arguments (cfun->decl); if (hsa_seen_error ()) goto fail; hsa_cfun->m_name = get_brig_function_name (cfun->decl); gen_function_def_parameters (); if (hsa_seen_error ()) goto fail; init_prologue (); gen_body_from_gimple (); if (hsa_seen_error ()) goto fail; if (hsa_cfun->m_kernel_dispatch_count) init_hsa_num_threads (); if (hsa_cfun->m_kern_p) { hsa_function_summary *s = hsa_summaries->get_create (cgraph_node::get (hsa_cfun->m_decl)); hsa_add_kern_decl_mapping (current_function_decl, hsa_cfun->m_name, hsa_cfun->m_maximum_omp_data_size, s->m_gridified_kernel_p); } if (flag_checking) { for (unsigned i = 0; i < hsa_cfun->m_ssa_map.length (); i++) if (hsa_cfun->m_ssa_map[i]) hsa_cfun->m_ssa_map[i]->verify_ssa (); basic_block bb; FOR_EACH_BB_FN (bb, cfun) { hsa_bb *hbb = hsa_bb_for_bb (bb); for (hsa_insn_basic *insn = hbb->m_first_insn; insn; insn = insn->m_next) insn->verify (); } } hsa_regalloc (); hsa_brig_emit_function (); fail: hsa_deinit_data_for_cfun (); } namespace { const pass_data pass_data_gen_hsail = { GIMPLE_PASS, "hsagen", /* name */ OPTGROUP_OMP, /* optinfo_flags */ TV_NONE, /* tv_id */ PROP_cfg | PROP_ssa, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ 0 /* todo_flags_finish */ }; class pass_gen_hsail : public gimple_opt_pass { public: pass_gen_hsail (gcc::context *ctxt) : gimple_opt_pass(pass_data_gen_hsail, ctxt) {} /* opt_pass methods: */ bool gate (function *); unsigned int execute (function *); }; // class pass_gen_hsail /* Determine whether or not to run generation of HSAIL. */ bool pass_gen_hsail::gate (function *f) { return hsa_gen_requested_p () && hsa_gpu_implementation_p (f->decl); } unsigned int pass_gen_hsail::execute (function *) { cgraph_node *node = cgraph_node::get_create (current_function_decl); hsa_function_summary *s = hsa_summaries->get_create (node); expand_builtins (); generate_hsa (s->m_kind == HSA_KERNEL); TREE_ASM_WRITTEN (current_function_decl) = 1; return TODO_discard_function; } } // anon namespace /* Create the instance of hsa gen pass. */ gimple_opt_pass * make_pass_gen_hsail (gcc::context *ctxt) { return new pass_gen_hsail (ctxt); }
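/* Illustrative stand-alone sketch (not part of the pass above): the lowering
   that convert_switch_statements performs on GIMPLE, written as plain C so the
   resulting control flow is easy to see.  The real pass rewrites GIMPLE
   statements, PHI arguments and CFG edges rather than C text, and the function
   names used here (with_switch, lowered) are hypothetical.  */

#include <stdio.h>

static void with_switch (int index)
{
  switch (index)
    {
    case 1:          puts ("hard_work_1"); break;  /* single value C1 */
    case 2: case 3:  puts ("hard_work_2"); break;  /* range C2..C3 */
    default:         puts ("hard_work_3"); break;
    }
}

static void lowered (int index)
{
  /* Single-value case: if (index == C1) goto L1; else fall to the next test.  */
  if (index == 1)
    { puts ("hard_work_1"); return; }

  /* A range case C1..C2 (CASE_HIGH != NULL) becomes two comparisons whose
     results are combined with BIT_AND_EXPR, mirroring the
     switch_cond_op1/switch_cond_op2/switch_cond_and temporaries created by
     the pass; keeping each condition a simple two-operand statement is what
     lets it map directly onto HSAIL compare instructions.  */
  _Bool switch_cond_op1 = (2 <= index);
  _Bool switch_cond_op2 = (index <= 3);
  _Bool switch_cond_and = switch_cond_op1 & switch_cond_op2;
  if (switch_cond_and != 0)
    { puts ("hard_work_2"); return; }

  /* The else branch of the last condition reaches the default label.  */
  puts ("hard_work_3");
}

int main (void)
{
  for (int index = 0; index <= 4; index++)
    {
      with_switch (index);   /* original control flow */
      lowered (index);       /* prints the same result after the lowering */
    }
  return 0;
}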
GB_convert_bitmap_worker.c
//------------------------------------------------------------------------------ // GB_convert_bitmap_worker: construct triplets or CSC/CSR from bitmap //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If A is iso and Ax_new is not NULL, the iso scalar is expanded into the // non-iso array Ax_new. Otherwise, if Ax_new and Ax are NULL then no values // are extracted. // TODO allow this function to do typecasting. Create 169 different versions // for all 13x13 versions. Use this as part of Method 24, C=A assignment. // Can also use typecasting for GB_Matrix_diag. #include "GB.h" #include "GB_partition.h" GrB_Info GB_convert_bitmap_worker // extract CSC/CSR or triplets from bitmap ( // outputs: int64_t *restrict Ap, // vector pointers for CSC/CSR form int64_t *restrict Ai, // indices for CSC/CSR or triplet form int64_t *restrict Aj, // vector indices for triplet form GB_void *restrict Ax_new, // values for CSC/CSR or triplet form int64_t *anvec_nonempty, // # of non-empty vectors // inputs: not modified const GrB_Matrix A, // matrix to extract; not modified GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (GB_IS_BITMAP (A)) ; ASSERT (Ap != NULL) ; // must be provided on input, size avdim+1 int64_t *restrict W = NULL ; size_t W_size = 0 ; const int64_t avdim = A->vdim ; const int64_t avlen = A->vlen ; const size_t asize = A->type->size ; //-------------------------------------------------------------------------- // count the entries in each vector //-------------------------------------------------------------------------- const int8_t *restrict Ab = A->b ; GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (avlen*avdim, chunk, nthreads_max) ; bool by_vector = (nthreads <= avdim) ; if (by_vector) { //---------------------------------------------------------------------- // compute all vectors in parallel (no workspace) //---------------------------------------------------------------------- int64_t j ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (j = 0 ; j < avdim ; j++) { // ajnz = nnz (A (:,j)) int64_t ajnz = 0 ; int64_t pA_start = j * avlen ; for (int64_t i = 0 ; i < avlen ; i++) { // see if A(i,j) is present in the bitmap int64_t p = i + pA_start ; ajnz += Ab [p] ; ASSERT (Ab [p] == 0 || Ab [p] == 1) ; } Ap [j] = ajnz ; } } else { //---------------------------------------------------------------------- // compute blocks of rows in parallel //---------------------------------------------------------------------- // allocate one row of W per thread, each row of length avdim W = GB_MALLOC_WORK (nthreads * avdim, int64_t, &W_size) ; if (W == NULL) { // out of memory return (GrB_OUT_OF_MEMORY) ; } int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (taskid = 0 ; taskid < nthreads ; taskid++) { int64_t *restrict Wtask = W + taskid * avdim ; int64_t istart, iend ; GB_PARTITION (istart, iend, avlen, taskid, nthreads) ; for (int64_t j = 0 ; j < avdim ; j++) { // ajnz = nnz (A (istart:iend-1,j)) int64_t ajnz = 0 ; int64_t pA_start = j * avlen ; for (int64_t i = istart ; i < iend ; i++) { // see if A(i,j) is present in the bitmap int64_t p = 
i + pA_start ; ajnz += Ab [p] ; ASSERT (Ab [p] == 0 || Ab [p] == 1) ; } Wtask [j] = ajnz ; } } // cumulative sum to compute nnz(A(:,j)) for each vector j int64_t j ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (j = 0 ; j < avdim ; j++) { int64_t ajnz = 0 ; for (int taskid = 0 ; taskid < nthreads ; taskid++) { int64_t *restrict Wtask = W + taskid * avdim ; int64_t c = Wtask [j] ; Wtask [j] = ajnz ; ajnz += c ; } Ap [j] = ajnz ; } } //-------------------------------------------------------------------------- // cumulative sum of Ap //-------------------------------------------------------------------------- int nth = GB_nthreads (avdim, chunk, nthreads_max) ; GB_cumsum (Ap, avdim, anvec_nonempty, nth, Context) ; int64_t anz = Ap [avdim] ; ASSERT (anz == A->nvals) ; //-------------------------------------------------------------------------- // gather the pattern and values from the bitmap //-------------------------------------------------------------------------- // TODO: add type-specific versions for built-in types const GB_void *restrict Ax = (GB_void *) (A->x) ; const bool A_iso = A->iso ; const bool numeric = (Ax_new != NULL && Ax != NULL) ; if (by_vector) { //---------------------------------------------------------------------- // construct all vectors in parallel (no workspace) //---------------------------------------------------------------------- int64_t j ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (j = 0 ; j < avdim ; j++) { // gather from the bitmap into the new A (:,j) int64_t pnew = Ap [j] ; int64_t pA_start = j * avlen ; for (int64_t i = 0 ; i < avlen ; i++) { int64_t p = i + pA_start ; if (Ab [p]) { // A(i,j) is in the bitmap if (Ai != NULL) Ai [pnew] = i ; if (Aj != NULL) Aj [pnew] = j ; if (numeric) { // Ax_new [pnew] = Ax [p]) memcpy (Ax_new +(pnew)*asize, Ax +(A_iso ? 0:(p)*asize), asize) ; } pnew++ ; } } ASSERT (pnew == Ap [j+1]) ; } } else { //---------------------------------------------------------------------- // compute blocks of rows in parallel //---------------------------------------------------------------------- int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (taskid = 0 ; taskid < nthreads ; taskid++) { int64_t *restrict Wtask = W + taskid * avdim ; int64_t istart, iend ; GB_PARTITION (istart, iend, avlen, taskid, nthreads) ; for (int64_t j = 0 ; j < avdim ; j++) { // gather from the bitmap into the new A (:,j) int64_t pnew = Ap [j] + Wtask [j] ; int64_t pA_start = j * avlen ; for (int64_t i = istart ; i < iend ; i++) { // see if A(i,j) is present in the bitmap int64_t p = i + pA_start ; if (Ab [p]) { // A(i,j) is in the bitmap if (Ai != NULL) Ai [pnew] = i ; if (Aj != NULL) Aj [pnew] = j ; if (numeric) { // Ax_new [pnew] = Ax [p] ; memcpy (Ax_new +(pnew)*asize, Ax +(A_iso ? 0:(p)*asize), asize) ; } pnew++ ; } } } } } //-------------------------------------------------------------------------- // free workspace return result //-------------------------------------------------------------------------- GB_FREE_WORK (&W, W_size) ; return (GrB_SUCCESS) ; }
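/* Illustrative sketch (not from SuiteSparse): the same two-pass idea the worker
 * above implements, stripped of GraphBLAS internals.  Pass 1 counts the entries
 * of each column of an avlen-by-avdim bitmap Ab (one byte per entry, 0 or 1,
 * column-major); a cumulative sum turns the counts into column pointers Ap;
 * pass 2 gathers the row indices into Ai.  Values, iso handling, the per-thread
 * workspace blocking, and error handling are all omitted here. */

#include <stdint.h>

static void bitmap_to_csc_pattern (const int8_t *Ab, int64_t avlen, int64_t avdim,
                                   int64_t *Ap,   /* size avdim+1 */
                                   int64_t *Ai)   /* size nnz     */
{
    // pass 1: Ap [j] = nnz (A (:,j)), one column per iteration
    int64_t j ;
    #pragma omp parallel for schedule(static)
    for (j = 0 ; j < avdim ; j++)
    {
        int64_t cnt = 0 ;
        const int8_t *col = Ab + j * avlen ;
        for (int64_t i = 0 ; i < avlen ; i++) cnt += (col [i] != 0) ;
        Ap [j] = cnt ;
    }

    // cumulative sum: Ap [j] becomes the start of column j, Ap [avdim] = nnz
    int64_t run = 0 ;
    for (j = 0 ; j <= avdim ; j++)
    {
        int64_t c = (j < avdim) ? Ap [j] : 0 ;
        Ap [j] = run ;
        run += c ;
    }

    // pass 2: gather the row indices of each column into its own slice of Ai
    #pragma omp parallel for schedule(static)
    for (j = 0 ; j < avdim ; j++)
    {
        int64_t p = Ap [j] ;
        const int8_t *col = Ab + j * avlen ;
        for (int64_t i = 0 ; i < avlen ; i++)
        {
            if (col [i]) Ai [p++] = i ;
        }
    }
}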
pooling_3x3.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void pooling3x3s2_max_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = w - 2*outw + w; #pragma omp parallel for num_threads(opt.num_threads) for (int q=0; q<inch; q++) { const float* img0 = bottom_blob.channel(q); float* outptr = top_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw - (nn << 2); #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld2 {v0.4s, v1.4s}, [%1], #32 \n" "prfm pldl1keep, [%2, #256] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v4.4s, v5.4s}, [%3], #32 \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld2 {v6.4s, v7.4s}, [%1], #32 \n" "fmax v12.4s, v0.4s, v1.4s \n" "fmax v13.4s, v2.4s, v3.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld2 {v8.4s, v9.4s}, [%2], #32 \n" "fmax v14.4s, v4.4s, v5.4s \n" "ext v0.16b, v0.16b, v6.16b, #4 \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v10.4s, v11.4s}, [%3], #32 \n" "ext v2.16b, v2.16b, v8.16b, #4 \n" "fmax v12.4s, v12.4s, v0.4s \n" "ext v4.16b, v4.16b, v10.16b, #4 \n" "fmax v13.4s, v13.4s, v2.4s \n" "fmax v14.4s, v14.4s, v4.4s \n" "fmax v12.4s, v12.4s, v13.4s \n" "orr v0.16b, v6.16b, v6.16b \n" "orr v1.16b, v7.16b, v7.16b \n" "fmax v12.4s, v12.4s, v14.4s \n" "orr v2.16b, v8.16b, v8.16b \n" "orr v3.16b, v9.16b, v9.16b \n" "orr v4.16b, v10.16b, v10.16b \n" "orr v5.16b, v11.16b, v11.16b \n" "subs %w0, %w0, #1 \n" "st1 {v12.4s}, [%4], #16 \n" "bne 0b \n" "sub %1, %1, #32 \n" "sub %2, %2, #32 \n" "sub %3, %3, #32 \n" : "=r"(nn), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(outptr) // %4 : "0"(nn), "1"(r0), "2"(r1), "3"(r2), "4"(outptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14" ); } #else if (nn > 0) { asm volatile( "pld [%1, #256] \n" "vld2.f32 {d0-d3}, [%1]! \n"// q0 = 0 2 4 6 q1 = 1 3 5 7 "pld [%2, #256] \n" "vld2.f32 {d4-d7}, [%2]! \n" "pld [%3, #256] \n" "vld2.f32 {d8-d11}, [%3]! \n" "0: \n" "pld [%1, #256] \n" "vld2.f32 {d12-d15}, [%1]! \n"// q6 = 8 10 12 14 q7 = 9 11 13 15 "vmax.f32 q12, q0, q1 \n" "vmax.f32 q13, q2, q3 \n" "pld [%2, #256] \n" "vld2.f32 {d16-d19}, [%2]! \n" "vmax.f32 q14, q4, q5 \n" "vext.32 q0, q0, q6, #1 \n" "pld [%3, #256] \n" "vld2.f32 {d20-d23}, [%3]! 
\n" "vext.32 q2, q2, q8, #1 \n" "vmax.f32 q12, q12, q0 \n" "vext.32 q4, q4, q10, #1 \n" "vmax.f32 q13, q13, q2 \n" "vmax.f32 q14, q14, q4 \n" "vmax.f32 q12, q12, q13 \n" "vorr q0, q6, q6 \n" "vorr q1, q7, q7 \n" "vmax.f32 q12, q12, q14 \n" "vorr q2, q8, q8 \n" "vorr q3, q9, q9 \n" "vorr q4, q10, q10 \n" "vorr q5, q11, q11 \n" "subs %0, #1 \n" "vst1.f32 {d24-d25}, [%4]! \n" "bne 0b \n" "sub %1, #32 \n" "sub %2, #32 \n" "sub %3, #32 \n" : "=r"(nn), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(outptr) // %4 : "0"(nn), "1"(r0), "2"(r1), "3"(r2), "4"(outptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float max0 = std::max(std::max(r0[0], r0[1]), r0[2]); float max1 = std::max(std::max(r1[0], r1[1]), r1[2]); float max2 = std::max(std::max(r2[0], r2[1]), r2[2]); *outptr = std::max(std::max(max0, max1), max2); r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep;//1 + w; r1 += tailstep;//1 + w; r2 += tailstep;//1 + w; } } }
imageproc.c
/* ----------------------------------------------------------------------------
 * @file imageproc.c
 * @brief image processing functions for ppm and pgm types
 *
 * @author Jake Michael, [email protected]
 * @course ECEN 5763: EMVIA, Summer 2021
 *---------------------------------------------------------------------------*/
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>   // memset
#include <errno.h>
#include <omp.h>
#include "imageio.h"
#include "imageproc.h"

// see .h for more details
int rgb_diff(ppm_img_t *const frame, ppm_img_t *const bg, ppm_img_t *diff) {
  int i;
  int temp;
  if (!frame || !bg || !diff) return -1;
  // use openmp for speedup; with default(none) every referenced variable must
  // be listed explicitly, so frame and bg are added to the shared clause
  #pragma omp parallel for num_threads(5) \
          default(none) private(i, temp) shared(frame, bg, diff)
  for (i = 0; i < bg->hres*bg->vres; i++) {
    // rgb subtraction on ea. channel and keep within 0-255 intensity
    temp = frame->pixel[i].r - bg->pixel[i].r;
    if (temp < 0)   temp = 0;
    if (temp > 255) temp = 255;
    diff->pixel[i].r = temp;
    temp = frame->pixel[i].g - bg->pixel[i].g;
    if (temp < 0)   temp = 0;
    if (temp > 255) temp = 255;
    diff->pixel[i].g = temp;
    temp = frame->pixel[i].b - bg->pixel[i].b;
    if (temp < 0)   temp = 0;
    if (temp > 255) temp = 255;
    diff->pixel[i].b = temp;
  }
  return 0;
}

// see .h for more details
int kernel_3x3(int i0, int j0, pgm_img_t *pgm, uint8_t *P) {
  if (i0 < 1 || j0 < 1 || !pgm || !P) {
    printf("kernel_3x3 error");
    return -1;
  }
  P[0] = pgm->pixel[i0*pgm->hres + j0];
  P[1] = pgm->pixel[i0*pgm->hres + j0+1];
  P[2] = pgm->pixel[(i0-1)*pgm->hres + j0+1];
  P[3] = pgm->pixel[(i0-1)*pgm->hres + j0];
  P[4] = pgm->pixel[(i0-1)*pgm->hres + j0-1];
  P[5] = pgm->pixel[i0*pgm->hres + j0-1];
  P[6] = pgm->pixel[(i0+1)*pgm->hres + j0-1];
  P[7] = pgm->pixel[(i0+1)*pgm->hres + j0];
  P[8] = pgm->pixel[(i0+1)*pgm->hres + j0+1];
  return 0;
}

// see .h for more details
int median_filter(pgm_img_t *pgm) {
  static uint8_t hist[256];
  uint8_t kernel[9];
  int i;
  int sum;
  // allocate space for the new filtered pixels
  uint8_t *pixel_filt = malloc(sizeof(uint8_t)*pgm->vres*pgm->hres);
  if (!pixel_filt) {
    perror("pixel_filt malloc failed");
    return -1;
  }
  // zero out hist
  memset(&hist, 0, 256);
  for (int r = 1; r < pgm->vres-1; r++) {
    for (int c = 1; c < pgm->hres-1; c++) {
      kernel_3x3(r, c, pgm, kernel);
      for (int m = 0; m < 9; m++) hist[kernel[m]]++;
      // no need to sort: walk the histogram until 5 of the 9 neighbors have
      // been counted, which lands on the median intensity
      i = 0;
      sum = 0;
      while (sum < 5) {
        sum = sum + hist[i];
        i += 1;
      }
      pixel_filt[r*pgm->hres + c] = i-1;
      for (int m = 0; m < 9; m++) hist[kernel[m]] = 0;
    }
  }
  // free old pixel data
  free(pgm->pixel);
  // set new pointer to filtered data
  pgm->pixel = pixel_filt;
  return 0;
}
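/* A minimal usage sketch, not part of the original file: the call order the
 * routines above are written for.  It assumes only what the code already
 * relies on -- ppm_img_t/pgm_img_t expose hres, vres and a pixel buffer --
 * and that the images were loaded elsewhere via imageio.h. */
static int process_frame(ppm_img_t *frame, ppm_img_t *background,
                         ppm_img_t *diff, pgm_img_t *gray)
{
  // background subtraction on the color frame (OpenMP-parallel inside)
  if (rgb_diff(frame, background, diff) != 0) return -1;

  // 'gray' is assumed to hold a grayscale copy of 'diff' produced by an
  // imageio.h helper; median_filter() then despeckles it in place
  if (median_filter(gray) != 0) return -1;

  return 0;
}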
hello_world.c
// OpenMP Basic Example
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main( int argc, char** argv )
{
    int num_threads = 0;    // Number of Threads
    int thread_id   = 0;    // ID Number of Running Thread

    #pragma omp parallel private( num_threads, thread_id )
    {
        // Get the Thread Number
        thread_id = omp_get_thread_num( );
        printf( "Hello World from Thread %d\n", thread_id );

        // Have Master Print Total Number of Threads Used
        if( thread_id == 0 )
        {
            num_threads = omp_get_num_threads( );
            printf( "Number of Threads = %d\n", num_threads );
        }
    }

    return 0;
}
// End hello_world.c - EWG SDG
omptarget.h
//===---- omptarget.h - OpenMP GPU initialization ---------------- CUDA -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file contains the declarations of all library macros, types, // and functions. // //===----------------------------------------------------------------------===// #ifndef OMPTARGET_H #define OMPTARGET_H #include "common/allocator.h" #include "common/debug.h" // debug #include "common/state-queue.h" #include "common/support.h" #include "interface.h" // interfaces with omp, compiler, and user #include "target_impl.h" #define OMPTARGET_NVPTX_VERSION 1.1 // used by the library for the interface with the app #define DISPATCH_FINISHED 0 #define DISPATCH_NOTFINISHED 1 // used by dynamic scheduling #define FINISHED 0 #define NOT_FINISHED 1 #define LAST_CHUNK 2 #define BARRIER_COUNTER 0 #define ORDERED_COUNTER 1 // Worker slot type which is initialized with the default worker slot // size of 4*32 bytes. struct __kmpc_data_sharing_slot { __kmpc_data_sharing_slot *Next; __kmpc_data_sharing_slot *Prev; void *PrevSlotStackPtr; void *DataEnd; char Data[DS_Worker_Warp_Slot_Size]; }; //////////////////////////////////////////////////////////////////////////////// // task ICV and (implicit & explicit) task state class omptarget_nvptx_TaskDescr { public: // methods for flags INLINE omp_sched_t GetRuntimeSched() const; INLINE void SetRuntimeSched(omp_sched_t sched); INLINE int InParallelRegion() const { return items.flags & TaskDescr_InPar; } INLINE int InL2OrHigherParallelRegion() const { return items.flags & TaskDescr_InParL2P; } INLINE int IsParallelConstruct() const { return items.flags & TaskDescr_IsParConstr; } INLINE int IsTaskConstruct() const { return !IsParallelConstruct(); } // methods for other fields INLINE uint16_t &ThreadId() { return items.threadId; } INLINE uint64_t &RuntimeChunkSize() { return items.runtimeChunkSize; } INLINE omptarget_nvptx_TaskDescr *GetPrevTaskDescr() const { return prev; } INLINE void SetPrevTaskDescr(omptarget_nvptx_TaskDescr *taskDescr) { prev = taskDescr; } // init & copy INLINE void InitLevelZeroTaskDescr(); INLINE void InitLevelOneTaskDescr(omptarget_nvptx_TaskDescr *parentTaskDescr); INLINE void Copy(omptarget_nvptx_TaskDescr *sourceTaskDescr); INLINE void CopyData(omptarget_nvptx_TaskDescr *sourceTaskDescr); INLINE void CopyParent(omptarget_nvptx_TaskDescr *parentTaskDescr); INLINE void CopyForExplicitTask(omptarget_nvptx_TaskDescr *parentTaskDescr); INLINE void CopyToWorkDescr(omptarget_nvptx_TaskDescr *masterTaskDescr); INLINE void CopyFromWorkDescr(omptarget_nvptx_TaskDescr *workTaskDescr); INLINE void CopyConvergentParent(omptarget_nvptx_TaskDescr *parentTaskDescr, uint16_t tid, uint16_t tnum); INLINE void SaveLoopData(); INLINE void RestoreLoopData() const; private: // bits for flags: (6 used, 2 free) // 3 bits (SchedMask) for runtime schedule // 1 bit (InPar) if this thread has encountered one or more parallel region // 1 bit (IsParConstr) if ICV for a parallel region (false = explicit task) // 1 bit (InParL2+) if this thread has encountered L2 or higher parallel // region static const uint8_t TaskDescr_SchedMask = (0x1 | 0x2 | 0x4); static const uint8_t TaskDescr_InPar = 0x10; static const uint8_t TaskDescr_IsParConstr = 0x20; static const uint8_t TaskDescr_InParL2P = 0x40; struct 
SavedLoopDescr_items { int64_t loopUpperBound; int64_t nextLowerBound; int64_t chunk; int64_t stride; kmp_sched_t schedule; } loopData; struct TaskDescr_items { uint8_t flags; // 6 bit used (see flag above) uint8_t unused; uint16_t threadId; // thread id uint64_t runtimeChunkSize; // runtime chunk size } items; omptarget_nvptx_TaskDescr *prev; }; // build on kmp typedef struct omptarget_nvptx_ExplicitTaskDescr { omptarget_nvptx_TaskDescr taskDescr; // omptarget_nvptx task description (must be first) kmp_TaskDescr kmpTaskDescr; // kmp task description (must be last) } omptarget_nvptx_ExplicitTaskDescr; //////////////////////////////////////////////////////////////////////////////// // Descriptor of a parallel region (worksharing in general) class omptarget_nvptx_WorkDescr { public: // access to data INLINE omptarget_nvptx_TaskDescr *WorkTaskDescr() { return &masterTaskICV; } private: omptarget_nvptx_TaskDescr masterTaskICV; }; //////////////////////////////////////////////////////////////////////////////// class omptarget_nvptx_TeamDescr { public: // access to data INLINE omptarget_nvptx_TaskDescr *LevelZeroTaskDescr() { return &levelZeroTaskDescr; } INLINE omptarget_nvptx_WorkDescr &WorkDescr() { return workDescrForActiveParallel; } // init INLINE void InitTeamDescr(); INLINE __kmpc_data_sharing_slot *GetPreallocatedSlotAddr(int wid) { worker_rootS[wid].DataEnd = &worker_rootS[wid].Data[0] + DS_Worker_Warp_Slot_Size; // We currently do not have a next slot. worker_rootS[wid].Next = 0; worker_rootS[wid].Prev = 0; worker_rootS[wid].PrevSlotStackPtr = 0; return (__kmpc_data_sharing_slot *)&worker_rootS[wid]; } private: omptarget_nvptx_TaskDescr levelZeroTaskDescr; // icv for team master initial thread omptarget_nvptx_WorkDescr workDescrForActiveParallel; // one, ONLY for the active par ALIGN(16) __kmpc_data_sharing_slot worker_rootS[DS_Max_Warp_Number]; }; //////////////////////////////////////////////////////////////////////////////// // thread private data (struct of arrays for better coalescing) // tid refers here to the global thread id // do not support multiple concurrent kernel a this time class omptarget_nvptx_ThreadPrivateContext { public: // task INLINE omptarget_nvptx_TaskDescr *Level1TaskDescr(int tid) { return &levelOneTaskDescr[tid]; } INLINE void SetTopLevelTaskDescr(int tid, omptarget_nvptx_TaskDescr *taskICV) { topTaskDescr[tid] = taskICV; } INLINE omptarget_nvptx_TaskDescr *GetTopLevelTaskDescr(int tid) const; // parallel INLINE uint16_t &NumThreadsForNextParallel(int tid) { return nextRegion.tnum[tid]; } // schedule (for dispatch) INLINE kmp_sched_t &ScheduleType(int tid) { return schedule[tid]; } INLINE int64_t &Chunk(int tid) { return chunk[tid]; } INLINE int64_t &LoopUpperBound(int tid) { return loopUpperBound[tid]; } INLINE int64_t &NextLowerBound(int tid) { return nextLowerBound[tid]; } INLINE int64_t &Stride(int tid) { return stride[tid]; } INLINE omptarget_nvptx_TeamDescr &TeamContext() { return teamContext; } INLINE void InitThreadPrivateContext(int tid); INLINE uint64_t &Cnt() { return cnt; } private: // team context for this team omptarget_nvptx_TeamDescr teamContext; // task ICV for implicit threads in the only parallel region omptarget_nvptx_TaskDescr levelOneTaskDescr[MAX_THREADS_PER_TEAM]; // pointer where to find the current task ICV (top of the stack) omptarget_nvptx_TaskDescr *topTaskDescr[MAX_THREADS_PER_TEAM]; union { // Only one of the two is live at the same time. 
// parallel uint16_t tnum[MAX_THREADS_PER_TEAM]; } nextRegion; // schedule (for dispatch) kmp_sched_t schedule[MAX_THREADS_PER_TEAM]; // remember schedule type for #for int64_t chunk[MAX_THREADS_PER_TEAM]; int64_t loopUpperBound[MAX_THREADS_PER_TEAM]; // state for dispatch with dyn/guided OR static (never use both at a time) int64_t nextLowerBound[MAX_THREADS_PER_TEAM]; int64_t stride[MAX_THREADS_PER_TEAM]; uint64_t cnt; }; /// Memory manager for statically allocated memory. class omptarget_nvptx_SimpleMemoryManager { private: struct MemDataTy { volatile unsigned keys[OMP_STATE_COUNT]; } MemData[MAX_SM] ALIGN(128); INLINE static uint32_t hash(unsigned key) { return key & (OMP_STATE_COUNT - 1); } public: INLINE void Release(); INLINE const void *Acquire(const void *buf, size_t size); }; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // global data tables //////////////////////////////////////////////////////////////////////////////// extern omptarget_nvptx_SimpleMemoryManager omptarget_nvptx_simpleMemoryManager; extern uint32_t EXTERN_SHARED(usedMemIdx); extern uint32_t EXTERN_SHARED(usedSlotIdx); #if _OPENMP extern uint8_t parallelLevel[MAX_THREADS_PER_TEAM / WARPSIZE]; #pragma omp allocate(parallelLevel) allocator(omp_pteam_mem_alloc) #else extern uint8_t EXTERN_SHARED(parallelLevel)[MAX_THREADS_PER_TEAM / WARPSIZE]; #endif extern uint16_t EXTERN_SHARED(threadLimit); extern uint16_t EXTERN_SHARED(threadsInTeam); extern uint16_t EXTERN_SHARED(nThreads); extern omptarget_nvptx_ThreadPrivateContext * EXTERN_SHARED(omptarget_nvptx_threadPrivateContext); extern uint32_t EXTERN_SHARED(execution_param); extern void *EXTERN_SHARED(ReductionScratchpadPtr); //////////////////////////////////////////////////////////////////////////////// // work function (outlined parallel/simd functions) and arguments. // needed for L1 parallelism only. //////////////////////////////////////////////////////////////////////////////// typedef void *omptarget_nvptx_WorkFn; extern omptarget_nvptx_WorkFn EXTERN_SHARED(omptarget_nvptx_workFn); //////////////////////////////////////////////////////////////////////////////// // get private data structures //////////////////////////////////////////////////////////////////////////////// INLINE omptarget_nvptx_TeamDescr &getMyTeamDescriptor(); INLINE omptarget_nvptx_WorkDescr &getMyWorkDescriptor(); INLINE omptarget_nvptx_TaskDescr * getMyTopTaskDescriptor(bool isSPMDExecutionMode); INLINE omptarget_nvptx_TaskDescr *getMyTopTaskDescriptor(int globalThreadId); //////////////////////////////////////////////////////////////////////////////// // inlined implementation //////////////////////////////////////////////////////////////////////////////// INLINE uint32_t __kmpc_impl_ffs(uint32_t x) { return __builtin_ffs(x); } INLINE uint32_t __kmpc_impl_popc(uint32_t x) { return __builtin_popcount(x); } INLINE uint32_t __kmpc_impl_ffs(uint64_t x) { return __builtin_ffsl(x); } INLINE uint32_t __kmpc_impl_popc(uint64_t x) { return __builtin_popcountl(x); } #include "common/omptargeti.h" #endif
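// Illustrative sketch (not the actual implementation, which lives in
// common/omptargeti.h): one way the flag layout documented above could be
// read and written.  The low 3 bits (TaskDescr_SchedMask) hold the runtime
// schedule kind; the remaining flag bits stay untouched when it changes.
//
//   INLINE omp_sched_t omptarget_nvptx_TaskDescr::GetRuntimeSched() const {
//     // schedule kind packed into the low bits of items.flags
//     return (omp_sched_t)(items.flags & TaskDescr_SchedMask);
//   }
//
//   INLINE void omptarget_nvptx_TaskDescr::SetRuntimeSched(omp_sched_t sched) {
//     // clear the schedule bits, then install the new kind
//     items.flags &= ~TaskDescr_SchedMask;
//     items.flags |= ((uint8_t)sched & TaskDescr_SchedMask);
//   }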
es3.h
#ifndef es3_h
#define es3_h

#include <iostream>
#include <cstdlib>   // rand_r, RAND_MAX
#include <omp.h>

#define nt 8
#define ns 1000000000
#define PAD 8

using namespace std;

// TODO: make the per-thread sum arrays dynamically sized instead of fixed to nt
void output(double pig, double time) {
    cout << "Time: " << time << endl << "Pi: " << pig << endl;
}

void ciclica(unsigned nmt) {
    int i, nthreads;
    double pig, sum[nt], step = 1.0/(double) ns;
    if (nmt > nt) nmt = nt;   // sum[] only has nt slots (see TODO above)
    double start = omp_get_wtime();
    omp_set_num_threads(nt);
    #pragma omp parallel num_threads(nmt)
    {
        int i, id, n;
        double x;
        id = omp_get_thread_num();
        n = omp_get_num_threads();
        if (id == 0) nthreads = n;
        for (i = id, sum[id] = 0.0; i < ns; i += n) {
            x = (i+0.5) * step;
            sum[id] += 4.0 / (1.0 + x * x);
        }
    }
    double end = omp_get_wtime();
    for (i = 0, pig = 0.0; i < nthreads; i++) {
        pig += sum[i] * step;
    }
    output(pig, end - start);
}

void critica(unsigned nmt) {
    int i, nthreads;
    double pig = 0.0, step = 1.0/(double) ns;
    double start = omp_get_wtime();
    #pragma omp parallel num_threads(nmt)
    {
        int i, id, n;
        double x, sum;
        id = omp_get_thread_num();
        n = omp_get_num_threads();
        if (id == 0) nthreads = n;
        // each thread accumulates a private sum over its cyclic slice of [0,ns)
        for (i = id, sum = 0.0; i < ns; i += n) {
            x = (i + 0.5) * step;
            sum += 4.0 / (1.0 + x * x);
        }
        #pragma omp critical
        pig += sum * step;
    }
    double end = omp_get_wtime();
    output(pig, end - start);
}

void padding(unsigned nmt) {
    int i, nthreads;
    double pig, sum[nt][PAD], step = 1.0/(double) ns;
    if (nmt > nt) nmt = nt;   // sum[] only has nt rows (see TODO above)
    double start = omp_get_wtime();
    omp_set_num_threads(nt);
    #pragma omp parallel num_threads(nmt)
    {
        int i, id, n;
        double x;
        id = omp_get_thread_num();
        n = omp_get_num_threads();
        if (id == 0) nthreads = n;
        // each accumulator sits on its own PAD-wide row to avoid false sharing
        for (i = id, sum[id][0] = 0.0; i < ns; i += n) {
            x = (i+0.5) * step;
            sum[id][0] += 4.0 / (1.0 + x * x);
        }
    }
    double end = omp_get_wtime();
    for (i = 0, pig = 0.0; i < nthreads; i++) {
        pig += sum[i][0] * step;
    }
    output(pig, end - start);
}

void reduction(unsigned nmt) {
    double area = 0.0, pig, x;
    int i, n;
    n = 100000;
    double start = omp_get_wtime();
    #pragma omp parallel for private(x) reduction(+ : area) num_threads(nmt)
    for (i = 0; i < n; i++) {
        x = (i + 0.5) / n;
        area += 4.0 / (1.0 + x * x);
    }
    pig = area / n;
    double end = omp_get_wtime();
    output(pig, end - start);
}

void monteCarlo(unsigned nmt) {
    long num_trials = 100000000, i, Ncirc = 0;
    double pig, x, y, r = 1.0;
    const double pi = 3.14159265358979;   // used only to scramble the per-thread seed
    double start = omp_get_wtime();
    #pragma omp parallel private(x, y) reduction(+ : Ncirc) num_threads(nmt)
    {
        unsigned int seed = (unsigned int) (omp_get_wtime()*10000.0*(double)omp_get_thread_num()/pi);
        #pragma omp for
        for (i = 0; i < num_trials; i++) {
            x = (double) rand_r(&seed) / RAND_MAX;
            y = (double) rand_r(&seed) / RAND_MAX;
            if (((x * x) + (y * y)) <= 1) {
                Ncirc++;
            }
        }
    }
    pig = 4.0 * ((double) Ncirc / (double) num_trials);
    double end = omp_get_wtime();
    output(pig, end - start);
}

void es3() {
    cout << "Enter the number of threads" << endl;
    unsigned nmt;
    cin >> nmt;
    cout << "Computing pi with cyclic distribution ";
    ciclica(nmt);
    cout << endl << "Computing pi with a critical section ";
    critica(nmt);
    cout << endl << "Computing pi with padding ";
    padding(nmt);
    cout << endl;
    cout << endl << "Computing pi with reduction ";
    reduction(nmt);
    cout << endl << "Computing pi with Monte Carlo ";
    monteCarlo(nmt);
    cout << endl;
}

#endif
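/* Minimal driver sketch (not part of the original header): builds the exercise
 * as a standalone program and sanity-checks an independent estimate against a
 * reference value of pi.  The ES3_STANDALONE_DEMO guard is a hypothetical
 * name used only so this sketch never interferes with the corpus; compile
 * with e.g. g++ -fopenmp -DES3_STANDALONE_DEMO. */
#ifdef ES3_STANDALONE_DEMO
#include <cmath>
#include "es3.h"

int main() {
    // run the interactive exercise exactly as the header defines it
    es3();

    // independent check: midpoint rule for the same integral of 4/(1+x^2)
    // on [0,1], using a plain OpenMP reduction
    const long N = 10000000;
    double sum = 0.0, step = 1.0 / (double)N;
    #pragma omp parallel for reduction(+ : sum)
    for (long i = 0; i < N; i++) {
        double x = (i + 0.5) * step;
        sum += 4.0 / (1.0 + x * x);
    }
    double pi_est = sum * step;
    std::cout << "check: |pi_est - pi| = "
              << std::fabs(pi_est - 3.14159265358979) << std::endl;
    return 0;
}
#endif // ES3_STANDALONE_DEMO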
bt.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 3.0 structured OpenMP C versions - BT This benchmark is an OpenMP C version of the NPB BT code. The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: R. Van der Wijngaart T. Harris M. Yarrow OpenMP C version: S. Satoh 3.0 structure translation: M. Popov --------------------------------------------------------------------*/ #include "../common/npb-C.h" #include <nautilus/shell.h> /* global variables */ #include "header.h" #define calloc(n,s) ({ void *_p=malloc(n*s);memset(_p,0,n*s); _p;}) /* function declarations */ static void add(void); static void adi(void); static void error_norm(double rms[5]); static void rhs_norm(double rms[5]); static void exact_rhs(void); static void exact_solution(double xi, double eta, double zeta, double dtemp[5]); static void initialize(void); static void lhsinit(void); static void lhsx(void); static void lhsy(void); static void lhsz(void); static void compute_rhs(void); static void set_constants(void); static void verify(int no_time_steps, char *class, boolean *verified); static void x_solve(void); static void x_backsubstitute(void); static void x_solve_cell(void); static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]); static void matmul_sub(double ablock[5][5], double bblock[5][5], double cblock[5][5]); static void binvcrhs(double llhs[5][5], double c[5][5], double r[5]); static void binvrhs(double llhs[5][5], double r[5]); static void y_solve(void); static void y_backsubstitute(void); static void y_solve_cell(void); static void z_solve(void); static void z_backsubstitute(void); static void z_solve_cell(void); static int program_BT(char *__buf, void* __priv); int program_BT_profile(char *_, void* __); static struct shell_cmd_impl nas_bt_impl = { .cmd = "nas-bt", .help_str = "NAS parallel benchmark BT", .handler = program_BT_profile, }; nk_register_shell_cmd(nas_bt_impl); int program_BT_profile(char *_, void *__){ #ifdef NAUT_CONFIG_PROFILE nk_instrument_clear(); nk_instrument_start(); #endif program_BT(_,__); #ifdef NAUT_CONFIG_PROFILE nk_instrument_end(); nk_instrument_query(); #endif return 0; } static void * __m=0; #define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) #define _malloc(n) ({ if (!__m) { __m = malloc(1UL<<33); if(!__m){printf("no __m\n"); }} void *__r = __m; unsigned long long __n = ALIGN(n, 16); __m+=__n; __r; }) static void arr_init_calloc(){ us_ptr = calloc(sizeof(s_matrix_t), 1); vs_ptr = calloc(sizeof(s_matrix_t), 1); ws_ptr = calloc(sizeof(s_matrix_t), 1); qs_ptr = calloc(sizeof(s_matrix_t), 1); rho_i_ptr = calloc(sizeof(s_matrix_t), 1); square_ptr = calloc(sizeof(s_matrix_t), 1); forcing_ptr = calloc(sizeof(f_matrix_t), 1); u_ptr = calloc(sizeof(u_matrix_t), 1); rhs_ptr = calloc(sizeof(rhs_matrix_t), 1); lhs_ptr = calloc(sizeof(lhs_matrix_t), 1); fjac_ptr = calloc(sizeof(jac_matrix_t), 1); njac_ptr = 
calloc(sizeof(jac_matrix_t), 1); } static void free_arr(){ free(us_ptr); free(vs_ptr); free(ws_ptr); free(qs_ptr); free(rho_i_ptr); free(square_ptr); free(forcing_ptr); free(u_ptr); free(rhs_ptr); free(lhs_ptr); free(fjac_ptr); free(njac_ptr); } /*-------------------------------------------------------------------- program BT c-------------------------------------------------------------------*/ static int program_BT(char *__buf, void* __priv) { int niter, step, n3; int nthreads = 1; double navg, mflops; double tmax; boolean verified; char class; //FILE *fp; //initialize array arr_init_calloc(); /*-------------------------------------------------------------------- c Root node reads input file (if it exists) else takes c defaults from parameters c-------------------------------------------------------------------*/ printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version" " - BT Benchmark\n\n"); /* fp = fopen("inputbt.data", "r"); */ /* if (fp != NULL) { */ /* printf(" Reading from input file inputbt.data"); */ /* fscanf(fp, "%d", &niter); */ /* while (fgetc(fp) != '\n'); */ /* fscanf(fp, "%lg", &dt); */ /* while (fgetc(fp) != '\n'); */ /* fscanf(fp, "%d%d%d", */ /* &grid_points[0], &grid_points[1], &grid_points[2]); */ /* fclose(fp); */ /* } else { */ /* printf(" No input file inputbt.data. Using compiled defaults\n"); */ niter = NITER_DEFAULT; dt = DT_DEFAULT; grid_points[0] = PROBLEM_SIZE; grid_points[1] = PROBLEM_SIZE; grid_points[2] = PROBLEM_SIZE; // } printf(" Size: %3dx%3dx%3d\n", grid_points[0], grid_points[1], grid_points[2]); printf(" Iterations: %3d dt: %10.6f\n", niter, dt); if (grid_points[0] > IMAX || grid_points[1] > JMAX || grid_points[2] > KMAX) { printf(" %dx%dx%d\n", grid_points[0], grid_points[1], grid_points[2]); printf(" Problem size too big for compiled array sizes\n"); exit(1); } set_constants(); initialize(); lhsinit(); exact_rhs(); /*-------------------------------------------------------------------- c do one time step to touch all code, and reinitialize c-------------------------------------------------------------------*/ adi(); initialize(); timer_clear(1); timer_start(1); for (step = 1; step <= niter; step++) { if (step%20 == 0 || step == 1) { printf(" Time step %4d\n", step); } adi(); } #pragma omp parallel { #if defined(_OPENMP) #pragma omp master nthreads = omp_get_num_threads(); printf("nthreads %d\n",nthreads); #endif /* _OPENMP */ } /* end parallel */ timer_stop(1); tmax = timer_read(1); verify(niter, &class, &verified); n3 = grid_points[0]*grid_points[1]*grid_points[2]; navg = (grid_points[0]+grid_points[1]+grid_points[2])/3.0; if ( tmax != 0.0 ) { mflops = 1.0e-6*(double)niter* (3478.8*(double)n3-17655.7*pow2(navg)+28023.7*navg) / tmax; } else { mflops = 0.0; } c_print_results("BT", class, grid_points[0], grid_points[1], grid_points[2], niter, nthreads, tmax, mflops, " floating point", verified, NPBVERSION,COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, "(none)"); free_arr(); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void add(void) { /*-------------------------------------------------------------------- c addition of update to the vector u c-------------------------------------------------------------------*/ int i, j, k, m; #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m]; } 
} } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void adi(void) { #pragma omp parallel compute_rhs(); #pragma omp parallel x_solve(); #pragma omp parallel y_solve(); #pragma omp parallel z_solve(); #pragma omp parallel add(); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void error_norm(double rms[5]) { /*-------------------------------------------------------------------- c this function computes the norm of the difference between the c computed solution and the exact solution c-------------------------------------------------------------------*/ int i, j, k, m, d; double xi, eta, zeta, u_exact[5], add; for (m = 0; m < 5; m++) { rms[m] = 0.0; } for (i = 0; i < grid_points[0]; i++) { xi = (double)i * dnxm1; for (j = 0; j < grid_points[1]; j++) { eta = (double)j * dnym1; for (k = 0; k < grid_points[2]; k++) { zeta = (double)k * dnzm1; exact_solution(xi, eta, zeta, u_exact); for (m = 0; m < 5; m++) { add = u[i][j][k][m] - u_exact[m]; rms[m] = rms[m] + add*add; } } } } for (m = 0; m < 5; m++) { for (d = 0; d <= 2; d++) { rms[m] = rms[m] / (double)(grid_points[d]-2); } rms[m] = sqrt(rms[m]); } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void rhs_norm(double rms[5]) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ int i, j, k, d, m; double add; for (m = 0; m < 5; m++) { rms[m] = 0.0; } for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { add = rhs[i][j][k][m]; rms[m] = rms[m] + add*add; } } } } for (m = 0; m < 5; m++) { for (d = 0; d <= 2; d++) { rms[m] = rms[m] / (double)(grid_points[d]-2); } rms[m] = sqrt(rms[m]); } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void exact_rhs(void) { #pragma omp parallel { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute the right hand side based on exact solution c-------------------------------------------------------------------*/ double dtemp[5], xi, eta, zeta, dtpp; int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1; /*-------------------------------------------------------------------- c initialize c-------------------------------------------------------------------*/ #pragma omp for for (i = 0; i < grid_points[0]; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 0; k < grid_points[2]; k++) { for (m = 0; m < 5; m++) { forcing[i][j][k][m] = 0.0; } } } } /*-------------------------------------------------------------------- c xi-direction flux differences c-------------------------------------------------------------------*/ #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { eta = (double)j * dnym1; for (k = 1; k < grid_points[2]-1; k++) { zeta = (double)k * dnzm1; for (i = 0; i < grid_points[0]; i++) { xi = (double)i * dnxm1; exact_solution(xi, eta, zeta, dtemp); for (m = 0; m < 5; m++) { ue[i][m] = dtemp[m]; } dtpp = 1.0 / dtemp[0]; for (m = 1; m <= 4; m++) { buf[i][m] = 
dtpp * dtemp[m]; } cuf[i] = buf[i][1] * buf[i][1]; buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + buf[i][3] * buf[i][3]; q[i] = 0.5*(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] + buf[i][3]*ue[i][3]); } for (i = 1; i < grid_points[0]-1; i++) { im1 = i-1; ip1 = i+1; forcing[i][j][k][0] = forcing[i][j][k][0] - tx2*(ue[ip1][1]-ue[im1][1])+ dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]); forcing[i][j][k][1] = forcing[i][j][k][1] - tx2 * ((ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))- (ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+ xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+ dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ ue[im1][1]); forcing[i][j][k][2] = forcing[i][j][k][2] - tx2 * (ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+ xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+ dx3tx1*( ue[ip1][2]-2.0* ue[i][2]+ ue[im1][2]); forcing[i][j][k][3] = forcing[i][j][k][3] - tx2*(ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+ xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+ dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]); forcing[i][j][k][4] = forcing[i][j][k][4] - tx2*(buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])- buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+ 0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+ xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+ xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+ dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]); } /*-------------------------------------------------------------------- c Fourth-order dissipation c-------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { i = 1; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]); i = 2; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (-4.0*ue[i-1][m] + 6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]); } for (m = 0; m < 5; m++) { for (i = 1*3; i <= grid_points[0]-3*1-1; i++) { forcing[i][j][k][m] = forcing[i][j][k][m] - dssp* (ue[i-2][m] - 4.0*ue[i-1][m] + 6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]); } } for (m = 0; m < 5; m++) { i = grid_points[0]-3; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[i-2][m] - 4.0*ue[i-1][m] + 6.0*ue[i][m] - 4.0*ue[i+1][m]); i = grid_points[0]-2; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]); } } } /*-------------------------------------------------------------------- c eta-direction flux differences c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { xi = (double)i * dnxm1; for (k = 1; k < grid_points[2]-1; k++) { zeta = (double)k * dnzm1; for (j = 0; j < grid_points[1]; j++) { eta = (double)j * dnym1; exact_solution(xi, eta, zeta, dtemp); for (m = 0; m < 5; m++) { ue[j][m] = dtemp[m]; } dtpp = 1.0/dtemp[0]; for (m = 1; m <= 4; m++) { buf[j][m] = dtpp * dtemp[m]; } cuf[j] = buf[j][2] * buf[j][2]; buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + buf[j][3] * buf[j][3]; q[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] + buf[j][3]*ue[j][3]); } for (j = 1; j < grid_points[1]-1; j++) { jm1 = j-1; jp1 = j+1; forcing[i][j][k][0] = forcing[i][j][k][0] - ty2*( ue[jp1][2]-ue[jm1][2] )+ dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]); forcing[i][j][k][1] = forcing[i][j][k][1] - ty2*(ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+ yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+ dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]); forcing[i][j][k][2] = forcing[i][j][k][2] - ty2*((ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))- (ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+ yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+ dy3ty1*( 
ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]); forcing[i][j][k][3] = forcing[i][j][k][3] - ty2*(ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+ yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+ dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]); forcing[i][j][k][4] = forcing[i][j][k][4] - ty2*(buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])- buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+ 0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+ buf[jm1][0])+ yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+ yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+ dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]); } /*-------------------------------------------------------------------- c Fourth-order dissipation c-------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { j = 1; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]); j = 2; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (-4.0*ue[j-1][m] + 6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]); } for (m = 0; m < 5; m++) { for (j = 1*3; j <= grid_points[1]-3*1-1; j++) { forcing[i][j][k][m] = forcing[i][j][k][m] - dssp* (ue[j-2][m] - 4.0*ue[j-1][m] + 6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]); } } for (m = 0; m < 5; m++) { j = grid_points[1]-3; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[j-2][m] - 4.0*ue[j-1][m] + 6.0*ue[j][m] - 4.0*ue[j+1][m]); j = grid_points[1]-2; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]); } } } /*-------------------------------------------------------------------- c zeta-direction flux differences c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { xi = (double)i * dnxm1; for (j = 1; j < grid_points[1]-1; j++) { eta = (double)j * dnym1; for (k = 0; k < grid_points[2]; k++) { zeta = (double)k * dnzm1; exact_solution(xi, eta, zeta, dtemp); for (m = 0; m < 5; m++) { ue[k][m] = dtemp[m]; } dtpp = 1.0/dtemp[0]; for (m = 1; m <= 4; m++) { buf[k][m] = dtpp * dtemp[m]; } cuf[k] = buf[k][3] * buf[k][3]; buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + buf[k][2] * buf[k][2]; q[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] + buf[k][3]*ue[k][3]); } for (k = 1; k < grid_points[2]-1; k++) { km1 = k-1; kp1 = k+1; forcing[i][j][k][0] = forcing[i][j][k][0] - tz2*( ue[kp1][3]-ue[km1][3] )+ dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]); forcing[i][j][k][1] = forcing[i][j][k][1] - tz2 * (ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+ zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+ dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]); forcing[i][j][k][2] = forcing[i][j][k][2] - tz2 * (ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+ zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+ dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]); forcing[i][j][k][3] = forcing[i][j][k][3] - tz2 * ((ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))- (ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+ zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+ dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]); forcing[i][j][k][4] = forcing[i][j][k][4] - tz2 * (buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])- buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+ 0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0] +buf[km1][0])+ zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+ zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+ dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]); } /*-------------------------------------------------------------------- c Fourth-order dissipation c-------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { k = 1; forcing[i][j][k][m] = 
forcing[i][j][k][m] - dssp * (5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]); k = 2; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (-4.0*ue[k-1][m] + 6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]); } for (m = 0; m < 5; m++) { for (k = 1*3; k <= grid_points[2]-3*1-1; k++) { forcing[i][j][k][m] = forcing[i][j][k][m] - dssp* (ue[k-2][m] - 4.0*ue[k-1][m] + 6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]); } } for (m = 0; m < 5; m++) { k = grid_points[2]-3; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[k-2][m] - 4.0*ue[k-1][m] + 6.0*ue[k][m] - 4.0*ue[k+1][m]); k = grid_points[2]-2; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]); } } } /*-------------------------------------------------------------------- c now change the sign of the forcing function, c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m]; } } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void exact_solution(double xi, double eta, double zeta, double dtemp[5]) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c this function returns the exact solution at point xi, eta, zeta c-------------------------------------------------------------------*/ int m; for (m = 0; m < 5; m++) { dtemp[m] = ce[m][0] + xi*(ce[m][1] + xi*(ce[m][4] + xi*(ce[m][7] + xi*ce[m][10]))) + eta*(ce[m][2] + eta*(ce[m][5] + eta*(ce[m][8] + eta*ce[m][11])))+ zeta*(ce[m][3] + zeta*(ce[m][6] + zeta*(ce[m][9] + zeta*ce[m][12]))); } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void initialize(void) { #pragma omp parallel { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c This subroutine initializes the field variable u using c tri-linear transfinite interpolation of the boundary values c-------------------------------------------------------------------*/ int i, j, k, m, ix, iy, iz; double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5]; /*-------------------------------------------------------------------- c Later (in compute_rhs) we compute 1/u for every element. A few of c the corner elements are not used, but it convenient (and faster) c to compute the whole thing with a simple loop. Make sure those c values are nonzero by initializing the whole thing here. 
c-------------------------------------------------------------------*/ #pragma omp for for (i = 0; i < IMAX; i++) { for (j = 0; j < IMAX; j++) { for (k = 0; k < IMAX; k++) { for (m = 0; m < 5; m++) { u[i][j][k][m] = 1.0; } } } } /*-------------------------------------------------------------------- c first store the "interpolated" values everywhere on the grid c-------------------------------------------------------------------*/ #pragma omp for for (i = 0; i < grid_points[0]; i++) { xi = (double)i * dnxm1; for (j = 0; j < grid_points[1]; j++) { eta = (double)j * dnym1; for (k = 0; k < grid_points[2]; k++) { zeta = (double)k * dnzm1; for (ix = 0; ix < 2; ix++) { exact_solution((double)ix, eta, zeta, &(Pface[ix][0][0])); } for (iy = 0; iy < 2; iy++) { exact_solution(xi, (double)iy , zeta, &Pface[iy][1][0]); } for (iz = 0; iz < 2; iz++) { exact_solution(xi, eta, (double)iz, &Pface[iz][2][0]); } for (m = 0; m < 5; m++) { Pxi = xi * Pface[1][0][m] + (1.0-xi) * Pface[0][0][m]; Peta = eta * Pface[1][1][m] + (1.0-eta) * Pface[0][1][m]; Pzeta = zeta * Pface[1][2][m] + (1.0-zeta) * Pface[0][2][m]; u[i][j][k][m] = Pxi + Peta + Pzeta - Pxi*Peta - Pxi*Pzeta - Peta*Pzeta + Pxi*Peta*Pzeta; } } } } /*-------------------------------------------------------------------- c now store the exact values on the boundaries c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c west face c-------------------------------------------------------------------*/ i = 0; xi = 0.0; #pragma omp for nowait for (j = 0; j < grid_points[1]; j++) { eta = (double)j * dnym1; for (k = 0; k < grid_points[2]; k++) { zeta = (double)k * dnzm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[i][j][k][m] = temp[m]; } } } /*-------------------------------------------------------------------- c east face c-------------------------------------------------------------------*/ i = grid_points[0]-1; xi = 1.0; #pragma omp for for (j = 0; j < grid_points[1]; j++) { eta = (double)j * dnym1; for (k = 0; k < grid_points[2]; k++) { zeta = (double)k * dnzm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[i][j][k][m] = temp[m]; } } } /*-------------------------------------------------------------------- c south face c-------------------------------------------------------------------*/ j = 0; eta = 0.0; #pragma omp for nowait for (i = 0; i < grid_points[0]; i++) { xi = (double)i * dnxm1; for (k = 0; k < grid_points[2]; k++) { zeta = (double)k * dnzm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[i][j][k][m] = temp[m]; } } } /*-------------------------------------------------------------------- c north face c-------------------------------------------------------------------*/ j = grid_points[1]-1; eta = 1.0; #pragma omp for for (i = 0; i < grid_points[0]; i++) { xi = (double)i * dnxm1; for (k = 0; k < grid_points[2]; k++) { zeta = (double)k * dnzm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[i][j][k][m] = temp[m]; } } } /*-------------------------------------------------------------------- c bottom face c-------------------------------------------------------------------*/ k = 0; zeta = 0.0; #pragma omp for nowait for (i = 0; i < grid_points[0]; i++) { xi = (double)i *dnxm1; for (j = 0; j < grid_points[1]; j++) { eta = (double)j * dnym1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[i][j][k][m] = temp[m]; } } } 
/*-------------------------------------------------------------------- c top face c-------------------------------------------------------------------*/ k = grid_points[2]-1; zeta = 1.0; #pragma omp for for (i = 0; i < grid_points[0]; i++) { xi = (double)i * dnxm1; for (j = 0; j < grid_points[1]; j++) { eta = (double)j * dnym1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[i][j][k][m] = temp[m]; } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void lhsinit(void) { #pragma omp parallel { int i, j, k, m, n; /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c zero the whole left hand side for starters c-------------------------------------------------------------------*/ #pragma omp for for (i = 0; i < grid_points[0]; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 0; k < grid_points[2]; k++) { for (m = 0; m < 5; m++) { for (n = 0; n < 5; n++) { lhs[i][j][k][0][m][n] = 0.0; lhs[i][j][k][1][m][n] = 0.0; lhs[i][j][k][2][m][n] = 0.0; } } } } } /*-------------------------------------------------------------------- c next, set all diagonal values to 1. This is overkill, but convenient c-------------------------------------------------------------------*/ #pragma omp for for (i = 0; i < grid_points[0]; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 0; k < grid_points[2]; k++) { for (m = 0; m < 5; m++) { lhs[i][j][k][1][m][m] = 1.0; } } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void lhsx(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c This function computes the left hand side in the xi-direction c-------------------------------------------------------------------*/ int i, j, k; /*-------------------------------------------------------------------- c determine a (labeled f) and n jacobians c-------------------------------------------------------------------*/ #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (i = 0; i < grid_points[0]; i++) { tmp1 = 1.0 / u[i][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; /*-------------------------------------------------------------------- c c-------------------------------------------------------------------*/ fjac[ i][ j][ k][0][0] = 0.0; fjac[ i][ j][ k][0][1] = 1.0; fjac[ i][ j][ k][0][2] = 0.0; fjac[ i][ j][ k][0][3] = 0.0; fjac[ i][ j][ k][0][4] = 0.0; fjac[ i][ j][ k][1][0] = -(u[i][j][k][1] * tmp2 * u[i][j][k][1]) + c2 * 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3] ) * tmp2; fjac[i][j][k][1][1] = ( 2.0 - c2 ) * ( u[i][j][k][1] / u[i][j][k][0] ); fjac[i][j][k][1][2] = - c2 * ( u[i][j][k][2] * tmp1 ); fjac[i][j][k][1][3] = - c2 * ( u[i][j][k][3] * tmp1 ); fjac[i][j][k][1][4] = c2; fjac[i][j][k][2][0] = - ( u[i][j][k][1]*u[i][j][k][2] ) * tmp2; fjac[i][j][k][2][1] = u[i][j][k][2] * tmp1; fjac[i][j][k][2][2] = u[i][j][k][1] * tmp1; fjac[i][j][k][2][3] = 0.0; fjac[i][j][k][2][4] = 0.0; fjac[i][j][k][3][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2; 
fjac[i][j][k][3][1] = u[i][j][k][3] * tmp1; fjac[i][j][k][3][2] = 0.0; fjac[i][j][k][3][3] = u[i][j][k][1] * tmp1; fjac[i][j][k][3][4] = 0.0; fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 - c1 * ( u[i][j][k][4] * tmp1 ) ) * ( u[i][j][k][1] * tmp1 ); fjac[i][j][k][4][1] = c1 * u[i][j][k][4] * tmp1 - 0.50 * c2 * ( 3.0*u[i][j][k][1]*u[i][j][k][1] + u[i][j][k][2]*u[i][j][k][2] + u[i][j][k][3]*u[i][j][k][3] ) * tmp2; fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][1] ) * tmp2; fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][3]*u[i][j][k][1] ) * tmp2; fjac[i][j][k][4][4] = c1 * ( u[i][j][k][1] * tmp1 ); njac[i][j][k][0][0] = 0.0; njac[i][j][k][0][1] = 0.0; njac[i][j][k][0][2] = 0.0; njac[i][j][k][0][3] = 0.0; njac[i][j][k][0][4] = 0.0; njac[i][j][k][1][0] = - con43 * c3c4 * tmp2 * u[i][j][k][1]; njac[i][j][k][1][1] = con43 * c3c4 * tmp1; njac[i][j][k][1][2] = 0.0; njac[i][j][k][1][3] = 0.0; njac[i][j][k][1][4] = 0.0; njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2]; njac[i][j][k][2][1] = 0.0; njac[i][j][k][2][2] = c3c4 * tmp1; njac[i][j][k][2][3] = 0.0; njac[i][j][k][2][4] = 0.0; njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3]; njac[i][j][k][3][1] = 0.0; njac[i][j][k][3][2] = 0.0; njac[i][j][k][3][3] = c3c4 * tmp1; njac[i][j][k][3][4] = 0.0; njac[i][j][k][4][0] = - ( con43 * c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][1])) - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2])) - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3])) - c1345 * tmp2 * u[i][j][k][4]; njac[i][j][k][4][1] = ( con43 * c3c4 - c1345 ) * tmp2 * u[i][j][k][1]; njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2]; njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3]; njac[i][j][k][4][4] = ( c1345 ) * tmp1; } /*-------------------------------------------------------------------- c now jacobians set, so form left hand side in x direction c-------------------------------------------------------------------*/ for (i = 1; i < grid_points[0]-1; i++) { tmp1 = dt * tx1; tmp2 = dt * tx2; lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i-1][j][k][0][0] - tmp1 * njac[i-1][j][k][0][0] - tmp1 * dx1; lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i-1][j][k][0][1] - tmp1 * njac[i-1][j][k][0][1]; lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i-1][j][k][0][2] - tmp1 * njac[i-1][j][k][0][2]; lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i-1][j][k][0][3] - tmp1 * njac[i-1][j][k][0][3]; lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i-1][j][k][0][4] - tmp1 * njac[i-1][j][k][0][4]; lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i-1][j][k][1][0] - tmp1 * njac[i-1][j][k][1][0]; lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i-1][j][k][1][1] - tmp1 * njac[i-1][j][k][1][1] - tmp1 * dx2; lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i-1][j][k][1][2] - tmp1 * njac[i-1][j][k][1][2]; lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i-1][j][k][1][3] - tmp1 * njac[i-1][j][k][1][3]; lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i-1][j][k][1][4] - tmp1 * njac[i-1][j][k][1][4]; lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i-1][j][k][2][0] - tmp1 * njac[i-1][j][k][2][0]; lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i-1][j][k][2][1] - tmp1 * njac[i-1][j][k][2][1]; lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i-1][j][k][2][2] - tmp1 * njac[i-1][j][k][2][2] - tmp1 * dx3; lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i-1][j][k][2][3] - tmp1 * njac[i-1][j][k][2][3]; lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i-1][j][k][2][4] - tmp1 * njac[i-1][j][k][2][4]; lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i-1][j][k][3][0] - tmp1 * njac[i-1][j][k][3][0]; lhs[i][j][k][AA][3][1] = - 
tmp2 * fjac[i-1][j][k][3][1] - tmp1 * njac[i-1][j][k][3][1]; lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i-1][j][k][3][2] - tmp1 * njac[i-1][j][k][3][2]; lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i-1][j][k][3][3] - tmp1 * njac[i-1][j][k][3][3] - tmp1 * dx4; lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i-1][j][k][3][4] - tmp1 * njac[i-1][j][k][3][4]; lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i-1][j][k][4][0] - tmp1 * njac[i-1][j][k][4][0]; lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i-1][j][k][4][1] - tmp1 * njac[i-1][j][k][4][1]; lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i-1][j][k][4][2] - tmp1 * njac[i-1][j][k][4][2]; lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i-1][j][k][4][3] - tmp1 * njac[i-1][j][k][4][3]; lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i-1][j][k][4][4] - tmp1 * njac[i-1][j][k][4][4] - tmp1 * dx5; lhs[i][j][k][BB][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dx1; lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1]; lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2]; lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3]; lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4]; lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0]; lhs[i][j][k][BB][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dx2; lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2]; lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3]; lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4]; lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0]; lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1]; lhs[i][j][k][BB][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dx3; lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3]; lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4]; lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0]; lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1]; lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2]; lhs[i][j][k][BB][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dx4; lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4]; lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0]; lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1]; lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2]; lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3]; lhs[i][j][k][BB][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dx5; lhs[i][j][k][CC][0][0] = tmp2 * fjac[i+1][j][k][0][0] - tmp1 * njac[i+1][j][k][0][0] - tmp1 * dx1; lhs[i][j][k][CC][0][1] = tmp2 * fjac[i+1][j][k][0][1] - tmp1 * njac[i+1][j][k][0][1]; lhs[i][j][k][CC][0][2] = tmp2 * fjac[i+1][j][k][0][2] - tmp1 * njac[i+1][j][k][0][2]; lhs[i][j][k][CC][0][3] = tmp2 * fjac[i+1][j][k][0][3] - tmp1 * njac[i+1][j][k][0][3]; lhs[i][j][k][CC][0][4] = tmp2 * fjac[i+1][j][k][0][4] - tmp1 * njac[i+1][j][k][0][4]; lhs[i][j][k][CC][1][0] = tmp2 * fjac[i+1][j][k][1][0] - tmp1 * njac[i+1][j][k][1][0]; lhs[i][j][k][CC][1][1] = tmp2 * fjac[i+1][j][k][1][1] - tmp1 * njac[i+1][j][k][1][1] - tmp1 * dx2; lhs[i][j][k][CC][1][2] = tmp2 * fjac[i+1][j][k][1][2] - tmp1 * njac[i+1][j][k][1][2]; lhs[i][j][k][CC][1][3] = tmp2 * fjac[i+1][j][k][1][3] - tmp1 * njac[i+1][j][k][1][3]; lhs[i][j][k][CC][1][4] = tmp2 * fjac[i+1][j][k][1][4] - tmp1 * njac[i+1][j][k][1][4]; lhs[i][j][k][CC][2][0] = tmp2 * fjac[i+1][j][k][2][0] - tmp1 * njac[i+1][j][k][2][0]; lhs[i][j][k][CC][2][1] = tmp2 * fjac[i+1][j][k][2][1] - tmp1 * njac[i+1][j][k][2][1]; lhs[i][j][k][CC][2][2] = tmp2 * fjac[i+1][j][k][2][2] - tmp1 * njac[i+1][j][k][2][2] - tmp1 * dx3; 
lhs[i][j][k][CC][2][3] = tmp2 * fjac[i+1][j][k][2][3] - tmp1 * njac[i+1][j][k][2][3]; lhs[i][j][k][CC][2][4] = tmp2 * fjac[i+1][j][k][2][4] - tmp1 * njac[i+1][j][k][2][4]; lhs[i][j][k][CC][3][0] = tmp2 * fjac[i+1][j][k][3][0] - tmp1 * njac[i+1][j][k][3][0]; lhs[i][j][k][CC][3][1] = tmp2 * fjac[i+1][j][k][3][1] - tmp1 * njac[i+1][j][k][3][1]; lhs[i][j][k][CC][3][2] = tmp2 * fjac[i+1][j][k][3][2] - tmp1 * njac[i+1][j][k][3][2]; lhs[i][j][k][CC][3][3] = tmp2 * fjac[i+1][j][k][3][3] - tmp1 * njac[i+1][j][k][3][3] - tmp1 * dx4; lhs[i][j][k][CC][3][4] = tmp2 * fjac[i+1][j][k][3][4] - tmp1 * njac[i+1][j][k][3][4]; lhs[i][j][k][CC][4][0] = tmp2 * fjac[i+1][j][k][4][0] - tmp1 * njac[i+1][j][k][4][0]; lhs[i][j][k][CC][4][1] = tmp2 * fjac[i+1][j][k][4][1] - tmp1 * njac[i+1][j][k][4][1]; lhs[i][j][k][CC][4][2] = tmp2 * fjac[i+1][j][k][4][2] - tmp1 * njac[i+1][j][k][4][2]; lhs[i][j][k][CC][4][3] = tmp2 * fjac[i+1][j][k][4][3] - tmp1 * njac[i+1][j][k][4][3]; lhs[i][j][k][CC][4][4] = tmp2 * fjac[i+1][j][k][4][4] - tmp1 * njac[i+1][j][k][4][4] - tmp1 * dx5; } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void lhsy(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c This function computes the left hand side for the three y-factors c-------------------------------------------------------------------*/ int i, j, k; /*-------------------------------------------------------------------- c Compute the indices for storing the tri-diagonal matrix; c determine a (labeled f) and n jacobians for cell c c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 1; k < grid_points[2]-1; k++) { tmp1 = 1.0 / u[i][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; fjac[ i][ j][ k][0][0] = 0.0; fjac[ i][ j][ k][0][1] = 0.0; fjac[ i][ j][ k][0][2] = 1.0; fjac[ i][ j][ k][0][3] = 0.0; fjac[ i][ j][ k][0][4] = 0.0; fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][2] ) * tmp2; fjac[i][j][k][1][1] = u[i][j][k][2] * tmp1; fjac[i][j][k][1][2] = u[i][j][k][1] * tmp1; fjac[i][j][k][1][3] = 0.0; fjac[i][j][k][1][4] = 0.0; fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][2]*tmp2) + 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 ); fjac[i][j][k][2][1] = - c2 * u[i][j][k][1] * tmp1; fjac[i][j][k][2][2] = ( 2.0 - c2 ) * u[i][j][k][2] * tmp1; fjac[i][j][k][2][3] = - c2 * u[i][j][k][3] * tmp1; fjac[i][j][k][2][4] = c2; fjac[i][j][k][3][0] = - ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2; fjac[i][j][k][3][1] = 0.0; fjac[i][j][k][3][2] = u[i][j][k][3] * tmp1; fjac[i][j][k][3][3] = u[i][j][k][2] * tmp1; fjac[i][j][k][3][4] = 0.0; fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 - c1 * u[i][j][k][4] * tmp1 ) * u[i][j][k][2] * tmp1; fjac[i][j][k][4][1] = - c2 * u[i][j][k][1]*u[i][j][k][2] * tmp2; fjac[i][j][k][4][2] = c1 * u[i][j][k][4] * tmp1 - 0.50 * c2 * ( ( u[i][j][k][1]*u[i][j][k][1] + 3.0 * u[i][j][k][2]*u[i][j][k][2] + u[i][j][k][3]*u[i][j][k][3] ) * tmp2 ); fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2; fjac[i][j][k][4][4] = c1 * u[i][j][k][2] * tmp1; njac[i][j][k][0][0] = 
0.0; njac[i][j][k][0][1] = 0.0; njac[i][j][k][0][2] = 0.0; njac[i][j][k][0][3] = 0.0; njac[i][j][k][0][4] = 0.0; njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1]; njac[i][j][k][1][1] = c3c4 * tmp1; njac[i][j][k][1][2] = 0.0; njac[i][j][k][1][3] = 0.0; njac[i][j][k][1][4] = 0.0; njac[i][j][k][2][0] = - con43 * c3c4 * tmp2 * u[i][j][k][2]; njac[i][j][k][2][1] = 0.0; njac[i][j][k][2][2] = con43 * c3c4 * tmp1; njac[i][j][k][2][3] = 0.0; njac[i][j][k][2][4] = 0.0; njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3]; njac[i][j][k][3][1] = 0.0; njac[i][j][k][3][2] = 0.0; njac[i][j][k][3][3] = c3c4 * tmp1; njac[i][j][k][3][4] = 0.0; njac[i][j][k][4][0] = - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][1])) - ( con43 * c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2])) - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3])) - c1345 * tmp2 * u[i][j][k][4]; njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1]; njac[i][j][k][4][2] = ( con43 * c3c4 - c1345 ) * tmp2 * u[i][j][k][2]; njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3]; njac[i][j][k][4][4] = ( c1345 ) * tmp1; } } } /*-------------------------------------------------------------------- c now joacobians set, so form left hand side in y direction c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { tmp1 = dt * ty1; tmp2 = dt * ty2; lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j-1][k][0][0] - tmp1 * njac[i][j-1][k][0][0] - tmp1 * dy1; lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j-1][k][0][1] - tmp1 * njac[i][j-1][k][0][1]; lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j-1][k][0][2] - tmp1 * njac[i][j-1][k][0][2]; lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j-1][k][0][3] - tmp1 * njac[i][j-1][k][0][3]; lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j-1][k][0][4] - tmp1 * njac[i][j-1][k][0][4]; lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j-1][k][1][0] - tmp1 * njac[i][j-1][k][1][0]; lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j-1][k][1][1] - tmp1 * njac[i][j-1][k][1][1] - tmp1 * dy2; lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j-1][k][1][2] - tmp1 * njac[i][j-1][k][1][2]; lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j-1][k][1][3] - tmp1 * njac[i][j-1][k][1][3]; lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j-1][k][1][4] - tmp1 * njac[i][j-1][k][1][4]; lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j-1][k][2][0] - tmp1 * njac[i][j-1][k][2][0]; lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j-1][k][2][1] - tmp1 * njac[i][j-1][k][2][1]; lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j-1][k][2][2] - tmp1 * njac[i][j-1][k][2][2] - tmp1 * dy3; lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j-1][k][2][3] - tmp1 * njac[i][j-1][k][2][3]; lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j-1][k][2][4] - tmp1 * njac[i][j-1][k][2][4]; lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j-1][k][3][0] - tmp1 * njac[i][j-1][k][3][0]; lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j-1][k][3][1] - tmp1 * njac[i][j-1][k][3][1]; lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j-1][k][3][2] - tmp1 * njac[i][j-1][k][3][2]; lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j-1][k][3][3] - tmp1 * njac[i][j-1][k][3][3] - tmp1 * dy4; lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j-1][k][3][4] - tmp1 * njac[i][j-1][k][3][4]; lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j-1][k][4][0] - tmp1 * njac[i][j-1][k][4][0]; lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j-1][k][4][1] - tmp1 * njac[i][j-1][k][4][1]; lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j-1][k][4][2] - tmp1 * njac[i][j-1][k][4][2]; 
lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j-1][k][4][3] - tmp1 * njac[i][j-1][k][4][3]; lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j-1][k][4][4] - tmp1 * njac[i][j-1][k][4][4] - tmp1 * dy5; lhs[i][j][k][BB][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dy1; lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1]; lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2]; lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3]; lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4]; lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0]; lhs[i][j][k][BB][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dy2; lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2]; lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3]; lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4]; lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0]; lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1]; lhs[i][j][k][BB][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dy3; lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3]; lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4]; lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0]; lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1]; lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2]; lhs[i][j][k][BB][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dy4; lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4]; lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0]; lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1]; lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2]; lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3]; lhs[i][j][k][BB][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dy5; lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j+1][k][0][0] - tmp1 * njac[i][j+1][k][0][0] - tmp1 * dy1; lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j+1][k][0][1] - tmp1 * njac[i][j+1][k][0][1]; lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j+1][k][0][2] - tmp1 * njac[i][j+1][k][0][2]; lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j+1][k][0][3] - tmp1 * njac[i][j+1][k][0][3]; lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j+1][k][0][4] - tmp1 * njac[i][j+1][k][0][4]; lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j+1][k][1][0] - tmp1 * njac[i][j+1][k][1][0]; lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j+1][k][1][1] - tmp1 * njac[i][j+1][k][1][1] - tmp1 * dy2; lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j+1][k][1][2] - tmp1 * njac[i][j+1][k][1][2]; lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j+1][k][1][3] - tmp1 * njac[i][j+1][k][1][3]; lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j+1][k][1][4] - tmp1 * njac[i][j+1][k][1][4]; lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j+1][k][2][0] - tmp1 * njac[i][j+1][k][2][0]; lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j+1][k][2][1] - tmp1 * njac[i][j+1][k][2][1]; lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j+1][k][2][2] - tmp1 * njac[i][j+1][k][2][2] - tmp1 * dy3; lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j+1][k][2][3] - tmp1 * njac[i][j+1][k][2][3]; lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j+1][k][2][4] - tmp1 * njac[i][j+1][k][2][4]; lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j+1][k][3][0] - tmp1 * njac[i][j+1][k][3][0]; lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j+1][k][3][1] - tmp1 * njac[i][j+1][k][3][1]; lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j+1][k][3][2] - tmp1 * njac[i][j+1][k][3][2]; lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j+1][k][3][3] - tmp1 * njac[i][j+1][k][3][3] - tmp1 * dy4; lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j+1][k][3][4] - tmp1 * 
njac[i][j+1][k][3][4]; lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j+1][k][4][0] - tmp1 * njac[i][j+1][k][4][0]; lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j+1][k][4][1] - tmp1 * njac[i][j+1][k][4][1]; lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j+1][k][4][2] - tmp1 * njac[i][j+1][k][4][2]; lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j+1][k][4][3] - tmp1 * njac[i][j+1][k][4][3]; lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j+1][k][4][4] - tmp1 * njac[i][j+1][k][4][4] - tmp1 * dy5; } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void lhsz(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c This function computes the left hand side for the three z-factors c-------------------------------------------------------------------*/ int i, j, k; /*-------------------------------------------------------------------- c Compute the indices for storing the block-diagonal matrix; c determine c (labeled f) and s jacobians c---------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 0; k < grid_points[2]; k++) { tmp1 = 1.0 / u[i][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; fjac[i][j][k][0][0] = 0.0; fjac[i][j][k][0][1] = 0.0; fjac[i][j][k][0][2] = 0.0; fjac[i][j][k][0][3] = 1.0; fjac[i][j][k][0][4] = 0.0; fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2; fjac[i][j][k][1][1] = u[i][j][k][3] * tmp1; fjac[i][j][k][1][2] = 0.0; fjac[i][j][k][1][3] = u[i][j][k][1] * tmp1; fjac[i][j][k][1][4] = 0.0; fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2; fjac[i][j][k][2][1] = 0.0; fjac[i][j][k][2][2] = u[i][j][k][3] * tmp1; fjac[i][j][k][2][3] = u[i][j][k][2] * tmp1; fjac[i][j][k][2][4] = 0.0; fjac[i][j][k][3][0] = - (u[i][j][k][3]*u[i][j][k][3] * tmp2 ) + 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 ); fjac[i][j][k][3][1] = - c2 * u[i][j][k][1] * tmp1; fjac[i][j][k][3][2] = - c2 * u[i][j][k][2] * tmp1; fjac[i][j][k][3][3] = ( 2.0 - c2 ) * u[i][j][k][3] * tmp1; fjac[i][j][k][3][4] = c2; fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 - c1 * ( u[i][j][k][4] * tmp1 ) ) * ( u[i][j][k][3] * tmp1 ); fjac[i][j][k][4][1] = - c2 * ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2; fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2; fjac[i][j][k][4][3] = c1 * ( u[i][j][k][4] * tmp1 ) - 0.50 * c2 * ( ( u[i][j][k][1]*u[i][j][k][1] + u[i][j][k][2]*u[i][j][k][2] + 3.0*u[i][j][k][3]*u[i][j][k][3] ) * tmp2 ); fjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1; njac[i][j][k][0][0] = 0.0; njac[i][j][k][0][1] = 0.0; njac[i][j][k][0][2] = 0.0; njac[i][j][k][0][3] = 0.0; njac[i][j][k][0][4] = 0.0; njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1]; njac[i][j][k][1][1] = c3c4 * tmp1; njac[i][j][k][1][2] = 0.0; njac[i][j][k][1][3] = 0.0; njac[i][j][k][1][4] = 0.0; njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2]; njac[i][j][k][2][1] = 0.0; njac[i][j][k][2][2] = c3c4 * tmp1; njac[i][j][k][2][3] = 0.0; njac[i][j][k][2][4] = 0.0; njac[i][j][k][3][0] = - con43 * c3c4 * tmp2 * u[i][j][k][3]; njac[i][j][k][3][1] = 0.0; njac[i][j][k][3][2] = 0.0; njac[i][j][k][3][3] = con43 * 
c3 * c4 * tmp1; njac[i][j][k][3][4] = 0.0; njac[i][j][k][4][0] = - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][1])) - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2])) - ( con43 * c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3])) - c1345 * tmp2 * u[i][j][k][4]; njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1]; njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2]; njac[i][j][k][4][3] = ( con43 * c3c4 - c1345 ) * tmp2 * u[i][j][k][3]; njac[i][j][k][4][4] = ( c1345 )* tmp1; } } } /*-------------------------------------------------------------------- c now jacobians set, so form left hand side in z direction c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { tmp1 = dt * tz1; tmp2 = dt * tz2; lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j][k-1][0][0] - tmp1 * njac[i][j][k-1][0][0] - tmp1 * dz1; lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j][k-1][0][1] - tmp1 * njac[i][j][k-1][0][1]; lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j][k-1][0][2] - tmp1 * njac[i][j][k-1][0][2]; lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j][k-1][0][3] - tmp1 * njac[i][j][k-1][0][3]; lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j][k-1][0][4] - tmp1 * njac[i][j][k-1][0][4]; lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j][k-1][1][0] - tmp1 * njac[i][j][k-1][1][0]; lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j][k-1][1][1] - tmp1 * njac[i][j][k-1][1][1] - tmp1 * dz2; lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j][k-1][1][2] - tmp1 * njac[i][j][k-1][1][2]; lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j][k-1][1][3] - tmp1 * njac[i][j][k-1][1][3]; lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j][k-1][1][4] - tmp1 * njac[i][j][k-1][1][4]; lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j][k-1][2][0] - tmp1 * njac[i][j][k-1][2][0]; lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j][k-1][2][1] - tmp1 * njac[i][j][k-1][2][1]; lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j][k-1][2][2] - tmp1 * njac[i][j][k-1][2][2] - tmp1 * dz3; lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j][k-1][2][3] - tmp1 * njac[i][j][k-1][2][3]; lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j][k-1][2][4] - tmp1 * njac[i][j][k-1][2][4]; lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j][k-1][3][0] - tmp1 * njac[i][j][k-1][3][0]; lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j][k-1][3][1] - tmp1 * njac[i][j][k-1][3][1]; lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j][k-1][3][2] - tmp1 * njac[i][j][k-1][3][2]; lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j][k-1][3][3] - tmp1 * njac[i][j][k-1][3][3] - tmp1 * dz4; lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j][k-1][3][4] - tmp1 * njac[i][j][k-1][3][4]; lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j][k-1][4][0] - tmp1 * njac[i][j][k-1][4][0]; lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j][k-1][4][1] - tmp1 * njac[i][j][k-1][4][1]; lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j][k-1][4][2] - tmp1 * njac[i][j][k-1][4][2]; lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j][k-1][4][3] - tmp1 * njac[i][j][k-1][4][3]; lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j][k-1][4][4] - tmp1 * njac[i][j][k-1][4][4] - tmp1 * dz5; lhs[i][j][k][BB][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dz1; lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1]; lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2]; lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3]; lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4]; lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0]; lhs[i][j][k][BB][1][1] = 1.0 + tmp1 * 
2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dz2; lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2]; lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3]; lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4]; lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0]; lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1]; lhs[i][j][k][BB][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dz3; lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3]; lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4]; lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0]; lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1]; lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2]; lhs[i][j][k][BB][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dz4; lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4]; lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0]; lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1]; lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2]; lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3]; lhs[i][j][k][BB][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dz5; lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j][k+1][0][0] - tmp1 * njac[i][j][k+1][0][0] - tmp1 * dz1; lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j][k+1][0][1] - tmp1 * njac[i][j][k+1][0][1]; lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j][k+1][0][2] - tmp1 * njac[i][j][k+1][0][2]; lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j][k+1][0][3] - tmp1 * njac[i][j][k+1][0][3]; lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j][k+1][0][4] - tmp1 * njac[i][j][k+1][0][4]; lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j][k+1][1][0] - tmp1 * njac[i][j][k+1][1][0]; lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j][k+1][1][1] - tmp1 * njac[i][j][k+1][1][1] - tmp1 * dz2; lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j][k+1][1][2] - tmp1 * njac[i][j][k+1][1][2]; lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j][k+1][1][3] - tmp1 * njac[i][j][k+1][1][3]; lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j][k+1][1][4] - tmp1 * njac[i][j][k+1][1][4]; lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j][k+1][2][0] - tmp1 * njac[i][j][k+1][2][0]; lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j][k+1][2][1] - tmp1 * njac[i][j][k+1][2][1]; lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j][k+1][2][2] - tmp1 * njac[i][j][k+1][2][2] - tmp1 * dz3; lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j][k+1][2][3] - tmp1 * njac[i][j][k+1][2][3]; lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j][k+1][2][4] - tmp1 * njac[i][j][k+1][2][4]; lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j][k+1][3][0] - tmp1 * njac[i][j][k+1][3][0]; lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j][k+1][3][1] - tmp1 * njac[i][j][k+1][3][1]; lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j][k+1][3][2] - tmp1 * njac[i][j][k+1][3][2]; lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j][k+1][3][3] - tmp1 * njac[i][j][k+1][3][3] - tmp1 * dz4; lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j][k+1][3][4] - tmp1 * njac[i][j][k+1][3][4]; lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j][k+1][4][0] - tmp1 * njac[i][j][k+1][4][0]; lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j][k+1][4][1] - tmp1 * njac[i][j][k+1][4][1]; lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j][k+1][4][2] - tmp1 * njac[i][j][k+1][4][2]; lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j][k+1][4][3] - tmp1 * njac[i][j][k+1][4][3]; lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j][k+1][4][4] - tmp1 * njac[i][j][k+1][4][4] - tmp1 * dz5; } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static 
void compute_rhs(void) { int i, j, k, m; double rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1; /*-------------------------------------------------------------------- c compute the reciprocal of density, and the kinetic energy, c and the speed of sound. c-------------------------------------------------------------------*/ #pragma omp for nowait for (i = 0; i < grid_points[0]; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 0; k < grid_points[2]; k++) { rho_inv = 1.0/u[i][j][k][0]; rho_i[i][j][k] = rho_inv; us[i][j][k] = u[i][j][k][1] * rho_inv; vs[i][j][k] = u[i][j][k][2] * rho_inv; ws[i][j][k] = u[i][j][k][3] * rho_inv; square[i][j][k] = 0.5 * (u[i][j][k][1]*u[i][j][k][1] + u[i][j][k][2]*u[i][j][k][2] + u[i][j][k][3]*u[i][j][k][3] ) * rho_inv; qs[i][j][k] = square[i][j][k] * rho_inv; } } } /*-------------------------------------------------------------------- c copy the exact forcing term to the right hand side; because c this forcing term is known, we can store it on the whole grid c including the boundary c-------------------------------------------------------------------*/ #pragma omp for for (i = 0; i < grid_points[0]; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 0; k < grid_points[2]; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = forcing[i][j][k][m]; } } } } /*-------------------------------------------------------------------- c compute xi-direction fluxes c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { uijk = us[i][j][k]; up1 = us[i+1][j][k]; um1 = us[i-1][j][k]; rhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 * (u[i+1][j][k][0] - 2.0*u[i][j][k][0] + u[i-1][j][k][0]) - tx2 * (u[i+1][j][k][1] - u[i-1][j][k][1]); rhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 * (u[i+1][j][k][1] - 2.0*u[i][j][k][1] + u[i-1][j][k][1]) + xxcon2*con43 * (up1 - 2.0*uijk + um1) - tx2 * (u[i+1][j][k][1]*up1 - u[i-1][j][k][1]*um1 + (u[i+1][j][k][4]- square[i+1][j][k]- u[i-1][j][k][4]+ square[i-1][j][k])* c2); rhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 * (u[i+1][j][k][2] - 2.0*u[i][j][k][2] + u[i-1][j][k][2]) + xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] + vs[i-1][j][k]) - tx2 * (u[i+1][j][k][2]*up1 - u[i-1][j][k][2]*um1); rhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 * (u[i+1][j][k][3] - 2.0*u[i][j][k][3] + u[i-1][j][k][3]) + xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] + ws[i-1][j][k]) - tx2 * (u[i+1][j][k][3]*up1 - u[i-1][j][k][3]*um1); rhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 * (u[i+1][j][k][4] - 2.0*u[i][j][k][4] + u[i-1][j][k][4]) + xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] + qs[i-1][j][k]) + xxcon4 * (up1*up1 - 2.0*uijk*uijk + um1*um1) + xxcon5 * (u[i+1][j][k][4]*rho_i[i+1][j][k] - 2.0*u[i][j][k][4]*rho_i[i][j][k] + u[i-1][j][k][4]*rho_i[i-1][j][k]) - tx2 * ( (c1*u[i+1][j][k][4] - c2*square[i+1][j][k])*up1 - (c1*u[i-1][j][k][4] - c2*square[i-1][j][k])*um1 ); } } } /*-------------------------------------------------------------------- c add fourth order xi-direction dissipation c-------------------------------------------------------------------*/ i = 1; #pragma omp for nowait for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m]- dssp * ( 5.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + u[i+2][j][k][m]); } } } i = 2; #pragma omp for nowait for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { 
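/*-------------------------------------------------------------------- c  i = 2: one-sided fourth order dissipation; the u[i-2] term of the interior (1,-4,6,-4,1) stencil is dropped because i-2 lies on the boundary c-------------------------------------------------------------------*/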
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + u[i+2][j][k][m]); } } } #pragma omp for nowait for (i = 3; i < grid_points[0]-3; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + u[i+2][j][k][m] ); } } } } i = grid_points[0]-3; #pragma omp for nowait for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] ); } } } i = grid_points[0]-2; #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i-2][j][k][m] - 4.*u[i-1][j][k][m] + 5.0*u[i][j][k][m] ); } } } /*-------------------------------------------------------------------- c compute eta-direction fluxes c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { vijk = vs[i][j][k]; vp1 = vs[i][j+1][k]; vm1 = vs[i][j-1][k]; rhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 * (u[i][j+1][k][0] - 2.0*u[i][j][k][0] + u[i][j-1][k][0]) - ty2 * (u[i][j+1][k][2] - u[i][j-1][k][2]); rhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 * (u[i][j+1][k][1] - 2.0*u[i][j][k][1] + u[i][j-1][k][1]) + yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] + us[i][j-1][k]) - ty2 * (u[i][j+1][k][1]*vp1 - u[i][j-1][k][1]*vm1); rhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 * (u[i][j+1][k][2] - 2.0*u[i][j][k][2] + u[i][j-1][k][2]) + yycon2*con43 * (vp1 - 2.0*vijk + vm1) - ty2 * (u[i][j+1][k][2]*vp1 - u[i][j-1][k][2]*vm1 + (u[i][j+1][k][4] - square[i][j+1][k] - u[i][j-1][k][4] + square[i][j-1][k]) *c2); rhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 * (u[i][j+1][k][3] - 2.0*u[i][j][k][3] + u[i][j-1][k][3]) + yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] + ws[i][j-1][k]) - ty2 * (u[i][j+1][k][3]*vp1 - u[i][j-1][k][3]*vm1); rhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 * (u[i][j+1][k][4] - 2.0*u[i][j][k][4] + u[i][j-1][k][4]) + yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] + qs[i][j-1][k]) + yycon4 * (vp1*vp1 - 2.0*vijk*vijk + vm1*vm1) + yycon5 * (u[i][j+1][k][4]*rho_i[i][j+1][k] - 2.0*u[i][j][k][4]*rho_i[i][j][k] + u[i][j-1][k][4]*rho_i[i][j-1][k]) - ty2 * ((c1*u[i][j+1][k][4] - c2*square[i][j+1][k]) * vp1 - (c1*u[i][j-1][k][4] - c2*square[i][j-1][k]) * vm1); } } } /*-------------------------------------------------------------------- c add fourth order eta-direction dissipation c-------------------------------------------------------------------*/ j = 1; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m]- dssp * ( 5.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + u[i][j+2][k][m]); } } } j = 2; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + u[i][j+2][k][m]); } } } #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (j = 3; j < grid_points[1]-3; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { 
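/*-------------------------------------------------------------------- c  interior points: full fourth order dissipation stencil (1,-4,6,-4,1) applied in the eta direction c-------------------------------------------------------------------*/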
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + u[i][j+2][k][m] ); } } } } j = grid_points[1]-3; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] ); } } } j = grid_points[1]-2; #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j-2][k][m] - 4.*u[i][j-1][k][m] + 5.*u[i][j][k][m] ); } } } /*-------------------------------------------------------------------- c compute zeta-direction fluxes c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { wijk = ws[i][j][k]; wp1 = ws[i][j][k+1]; wm1 = ws[i][j][k-1]; rhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 * (u[i][j][k+1][0] - 2.0*u[i][j][k][0] + u[i][j][k-1][0]) - tz2 * (u[i][j][k+1][3] - u[i][j][k-1][3]); rhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 * (u[i][j][k+1][1] - 2.0*u[i][j][k][1] + u[i][j][k-1][1]) + zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] + us[i][j][k-1]) - tz2 * (u[i][j][k+1][1]*wp1 - u[i][j][k-1][1]*wm1); rhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 * (u[i][j][k+1][2] - 2.0*u[i][j][k][2] + u[i][j][k-1][2]) + zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] + vs[i][j][k-1]) - tz2 * (u[i][j][k+1][2]*wp1 - u[i][j][k-1][2]*wm1); rhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 * (u[i][j][k+1][3] - 2.0*u[i][j][k][3] + u[i][j][k-1][3]) + zzcon2*con43 * (wp1 - 2.0*wijk + wm1) - tz2 * (u[i][j][k+1][3]*wp1 - u[i][j][k-1][3]*wm1 + (u[i][j][k+1][4] - square[i][j][k+1] - u[i][j][k-1][4] + square[i][j][k-1]) *c2); rhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 * (u[i][j][k+1][4] - 2.0*u[i][j][k][4] + u[i][j][k-1][4]) + zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] + qs[i][j][k-1]) + zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + wm1*wm1) + zzcon5 * (u[i][j][k+1][4]*rho_i[i][j][k+1] - 2.0*u[i][j][k][4]*rho_i[i][j][k] + u[i][j][k-1][4]*rho_i[i][j][k-1]) - tz2 * ( (c1*u[i][j][k+1][4] - c2*square[i][j][k+1])*wp1 - (c1*u[i][j][k-1][4] - c2*square[i][j][k-1])*wm1); } } } /*-------------------------------------------------------------------- c add fourth order zeta-direction dissipation c-------------------------------------------------------------------*/ k = 1; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m]- dssp * ( 5.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + u[i][j][k+2][m]); } } } k = 2; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + u[i][j][k+2][m]); } } } #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 3; k < grid_points[2]-3; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + u[i][j][k+2][m] ); } } } } k = grid_points[2]-3; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (m = 0; m < 5; m++) { 
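/*-------------------------------------------------------------------- c  k = grid_points[2]-3: the u[k+2] term is dropped, leaving the one-sided stencil (1,-4,6,-4) next to the far zeta boundary c-------------------------------------------------------------------*/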
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] ); } } } k = grid_points[2]-2; #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + 5.0*u[i][j][k][m] ); } } } #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { for (i = 1; i < grid_points[0]-1; i++) { rhs[i][j][k][m] = rhs[i][j][k][m] * dt; } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void set_constants(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ ce[0][0] = 2.0; ce[0][1] = 0.0; ce[0][2] = 0.0; ce[0][3] = 4.0; ce[0][4] = 5.0; ce[0][5] = 3.0; ce[0][6] = 0.5; ce[0][7] = 0.02; ce[0][8] = 0.01; ce[0][9] = 0.03; ce[0][10] = 0.5; ce[0][11] = 0.4; ce[0][12] = 0.3; ce[1][0] = 1.0; ce[1][1] = 0.0; ce[1][2] = 0.0; ce[1][3] = 0.0; ce[1][4] = 1.0; ce[1][5] = 2.0; ce[1][6] = 3.0; ce[1][7] = 0.01; ce[1][8] = 0.03; ce[1][9] = 0.02; ce[1][10] = 0.4; ce[1][11] = 0.3; ce[1][12] = 0.5; ce[2][0] = 2.0; ce[2][1] = 2.0; ce[2][2] = 0.0; ce[2][3] = 0.0; ce[2][4] = 0.0; ce[2][5] = 2.0; ce[2][6] = 3.0; ce[2][7] = 0.04; ce[2][8] = 0.03; ce[2][9] = 0.05; ce[2][10] = 0.3; ce[2][11] = 0.5; ce[2][12] = 0.4; ce[3][0] = 2.0; ce[3][1] = 2.0; ce[3][2] = 0.0; ce[3][3] = 0.0; ce[3][4] = 0.0; ce[3][5] = 2.0; ce[3][6] = 3.0; ce[3][7] = 0.03; ce[3][8] = 0.05; ce[3][9] = 0.04; ce[3][10] = 0.2; ce[3][11] = 0.1; ce[3][12] = 0.3; ce[4][0] = 5.0; ce[4][1] = 4.0; ce[4][2] = 3.0; ce[4][3] = 2.0; ce[4][4] = 0.1; ce[4][5] = 0.4; ce[4][6] = 0.3; ce[4][7] = 0.05; ce[4][8] = 0.04; ce[4][9] = 0.03; ce[4][10] = 0.1; ce[4][11] = 0.3; ce[4][12] = 0.2; c1 = 1.4; c2 = 0.4; c3 = 0.1; c4 = 1.0; c5 = 1.4; dnxm1 = 1.0 / (double)(grid_points[0]-1); dnym1 = 1.0 / (double)(grid_points[1]-1); dnzm1 = 1.0 / (double)(grid_points[2]-1); c1c2 = c1 * c2; c1c5 = c1 * c5; c3c4 = c3 * c4; c1345 = c1c5 * c3c4; conz1 = (1.0-c1c5); tx1 = 1.0 / (dnxm1 * dnxm1); tx2 = 1.0 / (2.0 * dnxm1); tx3 = 1.0 / dnxm1; ty1 = 1.0 / (dnym1 * dnym1); ty2 = 1.0 / (2.0 * dnym1); ty3 = 1.0 / dnym1; tz1 = 1.0 / (dnzm1 * dnzm1); tz2 = 1.0 / (2.0 * dnzm1); tz3 = 1.0 / dnzm1; dx1 = 0.75; dx2 = 0.75; dx3 = 0.75; dx4 = 0.75; dx5 = 0.75; dy1 = 0.75; dy2 = 0.75; dy3 = 0.75; dy4 = 0.75; dy5 = 0.75; dz1 = 1.0; dz2 = 1.0; dz3 = 1.0; dz4 = 1.0; dz5 = 1.0; dxmax = max(dx3, dx4); dymax = max(dy2, dy4); dzmax = max(dz2, dz3); dssp = 0.25 * max(dx1, max(dy1, dz1) ); c4dssp = 4.0 * dssp; c5dssp = 5.0 * dssp; dttx1 = dt*tx1; dttx2 = dt*tx2; dtty1 = dt*ty1; dtty2 = dt*ty2; dttz1 = dt*tz1; dttz2 = dt*tz2; c2dttx1 = 2.0*dttx1; c2dtty1 = 2.0*dtty1; c2dttz1 = 2.0*dttz1; dtdssp = dt*dssp; comz1 = dtdssp; comz4 = 4.0*dtdssp; comz5 = 5.0*dtdssp; comz6 = 6.0*dtdssp; c3c4tx3 = c3c4*tx3; c3c4ty3 = c3c4*ty3; c3c4tz3 = c3c4*tz3; dx1tx1 = dx1*tx1; dx2tx1 = dx2*tx1; dx3tx1 = dx3*tx1; dx4tx1 = dx4*tx1; dx5tx1 = dx5*tx1; dy1ty1 = dy1*ty1; dy2ty1 = dy2*ty1; dy3ty1 = dy3*ty1; dy4ty1 = dy4*ty1; dy5ty1 = dy5*ty1; dz1tz1 = dz1*tz1; dz2tz1 = dz2*tz1; dz3tz1 = dz3*tz1; dz4tz1 = dz4*tz1; dz5tz1 = dz5*tz1; c2iv = 2.5; con43 = 4.0/3.0; con16 = 1.0/6.0; xxcon1 = c3c4tx3*con43*tx3; xxcon2 = c3c4tx3*tx3; xxcon3 = c3c4tx3*conz1*tx3; xxcon4 = c3c4tx3*con16*tx3; xxcon5 = 
c3c4tx3*c1c5*tx3; yycon1 = c3c4ty3*con43*ty3; yycon2 = c3c4ty3*ty3; yycon3 = c3c4ty3*conz1*ty3; yycon4 = c3c4ty3*con16*ty3; yycon5 = c3c4ty3*c1c5*ty3; zzcon1 = c3c4tz3*con43*tz3; zzcon2 = c3c4tz3*tz3; zzcon3 = c3c4tz3*conz1*tz3; zzcon4 = c3c4tz3*con16*tz3; zzcon5 = c3c4tz3*c1c5*tz3; } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void verify(int no_time_steps, char *class, boolean *verified) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c verification routine c-------------------------------------------------------------------*/ double xcrref[5],xceref[5],xcrdif[5],xcedif[5], epsilon, xce[5], xcr[5], dtref; int m; /*-------------------------------------------------------------------- c tolerance level c-------------------------------------------------------------------*/ epsilon = 1.0e-08; /*-------------------------------------------------------------------- c compute the error norm and the residual norm, and exit if not printing c-------------------------------------------------------------------*/ error_norm(xce); compute_rhs(); rhs_norm(xcr); for (m = 0; m < 5; m++) { xcr[m] = xcr[m] / dt; } *class = 'U'; *verified = TRUE; for (m = 0; m < 5; m++) { xcrref[m] = 1.0; xceref[m] = 1.0; } /*-------------------------------------------------------------------- c reference data for 12X12X12 grids after 60 time steps, with DT = 1.0d-02 c-------------------------------------------------------------------*/ if (grid_points[0] == 12 && grid_points[1] == 12 && grid_points[2] == 12 && no_time_steps == 60) { *class = 'S'; dtref = 1.0e-2; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual. c-------------------------------------------------------------------*/ xcrref[0] = 1.7034283709541311e-01; xcrref[1] = 1.2975252070034097e-02; xcrref[2] = 3.2527926989486055e-02; xcrref[3] = 2.6436421275166801e-02; xcrref[4] = 1.9211784131744430e-01; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error. c-------------------------------------------------------------------*/ xceref[0] = 4.9976913345811579e-04; xceref[1] = 4.5195666782961927e-05; xceref[2] = 7.3973765172921357e-05; xceref[3] = 7.3821238632439731e-05; xceref[4] = 8.9269630987491446e-04; /*-------------------------------------------------------------------- c reference data for 24X24X24 grids after 200 time steps, with DT = 0.8d-3 c-------------------------------------------------------------------*/ } else if (grid_points[0] == 24 && grid_points[1] == 24 && grid_points[2] == 24 && no_time_steps == 200) { *class = 'W'; dtref = 0.8e-3; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual. c-------------------------------------------------------------------*/ xcrref[0] = 0.1125590409344e+03; xcrref[1] = 0.1180007595731e+02; xcrref[2] = 0.2710329767846e+02; xcrref[3] = 0.2469174937669e+02; xcrref[4] = 0.2638427874317e+03; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error. 
c-------------------------------------------------------------------*/ xceref[0] = 0.4419655736008e+01; xceref[1] = 0.4638531260002e+00; xceref[2] = 0.1011551749967e+01; xceref[3] = 0.9235878729944e+00; xceref[4] = 0.1018045837718e+02; /*-------------------------------------------------------------------- c reference data for 64X64X64 grids after 200 time steps, with DT = 0.8d-3 c-------------------------------------------------------------------*/ } else if (grid_points[0] == 64 && grid_points[1] == 64 && grid_points[2] == 64 && no_time_steps == 200) { *class = 'A'; dtref = 0.8e-3; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual. c-------------------------------------------------------------------*/ xcrref[0] = 1.0806346714637264e+02; xcrref[1] = 1.1319730901220813e+01; xcrref[2] = 2.5974354511582465e+01; xcrref[3] = 2.3665622544678910e+01; xcrref[4] = 2.5278963211748344e+02; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error. c-------------------------------------------------------------------*/ xceref[0] = 4.2348416040525025e+00; xceref[1] = 4.4390282496995698e-01; xceref[2] = 9.6692480136345650e-01; xceref[3] = 8.8302063039765474e-01; xceref[4] = 9.7379901770829278e+00; /*-------------------------------------------------------------------- c reference data for 102X102X102 grids after 200 time steps, c with DT = 3.0d-04 c-------------------------------------------------------------------*/ } else if (grid_points[0] == 102 && grid_points[1] == 102 && grid_points[2] == 102 && no_time_steps == 200) { *class = 'B'; dtref = 3.0e-4; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual. c-------------------------------------------------------------------*/ xcrref[0] = 1.4233597229287254e+03; xcrref[1] = 9.9330522590150238e+01; xcrref[2] = 3.5646025644535285e+02; xcrref[3] = 3.2485447959084092e+02; xcrref[4] = 3.2707541254659363e+03; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error. c-------------------------------------------------------------------*/ xceref[0] = 5.2969847140936856e+01; xceref[1] = 4.4632896115670668e+00; xceref[2] = 1.3122573342210174e+01; xceref[3] = 1.2006925323559144e+01; xceref[4] = 1.2459576151035986e+02; /*-------------------------------------------------------------------- c reference data for 162X162X162 grids after 200 time steps, c with DT = 1.0d-04 c-------------------------------------------------------------------*/ } else if (grid_points[0] == 162 && grid_points[1] == 162 && grid_points[2] == 162 && no_time_steps == 200) { *class = 'C'; dtref = 1.0e-4; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual. c-------------------------------------------------------------------*/ xcrref[0] = 0.62398116551764615e+04; xcrref[1] = 0.50793239190423964e+03; xcrref[2] = 0.15423530093013596e+04; xcrref[3] = 0.13302387929291190e+04; xcrref[4] = 0.11604087428436455e+05; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error. 
c-------------------------------------------------------------------*/ xceref[0] = 0.16462008369091265e+03; xceref[1] = 0.11497107903824313e+02; xceref[2] = 0.41207446207461508e+02; xceref[3] = 0.37087651059694167e+02; xceref[4] = 0.36211053051841265e+03; } else { *verified = FALSE; } /*-------------------------------------------------------------------- c verification test for residuals if gridsize is either 12X12X12 or c 24X24X24 or 64X64X64 or 102X102X102 or 162X162X162 c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Compute the difference of solution values and the known reference values. c-------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]); xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]); } /*-------------------------------------------------------------------- c Output the comparison of computed results to known cases. c-------------------------------------------------------------------*/ if (*class != 'U') { printf(" Verification being performed for class %1c\n", *class); printf(" accuracy setting for epsilon = %20.13e\n", epsilon); if (fabs(dt-dtref) > epsilon) { *verified = FALSE; *class = 'U'; printf(" DT does not match the reference value of %15.8e\n", dtref); } } else { printf(" Unknown class\n"); } if (*class != 'U') { printf(" Comparison of RMS-norms of residual\n"); } else { printf(" RMS-norms of residual\n"); } for (m = 0; m < 5; m++) { if (*class == 'U') { printf(" %2d%20.13e\n", m, xcr[m]); } else if (xcrdif[m] > epsilon) { *verified = FALSE; printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n", m, xcr[m], xcrref[m], xcrdif[m]); } else { printf(" %2d%20.13e%20.13e%20.13e\n", m, xcr[m], xcrref[m], xcrdif[m]); } } if (*class != 'U') { printf(" Comparison of RMS-norms of solution error\n"); } else { printf(" RMS-norms of solution error\n"); } for (m = 0; m < 5; m++) { if (*class == 'U') { printf(" %2d%20.13e\n", m, xce[m]); } else if (xcedif[m] > epsilon) { *verified = FALSE; printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n", m, xce[m], xceref[m], xcedif[m]); } else { printf(" %2d%20.13e%20.13e%20.13e\n", m, xce[m], xceref[m], xcedif[m]); } } if (*class == 'U') { printf(" No reference values provided\n"); printf(" No verification performed\n"); } else if (*verified == TRUE) { printf(" Verification Successful\n"); } else { printf(" Verification failed\n"); } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void x_solve(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c c Performs line solves in X direction by first factoring c the block-tridiagonal matrix into an upper triangular matrix, c and then performing back substitution to solve for the unknown c vectors of each line. c c Make sure we treat elements zero to cell_size in the direction c of the sweep. 
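c c  The solve has three phases: lhsx() assembles the block-tridiagonal system (5x5 blocks A, B, C for every point of each grid line), x_solve_cell() performs the forward elimination sweep, and x_backsubstitute() recovers the solution by back substitution.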
c c-------------------------------------------------------------------*/ lhsx(); x_solve_cell(); x_backsubstitute(); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void x_backsubstitute(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c back solve: if last cell, then generate U(isize)=rhs(isize) c else assume U(isize) is loaded in unpack backsub_info c so just use it c after call u(istart) will be sent to next cell c-------------------------------------------------------------------*/ int i, j, k, m, n; for (i = grid_points[0]-2; i >= 0; i--) { #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < BLOCK_SIZE; m++) { for (n = 0; n < BLOCK_SIZE; n++) { rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][CC][m][n]*rhs[i+1][j][k][n]; } } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void x_solve_cell(void) { /*-------------------------------------------------------------------- c performs gaussian elimination on this cell. c c assumes that unpacking routines for non-first cells c preload C' and rhs' from previous cell. c c assumed send happens outside this routine, but that c c'(IMAX) and rhs'(IMAX) will be sent to next cell c-------------------------------------------------------------------*/ int i,j,k,isize; isize = grid_points[0]-1; /*-------------------------------------------------------------------- c outer most do loops - sweeping in i direction c-------------------------------------------------------------------*/ #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c multiply c(0,j,k) by b_inverse and copy back to c c multiply rhs(0) by b_inverse(0) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[0][j][k][BB], lhs[0][j][k][CC], rhs[0][j][k] ); } } /*-------------------------------------------------------------------- c begin inner most do loop c do all the elements of the cell unless last c-------------------------------------------------------------------*/ for (i = 1; i < isize; i++) { #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c rhs(i) = rhs(i) - A*rhs(i-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[i][j][k][AA], rhs[i-1][j][k], rhs[i][j][k]); /*-------------------------------------------------------------------- c B(i) = B(i) - C(i-1)*A(i) c-------------------------------------------------------------------*/ matmul_sub(lhs[i][j][k][AA], lhs[i-1][j][k][CC], lhs[i][j][k][BB]); /*-------------------------------------------------------------------- c multiply c(i,j,k) by b_inverse and copy back to c c multiply rhs(i,j,k) by b_inverse(i,j,k) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[i][j][k][BB], lhs[i][j][k][CC], rhs[i][j][k] ); } } } #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { 
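/*-------------------------------------------------------------------- c  last cell of the sweep (i = isize): eliminate the sub-diagonal block and scale the row by B inverse; only the rhs is needed here, so binvrhs is used instead of binvcrhs c-------------------------------------------------------------------*/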
/*-------------------------------------------------------------------- c rhs(isize) = rhs(isize) - A*rhs(isize-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[isize][j][k][AA], rhs[isize-1][j][k], rhs[isize][j][k]); /*-------------------------------------------------------------------- c B(isize) = B(isize) - C(isize-1)*A(isize) c-------------------------------------------------------------------*/ matmul_sub(lhs[isize][j][k][AA], lhs[isize-1][j][k][CC], lhs[isize][j][k][BB]); /*-------------------------------------------------------------------- c multiply rhs() by b_inverse() and copy to rhs c-------------------------------------------------------------------*/ binvrhs( lhs[i][j][k][BB], rhs[i][j][k] ); } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c subtracts bvec=bvec - ablock*avec c-------------------------------------------------------------------*/ int i; for (i = 0; i < 5; i++) { /*-------------------------------------------------------------------- c rhs(i,ic,jc,kc,ccell) = rhs(i,ic,jc,kc,ccell) c $ - lhs[i,1,ablock,ia,ja,ka,acell)* c-------------------------------------------------------------------*/ bvec[i] = bvec[i] - ablock[i][0]*avec[0] - ablock[i][1]*avec[1] - ablock[i][2]*avec[2] - ablock[i][3]*avec[3] - ablock[i][4]*avec[4]; } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void matmul_sub(double ablock[5][5], double bblock[5][5], double cblock[5][5]) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c subtracts a(i,j,k) X b(i,j,k) from c(i,j,k) c-------------------------------------------------------------------*/ int j; for (j = 0; j < 5; j++) { cblock[0][j] = cblock[0][j] - ablock[0][0]*bblock[0][j] - ablock[0][1]*bblock[1][j] - ablock[0][2]*bblock[2][j] - ablock[0][3]*bblock[3][j] - ablock[0][4]*bblock[4][j]; cblock[1][j] = cblock[1][j] - ablock[1][0]*bblock[0][j] - ablock[1][1]*bblock[1][j] - ablock[1][2]*bblock[2][j] - ablock[1][3]*bblock[3][j] - ablock[1][4]*bblock[4][j]; cblock[2][j] = cblock[2][j] - ablock[2][0]*bblock[0][j] - ablock[2][1]*bblock[1][j] - ablock[2][2]*bblock[2][j] - ablock[2][3]*bblock[3][j] - ablock[2][4]*bblock[4][j]; cblock[3][j] = cblock[3][j] - ablock[3][0]*bblock[0][j] - ablock[3][1]*bblock[1][j] - ablock[3][2]*bblock[2][j] - ablock[3][3]*bblock[3][j] - ablock[3][4]*bblock[4][j]; cblock[4][j] = cblock[4][j] - ablock[4][0]*bblock[0][j] - ablock[4][1]*bblock[1][j] - ablock[4][2]*bblock[2][j] - ablock[4][3]*bblock[3][j] - ablock[4][4]*bblock[4][j]; } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void binvcrhs(double llhs[5][5], double c[5][5], double r[5]) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ double pivot, coeff; 
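/*-------------------------------------------------------------------- c  Gauss-Jordan elimination without pivoting on the 5x5 block llhs; the same row operations are applied to the coupling block c and the right hand side r, so on return c and r effectively hold llhs^-1 * c and llhs^-1 * r c-------------------------------------------------------------------*/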
/*-------------------------------------------------------------------- c c-------------------------------------------------------------------*/ pivot = 1.00/llhs[0][0]; llhs[0][1] = llhs[0][1]*pivot; llhs[0][2] = llhs[0][2]*pivot; llhs[0][3] = llhs[0][3]*pivot; llhs[0][4] = llhs[0][4]*pivot; c[0][0] = c[0][0]*pivot; c[0][1] = c[0][1]*pivot; c[0][2] = c[0][2]*pivot; c[0][3] = c[0][3]*pivot; c[0][4] = c[0][4]*pivot; r[0] = r[0] *pivot; coeff = llhs[1][0]; llhs[1][1]= llhs[1][1] - coeff*llhs[0][1]; llhs[1][2]= llhs[1][2] - coeff*llhs[0][2]; llhs[1][3]= llhs[1][3] - coeff*llhs[0][3]; llhs[1][4]= llhs[1][4] - coeff*llhs[0][4]; c[1][0] = c[1][0] - coeff*c[0][0]; c[1][1] = c[1][1] - coeff*c[0][1]; c[1][2] = c[1][2] - coeff*c[0][2]; c[1][3] = c[1][3] - coeff*c[0][3]; c[1][4] = c[1][4] - coeff*c[0][4]; r[1] = r[1] - coeff*r[0]; coeff = llhs[2][0]; llhs[2][1]= llhs[2][1] - coeff*llhs[0][1]; llhs[2][2]= llhs[2][2] - coeff*llhs[0][2]; llhs[2][3]= llhs[2][3] - coeff*llhs[0][3]; llhs[2][4]= llhs[2][4] - coeff*llhs[0][4]; c[2][0] = c[2][0] - coeff*c[0][0]; c[2][1] = c[2][1] - coeff*c[0][1]; c[2][2] = c[2][2] - coeff*c[0][2]; c[2][3] = c[2][3] - coeff*c[0][3]; c[2][4] = c[2][4] - coeff*c[0][4]; r[2] = r[2] - coeff*r[0]; coeff = llhs[3][0]; llhs[3][1]= llhs[3][1] - coeff*llhs[0][1]; llhs[3][2]= llhs[3][2] - coeff*llhs[0][2]; llhs[3][3]= llhs[3][3] - coeff*llhs[0][3]; llhs[3][4]= llhs[3][4] - coeff*llhs[0][4]; c[3][0] = c[3][0] - coeff*c[0][0]; c[3][1] = c[3][1] - coeff*c[0][1]; c[3][2] = c[3][2] - coeff*c[0][2]; c[3][3] = c[3][3] - coeff*c[0][3]; c[3][4] = c[3][4] - coeff*c[0][4]; r[3] = r[3] - coeff*r[0]; coeff = llhs[4][0]; llhs[4][1]= llhs[4][1] - coeff*llhs[0][1]; llhs[4][2]= llhs[4][2] - coeff*llhs[0][2]; llhs[4][3]= llhs[4][3] - coeff*llhs[0][3]; llhs[4][4]= llhs[4][4] - coeff*llhs[0][4]; c[4][0] = c[4][0] - coeff*c[0][0]; c[4][1] = c[4][1] - coeff*c[0][1]; c[4][2] = c[4][2] - coeff*c[0][2]; c[4][3] = c[4][3] - coeff*c[0][3]; c[4][4] = c[4][4] - coeff*c[0][4]; r[4] = r[4] - coeff*r[0]; pivot = 1.00/llhs[1][1]; llhs[1][2] = llhs[1][2]*pivot; llhs[1][3] = llhs[1][3]*pivot; llhs[1][4] = llhs[1][4]*pivot; c[1][0] = c[1][0]*pivot; c[1][1] = c[1][1]*pivot; c[1][2] = c[1][2]*pivot; c[1][3] = c[1][3]*pivot; c[1][4] = c[1][4]*pivot; r[1] = r[1] *pivot; coeff = llhs[0][1]; llhs[0][2]= llhs[0][2] - coeff*llhs[1][2]; llhs[0][3]= llhs[0][3] - coeff*llhs[1][3]; llhs[0][4]= llhs[0][4] - coeff*llhs[1][4]; c[0][0] = c[0][0] - coeff*c[1][0]; c[0][1] = c[0][1] - coeff*c[1][1]; c[0][2] = c[0][2] - coeff*c[1][2]; c[0][3] = c[0][3] - coeff*c[1][3]; c[0][4] = c[0][4] - coeff*c[1][4]; r[0] = r[0] - coeff*r[1]; coeff = llhs[2][1]; llhs[2][2]= llhs[2][2] - coeff*llhs[1][2]; llhs[2][3]= llhs[2][3] - coeff*llhs[1][3]; llhs[2][4]= llhs[2][4] - coeff*llhs[1][4]; c[2][0] = c[2][0] - coeff*c[1][0]; c[2][1] = c[2][1] - coeff*c[1][1]; c[2][2] = c[2][2] - coeff*c[1][2]; c[2][3] = c[2][3] - coeff*c[1][3]; c[2][4] = c[2][4] - coeff*c[1][4]; r[2] = r[2] - coeff*r[1]; coeff = llhs[3][1]; llhs[3][2]= llhs[3][2] - coeff*llhs[1][2]; llhs[3][3]= llhs[3][3] - coeff*llhs[1][3]; llhs[3][4]= llhs[3][4] - coeff*llhs[1][4]; c[3][0] = c[3][0] - coeff*c[1][0]; c[3][1] = c[3][1] - coeff*c[1][1]; c[3][2] = c[3][2] - coeff*c[1][2]; c[3][3] = c[3][3] - coeff*c[1][3]; c[3][4] = c[3][4] - coeff*c[1][4]; r[3] = r[3] - coeff*r[1]; coeff = llhs[4][1]; llhs[4][2]= llhs[4][2] - coeff*llhs[1][2]; llhs[4][3]= llhs[4][3] - coeff*llhs[1][3]; llhs[4][4]= llhs[4][4] - coeff*llhs[1][4]; c[4][0] = c[4][0] - coeff*c[1][0]; c[4][1] = c[4][1] - coeff*c[1][1]; c[4][2] = 
c[4][2] - coeff*c[1][2]; c[4][3] = c[4][3] - coeff*c[1][3]; c[4][4] = c[4][4] - coeff*c[1][4]; r[4] = r[4] - coeff*r[1]; pivot = 1.00/llhs[2][2]; llhs[2][3] = llhs[2][3]*pivot; llhs[2][4] = llhs[2][4]*pivot; c[2][0] = c[2][0]*pivot; c[2][1] = c[2][1]*pivot; c[2][2] = c[2][2]*pivot; c[2][3] = c[2][3]*pivot; c[2][4] = c[2][4]*pivot; r[2] = r[2] *pivot; coeff = llhs[0][2]; llhs[0][3]= llhs[0][3] - coeff*llhs[2][3]; llhs[0][4]= llhs[0][4] - coeff*llhs[2][4]; c[0][0] = c[0][0] - coeff*c[2][0]; c[0][1] = c[0][1] - coeff*c[2][1]; c[0][2] = c[0][2] - coeff*c[2][2]; c[0][3] = c[0][3] - coeff*c[2][3]; c[0][4] = c[0][4] - coeff*c[2][4]; r[0] = r[0] - coeff*r[2]; coeff = llhs[1][2]; llhs[1][3]= llhs[1][3] - coeff*llhs[2][3]; llhs[1][4]= llhs[1][4] - coeff*llhs[2][4]; c[1][0] = c[1][0] - coeff*c[2][0]; c[1][1] = c[1][1] - coeff*c[2][1]; c[1][2] = c[1][2] - coeff*c[2][2]; c[1][3] = c[1][3] - coeff*c[2][3]; c[1][4] = c[1][4] - coeff*c[2][4]; r[1] = r[1] - coeff*r[2]; coeff = llhs[3][2]; llhs[3][3]= llhs[3][3] - coeff*llhs[2][3]; llhs[3][4]= llhs[3][4] - coeff*llhs[2][4]; c[3][0] = c[3][0] - coeff*c[2][0]; c[3][1] = c[3][1] - coeff*c[2][1]; c[3][2] = c[3][2] - coeff*c[2][2]; c[3][3] = c[3][3] - coeff*c[2][3]; c[3][4] = c[3][4] - coeff*c[2][4]; r[3] = r[3] - coeff*r[2]; coeff = llhs[4][2]; llhs[4][3]= llhs[4][3] - coeff*llhs[2][3]; llhs[4][4]= llhs[4][4] - coeff*llhs[2][4]; c[4][0] = c[4][0] - coeff*c[2][0]; c[4][1] = c[4][1] - coeff*c[2][1]; c[4][2] = c[4][2] - coeff*c[2][2]; c[4][3] = c[4][3] - coeff*c[2][3]; c[4][4] = c[4][4] - coeff*c[2][4]; r[4] = r[4] - coeff*r[2]; pivot = 1.00/llhs[3][3]; llhs[3][4] = llhs[3][4]*pivot; c[3][0] = c[3][0]*pivot; c[3][1] = c[3][1]*pivot; c[3][2] = c[3][2]*pivot; c[3][3] = c[3][3]*pivot; c[3][4] = c[3][4]*pivot; r[3] = r[3] *pivot; coeff = llhs[0][3]; llhs[0][4]= llhs[0][4] - coeff*llhs[3][4]; c[0][0] = c[0][0] - coeff*c[3][0]; c[0][1] = c[0][1] - coeff*c[3][1]; c[0][2] = c[0][2] - coeff*c[3][2]; c[0][3] = c[0][3] - coeff*c[3][3]; c[0][4] = c[0][4] - coeff*c[3][4]; r[0] = r[0] - coeff*r[3]; coeff = llhs[1][3]; llhs[1][4]= llhs[1][4] - coeff*llhs[3][4]; c[1][0] = c[1][0] - coeff*c[3][0]; c[1][1] = c[1][1] - coeff*c[3][1]; c[1][2] = c[1][2] - coeff*c[3][2]; c[1][3] = c[1][3] - coeff*c[3][3]; c[1][4] = c[1][4] - coeff*c[3][4]; r[1] = r[1] - coeff*r[3]; coeff = llhs[2][3]; llhs[2][4]= llhs[2][4] - coeff*llhs[3][4]; c[2][0] = c[2][0] - coeff*c[3][0]; c[2][1] = c[2][1] - coeff*c[3][1]; c[2][2] = c[2][2] - coeff*c[3][2]; c[2][3] = c[2][3] - coeff*c[3][3]; c[2][4] = c[2][4] - coeff*c[3][4]; r[2] = r[2] - coeff*r[3]; coeff = llhs[4][3]; llhs[4][4]= llhs[4][4] - coeff*llhs[3][4]; c[4][0] = c[4][0] - coeff*c[3][0]; c[4][1] = c[4][1] - coeff*c[3][1]; c[4][2] = c[4][2] - coeff*c[3][2]; c[4][3] = c[4][3] - coeff*c[3][3]; c[4][4] = c[4][4] - coeff*c[3][4]; r[4] = r[4] - coeff*r[3]; pivot = 1.00/llhs[4][4]; c[4][0] = c[4][0]*pivot; c[4][1] = c[4][1]*pivot; c[4][2] = c[4][2]*pivot; c[4][3] = c[4][3]*pivot; c[4][4] = c[4][4]*pivot; r[4] = r[4] *pivot; coeff = llhs[0][4]; c[0][0] = c[0][0] - coeff*c[4][0]; c[0][1] = c[0][1] - coeff*c[4][1]; c[0][2] = c[0][2] - coeff*c[4][2]; c[0][3] = c[0][3] - coeff*c[4][3]; c[0][4] = c[0][4] - coeff*c[4][4]; r[0] = r[0] - coeff*r[4]; coeff = llhs[1][4]; c[1][0] = c[1][0] - coeff*c[4][0]; c[1][1] = c[1][1] - coeff*c[4][1]; c[1][2] = c[1][2] - coeff*c[4][2]; c[1][3] = c[1][3] - coeff*c[4][3]; c[1][4] = c[1][4] - coeff*c[4][4]; r[1] = r[1] - coeff*r[4]; coeff = llhs[2][4]; c[2][0] = c[2][0] - coeff*c[4][0]; c[2][1] = c[2][1] - coeff*c[4][1]; c[2][2] = 
c[2][2] - coeff*c[4][2]; c[2][3] = c[2][3] - coeff*c[4][3]; c[2][4] = c[2][4] - coeff*c[4][4]; r[2] = r[2] - coeff*r[4]; coeff = llhs[3][4]; c[3][0] = c[3][0] - coeff*c[4][0]; c[3][1] = c[3][1] - coeff*c[4][1]; c[3][2] = c[3][2] - coeff*c[4][2]; c[3][3] = c[3][3] - coeff*c[4][3]; c[3][4] = c[3][4] - coeff*c[4][4]; r[3] = r[3] - coeff*r[4]; } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void binvrhs( double llhs[5][5], double r[5] ) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ double pivot, coeff; /*-------------------------------------------------------------------- c c-------------------------------------------------------------------*/ pivot = 1.00/llhs[0][0]; llhs[0][1] = llhs[0][1]*pivot; llhs[0][2] = llhs[0][2]*pivot; llhs[0][3] = llhs[0][3]*pivot; llhs[0][4] = llhs[0][4]*pivot; r[0] = r[0] *pivot; coeff = llhs[1][0]; llhs[1][1]= llhs[1][1] - coeff*llhs[0][1]; llhs[1][2]= llhs[1][2] - coeff*llhs[0][2]; llhs[1][3]= llhs[1][3] - coeff*llhs[0][3]; llhs[1][4]= llhs[1][4] - coeff*llhs[0][4]; r[1] = r[1] - coeff*r[0]; coeff = llhs[2][0]; llhs[2][1]= llhs[2][1] - coeff*llhs[0][1]; llhs[2][2]= llhs[2][2] - coeff*llhs[0][2]; llhs[2][3]= llhs[2][3] - coeff*llhs[0][3]; llhs[2][4]= llhs[2][4] - coeff*llhs[0][4]; r[2] = r[2] - coeff*r[0]; coeff = llhs[3][0]; llhs[3][1]= llhs[3][1] - coeff*llhs[0][1]; llhs[3][2]= llhs[3][2] - coeff*llhs[0][2]; llhs[3][3]= llhs[3][3] - coeff*llhs[0][3]; llhs[3][4]= llhs[3][4] - coeff*llhs[0][4]; r[3] = r[3] - coeff*r[0]; coeff = llhs[4][0]; llhs[4][1]= llhs[4][1] - coeff*llhs[0][1]; llhs[4][2]= llhs[4][2] - coeff*llhs[0][2]; llhs[4][3]= llhs[4][3] - coeff*llhs[0][3]; llhs[4][4]= llhs[4][4] - coeff*llhs[0][4]; r[4] = r[4] - coeff*r[0]; pivot = 1.00/llhs[1][1]; llhs[1][2] = llhs[1][2]*pivot; llhs[1][3] = llhs[1][3]*pivot; llhs[1][4] = llhs[1][4]*pivot; r[1] = r[1] *pivot; coeff = llhs[0][1]; llhs[0][2]= llhs[0][2] - coeff*llhs[1][2]; llhs[0][3]= llhs[0][3] - coeff*llhs[1][3]; llhs[0][4]= llhs[0][4] - coeff*llhs[1][4]; r[0] = r[0] - coeff*r[1]; coeff = llhs[2][1]; llhs[2][2]= llhs[2][2] - coeff*llhs[1][2]; llhs[2][3]= llhs[2][3] - coeff*llhs[1][3]; llhs[2][4]= llhs[2][4] - coeff*llhs[1][4]; r[2] = r[2] - coeff*r[1]; coeff = llhs[3][1]; llhs[3][2]= llhs[3][2] - coeff*llhs[1][2]; llhs[3][3]= llhs[3][3] - coeff*llhs[1][3]; llhs[3][4]= llhs[3][4] - coeff*llhs[1][4]; r[3] = r[3] - coeff*r[1]; coeff = llhs[4][1]; llhs[4][2]= llhs[4][2] - coeff*llhs[1][2]; llhs[4][3]= llhs[4][3] - coeff*llhs[1][3]; llhs[4][4]= llhs[4][4] - coeff*llhs[1][4]; r[4] = r[4] - coeff*r[1]; pivot = 1.00/llhs[2][2]; llhs[2][3] = llhs[2][3]*pivot; llhs[2][4] = llhs[2][4]*pivot; r[2] = r[2] *pivot; coeff = llhs[0][2]; llhs[0][3]= llhs[0][3] - coeff*llhs[2][3]; llhs[0][4]= llhs[0][4] - coeff*llhs[2][4]; r[0] = r[0] - coeff*r[2]; coeff = llhs[1][2]; llhs[1][3]= llhs[1][3] - coeff*llhs[2][3]; llhs[1][4]= llhs[1][4] - coeff*llhs[2][4]; r[1] = r[1] - coeff*r[2]; coeff = llhs[3][2]; llhs[3][3]= llhs[3][3] - coeff*llhs[2][3]; llhs[3][4]= llhs[3][4] - coeff*llhs[2][4]; r[3] = r[3] - coeff*r[2]; coeff = llhs[4][2]; llhs[4][3]= llhs[4][3] - coeff*llhs[2][3]; llhs[4][4]= llhs[4][4] - coeff*llhs[2][4]; r[4] = r[4] - coeff*r[2]; pivot = 1.00/llhs[3][3]; llhs[3][4] = llhs[3][4]*pivot; r[3] = r[3] *pivot; coeff = llhs[0][3]; llhs[0][4]= llhs[0][4] - coeff*llhs[3][4]; r[0] = r[0] - coeff*r[3]; coeff = llhs[1][3]; 
llhs[1][4]= llhs[1][4] - coeff*llhs[3][4]; r[1] = r[1] - coeff*r[3]; coeff = llhs[2][3]; llhs[2][4]= llhs[2][4] - coeff*llhs[3][4]; r[2] = r[2] - coeff*r[3]; coeff = llhs[4][3]; llhs[4][4]= llhs[4][4] - coeff*llhs[3][4]; r[4] = r[4] - coeff*r[3]; pivot = 1.00/llhs[4][4]; r[4] = r[4] *pivot; coeff = llhs[0][4]; r[0] = r[0] - coeff*r[4]; coeff = llhs[1][4]; r[1] = r[1] - coeff*r[4]; coeff = llhs[2][4]; r[2] = r[2] - coeff*r[4]; coeff = llhs[3][4]; r[3] = r[3] - coeff*r[4]; } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void y_solve(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Performs line solves in Y direction by first factoring c the block-tridiagonal matrix into an upper triangular matrix][ c and then performing back substitution to solve for the unknow c vectors of each line. c c Make sure we treat elements zero to cell_size in the direction c of the sweep. c-------------------------------------------------------------------*/ lhsy(); y_solve_cell(); y_backsubstitute(); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void y_backsubstitute(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c back solve: if last cell][ then generate U(jsize)=rhs(jsize) c else assume U(jsize) is loaded in un pack backsub_info c so just use it c after call u(jstart) will be sent to next cell c-------------------------------------------------------------------*/ int i, j, k, m, n; for (j = grid_points[1]-2; j >= 0; j--) { #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < BLOCK_SIZE; m++) { for (n = 0; n < BLOCK_SIZE; n++) { rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][CC][m][n]*rhs[i][j+1][k][n]; } } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void y_solve_cell(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c performs guaussian elimination on this cell. c c assumes that unpacking routines for non-first cells c preload C' and rhs' from previous cell. 
c c assumed send happens outside this routine, but that c c'(JMAX) and rhs'(JMAX) will be sent to next cell c-------------------------------------------------------------------*/ int i, j, k, jsize; jsize = grid_points[1]-1; #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c multiply c(i,0,k) by b_inverse and copy back to c c multiply rhs(0) by b_inverse(0) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[i][0][k][BB], lhs[i][0][k][CC], rhs[i][0][k] ); } } /*-------------------------------------------------------------------- c begin inner most do loop c do all the elements of the cell unless last c-------------------------------------------------------------------*/ for (j = 1; j < jsize; j++) { #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c subtract A*lhs_vector(j-1) from lhs_vector(j) c c rhs(j) = rhs(j) - A*rhs(j-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[i][j][k][AA], rhs[i][j-1][k], rhs[i][j][k]); /*-------------------------------------------------------------------- c B(j) = B(j) - C(j-1)*A(j) c-------------------------------------------------------------------*/ matmul_sub(lhs[i][j][k][AA], lhs[i][j-1][k][CC], lhs[i][j][k][BB]); /*-------------------------------------------------------------------- c multiply c(i,j,k) by b_inverse and copy back to c c multiply rhs(i,1,k) by b_inverse(i,1,k) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[i][j][k][BB], lhs[i][j][k][CC], rhs[i][j][k] ); } } } #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c rhs(jsize) = rhs(jsize) - A*rhs(jsize-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[i][jsize][k][AA], rhs[i][jsize-1][k], rhs[i][jsize][k]); /*-------------------------------------------------------------------- c B(jsize) = B(jsize) - C(jsize-1)*A(jsize) c call matmul_sub(aa,i,jsize,k,c, c $ cc,i,jsize-1,k,c,BB,i,jsize,k) c-------------------------------------------------------------------*/ matmul_sub(lhs[i][jsize][k][AA], lhs[i][jsize-1][k][CC], lhs[i][jsize][k][BB]); /*-------------------------------------------------------------------- c multiply rhs(jsize) by b_inverse(jsize) and copy to rhs c-------------------------------------------------------------------*/ binvrhs( lhs[i][jsize][k][BB], rhs[i][jsize][k] ); } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void z_solve(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Performs line solves in Z direction by first factoring c the block-tridiagonal matrix into an upper triangular matrix, c and then performing back substitution to solve for the unknow c vectors of each line. c c Make sure we treat elements zero to cell_size in the direction c of the sweep. 
c-------------------------------------------------------------------*/ lhsz(); z_solve_cell(); z_backsubstitute(); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void z_backsubstitute(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c back solve: if last cell, then generate U(ksize)=rhs(ksize) c else assume U(ksize) is loaded in un pack backsub_info c so just use it c after call u(kstart) will be sent to next cell c-------------------------------------------------------------------*/ int i, j, k, m, n; #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = grid_points[2]-2; k >= 0; k--) { for (m = 0; m < BLOCK_SIZE; m++) { for (n = 0; n < BLOCK_SIZE; n++) { rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][CC][m][n]*rhs[i][j][k+1][n]; } } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void z_solve_cell(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c performs guaussian elimination on this cell. c c assumes that unpacking routines for non-first cells c preload C' and rhs' from previous cell. c c assumed send happens outside this routine, but that c c'(KMAX) and rhs'(KMAX) will be sent to next cell. c-------------------------------------------------------------------*/ int i,j,k,ksize; ksize = grid_points[2]-1; /*-------------------------------------------------------------------- c outer most do loops - sweeping in i direction c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { /*-------------------------------------------------------------------- c multiply c(i,j,0) by b_inverse and copy back to c c multiply rhs(0) by b_inverse(0) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[i][j][0][BB], lhs[i][j][0][CC], rhs[i][j][0] ); } } /*-------------------------------------------------------------------- c begin inner most do loop c do all the elements of the cell unless last c-------------------------------------------------------------------*/ for (k = 1; k < ksize; k++) { #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { /*-------------------------------------------------------------------- c subtract A*lhs_vector(k-1) from lhs_vector(k) c c rhs(k) = rhs(k) - A*rhs(k-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[i][j][k][AA], rhs[i][j][k-1], rhs[i][j][k]); /*-------------------------------------------------------------------- c B(k) = B(k) - C(k-1)*A(k) c call matmul_sub(aa,i,j,k,c,cc,i,j,k-1,c,BB,i,j,k) c-------------------------------------------------------------------*/ matmul_sub(lhs[i][j][k][AA], lhs[i][j][k-1][CC], lhs[i][j][k][BB]); /*-------------------------------------------------------------------- c multiply c(i,j,k) by b_inverse and copy back to c c multiply rhs(i,j,1) by b_inverse(i,j,1) and copy to rhs 
c-------------------------------------------------------------------*/ binvcrhs( lhs[i][j][k][BB], lhs[i][j][k][CC], rhs[i][j][k] ); } } } /*-------------------------------------------------------------------- c Now finish up special cases for last cell c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { /*-------------------------------------------------------------------- c rhs(ksize) = rhs(ksize) - A*rhs(ksize-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[i][j][ksize][AA], rhs[i][j][ksize-1], rhs[i][j][ksize]); /*-------------------------------------------------------------------- c B(ksize) = B(ksize) - C(ksize-1)*A(ksize) c call matmul_sub(aa,i,j,ksize,c, c $ cc,i,j,ksize-1,c,BB,i,j,ksize) c-------------------------------------------------------------------*/ matmul_sub(lhs[i][j][ksize][AA], lhs[i][j][ksize-1][CC], lhs[i][j][ksize][BB]); /*-------------------------------------------------------------------- c multiply rhs(ksize) by b_inverse(ksize) and copy to rhs c-------------------------------------------------------------------*/ binvrhs( lhs[i][j][ksize][BB], rhs[i][j][ksize] ); } } }
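/*--------------------------------------------------------------------
c Editor's sketch (not part of the original source): y_solve_cell and
c z_solve_cell above apply the Thomas algorithm to a block-tridiagonal
c system with 5x5 blocks AA, BB, CC.  In the block form each division
c becomes a multiply by a block inverse (binvcrhs/binvrhs), each
c product a 5x5 matmul_sub, and the back substitution needs no divide
c because the diagonal block is inverted during the forward sweep.
c Reduced to scalar coefficients the same sweep is the routine below;
c the names a, b, cc, rhs and thomas_scalar are hypothetical.
c-------------------------------------------------------------------*/
/* scalar sketch of the forward-elimination / back-substitution sweep
static void thomas_scalar(int n, const double a[], double b[],
                          const double cc[], double rhs[]) {
    int j;
    for (j = 1; j < n; j++) {
        double coeff = a[j] / b[j-1];
        b[j]   = b[j]   - coeff*cc[j-1];
        rhs[j] = rhs[j] - coeff*rhs[j-1];
    }
    rhs[n-1] = rhs[n-1] / b[n-1];
    for (j = n-2; j >= 0; j--) {
        rhs[j] = ( rhs[j] - cc[j]*rhs[j+1] ) / b[j];
    }
}
*/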
GB_unaryop__ainv_uint64_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_uint64_fp64 // op(A') function: GB_tran__ainv_uint64_fp64 // C type: uint64_t // A type: double // cast: uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64) // unaryop: cij = -aij #define GB_ATYPE \ double #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, aij) \ uint64_t z ; GB_CAST_UNSIGNED(z,aij,64) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_uint64_fp64 ( uint64_t *Cx, // Cx and Ax may be aliased double *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_uint64_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
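//------------------------------------------------------------------------------
// Editor's sketch (not part of the generated file): per entry, the apply loop
// above expands to roughly the helper below: the double value is first cast to
// uint64_t with GB_CAST_UNSIGNED, and the additive inverse is then taken in
// uint64_t arithmetic.  The helper name ainv_uint64_fp64_entry is hypothetical.
//------------------------------------------------------------------------------

#if 0
static inline uint64_t ainv_uint64_fp64_entry (double aij)
{
    uint64_t z ;
    GB_CAST_UNSIGNED (z, aij, 64) ;   // cast: uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64)
    return (-z) ;                     // unaryop: cij = -aij (wraps modulo 2^64)
}
#endif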
GB_binop__isle_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isle_int64 // A.*B function (eWiseMult): GB_AemultB__isle_int64 // A*D function (colscale): GB_AxD__isle_int64 // D*A function (rowscale): GB_DxB__isle_int64 // C+=B function (dense accum): GB_Cdense_accumB__isle_int64 // C+=b function (dense accum): GB_Cdense_accumb__isle_int64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isle_int64 // C=scalar+B GB_bind1st__isle_int64 // C=scalar+B' GB_bind1st_tran__isle_int64 // C=A+scalar GB_bind2nd__isle_int64 // C=A'+scalar GB_bind2nd_tran__isle_int64 // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x <= y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_INT64 || GxB_NO_ISLE_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isle_int64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isle_int64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isle_int64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isle_int64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isle_int64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__isle_int64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT 
C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isle_int64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isle_int64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = Bx [p] ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isle_int64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = Ax [p] ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB_bind1st_tran__isle_int64 ( GrB_Matrix C, const GB_void *x_input, const 
GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB_bind2nd_tran__isle_int64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
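//------------------------------------------------------------------------------
// Editor's sketch (not part of the generated file): every kernel above reduces,
// per entry, to the scalar operator below.  The IS* comparators keep the
// operand type, so here the result is an int64_t holding 1 when x <= y and 0
// otherwise (see "C type: int64_t" above).  The helper name isle_int64_entry
// is hypothetical.
//------------------------------------------------------------------------------

#if 0
static inline int64_t isle_int64_entry (int64_t x, int64_t y)
{
    // BinaryOp: cij = (aij <= bij), stored as int64_t 0 or 1
    return ((int64_t) (x <= y)) ;
}
#endif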
cg.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 3.0 structured OpenMP C versions - CG This benchmark is an OpenMP C version of the NPB CG code. The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: M. Yarrow C. Kuszmaul OpenMP C version: S. Satoh 3.0 structure translation: F. Conti --------------------------------------------------------------------*/ /* c--------------------------------------------------------------------- c Note: please observe that in the routine conj_grad three c implementations of the sparse matrix-vector multiply have c been supplied. The default matrix-vector multiply is not c loop unrolled. The alternate implementations are unrolled c to a depth of 2 and unrolled to a depth of 8. Please c experiment with these to find the fastest for your particular c architecture. If reporting timing results, any of these three may c be used without penalty. c--------------------------------------------------------------------- */ #include "../common/npb-C.h" #include "npbparams.h" #define NZ NA*(NONZER+1)*(NONZER+1)+NA*(NONZER+2) /* global variables */ /* common /partit_size/ */ static int naa; static int nzz; static int firstrow; static int lastrow; static int firstcol; static int lastcol; /* common /main_int_mem/ */ static int colidx[NZ+1]; /* colidx[1:NZ] */ static int rowstr[NA+1+1]; /* rowstr[1:NA+1] */ static int iv[2*NA+1+1]; /* iv[1:2*NA+1] */ static int arow[NZ+1]; /* arow[1:NZ] */ static int acol[NZ+1]; /* acol[1:NZ] */ /* common /main_flt_mem/ */ static double v[NA+1+1]; /* v[1:NA+1] */ static double aelt[NZ+1]; /* aelt[1:NZ] */ static double a[NZ+1]; /* a[1:NZ] */ static double x[NA+2+1]; /* x[1:NA+2] */ static double z[NA+2+1]; /* z[1:NA+2] */ static double p[NA+2+1]; /* p[1:NA+2] */ static double q[NA+2+1]; /* q[1:NA+2] */ static double r[NA+2+1]; /* r[1:NA+2] */ //static double w[NA+2+1]; /* w[1:NA+2] */ /* common /urando/ */ static double amult; static double tran; /* function declarations */ static void conj_grad (int colidx[], int rowstr[], double x[], double z[], double a[], double p[], double q[], double r[], //double w[], double *rnorm); static void makea(int n, int nz, double a[], int colidx[], int rowstr[], int nonzer, int firstrow, int lastrow, int firstcol, int lastcol, double rcond, int arow[], int acol[], double aelt[], double v[], int iv[], double shift ); static void sparse(double a[], int colidx[], int rowstr[], int n, int arow[], int acol[], double aelt[], int firstrow, int lastrow, double x[], boolean mark[], int nzloc[], int nnza); static void sprnvc(int n, int nz, double v[], int iv[], int nzloc[], int mark[]); static int icnvrt(double x, int ipwr2); static void vecset(int n, double v[], int iv[], int *nzv, int i, double val); /*-------------------------------------------------------------------- program cg --------------------------------------------------------------------*/ 
int main(int argc, char **argv) { int i, j, k, it; int nthreads = 1; double zeta; double rnorm; double norm_temp11; double norm_temp12; double t, mflops; char class; boolean verified; double zeta_verify_value, epsilon; firstrow = 1; lastrow = NA; firstcol = 1; lastcol = NA; if (NA == 1400 && NONZER == 7 && NITER == 15 && SHIFT == 10.0) { class = 'S'; zeta_verify_value = 8.5971775078648; } else if (NA == 7000 && NONZER == 8 && NITER == 15 && SHIFT == 12.0) { class = 'W'; zeta_verify_value = 10.362595087124; } else if (NA == 14000 && NONZER == 11 && NITER == 15 && SHIFT == 20.0) { class = 'A'; zeta_verify_value = 17.130235054029; } else if (NA == 75000 && NONZER == 13 && NITER == 75 && SHIFT == 60.0) { class = 'B'; zeta_verify_value = 22.712745482631; } else if (NA == 150000 && NONZER == 15 && NITER == 75 && SHIFT == 110.0) { class = 'C'; zeta_verify_value = 28.973605592845; } else { class = 'U'; } printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version" " - CG Benchmark\n"); printf(" Size: %10d\n", NA); printf(" Iterations: %5d\n", NITER); naa = NA; nzz = NZ; /*-------------------------------------------------------------------- c Initialize random number generator c-------------------------------------------------------------------*/ tran = 314159265.0; amult = 1220703125.0; zeta = randlc( &tran, amult ); /*-------------------------------------------------------------------- c c-------------------------------------------------------------------*/ makea(naa, nzz, a, colidx, rowstr, NONZER, firstrow, lastrow, firstcol, lastcol, RCOND, arow, acol, aelt, v, iv, SHIFT); /*--------------------------------------------------------------------- c Note: as a result of the above call to makea: c values of j used in indexing rowstr go from 1 --> lastrow-firstrow+1 c values of colidx which are col indexes go from firstcol --> lastcol c So: c Shift the col index vals from actual (firstcol --> lastcol ) c to local, i.e., (1 --> lastcol-firstcol+1) c---------------------------------------------------------------------*/ { for (j = 1; j <= lastrow - firstrow + 1; j++) { for (k = rowstr[j]; k < rowstr[j+1]; k++) { colidx[k] = colidx[k] - firstcol + 1; } } /*-------------------------------------------------------------------- c set starting vector to (1, 1, .... 
1) c-------------------------------------------------------------------*/ for (i = 1; i <= NA+1; i++) { x[i] = 1.0; } for (j = 1; j <= lastcol-firstcol+1; j++) { q[j] = 0.0; z[j] = 0.0; r[j] = 0.0; p[j] = 0.0; } }// end omp parallel zeta = 0.0; /*------------------------------------------------------------------- c----> c Do one iteration untimed to init all code and data page tables c----> (then reinit, start timing, to niter its) c-------------------------------------------------------------------*/ for (it = 1; it <= 1; it++) { /*-------------------------------------------------------------------- c The call to the conjugate gradient routine: c-------------------------------------------------------------------*/ conj_grad (colidx, rowstr, x, z, a, p, q, r,/* w,*/ &rnorm); /*-------------------------------------------------------------------- c zeta = shift + 1/(x.z) c So, first: (x.z) c Also, find norm of z c So, first: (z.z) c-------------------------------------------------------------------*/ norm_temp11 = 0.0; norm_temp12 = 0.0; for (j = 1; j <= lastcol-firstcol+1; j++) { norm_temp11 = norm_temp11 + x[j]*z[j]; norm_temp12 = norm_temp12 + z[j]*z[j]; } norm_temp12 = 1.0 / sqrt( norm_temp12 ); /*-------------------------------------------------------------------- c Normalize z to obtain x c-------------------------------------------------------------------*/ for (j = 1; j <= lastcol-firstcol+1; j++) { x[j] = norm_temp12*z[j]; } } /* end of do one iteration untimed */ /*-------------------------------------------------------------------- c set starting vector to (1, 1, .... 1) c-------------------------------------------------------------------*/ for (i = 1; i <= NA+1; i++) { x[i] = 1.0; } zeta = 0.0; timer_clear( 1 ); timer_start( 1 ); /*-------------------------------------------------------------------- c----> c Main Iteration for inverse power method c----> c-------------------------------------------------------------------*/ for (it = 1; it <= NITER; it++) { /*-------------------------------------------------------------------- c The call to the conjugate gradient routine: c-------------------------------------------------------------------*/ conj_grad(colidx, rowstr, x, z, a, p, q, r/*, w*/, &rnorm); /*-------------------------------------------------------------------- c zeta = shift + 1/(x.z) c So, first: (x.z) c Also, find norm of z c So, first: (z.z) c-------------------------------------------------------------------*/ norm_temp11 = 0.0; norm_temp12 = 0.0; for (j = 1; j <= lastcol-firstcol+1; j++) { norm_temp11 = norm_temp11 + x[j]*z[j]; norm_temp12 = norm_temp12 + z[j]*z[j]; } norm_temp12 = 1.0 / sqrt( norm_temp12 ); zeta = SHIFT + 1.0 / norm_temp11; if( it == 1 ) { printf(" iteration ||r|| zeta\n"); } printf(" %5d %20.14e%20.13e\n", it, rnorm, zeta); /*-------------------------------------------------------------------- c Normalize z to obtain x c-------------------------------------------------------------------*/ for (j = 1; j <= lastcol-firstcol+1; j++) { x[j] = norm_temp12*z[j]; } } /* end of main iter inv pow meth */ { #if defined(_OPENMP) nthreads = omp_get_num_threads(); #endif /* _OPENMP */ } /* end parallel */ timer_stop( 1 ); /*-------------------------------------------------------------------- c End of timed section c-------------------------------------------------------------------*/ t = timer_read( 1 ); printf(" Benchmark completed\n"); epsilon = 1.0e-10; if (class != 'U') { if (fabs(zeta - zeta_verify_value) <= epsilon) { verified = TRUE; printf(" 
VERIFICATION SUCCESSFUL\n"); printf(" Zeta is %20.12e\n", zeta); printf(" Error is %20.12e\n", zeta - zeta_verify_value); } else { verified = FALSE; printf(" VERIFICATION FAILED\n"); printf(" Zeta %20.12e\n", zeta); printf(" The correct zeta is %20.12e\n", zeta_verify_value); } } else { verified = FALSE; printf(" Problem size unknown\n"); printf(" NO VERIFICATION PERFORMED\n"); } if ( t != 0.0 ) { mflops = (2.0*NITER*NA) * (3.0+(NONZER*(NONZER+1)) + 25.0*(5.0+(NONZER*(NONZER+1))) + 3.0 ) / t / 1000000.0; } else { mflops = 0.0; } c_print_results("CG", class, NA, 0, 0, NITER, nthreads, t, mflops, " floating point", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void conj_grad ( int colidx[], /* colidx[1:nzz] */ int rowstr[], /* rowstr[1:naa+1] */ double x[], /* x[*] */ double z[], /* z[*] */ double a[], /* a[1:nzz] */ double p[], /* p[*] */ double q[], /* q[*] */ double r[], /* r[*] */ //double w[], /* w[*] */ double *rnorm ) /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*--------------------------------------------------------------------- c Floaging point arrays here are named as in NPB1 spec discussion of c CG algorithm c---------------------------------------------------------------------*/ { static int callcount = 0; double d, sum, rho, rho0, alpha, beta; int i, j, k; int cgit, cgitmax = 25; rho = 0.0; /*-------------------------------------------------------------------- c Initialize the CG algorithm: c-------------------------------------------------------------------*/ { for (j = 1; j <= naa+1; j++) { q[j] = 0.0; z[j] = 0.0; r[j] = x[j]; p[j] = r[j]; //w[j] = 0.0; } /*-------------------------------------------------------------------- c rho = r.r c Now, obtain the norm of r: First, sum squares of r elements locally... c-------------------------------------------------------------------*/ for (j = 1; j <= lastcol-firstcol+1; j++) { rho = rho + r[j]*r[j]; } }/* end omp parallel */ /*-------------------------------------------------------------------- c----> c The conj grad iteration loop c----> c-------------------------------------------------------------------*/ for (cgit = 1; cgit <= cgitmax; cgit++) { rho0 = rho; d = 0.0; rho = 0.0; { /*-------------------------------------------------------------------- c q = A.p c The partition submatrix-vector multiply: use workspace w c--------------------------------------------------------------------- C C NOTE: this version of the multiply is actually (slightly: maybe %5) C faster on the sp2 on 16 nodes than is the unrolled-by-2 version C below. On the Cray t3d, the reverse is true, i.e., the C unrolled-by-two version is some 10% faster. C The unrolled-by-8 version below is significantly faster C on the Cray t3d - overall speed of code is 1.5 times faster. 
*/ /* rolled version */ for (j = 1; j <= lastrow-firstrow+1; j++) { sum = 0.0; for (k = rowstr[j]; k < rowstr[j+1]; k++) { sum = sum + a[k]*p[colidx[k]]; } //w[j] = sum; q[j] = sum; } /* unrolled-by-two version for (j = 1; j <= lastrow-firstrow+1; j++) { int iresidue; double sum1, sum2; i = rowstr[j]; iresidue = (rowstr[j+1]-i) % 2; sum1 = 0.0; sum2 = 0.0; if (iresidue == 1) sum1 = sum1 + a[i]*p[colidx[i]]; for (k = i+iresidue; k <= rowstr[j+1]-2; k += 2) { sum1 = sum1 + a[k] * p[colidx[k]]; sum2 = sum2 + a[k+1] * p[colidx[k+1]]; } w[j] = sum1 + sum2; } */ /* unrolled-by-8 version for (j = 1; j <= lastrow-firstrow+1; j++) { int iresidue; i = rowstr[j]; iresidue = (rowstr[j+1]-i) % 8; sum = 0.0; for (k = i; k <= i+iresidue-1; k++) { sum = sum + a[k] * p[colidx[k]]; } for (k = i+iresidue; k <= rowstr[j+1]-8; k += 8) { sum = sum + a[k ] * p[colidx[k ]] + a[k+1] * p[colidx[k+1]] + a[k+2] * p[colidx[k+2]] + a[k+3] * p[colidx[k+3]] + a[k+4] * p[colidx[k+4]] + a[k+5] * p[colidx[k+5]] + a[k+6] * p[colidx[k+6]] + a[k+7] * p[colidx[k+7]]; } w[j] = sum; } */ /* for (j = 1; j <= lastcol-firstcol+1; j++) { q[j] = w[j]; } */ /*-------------------------------------------------------------------- c Clear w for reuse... c-------------------------------------------------------------------*/ /* for (j = 1; j <= lastcol-firstcol+1; j++) { w[j] = 0.0; } */ /*-------------------------------------------------------------------- c Obtain p.q c-------------------------------------------------------------------*/ for (j = 1; j <= lastcol-firstcol+1; j++) { d = d + p[j]*q[j]; } /*-------------------------------------------------------------------- c Obtain alpha = rho / (p.q) c-------------------------------------------------------------------*/ //#pragma omp single alpha = rho0 / d; /*-------------------------------------------------------------------- c Save a temporary of rho c-------------------------------------------------------------------*/ /* rho0 = rho;*/ /*--------------------------------------------------------------------- c Obtain z = z + alpha*p c and r = r - alpha*q c---------------------------------------------------------------------*/ for (j = 1; j <= lastcol-firstcol+1; j++) { z[j] = z[j] + alpha*p[j]; r[j] = r[j] - alpha*q[j]; // } /*--------------------------------------------------------------------- c rho = r.r c Now, obtain the norm of r: First, sum squares of r elements locally... 
c---------------------------------------------------------------------*/ /* for (j = 1; j <= lastcol-firstcol+1; j++) {*/ rho = rho + r[j]*r[j]; } //#pragma omp barrier /*-------------------------------------------------------------------- c Obtain beta: c-------------------------------------------------------------------*/ //#pragma omp single beta = rho / rho0; /*-------------------------------------------------------------------- c p = r + beta*p c-------------------------------------------------------------------*/ for (j = 1; j <= lastcol-firstcol+1; j++) { p[j] = r[j] + beta*p[j]; } callcount++; } /* end omp parallel */ } /* end of do cgit=1,cgitmax */ /*--------------------------------------------------------------------- c Compute residual norm explicitly: ||r|| = ||x - A.z|| c First, form A.z c The partition submatrix-vector multiply c---------------------------------------------------------------------*/ sum = 0.0; { for (j = 1; j <= lastrow-firstrow+1; j++) { d = 0.0; for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) { d = d + a[k]*z[colidx[k]]; } r[j] = d; } /*-------------------------------------------------------------------- c At this point, r contains A.z c-------------------------------------------------------------------*/ for (j = 1; j <= lastcol-firstcol+1; j++) { d = x[j] - r[j]; sum = sum + d*d; } } //end omp parallel (*rnorm) = sqrt(sum); } /*--------------------------------------------------------------------- c generate the test problem for benchmark 6 c makea generates a sparse matrix with a c prescribed sparsity distribution c c parameter type usage c c input c c n i number of cols/rows of matrix c nz i nonzeros as declared array size c rcond r*8 condition number c shift r*8 main diagonal shift c c output c c a r*8 array for nonzeros c colidx i col indices c rowstr i row pointers c c workspace c c iv, arow, acol i c v, aelt r*8 c---------------------------------------------------------------------*/ static void makea( int n, int nz, double a[], /* a[1:nz] */ int colidx[], /* colidx[1:nz] */ int rowstr[], /* rowstr[1:n+1] */ int nonzer, int firstrow, int lastrow, int firstcol, int lastcol, double rcond, int arow[], /* arow[1:nz] */ int acol[], /* acol[1:nz] */ double aelt[], /* aelt[1:nz] */ double v[], /* v[1:n+1] */ int iv[], /* iv[1:2*n+1] */ double shift ) { int i, nnza, iouter, ivelt, ivelt1, irow, nzv; /*-------------------------------------------------------------------- c nonzer is approximately (int(sqrt(nnza /n))); c-------------------------------------------------------------------*/ double size, ratio, scale; int jcol; size = 1.0; ratio = pow(rcond, (1.0 / (double)n)); nnza = 0; /*--------------------------------------------------------------------- c Initialize colidx(n+1 .. 2n) to zero. 
c Used by sprnvc to mark nonzero positions c---------------------------------------------------------------------*/ for (i = 1; i <= n; i++) { colidx[n+i] = 0; } for (iouter = 1; iouter <= n; iouter++) { nzv = nonzer; sprnvc(n, nzv, v, iv, &(colidx[0]), &(colidx[n])); vecset(n, v, iv, &nzv, iouter, 0.5); for (ivelt = 1; ivelt <= nzv; ivelt++) { jcol = iv[ivelt]; if (jcol >= firstcol && jcol <= lastcol) { scale = size * v[ivelt]; for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) { irow = iv[ivelt1]; if (irow >= firstrow && irow <= lastrow) { nnza = nnza + 1; if (nnza > nz) { printf("Space for matrix elements exceeded in" " makea\n"); printf("nnza, nzmax = %d, %d\n", nnza, nz); printf("iouter = %d\n", iouter); exit(1); } acol[nnza] = jcol; arow[nnza] = irow; aelt[nnza] = v[ivelt1] * scale; } } } } size = size * ratio; } /*--------------------------------------------------------------------- c ... add the identity * rcond to the generated matrix to bound c the smallest eigenvalue from below by rcond c---------------------------------------------------------------------*/ for (i = firstrow; i <= lastrow; i++) { if (i >= firstcol && i <= lastcol) { iouter = n + i; nnza = nnza + 1; if (nnza > nz) { printf("Space for matrix elements exceeded in makea\n"); printf("nnza, nzmax = %d, %d\n", nnza, nz); printf("iouter = %d\n", iouter); exit(1); } acol[nnza] = i; arow[nnza] = i; aelt[nnza] = rcond - shift; } } /*--------------------------------------------------------------------- c ... make the sparse matrix from list of elements with duplicates c (v and iv are used as workspace) c---------------------------------------------------------------------*/ sparse(a, colidx, rowstr, n, arow, acol, aelt, firstrow, lastrow, v, &(iv[0]), &(iv[n]), nnza); } /*--------------------------------------------------- c generate a sparse matrix from a list of c [col, row, element] tri c---------------------------------------------------*/ static void sparse( double a[], /* a[1:*] */ int colidx[], /* colidx[1:*] */ int rowstr[], /* rowstr[1:*] */ int n, int arow[], /* arow[1:*] */ int acol[], /* acol[1:*] */ double aelt[], /* aelt[1:*] */ int firstrow, int lastrow, double x[], /* x[1:n] */ boolean mark[], /* mark[1:n] */ int nzloc[], /* nzloc[1:n] */ int nnza) /*--------------------------------------------------------------------- c rows range from firstrow to lastrow c the rowstr pointers are defined for nrows = lastrow-firstrow+1 values c---------------------------------------------------------------------*/ { int nrows; int i, j, jajp1, nza, k, nzrow; double xi; /*-------------------------------------------------------------------- c how many rows of result c-------------------------------------------------------------------*/ nrows = lastrow - firstrow + 1; /*-------------------------------------------------------------------- c ...count the number of triples in each row c-------------------------------------------------------------------*/ for (j = 1; j <= n; j++) { rowstr[j] = 0; mark[j] = FALSE; } rowstr[n+1] = 0; for (nza = 1; nza <= nnza; nza++) { j = (arow[nza] - firstrow + 1) + 1; rowstr[j] = rowstr[j] + 1; } rowstr[1] = 1; for (j = 2; j <= nrows+1; j++) { rowstr[j] = rowstr[j] + rowstr[j-1]; } /*--------------------------------------------------------------------- c ... rowstr(j) now is the location of the first nonzero c of row j of a c---------------------------------------------------------------------*/ /*--------------------------------------------------------------------- c ... 
preload data pages c---------------------------------------------------------------------*/ for(j = 0;j <= nrows-1;j++) { for(k = rowstr[j];k <= rowstr[j+1]-1;k++) a[k] = 0.0; } /*-------------------------------------------------------------------- c ... do a bucket sort of the triples on the row index c-------------------------------------------------------------------*/ for (nza = 1; nza <= nnza; nza++) { j = arow[nza] - firstrow + 1; k = rowstr[j]; a[k] = aelt[nza]; colidx[k] = acol[nza]; rowstr[j] = rowstr[j] + 1; } /*-------------------------------------------------------------------- c ... rowstr(j) now points to the first element of row j+1 c-------------------------------------------------------------------*/ for (j = nrows; j >= 1; j--) { rowstr[j+1] = rowstr[j]; } rowstr[1] = 1; /*-------------------------------------------------------------------- c ... generate the actual output rows by adding elements c-------------------------------------------------------------------*/ nza = 0; for (i = 1; i <= n; i++) { x[i] = 0.0; mark[i] = FALSE; } jajp1 = rowstr[1]; for (j = 1; j <= nrows; j++) { nzrow = 0; /*-------------------------------------------------------------------- c ...loop over the jth row of a c-------------------------------------------------------------------*/ for (k = jajp1; k < rowstr[j+1]; k++) { i = colidx[k]; x[i] = x[i] + a[k]; if ( mark[i] == FALSE && x[i] != 0.0) { mark[i] = TRUE; nzrow = nzrow + 1; nzloc[nzrow] = i; } } /*-------------------------------------------------------------------- c ... extract the nonzeros of this row c-------------------------------------------------------------------*/ for (k = 1; k <= nzrow; k++) { i = nzloc[k]; mark[i] = FALSE; xi = x[i]; x[i] = 0.0; if (xi != 0.0) { nza = nza + 1; a[nza] = xi; colidx[nza] = i; } } jajp1 = rowstr[j+1]; rowstr[j+1] = nza + rowstr[1]; } } /*--------------------------------------------------------------------- c generate a sparse n-vector (v, iv) c having nzv nonzeros c c mark(i) is set to 1 if position i is nonzero. c mark is all zero on entry and is reset to all zero before exit c this corrects a performance bug found by John G. Lewis, caused by c reinitialization of mark on every one of the n calls to sprnvc ---------------------------------------------------------------------*/ static void sprnvc( int n, int nz, double v[], /* v[1:*] */ int iv[], /* iv[1:*] */ int nzloc[], /* nzloc[1:n] */ int mark[] ) /* mark[1:n] */ { int nn1; int nzrow, nzv, ii, i; double vecelt, vecloc; nzv = 0; nzrow = 0; nn1 = 1; do { nn1 = 2 * nn1; } while (nn1 < n); /*-------------------------------------------------------------------- c nn1 is the smallest power of two not less than n c-------------------------------------------------------------------*/ while (nzv < nz) { vecelt = randlc(&tran, amult); /*-------------------------------------------------------------------- c generate an integer between 1 and n in a portable manner c-------------------------------------------------------------------*/ vecloc = randlc(&tran, amult); i = icnvrt(vecloc, nn1) + 1; if (i > n) continue; /*-------------------------------------------------------------------- c was this integer generated already? 
c-------------------------------------------------------------------*/ if (mark[i] == 0) { mark[i] = 1; nzrow = nzrow + 1; nzloc[nzrow] = i; nzv = nzv + 1; v[nzv] = vecelt; iv[nzv] = i; } } for (ii = 1; ii <= nzrow; ii++) { i = nzloc[ii]; mark[i] = 0; } } /*--------------------------------------------------------------------- * scale a double precision number x in (0,1) by a power of 2 and chop it *---------------------------------------------------------------------*/ static int icnvrt(double x, int ipwr2) { return ((int)(ipwr2 * x)); } /*-------------------------------------------------------------------- c set ith element of sparse vector (v, iv) with c nzv nonzeros to val c-------------------------------------------------------------------*/ static void vecset( int n, double v[], /* v[1:*] */ int iv[], /* iv[1:*] */ int *nzv, int i, double val) { int k; boolean set; set = FALSE; for (k = 1; k <= *nzv; k++) { if (iv[k] == i) { v[k] = val; set = TRUE; } } if (set == FALSE) { *nzv = *nzv + 1; v[*nzv] = val; iv[*nzv] = i; } }
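/*--------------------------------------------------------------------
c Editor's sketch (not part of the original benchmark): the kernel
c that dominates conj_grad is the CSR sparse matrix-vector product
c q = A.p.  Stripped of the 1-based indexing and the commented-out
c unrolled variants kept above, the rolled version is the routine
c below; the name csr_matvec is hypothetical.
c-------------------------------------------------------------------*/
/* 0-based CSR matrix-vector product, equivalent to the rolled loop
static void csr_matvec(int nrows, const int rowstr[], const int colidx[],
                       const double a[], const double p[], double q[]) {
    int j, k;
    for (j = 0; j < nrows; j++) {
        double sum = 0.0;
        for (k = rowstr[j]; k < rowstr[j+1]; k++) {
            sum = sum + a[k]*p[colidx[k]];   // gather p through colidx
        }
        q[j] = sum;
    }
}
*/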
cache.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distribute-cache-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/timer-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const Quantum *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static const void *GetVirtualMetacontentFromCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *, ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,Quantum *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), OpenPixelCacheOnDisk(CacheInfo *,const MapMode), ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), ReadPixelCacheMetacontent(CacheInfo *magick_restrict, NexusInfo *magick_restrict,ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict, ExceptionInfo *); static Quantum *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode, const ssize_t,const ssize_t,const size_t,const size_t, const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *) magick_hot_spot; #if defined(MAGICKCORE_OPENCL_SUPPORT) static void CopyOpenCLBuffer(CacheInfo *magick_restrict); #endif #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. */ static SemaphoreInfo *cache_semaphore = (SemaphoreInfo *) NULL; static ssize_t cache_anonymous_memory = (-1); static time_t cache_epoch = 0; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCache() acquires a pixel cache. % % The format of the AcquirePixelCache() method is: % % Cache AcquirePixelCache(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. 
% */ MagickPrivate Cache AcquirePixelCache(const size_t number_threads) { CacheInfo *magick_restrict cache_info; char *value; cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info)); if (cache_info == (CacheInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(cache_info,0,sizeof(*cache_info)); cache_info->type=UndefinedCache; cache_info->mode=IOMode; cache_info->disk_mode=IOMode; cache_info->colorspace=sRGBColorspace; cache_info->file=(-1); cache_info->id=GetMagickThreadId(); cache_info->number_threads=number_threads; if (GetOpenMPMaximumThreads() > cache_info->number_threads) cache_info->number_threads=GetOpenMPMaximumThreads(); if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads) cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource); if (cache_info->number_threads == 0) cache_info->number_threads=1; cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads); if (cache_info->nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); value=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (value != (const char *) NULL) { cache_info->synchronize=IsStringTrue(value); value=DestroyString(value); } value=GetPolicyValue("cache:synchronize"); if (value != (const char *) NULL) { cache_info->synchronize=IsStringTrue(value); value=DestroyString(value); } cache_info->width_limit=MagickMin(GetMagickResourceLimit(WidthResource), (MagickSizeType) MAGICK_SSIZE_MAX); cache_info->height_limit=MagickMin(GetMagickResourceLimit(HeightResource), (MagickSizeType) MAGICK_SSIZE_MAX); cache_info->semaphore=AcquireSemaphoreInfo(); cache_info->reference_count=1; cache_info->file_semaphore=AcquireSemaphoreInfo(); cache_info->debug=IsEventLogging(); cache_info->signature=MagickCoreSignature; return((Cache ) cache_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCacheNexus() allocates the NexusInfo structure. % % The format of the AcquirePixelCacheNexus method is: % % NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. 
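%
%  A minimal internal-usage sketch (illustrative only; number_threads is
%  assumed to be the same value later passed to DestroyPixelCacheNexus()):
%
%      NexusInfo
%        **nexus_info;
%
%      nexus_info=AcquirePixelCacheNexus(number_threads);
%      ...
%      nexus_info=DestroyPixelCacheNexus(nexus_info,number_threads);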
% */ MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) { NexusInfo **magick_restrict nexus_info; ssize_t i; nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2* number_threads,sizeof(*nexus_info))); if (nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); *nexus_info=(NexusInfo *) AcquireQuantumMemory(number_threads, 2*sizeof(**nexus_info)); if (*nexus_info == (NexusInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info)); for (i=0; i < (ssize_t) (2*number_threads); i++) { nexus_info[i]=(*nexus_info+i); if (i < (ssize_t) number_threads) nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i); nexus_info[i]->signature=MagickCoreSignature; } return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCachePixels() returns the pixels associated with the specified % image. % % The format of the AcquirePixelCachePixels() method is: % % void *AcquirePixelCachePixels(const Image *image,size_t *length, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. % */ MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); (void) exception; cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=0; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); *length=(size_t) cache_info->length; return(cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t G e n e s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentGenesis() instantiates the cache component. % % The format of the CacheComponentGenesis method is: % % MagickBooleanType CacheComponentGenesis(void) % */ MagickPrivate MagickBooleanType CacheComponentGenesis(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) cache_semaphore=AcquireSemaphoreInfo(); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t T e r m i n u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentTerminus() destroys the cache component. 
% % The format of the CacheComponentTerminus() method is: % % CacheComponentTerminus(void) % */ MagickPrivate void CacheComponentTerminus(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&cache_semaphore); /* no op-- nothing to destroy */ RelinquishSemaphoreInfo(&cache_semaphore); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l i p P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClipPixelCacheNexus() clips the cache nexus as defined by the image clip % mask. The method returns MagickTrue if the pixel region is clipped, % otherwise MagickFalse. % % The format of the ClipPixelCacheNexus() method is: % % MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to clip. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ClipPixelCacheNexus(Image *image, NexusInfo *nexus_info,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; Quantum *magick_restrict p, *magick_restrict q; ssize_t y; /* Apply clip mask. */ if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->channels & WriteMaskChannel) == 0) return(MagickTrue); if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0)) return(MagickTrue); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return(MagickFalse); p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y, nexus_info->region.width,nexus_info->region.height, nexus_info->virtual_nexus,exception); q=nexus_info->pixels; if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickFalse); for (y=0; y < (ssize_t) nexus_info->region.height; y++) { ssize_t x; for (x=0; x < (ssize_t) nexus_info->region.width; x++) { double mask_alpha; ssize_t i; mask_alpha=QuantumScale*GetPixelWriteMask(image,p); if (fabs(mask_alpha) >= MagickEpsilon) { for (i=0; i < (ssize_t) image->number_channels; i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha* GetPixelAlpha(image,p),(double) q[i],(double) GetPixelAlpha(image,q))); } SetPixelAlpha(image,GetPixelAlpha(image,p),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(image); } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCache() clones a pixel cache. % % The format of the ClonePixelCache() method is: % % Cache ClonePixelCache(const Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. 
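%
%  Note that ClonePixelCache() acquires a new cache with the same number of
%  threads and copies only the virtual pixel method; pixel data is cloned
%  separately (see ClonePixelCacheRepository()).  A minimal sketch
%  (illustrative only):
%
%      Cache
%        clone;
%
%      clone=ClonePixelCache(image->cache);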
% */ MagickPrivate Cache ClonePixelCache(const Cache cache) { CacheInfo *magick_restrict clone_info; const CacheInfo *magick_restrict cache_info; assert(cache != NULL); cache_info=(const CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads); clone_info->virtual_pixel_method=cache_info->virtual_pixel_method; return((Cache ) clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheMethods() clones the pixel cache methods from one cache to % another. % % The format of the ClonePixelCacheMethods() method is: % % void ClonePixelCacheMethods(Cache clone,const Cache cache) % % A description of each parameter follows: % % o clone: Specifies a pointer to a Cache structure. % % o cache: the pixel cache. % */ MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache) { CacheInfo *magick_restrict cache_info, *magick_restrict source_info; assert(clone != (Cache) NULL); source_info=(CacheInfo *) clone; assert(source_info->signature == MagickCoreSignature); if (source_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", source_info->filename); assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); source_info->methods=cache_info->methods; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e R e p o s i t o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheRepository() clones the source pixel cache to the destination % cache. % % The format of the ClonePixelCacheRepository() method is: % % MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info, % CacheInfo *source_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o source_info: the source pixel cache. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ClonePixelCacheOnDisk( CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info) { MagickSizeType extent; size_t quantum; ssize_t count; struct stat file_stats; unsigned char *buffer; /* Clone pixel cache on disk with identical morphology. 
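    Strategy: on Linux, a sendfile() fast path copies the whole backing file
    in-kernel when its length fits in a single call; otherwise, or if that
    path fails, the file is copied with a buffered read()/write() loop using
    a buffer of at most MagickMaxBufferExtent bytes.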
*/ if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) || (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse)) return(MagickFalse); if ((lseek(cache_info->file,0,SEEK_SET) < 0) || (lseek(clone_info->file,0,SEEK_SET) < 0)) return(MagickFalse); quantum=(size_t) MagickMaxBufferExtent; if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0)) { #if defined(MAGICKCORE_HAVE_LINUX_SENDFILE) if (cache_info->length < 0x7ffff000) { count=sendfile(clone_info->file,cache_info->file,(off_t *) NULL, (size_t) cache_info->length); if (count == (ssize_t) cache_info->length) return(MagickTrue); if ((lseek(cache_info->file,0,SEEK_SET) < 0) || (lseek(clone_info->file,0,SEEK_SET) < 0)) return(MagickFalse); } #endif quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent); } buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer)); if (buffer == (unsigned char *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); extent=0; while ((count=read(cache_info->file,buffer,quantum)) > 0) { ssize_t number_bytes; number_bytes=write(clone_info->file,buffer,(size_t) count); if (number_bytes != count) break; extent+=number_bytes; } buffer=(unsigned char *) RelinquishMagickMemory(buffer); if (extent != cache_info->length) return(MagickFalse); return(MagickTrue); } static MagickBooleanType ClonePixelCacheRepository( CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info, ExceptionInfo *exception) { #define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource)) #define cache_number_threads(source,destination,chunk,multithreaded) \ num_threads((multithreaded) == 0 ? 1 : \ (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \ (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \ MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \ MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1)) MagickBooleanType optimize, status; NexusInfo **magick_restrict cache_nexus, **magick_restrict clone_nexus; size_t length; ssize_t y; assert(cache_info != (CacheInfo *) NULL); assert(clone_info != (CacheInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); if (cache_info->type == PingCache) return(MagickTrue); length=cache_info->number_channels*sizeof(*cache_info->channel_map); if ((cache_info->storage_class == clone_info->storage_class) && (cache_info->colorspace == clone_info->colorspace) && (cache_info->alpha_trait == clone_info->alpha_trait) && (cache_info->channels == clone_info->channels) && (cache_info->columns == clone_info->columns) && (cache_info->rows == clone_info->rows) && (cache_info->number_channels == clone_info->number_channels) && (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) && (cache_info->metacontent_extent == clone_info->metacontent_extent)) { /* Identical pixel cache morphology. 
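    Fast paths: when both caches are memory- or map-based, the pixels (and
    any metacontent) are copied with memcpy(); when both are disk caches,
    the backing files are cloned with ClonePixelCacheOnDisk().  Any other
    combination falls through to the row-by-row nexus copy below.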
*/ if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) && ((clone_info->type == MemoryCache) || (clone_info->type == MapCache))) { (void) memcpy(clone_info->pixels,cache_info->pixels, cache_info->number_channels*cache_info->columns*cache_info->rows* sizeof(*cache_info->pixels)); if ((cache_info->metacontent_extent != 0) && (clone_info->metacontent_extent != 0)) (void) memcpy(clone_info->metacontent,cache_info->metacontent, cache_info->columns*cache_info->rows* clone_info->metacontent_extent*sizeof(unsigned char)); return(MagickTrue); } if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache)) return(ClonePixelCacheOnDisk(cache_info,clone_info)); } /* Mismatched pixel cache morphology. */ cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads); clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads); length=cache_info->number_channels*sizeof(*cache_info->channel_map); optimize=(cache_info->number_channels == clone_info->number_channels) && (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ? MagickTrue : MagickFalse; length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns, clone_info->number_channels*clone_info->columns); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ cache_number_threads(cache_info,clone_info,cache_info->rows,1) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *pixels; ssize_t x; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y, cache_info->columns,1,MagickFalse,cache_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y, clone_info->columns,1,MagickFalse,clone_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length); if (optimize != MagickFalse) (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length* sizeof(Quantum)); else { const Quantum *magick_restrict p; Quantum *magick_restrict q; /* Mismatched pixel channel map. */ p=cache_nexus[id]->pixels; q=clone_nexus[id]->pixels; for (x=0; x < (ssize_t) cache_info->columns; x++) { ssize_t i; if (x == (ssize_t) clone_info->columns) break; for (i=0; i < (ssize_t) clone_info->number_channels; i++) { PixelChannel channel; PixelTrait traits; channel=clone_info->channel_map[i].channel; traits=cache_info->channel_map[channel].traits; if (traits != UndefinedPixelTrait) *q=*(p+cache_info->channel_map[channel].offset); q++; } p+=cache_info->number_channels; } } status=WritePixelCachePixels(clone_info,clone_nexus[id],exception); } if ((cache_info->metacontent_extent != 0) && (clone_info->metacontent_extent != 0)) { /* Clone metacontent. 
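    The metacontent is transferred through the same per-row nexus read and
    write path as the pixels above, with the copy length clamped to the
    smaller of the source and destination metacontent extents.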
*/ length=(size_t) MagickMin(cache_info->metacontent_extent, clone_info->metacontent_extent); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ cache_number_threads(cache_info,clone_info,cache_info->rows,1) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *pixels; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y, cache_info->columns,1,MagickFalse,cache_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y, clone_info->columns,1,MagickFalse,clone_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; if ((clone_nexus[id]->metacontent != (void *) NULL) && (cache_nexus[id]->metacontent != (void *) NULL)) (void) memcpy(clone_nexus[id]->metacontent, cache_nexus[id]->metacontent,length*sizeof(unsigned char)); status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception); } } clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads); cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads); if (cache_info->debug != MagickFalse) { char message[MagickPathExtent]; (void) FormatLocaleString(message,MagickPathExtent,"%s => %s", CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type), CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type)); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixelCache() deallocates memory associated with the pixel cache. % % The format of the DestroyImagePixelCache() method is: % % void DestroyImagePixelCache(Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void DestroyImagePixelCache(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->cache != (void *) NULL) image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixels() deallocates memory associated with the pixel cache. % % The format of the DestroyImagePixels() method is: % % void DestroyImagePixels(Image *image) % % A description of each parameter follows: % % o image: the image. 
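%
%  A minimal usage sketch (illustrative only):
%
%      DestroyImagePixels(image);
%
%  If a destroy-pixel handler is registered in the cache methods, the call
%  is delegated to that handler; otherwise the image's pixel cache is
%  destroyed directly.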
% */ MagickExport void DestroyImagePixels(Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL) { cache_info->methods.destroy_pixel_handler(image); return; } image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCache() deallocates memory associated with the pixel cache. % % The format of the DestroyPixelCache() method is: % % Cache DestroyPixelCache(Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info) { int status; status=(-1); if (cache_info->file != -1) { status=close(cache_info->file); cache_info->file=(-1); RelinquishMagickResource(FileResource,1); } return(status == -1 ? MagickFalse : MagickTrue); } static inline void RelinquishPixelCachePixels(CacheInfo *cache_info) { switch (cache_info->type) { case MemoryCache: { #if defined(MAGICKCORE_OPENCL_SUPPORT) if (cache_info->opencl != (MagickCLCacheInfo) NULL) { cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl, MagickTrue); cache_info->pixels=(Quantum *) NULL; break; } #endif if (cache_info->mapped == MagickFalse) cache_info->pixels=(Quantum *) RelinquishAlignedMemory( cache_info->pixels); else (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length); RelinquishMagickResource(MemoryResource,cache_info->length); break; } case MapCache: { (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length); cache_info->pixels=(Quantum *) NULL; if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode)) (void) RelinquishUniqueFileResource(cache_info->cache_filename); *cache_info->cache_filename='\0'; RelinquishMagickResource(MapResource,cache_info->length); } case DiskCache: { if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode)) (void) RelinquishUniqueFileResource(cache_info->cache_filename); *cache_info->cache_filename='\0'; RelinquishMagickResource(DiskResource,cache_info->length); break; } case DistributedCache: { *cache_info->cache_filename='\0'; (void) RelinquishDistributePixelCache((DistributeCacheInfo *) cache_info->server_info); break; } default: break; } cache_info->type=UndefinedCache; cache_info->mapped=MagickFalse; cache_info->metacontent=(void *) NULL; } MagickPrivate Cache DestroyPixelCache(Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); LockSemaphoreInfo(cache_info->semaphore); cache_info->reference_count--; if (cache_info->reference_count != 0) { UnlockSemaphoreInfo(cache_info->semaphore); return((Cache) NULL); } UnlockSemaphoreInfo(cache_info->semaphore); if (cache_info->debug != MagickFalse) { char message[MagickPathExtent]; (void) 
FormatLocaleString(message,MagickPathExtent,"destroy %s", cache_info->filename); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } RelinquishPixelCachePixels(cache_info); if (cache_info->server_info != (DistributeCacheInfo *) NULL) cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *) cache_info->server_info); if (cache_info->nexus_info != (NexusInfo **) NULL) cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info, cache_info->number_threads); if (cache_info->random_info != (RandomInfo *) NULL) cache_info->random_info=DestroyRandomInfo(cache_info->random_info); if (cache_info->file_semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&cache_info->file_semaphore); if (cache_info->semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&cache_info->semaphore); cache_info->signature=(~MagickCoreSignature); cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info); cache=(Cache) NULL; return(cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCacheNexus() destroys a pixel cache nexus. % % The format of the DestroyPixelCacheNexus() method is: % % NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info, % const size_t number_threads) % % A description of each parameter follows: % % o nexus_info: the nexus to destroy. % % o number_threads: the number of nexus threads. % */ static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info) { if (nexus_info->mapped == MagickFalse) (void) RelinquishAlignedMemory(nexus_info->cache); else (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length); nexus_info->cache=(Quantum *) NULL; nexus_info->pixels=(Quantum *) NULL; nexus_info->metacontent=(void *) NULL; nexus_info->length=0; nexus_info->mapped=MagickFalse; } MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info, const size_t number_threads) { ssize_t i; assert(nexus_info != (NexusInfo **) NULL); for (i=0; i < (ssize_t) (2*number_threads); i++) { if (nexus_info[i]->cache != (Quantum *) NULL) RelinquishCacheNexusPixels(nexus_info[i]); nexus_info[i]->signature=(~MagickCoreSignature); } *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info); nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info); return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontent() returns the authentic metacontent corresponding % with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is % returned if the associated pixels are not available. % % The format of the GetAuthenticMetacontent() method is: % % void *GetAuthenticMetacontent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
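%
%  A minimal usage sketch (illustrative only; y and exception are assumed to
%  be declared by the caller):
%
%      q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
%      if (q != (Quantum *) NULL)
%        metacontent=GetAuthenticMetacontent(image);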
% */ MagickExport void *GetAuthenticMetacontent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_metacontent_from_handler != (GetAuthenticMetacontentFromHandler) NULL) { void *metacontent; metacontent=cache_info->methods. get_authentic_metacontent_from_handler(image); return(metacontent); } assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontentFromCache() returns the meta-content corresponding % with the last call to QueueAuthenticPixelsCache() or % GetAuthenticPixelsCache(). % % The format of the GetAuthenticMetacontentFromCache() method is: % % void *GetAuthenticMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void *GetAuthenticMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL % operations. % % The format of the GetAuthenticOpenCLBuffer() method is: % % cl_mem GetAuthenticOpenCLBuffer(const Image *image, % MagickCLDevice device,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o device: the device to use. % % o exception: return any errors or warnings in this structure. 
% */ MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image, MagickCLDevice device,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(device != (const MagickCLDevice) NULL); cache_info=(CacheInfo *) image->cache; if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1)) { SyncImagePixelCache((Image *) image,exception); cache_info=(CacheInfo *) image->cache; } if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse)) return((cl_mem) NULL); LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->opencl != (MagickCLCacheInfo) NULL) && (cache_info->opencl->device->context != device->context)) cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl); if (cache_info->opencl == (MagickCLCacheInfo) NULL) { assert(cache_info->pixels != (Quantum *) NULL); cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels, cache_info->length); } if (cache_info->opencl != (MagickCLCacheInfo) NULL) RetainOpenCLMemObject(cache_info->opencl->buffer); UnlockSemaphoreInfo(cache_info->semaphore); if (cache_info->opencl == (MagickCLCacheInfo) NULL) return((cl_mem) NULL); assert(cache_info->opencl->pixels == cache_info->pixels); return(cache_info->opencl->buffer); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or % disk pixel cache as defined by the geometry parameters. A pointer to the % pixels is returned if the pixels are transferred, otherwise a NULL is % returned. % % The format of the GetAuthenticPixelCacheNexus() method is: % % Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to return. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; Quantum *magick_restrict pixels; /* Transfer pixels from the cache. 
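    The nexus is first set up (and, when possible, mapped directly onto the
    cache) by QueueAuthenticPixelCacheNexus(); if the region is not an
    authentic view of the cache, the pixels and any metacontent are then
    read in from the cache backing store.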
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue, nexus_info,exception); if (pixels == (Quantum *) NULL) return((Quantum *) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (nexus_info->authentic_pixel_cache != MagickFalse) return(pixels); if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); if (cache_info->metacontent_extent != 0) if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsFromCache() returns the pixels associated with the last % call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods. % % The format of the GetAuthenticPixelsFromCache() method is: % % Quantum *GetAuthenticPixelsFromCache(const Image image) % % A description of each parameter follows: % % o image: the image. % */ static Quantum *GetAuthenticPixelsFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c P i x e l Q u e u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelQueue() returns the authentic pixels associated % corresponding with the last call to QueueAuthenticPixels() or % GetAuthenticPixels(). % % The format of the GetAuthenticPixelQueue() method is: % % Quantum *GetAuthenticPixelQueue(const Image image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Quantum *GetAuthenticPixelQueue(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_pixels_from_handler != (GetAuthenticPixelsFromHandler) NULL) return(cache_info->methods.get_authentic_pixels_from_handler(image)); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixels() obtains a pixel region for read/write access. If the % region is successfully accessed, a pointer to a Quantum array % representing the region is returned, otherwise NULL is returned. % % The returned pointer may point to a temporary working copy of the pixels % or it may point to the original pixels in memory. 
Performance is maximized % if the selected region is part of one row, or one or more full rows, since % then there is opportunity to access the pixels in-place (without a copy) % if the image is in memory, or in a memory-mapped file. The returned pointer % must *never* be deallocated by the user. % % Pixels accessed via the returned pointer represent a simple array of type % Quantum. If the image has corresponding metacontent,call % GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the % meta-content corresponding to the region. Once the Quantum array has % been updated, the changes must be saved back to the underlying image using % SyncAuthenticPixels() or they may be lost. % % The format of the GetAuthenticPixels() method is: % % Quantum *GetAuthenticPixels(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *pixels; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) { pixels=cache_info->methods.get_authentic_pixels_handler(image,x,y,columns, rows,exception); return(pixels); } assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache % as defined by the geometry parameters. A pointer to the pixels is returned % if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetAuthenticPixelsCache() method is: % % Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
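%
%  A typical read-modify-write loop through the public interface backed by
%  this method (an illustrative sketch only; x, y, q, and exception are
%  assumed to be declared by the caller and error handling is abbreviated):
%
%      for (y=0; y < (ssize_t) image->rows; y++)
%      {
%        q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
%        if (q == (Quantum *) NULL)
%          break;
%        for (x=0; x < (ssize_t) image->columns; x++)
%        {
%          SetPixelRed(image,QuantumRange-GetPixelRed(image,q),q);
%          q+=GetPixelChannels(image);
%        }
%        if (SyncAuthenticPixels(image,exception) == MagickFalse)
%          break;
%      }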
% */ static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return((Quantum *) NULL); assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageExtent() returns the extent of the pixels associated corresponding % with the last call to QueueAuthenticPixels() or GetAuthenticPixels(). % % The format of the GetImageExtent() method is: % % MagickSizeType GetImageExtent(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickSizeType GetImageExtent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixelCache() ensures that there is only a single reference to the % pixel cache to be modified, updating the provided cache pointer to point to % a clone of the original pixel cache if necessary. % % The format of the GetImagePixelCache method is: % % Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o clone: any value other than MagickFalse clones the cache pixels. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType ValidatePixelCacheMorphology( const Image *magick_restrict image) { const CacheInfo *magick_restrict cache_info; const PixelChannelMap *magick_restrict p, *magick_restrict q; /* Does the image match the pixel cache morphology? 
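    Morphology here covers the storage class, colorspace, alpha trait,
    channels, dimensions, channel map, and metacontent extent; a mismatch
    causes GetImagePixelCache() to reopen the pixel cache.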
*/ cache_info=(CacheInfo *) image->cache; p=image->channel_map; q=cache_info->channel_map; if ((image->storage_class != cache_info->storage_class) || (image->colorspace != cache_info->colorspace) || (image->alpha_trait != cache_info->alpha_trait) || (image->channels != cache_info->channels) || (image->columns != cache_info->columns) || (image->rows != cache_info->rows) || (image->number_channels != cache_info->number_channels) || (memcmp(p,q,image->number_channels*sizeof(*p)) != 0) || (image->metacontent_extent != cache_info->metacontent_extent) || (cache_info->nexus_info == (NexusInfo **) NULL)) return(MagickFalse); return(MagickTrue); } static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickBooleanType destroy, status; static MagickSizeType cache_timelimit = MagickResourceInfinity, cpu_throttle = MagickResourceInfinity, cycles = 0; status=MagickTrue; if (cpu_throttle == MagickResourceInfinity) cpu_throttle=GetMagickResourceLimit(ThrottleResource); if ((cpu_throttle != 0) && ((cycles++ % 32) == 0)) MagickDelay(cpu_throttle); if (cache_epoch == 0) { /* Set the expire time in seconds. */ cache_timelimit=GetMagickResourceLimit(TimeResource); cache_epoch=GetMagickTime(); } if ((cache_timelimit != MagickResourceInfinity) && ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit)) { #if defined(ECANCELED) errno=ECANCELED; #endif cache_info=(CacheInfo *) image->cache; if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded"); } LockSemaphoreInfo(image->semaphore); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif destroy=MagickFalse; if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { CacheInfo *clone_info; Image clone_image; /* Clone pixel cache. */ clone_image=(*image); clone_image.semaphore=AcquireSemaphoreInfo(); clone_image.reference_count=1; clone_image.cache=ClonePixelCache(cache_info); clone_info=(CacheInfo *) clone_image.cache; status=OpenPixelCache(&clone_image,IOMode,exception); if (status == MagickFalse) clone_info=(CacheInfo *) DestroyPixelCache(clone_info); else { if (clone != MagickFalse) status=ClonePixelCacheRepository(clone_info,cache_info, exception); if (status == MagickFalse) clone_info=(CacheInfo *) DestroyPixelCache(clone_info); else { destroy=MagickTrue; image->cache=clone_info; } } RelinquishSemaphoreInfo(&clone_image.semaphore); } UnlockSemaphoreInfo(cache_info->semaphore); } if (destroy != MagickFalse) cache_info=(CacheInfo *) DestroyPixelCache(cache_info); if (status != MagickFalse) { /* Ensure the image matches the pixel cache morphology. 
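    If the image geometry, channel map, or other attributes no longer match
    the cache, reopen the pixel cache in IOMode and close any stale disk
    file descriptor.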
*/ if (image->type != UndefinedType) image->type=UndefinedType; if (ValidatePixelCacheMorphology(image) == MagickFalse) { status=OpenPixelCache(image,IOMode,exception); cache_info=(CacheInfo *) image->cache; if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); } } UnlockSemaphoreInfo(image->semaphore); if (status == MagickFalse) return((Cache) NULL); return(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e P i x e l C a c h e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixelCacheType() returns the pixel cache type: UndefinedCache, % DiskCache, MemoryCache, MapCache, or PingCache. % % The format of the GetImagePixelCacheType() method is: % % CacheType GetImagePixelCacheType(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport CacheType GetImagePixelCacheType(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->type); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e A u t h e n t i c P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixel() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixel() method is: % % MagickBooleanType GetOneAuthenticPixel(const Image image,const ssize_t x, % const ssize_t y,Quantum *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. 
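%
%  A minimal usage sketch (illustrative only; x, y, and exception are
%  assumed to be declared by the caller):
%
%      Quantum
%        pixel[MaxPixelChannels];
%
%      status=GetOneAuthenticPixel(image,x,y,pixel,exception);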
% */ static inline MagickBooleanType CopyPixel(const Image *image, const Quantum *source,Quantum *destination) { ssize_t i; if (source == (const Quantum *) NULL) { destination[RedPixelChannel]=ClampToQuantum(image->background_color.red); destination[GreenPixelChannel]=ClampToQuantum( image->background_color.green); destination[BluePixelChannel]=ClampToQuantum( image->background_color.blue); destination[BlackPixelChannel]=ClampToQuantum( image->background_color.black); destination[AlphaPixelChannel]=ClampToQuantum( image->background_color.alpha); return(MagickFalse); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); destination[channel]=source[i]; } return(MagickTrue); } MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; Quantum *magick_restrict q; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); if (cache_info->methods.get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,pixel,exception)); q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e A u t h e n t i c P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixelFromCache() method is: % % MagickBooleanType GetOneAuthenticPixelFromCache(const Image image, % const ssize_t x,const ssize_t y,Quantum *pixel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict q; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id], exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixel() returns a single virtual pixel at the specified % (x,y) location. The image background color is returned if an error occurs. % If you plan to modify the pixel, use GetOneAuthenticPixel() instead. 
% % The format of the GetOneVirtualPixel() method is: % % MagickBooleanType GetOneVirtualPixel(const Image image,const ssize_t x, % const ssize_t y,Quantum *pixel,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); if (cache_info->methods.get_one_virtual_pixel_from_handler != (GetOneVirtualPixelFromHandler) NULL) return(cache_info->methods.get_one_virtual_pixel_from_handler(image, GetPixelCacheVirtualMethod(image),x,y,pixel,exception)); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y, 1UL,1UL,cache_info->nexus_info[id],exception); return(CopyPixel(image,p,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e V i r t u a l P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixelFromCache() returns a single virtual pixel at the % specified (x,y) location. The image background color is returned if an % error occurs. % % The format of the GetOneVirtualPixelFromCache() method is: % % MagickBooleanType GetOneVirtualPixelFromCache(const Image image, % const VirtualPixelMethod method,const ssize_t x,const ssize_t y, % Quantum *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL, cache_info->nexus_info[id],exception); return(CopyPixel(image,p,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l P i x e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. 
If % you plan to modify the pixel, use GetOneAuthenticPixel() instead. % % The format of the GetOneVirtualPixelInfo() method is: % % MagickBooleanType GetOneVirtualPixelInfo(const Image image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,PixelInfo *pixel,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y: these values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, PixelInfo *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); GetPixelInfo(image,pixel); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL, cache_info->nexus_info[id],exception); if (p == (const Quantum *) NULL) return(MagickFalse); GetPixelInfoPixel(image,p,pixel); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheColorspace() returns the colorspace of the pixel cache. % % The format of the GetPixelCacheColorspace() method is: % % Colorspace GetPixelCacheColorspace(const Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheFilename() returns the filename associated with the pixel % cache. % % The format of the GetPixelCacheFilename() method is: % % const char *GetPixelCacheFilename(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const char *GetPixelCacheFilename(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->cache_filename); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheMethods() initializes the CacheMethods structure. 
% % The format of the GetPixelCacheMethods() method is: % % void GetPixelCacheMethods(CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache_methods: Specifies a pointer to a CacheMethods structure. % */ MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods) { assert(cache_methods != (CacheMethods *) NULL); (void) memset(cache_methods,0,sizeof(*cache_methods)); cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache; cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache; cache_methods->get_virtual_metacontent_from_handler= GetVirtualMetacontentFromCache; cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache; cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache; cache_methods->get_authentic_metacontent_from_handler= GetAuthenticMetacontentFromCache; cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache; cache_methods->get_one_authentic_pixel_from_handler= GetOneAuthenticPixelFromCache; cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache; cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache; cache_methods->destroy_pixel_handler=DestroyImagePixelCache; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e N e x u s E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheNexusExtent() returns the extent of the pixels associated % corresponding with the last call to SetPixelCacheNexusPixels() or % GetPixelCacheNexusPixels(). % % The format of the GetPixelCacheNexusExtent() method is: % % MagickSizeType GetPixelCacheNexusExtent(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o nexus_info: the nexus info. % */ MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; MagickSizeType extent; assert(cache != NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height; if (extent == 0) return((MagickSizeType) cache_info->columns*cache_info->rows); return(extent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCachePixels() returns the pixels associated with the specified image. % % The format of the GetPixelCachePixels() method is: % % void *GetPixelCachePixels(Image *image,MagickSizeType *length, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. 
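%
%  A minimal usage sketch (illustrative only); the method returns NULL
%  unless the cache is a memory or memory-mapped cache:
%
%      MagickSizeType
%        length;
%
%      void
%        *pixels;
%
%      pixels=GetPixelCachePixels(image,&length,exception);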
% */ MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length, ExceptionInfo *magick_unused(exception)) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); assert(length != (MagickSizeType *) NULL); magick_unreferenced(exception); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=cache_info->length; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); return((void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheStorageClass() returns the class type of the pixel cache. % % The format of the GetPixelCacheStorageClass() method is: % % ClassType GetPixelCacheStorageClass(Cache cache) % % A description of each parameter follows: % % o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass. % % o cache: the pixel cache. % */ MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->storage_class); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e T i l e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheTileSize() returns the pixel cache tile size. % % The format of the GetPixelCacheTileSize() method is: % % void GetPixelCacheTileSize(const Image *image,size_t *width, % size_t *height) % % A description of each parameter follows: % % o image: the image. % % o width: the optimized cache tile width in pixels. % % o height: the optimized cache tile height in pixels. % */ MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width, size_t *height) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *width=2048UL/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum)); if (GetImagePixelCacheType(image) == DiskCache) *width=8192UL/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum)); *height=(*width); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e V i r t u a l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the % pixel cache. A virtual pixel is any pixel access that is outside the % boundaries of the image cache. % % The format of the GetPixelCacheVirtualMethod() method is: % % VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) % % A description of each parameter follows: % % o image: the image. 
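%
%  A hedged sketch of the usual save-and-restore pattern around a temporary
%  virtual pixel method (SetImageVirtualPixelMethod() is assumed to be the
%  public setter):
%
%      VirtualPixelMethod
%        method;
%
%      method=GetPixelCacheVirtualMethod(image);
%      (void) SetImageVirtualPixelMethod(image,TileVirtualPixelMethod,
%        exception);
%      ...
%      (void) SetImageVirtualPixelMethod(image,method,exception);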
% */ MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->virtual_pixel_method); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromCache() returns the meta-content corresponding with % the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualMetacontentFromCache() method is: % % void *GetVirtualMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const void *GetVirtualMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const void *magick_restrict metacontent; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); metacontent=GetVirtualMetacontentFromNexus(cache_info, cache_info->nexus_info[id]); return(metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromNexus() returns the meta-content for the specified % cache nexus. % % The format of the GetVirtualMetacontentFromNexus() method is: % % const void *GetVirtualMetacontentFromNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the meta-content. % */ MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((void *) NULL); return(nexus_info->metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t V i r t u a l M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontent() returns the virtual metacontent corresponding with % the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is % returned if the meta-content are not available. % % The format of the GetVirtualMetacontent() method is: % % const void *GetVirtualMetacontent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
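%
%  A hedged example (the requested region is illustrative); the meta-content
%  corresponds to the most recent virtual pixel region request:
%
%      const Quantum
%        *p;
%
%      const void
%        *metacontent;
%
%      p=GetVirtualPixels(image,0,0,image->columns,1,exception);
%      metacontent=GetVirtualMetacontent(image);
%      if ((p == (const Quantum *) NULL) ||
%          (metacontent == (const void *) NULL))
%        return(MagickFalse);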
% */ MagickExport const void *GetVirtualMetacontent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const void *magick_restrict metacontent; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image); if (metacontent != (void *) NULL) return(metacontent); assert(id < (int) cache_info->number_threads); metacontent=GetVirtualMetacontentFromNexus(cache_info, cache_info->nexus_info[id]); return(metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk % pixel cache as defined by the geometry parameters. A pointer to the pixels % is returned if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetVirtualPixelCacheNexus() method is: % % Quantum *GetVirtualPixelCacheNexus(const Image *image, % const VirtualPixelMethod method,const ssize_t x,const ssize_t y, % const size_t columns,const size_t rows,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to acquire. % % o exception: return any errors or warnings in this structure. % */ static ssize_t DitherMatrix[64] = { 0, 48, 12, 60, 3, 51, 15, 63, 32, 16, 44, 28, 35, 19, 47, 31, 8, 56, 4, 52, 11, 59, 7, 55, 40, 24, 36, 20, 43, 27, 39, 23, 2, 50, 14, 62, 1, 49, 13, 61, 34, 18, 46, 30, 33, 17, 45, 29, 10, 58, 6, 54, 9, 57, 5, 53, 42, 26, 38, 22, 41, 25, 37, 21 }; static inline ssize_t DitherX(const ssize_t x,const size_t columns) { ssize_t index; index=x+DitherMatrix[x & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) columns) return((ssize_t) columns-1L); return(index); } static inline ssize_t DitherY(const ssize_t y,const size_t rows) { ssize_t index; index=y+DitherMatrix[y & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) rows) return((ssize_t) rows-1L); return(index); } static inline ssize_t EdgeX(const ssize_t x,const size_t columns) { if (x < 0L) return(0L); if (x >= (ssize_t) columns) return((ssize_t) (columns-1)); return(x); } static inline ssize_t EdgeY(const ssize_t y,const size_t rows) { if (y < 0L) return(0L); if (y >= (ssize_t) rows) return((ssize_t) (rows-1)); return(y); } static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns) { return((ssize_t) (columns*GetPseudoRandomValue(random_info))); } static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows) { return((ssize_t) (rows*GetPseudoRandomValue(random_info))); } static inline MagickModulo VirtualPixelModulo(const ssize_t offset, const size_t extent) { MagickModulo modulo; modulo.quotient=offset; if (extent != 0) modulo.quotient=offset/((ssize_t) extent); modulo.remainder=offset % ((ssize_t) extent); if ((modulo.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0)) { modulo.quotient-=1; modulo.remainder+=((ssize_t) extent); } return(modulo); } MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const 
Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickOffsetType offset; MagickSizeType length, number_pixels; NexusInfo *magick_restrict virtual_nexus; Quantum *magick_restrict pixels, virtual_pixel[MaxPixelChannels]; const Quantum *magick_restrict p; const void *magick_restrict r; Quantum *magick_restrict q; ssize_t i, u; unsigned char *magick_restrict s; ssize_t v; void *magick_restrict virtual_metacontent; /* Acquire pixels. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return((const Quantum *) NULL); #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows, ((image->channels & WriteMaskChannel) != 0) || ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse, nexus_info,exception); if (pixels == (Quantum *) NULL) return((const Quantum *) NULL); q=pixels; offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+ nexus_info->region.width-1L; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels)) if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) && (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows)) { MagickBooleanType status; /* Pixel request is inside cache extents. */ if (nexus_info->authentic_pixel_cache != MagickFalse) return(q); status=ReadPixelCachePixels(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); if (cache_info->metacontent_extent != 0) { status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); } return(q); } /* Pixel request is outside cache extents. */ virtual_nexus=nexus_info->virtual_nexus; s=(unsigned char *) nexus_info->metacontent; (void) memset(virtual_pixel,0,cache_info->number_channels* sizeof(*virtual_pixel)); virtual_metacontent=(void *) NULL; switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: case EdgeVirtualPixelMethod: case CheckerTileVirtualPixelMethod: case HorizontalTileVirtualPixelMethod: case VerticalTileVirtualPixelMethod: { if (cache_info->metacontent_extent != 0) { /* Acquire a metacontent buffer. 
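          The zero-filled buffer supplies the meta-content handed back for
          out-of-range pixels when a constant virtual pixel method (e.g.
          background, black, white, or transparent) is selected.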
*/ virtual_metacontent=(void *) AcquireQuantumMemory(1, cache_info->metacontent_extent); if (virtual_metacontent == (void *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), CacheError,"UnableToGetCacheNexus","`%s'",image->filename); return((const Quantum *) NULL); } (void) memset(virtual_metacontent,0,cache_info->metacontent_extent); } switch (virtual_pixel_method) { case BlackVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case GrayVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange/2, virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case TransparentVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,TransparentAlpha,virtual_pixel); break; } case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } default: { SetPixelRed(image,ClampToQuantum(image->background_color.red), virtual_pixel); SetPixelGreen(image,ClampToQuantum(image->background_color.green), virtual_pixel); SetPixelBlue(image,ClampToQuantum(image->background_color.blue), virtual_pixel); SetPixelBlack(image,ClampToQuantum(image->background_color.black), virtual_pixel); SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha), virtual_pixel); break; } } break; } default: break; } for (v=0; v < (ssize_t) rows; v++) { ssize_t y_offset; y_offset=y+v; if ((virtual_pixel_method == EdgeVirtualPixelMethod) || (virtual_pixel_method == UndefinedVirtualPixelMethod)) y_offset=EdgeY(y_offset,cache_info->rows); for (u=0; u < (ssize_t) columns; u+=length) { ssize_t x_offset; x_offset=x+u; length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u); if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) || ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) || (length == 0)) { MagickModulo x_modulo, y_modulo; /* Transfer a single pixel. 
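            The requested coordinate falls outside the cache extents (or the
            remaining run length is zero), so the pixel is resolved
            individually according to the active virtual pixel method.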
*/ length=(MagickSizeType) 1; switch (virtual_pixel_method) { case EdgeVirtualPixelMethod: default: { p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns), EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info, nexus_info->virtual_nexus); break; } case RandomVirtualPixelMethod: { if (cache_info->random_info == (RandomInfo *) NULL) cache_info->random_info=AcquireRandomInfo(); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, RandomX(cache_info->random_info,cache_info->columns), RandomY(cache_info->random_info,cache_info->rows),1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case DitherVirtualPixelMethod: { p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, DitherX(x_offset,cache_info->columns), DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case TileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case MirrorVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); if ((x_modulo.quotient & 0x01) == 1L) x_modulo.remainder=(ssize_t) cache_info->columns- x_modulo.remainder-1L; y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if ((y_modulo.quotient & 0x01) == 1L) y_modulo.remainder=(ssize_t) cache_info->rows- y_modulo.remainder-1L; p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case HorizontalTileEdgeVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case VerticalTileEdgeVirtualPixelMethod: { y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { p=virtual_pixel; r=virtual_metacontent; break; } case CheckerTileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L) { p=virtual_pixel; r=virtual_metacontent; break; } p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case HorizontalTileVirtualPixelMethod: { if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); 
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case VerticalTileVirtualPixelMethod: { if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } } if (p == (const Quantum *) NULL) break; (void) memcpy(q,p,(size_t) (cache_info->number_channels*length* sizeof(*p))); q+=cache_info->number_channels; if ((s != (void *) NULL) && (r != (const void *) NULL)) { (void) memcpy(s,r,(size_t) cache_info->metacontent_extent); s+=cache_info->metacontent_extent; } continue; } /* Transfer a run of pixels. */ p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset, (size_t) length,1UL,virtual_nexus,exception); if (p == (const Quantum *) NULL) break; r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); (void) memcpy(q,p,(size_t) (cache_info->number_channels*length* sizeof(*p))); q+=cache_info->number_channels*length; if ((r != (void *) NULL) && (s != (const void *) NULL)) { (void) memcpy(s,r,(size_t) length); s+=length*cache_info->metacontent_extent; } } if (u < (ssize_t) columns) break; } /* Free resources. */ if (virtual_metacontent != (void *) NULL) virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent); if (v < (ssize_t) rows) return((const Quantum *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel % cache as defined by the geometry parameters. A pointer to the pixels % is returned if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetVirtualPixelCache() method is: % % const Quantum *GetVirtualPixelCache(const Image *image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
% */ static const Quantum *GetVirtualPixelCache(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows, cache_info->nexus_info[id],exception); return(p); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t V i r t u a l P i x e l Q u e u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelQueue() returns the virtual pixels associated corresponding % with the last call to QueueAuthenticPixels() or GetVirtualPixels(). % % The format of the GetVirtualPixelQueue() method is: % % const Quantum *GetVirtualPixelQueue(const Image image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const Quantum *GetVirtualPixelQueue(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_virtual_pixels_handler != (GetVirtualPixelsHandler) NULL) return(cache_info->methods.get_virtual_pixels_handler(image)); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t V i r t u a l P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixels() returns an immutable pixel region. If the % region is successfully accessed, a pointer to it is returned, otherwise % NULL is returned. The returned pointer may point to a temporary working % copy of the pixels or it may point to the original pixels in memory. % Performance is maximized if the selected region is part of one row, or one % or more full rows, since there is opportunity to access the pixels in-place % (without a copy) if the image is in memory, or in a memory-mapped file. The % returned pointer must *never* be deallocated by the user. % % Pixels accessed via the returned pointer represent a simple array of type % Quantum. If the image type is CMYK or the storage class is PseudoClass, % call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to % access the meta-content (of type void) corresponding to the % region. % % If you plan to modify the pixels, use GetAuthenticPixels() instead. % % Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread- % safe. In a threaded environment, use GetCacheViewVirtualPixels() or % GetCacheViewAuthenticPixels() instead. 
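%
%  A hedged example that visits one row of virtual pixels (the accumulator
%  and the row index y are illustrative):
%
%      const Quantum
%        *p;
%
%      double
%        sum = 0.0;
%
%      ssize_t
%        x;
%
%      p=GetVirtualPixels(image,0,y,image->columns,1,exception);
%      if (p == (const Quantum *) NULL)
%        return(MagickFalse);
%      for (x=0; x < (ssize_t) image->columns; x++)
%      {
%        sum+=QuantumScale*(double) GetPixelRed(image,p);
%        p+=GetPixelChannels(image);
%      }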
% % The format of the GetVirtualPixels() method is: % % const Quantum *GetVirtualPixels(const Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport const Quantum *GetVirtualPixels(const Image *image, const ssize_t x,const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) return(cache_info->methods.get_virtual_pixel_handler(image, GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception)); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y, columns,rows,cache_info->nexus_info[id],exception); return(p); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsCache() returns the pixels associated corresponding with the % last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualPixelsCache() method is: % % Quantum *GetVirtualPixelsCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const Quantum *GetVirtualPixelsCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsNexus() returns the pixels associated with the specified % cache nexus. % % The format of the GetVirtualPixelsNexus() method is: % % const Quantum *GetVirtualPixelsNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the colormap pixels. 
% */ MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((Quantum *) NULL); return((const Quantum *) nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a s k P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MaskPixelCacheNexus() masks the cache nexus as defined by the composite mask. % The method returns MagickTrue if the pixel region is masked, otherwise % MagickFalse. % % The format of the MaskPixelCacheNexus() method is: % % MagickBooleanType MaskPixelCacheNexus(Image *image, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to clip. % % o exception: return any errors or warnings in this structure. % */ static inline Quantum ApplyPixelCompositeMask(const Quantum p, const MagickRealType alpha,const Quantum q,const MagickRealType beta) { double gamma; if (fabs((double) (alpha-TransparentAlpha)) < MagickEpsilon) return(q); gamma=1.0-QuantumScale*QuantumScale*alpha*beta; gamma=PerceptibleReciprocal(gamma); return(ClampToQuantum(gamma*MagickOver_((double) p,alpha,(double) q,beta))); } static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; Quantum *magick_restrict p, *magick_restrict q; ssize_t y; /* Apply composite mask. */ if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->channels & CompositeMaskChannel) == 0) return(MagickTrue); if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0)) return(MagickTrue); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return(MagickFalse); p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y, nexus_info->region.width,nexus_info->region.height, nexus_info->virtual_nexus,exception); q=nexus_info->pixels; if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickFalse); for (y=0; y < (ssize_t) nexus_info->region.height; y++) { ssize_t x; for (x=0; x < (ssize_t) nexus_info->region.width; x++) { double alpha; ssize_t i; alpha=(double) GetPixelCompositeMask(image,p); for (i=0; i < (ssize_t) image->number_channels; i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ApplyPixelCompositeMask(q[i],alpha,p[i],GetPixelAlpha(image,p)); } p+=GetPixelChannels(image); q+=GetPixelChannels(image); } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p e n P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpenPixelCache() allocates the pixel cache. This includes defining the cache % dimensions, allocating space for the image pixels and optionally the % metacontent, and memory mapping the cache if it is disk based. The cache % nexus array is initialized as well. 
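%
%  In outline, the cache type is chosen in order of preference: heap or
%  anonymous-mapped memory, a distributed cache server when one is registered
%  and the disk resource limit would otherwise be exceeded, and finally a
%  disk cache, which is memory-mapped when the map resource limit allows.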
% % The format of the OpenPixelCache() method is: % % MagickBooleanType OpenPixelCache(Image *image,const MapMode mode, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o mode: ReadMode, WriteMode, or IOMode. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info, const MapMode mode) { int file; /* Open pixel cache on disk. */ if ((cache_info->file != -1) && (cache_info->disk_mode == mode)) return(MagickTrue); /* cache already open and in the proper mode */ if (*cache_info->cache_filename == '\0') file=AcquireUniqueFileResource(cache_info->cache_filename); else switch (mode) { case ReadMode: { file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0); break; } case WriteMode: { file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT | O_BINARY | O_EXCL,S_MODE); if (file == -1) file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE); break; } case IOMode: default: { file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY | O_EXCL,S_MODE); if (file == -1) file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE); break; } } if (file == -1) return(MagickFalse); (void) AcquireMagickResource(FileResource,1); if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); cache_info->file=file; cache_info->disk_mode=mode; return(MagickTrue); } static inline MagickOffsetType WritePixelCacheRegion( const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,const unsigned char *magick_restrict buffer) { MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PWRITE) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PWRITE) count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) MAGICK_SSIZE_MAX)); #else count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) MAGICK_SSIZE_MAX),offset+i); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length) { CacheInfo *magick_restrict cache_info; MagickOffsetType count, extent, offset; cache_info=(CacheInfo *) image->cache; if (image->debug != MagickFalse) { char format[MagickPathExtent], message[MagickPathExtent]; (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format); (void) FormatLocaleString(message,MagickPathExtent, "extend %s (%s[%d], disk, %s)",cache_info->filename, cache_info->cache_filename,cache_info->file,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } if (length != (MagickSizeType) ((MagickOffsetType) length)) return(MagickFalse); offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END); if (offset < 0) return(MagickFalse); if ((MagickSizeType) offset >= length) count=(MagickOffsetType) 1; else { extent=(MagickOffsetType) length-1; count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *) ""); if (count != 1) return(MagickFalse); #if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE) if (cache_info->synchronize != MagickFalse) if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0) return(MagickFalse); #endif } offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET); if (offset < 0) return(MagickFalse); return(MagickTrue); } static MagickBooleanType OpenPixelCache(Image 
*image,const MapMode mode, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info, source_info; char format[MagickPathExtent], message[MagickPathExtent]; const char *hosts, *type; MagickBooleanType status; MagickSizeType length, number_pixels; size_t columns, packet_size; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (cache_anonymous_memory < 0) { char *value; /* Does the security policy require anonymous mapping for pixel cache? */ cache_anonymous_memory=0; value=GetPolicyValue("pixel-cache-memory"); if (value == (char *) NULL) value=GetPolicyValue("cache:memory-map"); if (LocaleCompare(value,"anonymous") == 0) { #if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS) cache_anonymous_memory=1; #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"DelegateLibrarySupportNotBuiltIn", "'%s' (policy requires anonymous memory mapping)",image->filename); #endif } value=DestroyString(value); } if ((image->columns == 0) || (image->rows == 0)) ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (((MagickSizeType) image->columns > cache_info->width_limit) || ((MagickSizeType) image->rows > cache_info->height_limit)) ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit", image->filename); if (GetMagickResourceLimit(ListLengthResource) != MagickResourceInfinity) { length=GetImageListLength(image); if (AcquireMagickResource(ListLengthResource,length) == MagickFalse) ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit", image->filename); } source_info=(*cache_info); source_info.file=(-1); (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]", image->filename,(double) image->scene); cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->alpha_trait=image->alpha_trait; cache_info->channels=image->channels; cache_info->rows=image->rows; cache_info->columns=image->columns; InitializePixelChannelMap(image); cache_info->number_channels=GetPixelChannels(image); (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels* sizeof(*image->channel_map)); cache_info->metacontent_extent=image->metacontent_extent; cache_info->mode=mode; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; packet_size=MagickMax(cache_info->number_channels,1)*sizeof(Quantum); if (image->metacontent_extent != 0) packet_size+=cache_info->metacontent_extent; length=number_pixels*packet_size; columns=(size_t) (length/cache_info->rows/packet_size); if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) || ((ssize_t) cache_info->rows < 0)) ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed", image->filename); cache_info->length=length; if (image->ping != MagickFalse) { cache_info->type=PingCache; return(MagickTrue); } status=AcquireMagickResource(AreaResource,(MagickSizeType) cache_info->columns*cache_info->rows); if (cache_info->mode == PersistMode) status=MagickFalse; length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if ((status != MagickFalse) && (length == (MagickSizeType) ((size_t) length)) && ((cache_info->type == UndefinedCache) || (cache_info->type == MemoryCache))) { 
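      /*
        Prefer a heap or anonymous-mapped memory pixel cache when the memory
        resource limit permits.
      */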
status=AcquireMagickResource(MemoryResource,cache_info->length); if (status != MagickFalse) { status=MagickTrue; if (cache_anonymous_memory <= 0) { cache_info->mapped=MagickFalse; cache_info->pixels=(Quantum *) MagickAssumeAligned( AcquireAlignedMemory(1,(size_t) cache_info->length)); } else { cache_info->mapped=MagickTrue; cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t) cache_info->length); } if (cache_info->pixels == (Quantum *) NULL) { cache_info->mapped=source_info.mapped; cache_info->pixels=source_info.pixels; } else { /* Create memory pixel cache. */ cache_info->type=MemoryCache; cache_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) cache_info->metacontent=(void *) (cache_info->pixels+ cache_info->number_channels*number_pixels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } cache_info->storage_class=image->storage_class; if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } } status=AcquireMagickResource(DiskResource,cache_info->length); hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts", exception); if ((status == MagickFalse) && (hosts != (const char *) NULL)) { DistributeCacheInfo *server_info; /* Distribute the pixel cache to a remote server. */ server_info=AcquireDistributeCacheInfo(exception); if (server_info != (DistributeCacheInfo *) NULL) { status=OpenDistributePixelCache(server_info,image); if (status == MagickFalse) { ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", GetDistributeCacheHostname(server_info)); server_info=DestroyDistributeCacheInfo(server_info); } else { /* Create a distributed pixel cache. 
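              Pixel I/O is then performed over the network against the server
              registered under the "cache:hosts" image registry key.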
*/ status=MagickTrue; cache_info->type=DistributedCache; cache_info->server_info=server_info; (void) FormatLocaleString(cache_info->cache_filename, MagickPathExtent,"%s:%d",GetDistributeCacheHostname( (DistributeCacheInfo *) cache_info->server_info), GetDistributeCachePort((DistributeCacheInfo *) cache_info->server_info)); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->cache_filename, GetDistributeCacheFile((DistributeCacheInfo *) cache_info->server_info),type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } cache_info->type=UndefinedCache; (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } /* Create pixel cache on disk. */ if (status == MagickFalse) { cache_info->type=UndefinedCache; (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) && (cache_info->mode != PersistMode)) { (void) ClosePixelCacheOnDisk(cache_info); *cache_info->cache_filename='\0'; } if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse) { cache_info->type=UndefinedCache; ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", image->filename); return(MagickFalse); } status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+ cache_info->length); if (status == MagickFalse) { cache_info->type=UndefinedCache; ThrowFileException(exception,CacheError,"UnableToExtendCache", image->filename); return(MagickFalse); } cache_info->type=DiskCache; length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if (length == (MagickSizeType) ((size_t) length)) { status=AcquireMagickResource(MapResource,cache_info->length); if (status != MagickFalse) { cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode, cache_info->offset,(size_t) cache_info->length); if (cache_info->pixels == (Quantum *) NULL) { cache_info->mapped=source_info.mapped; cache_info->pixels=source_info.pixels; RelinquishMagickResource(MapResource,cache_info->length); } else { /* Create file-backed memory-mapped pixel cache. 
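              The pixels stay on disk but are addressed through a memory map,
              so the cache file descriptor can be closed once the mapping is
              established.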
*/ (void) ClosePixelCacheOnDisk(cache_info); cache_info->type=MapCache; cache_info->mapped=MagickTrue; cache_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) cache_info->metacontent=(void *) (cache_info->pixels+ cache_info->number_channels*number_pixels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->cache_filename, cache_info->file,type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } } status=MagickTrue; if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info,exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename, cache_info->cache_filename,cache_info->file,type,(double) cache_info->columns,(double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P e r s i s t P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PersistPixelCache() attaches to or initializes a persistent pixel cache. A % persistent pixel cache is one that resides on disk and is not destroyed % when the program exits. % % The format of the PersistPixelCache() method is: % % MagickBooleanType PersistPixelCache(Image *image,const char *filename, % const MagickBooleanType attach,MagickOffsetType *offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o filename: the persistent pixel cache filename. % % o attach: A value other than zero initializes the persistent pixel cache. % % o initialize: A value other than zero initializes the persistent pixel % cache. % % o offset: the offset in the persistent cache to store pixels. % % o exception: return any errors or warnings in this structure. 
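%
%  A hedged sketch of attaching an existing persistent cache (the filename
%  and starting offset are illustrative):
%
%      MagickOffsetType
%        offset = 0;
%
%      if (PersistPixelCache(image,"image.cache",MagickTrue,&offset,
%          exception) == MagickFalse)
%        return(MagickFalse);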
% */ MagickExport MagickBooleanType PersistPixelCache(Image *image, const char *filename,const MagickBooleanType attach,MagickOffsetType *offset, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info, *magick_restrict clone_info; MagickBooleanType status; ssize_t page_size; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (void *) NULL); assert(filename != (const char *) NULL); assert(offset != (MagickOffsetType *) NULL); page_size=GetMagickPageSize(); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif if (attach != MagickFalse) { /* Attach existing persistent pixel cache. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "attach persistent cache"); (void) CopyMagickString(cache_info->cache_filename,filename, MagickPathExtent); cache_info->type=MapCache; cache_info->offset=(*offset); if (OpenPixelCache(image,ReadMode,exception) == MagickFalse) return(MagickFalse); *offset+=cache_info->length+page_size-(cache_info->length % page_size); return(MagickTrue); } /* Clone persistent pixel cache. */ status=AcquireMagickResource(DiskResource,cache_info->length); if (status == MagickFalse) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } clone_info=(CacheInfo *) ClonePixelCache(cache_info); clone_info->type=DiskCache; (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent); clone_info->file=(-1); clone_info->storage_class=cache_info->storage_class; clone_info->colorspace=cache_info->colorspace; clone_info->alpha_trait=cache_info->alpha_trait; clone_info->channels=cache_info->channels; clone_info->columns=cache_info->columns; clone_info->rows=cache_info->rows; clone_info->number_channels=cache_info->number_channels; clone_info->metacontent_extent=cache_info->metacontent_extent; clone_info->mode=PersistMode; clone_info->length=cache_info->length; (void) memcpy(clone_info->channel_map,cache_info->channel_map, MaxPixelChannels*sizeof(*cache_info->channel_map)); clone_info->offset=(*offset); status=ClonePixelCacheRepository(clone_info,cache_info,exception); *offset+=cache_info->length+page_size-(cache_info->length % page_size); clone_info=(CacheInfo *) DestroyPixelCache(clone_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixelCacheNexus() allocates an region to store image pixels as % defined by the region rectangle and returns a pointer to the region. This % region is subsequently transferred from the pixel cache with % SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the % pixels are transferred, otherwise a NULL is returned. % % The format of the QueueAuthenticPixelCacheNexus() method is: % % Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % const MagickBooleanType clone,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to set. % % o clone: clone the pixel cache. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image, const ssize_t x,const ssize_t y,const size_t columns,const size_t rows, const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickOffsetType offset; MagickSizeType number_pixels; Quantum *magick_restrict pixels; /* Validate pixel cache geometry. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception); if (cache_info == (Cache) NULL) return((Quantum *) NULL); assert(cache_info->signature == MagickCoreSignature); if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) || (y < 0) || (x >= (ssize_t) cache_info->columns) || (y >= (ssize_t) cache_info->rows)) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "PixelsAreNotAuthentic","`%s'",image->filename); return((Quantum *) NULL); } offset=(MagickOffsetType) y*cache_info->columns+x; if (offset < 0) return((Quantum *) NULL); number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1; if ((MagickSizeType) offset >= number_pixels) return((Quantum *) NULL); /* Return pixel cache. */ pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows, ((image->channels & WriteMaskChannel) != 0) || ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse, nexus_info,exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u e u e A u t h e n t i c P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixelsCache() allocates an region to store image pixels as % defined by the region rectangle and returns a pointer to the region. This % region is subsequently transferred from the pixel cache with % SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the % pixels are transferred, otherwise a NULL is returned. % % The format of the QueueAuthenticPixelsCache() method is: % % Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
% */ static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u e u e A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixels() queues a mutable pixel region. If the region is % successfully initialized a pointer to a Quantum array representing the % region is returned, otherwise NULL is returned. The returned pointer may % point to a temporary working buffer for the pixels or it may point to the % final location of the pixels in memory. % % Write-only access means that any existing pixel values corresponding to % the region are ignored. This is useful if the initial image is being % created from scratch, or if the existing pixel values are to be % completely replaced without need to refer to their pre-existing values. % The application is free to read and write the pixel buffer returned by % QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not % initialize the pixel array values. Initializing pixel array values is the % application's responsibility. % % Performance is maximized if the selected region is part of one row, or % one or more full rows, since then there is opportunity to access the % pixels in-place (without a copy) if the image is in memory, or in a % memory-mapped file. The returned pointer must *never* be deallocated % by the user. % % Pixels accessed via the returned pointer represent a simple array of type % Quantum. If the image type is CMYK or the storage class is PseudoClass, % call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to % obtain the meta-content (of type void) corresponding to the region. % Once the Quantum (and/or Quantum) array has been updated, the % changes must be saved back to the underlying image using % SyncAuthenticPixels() or they may be lost. % % The format of the QueueAuthenticPixels() method is: % % Quantum *QueueAuthenticPixels(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
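%
%  A hedged example that initializes one row and synchronizes it back to the
%  cache (the mid-gray fill and the row index y are illustrative):
%
%      Quantum
%        *q;
%
%      ssize_t
%        x;
%
%      q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
%      if (q == (Quantum *) NULL)
%        return(MagickFalse);
%      for (x=0; x < (ssize_t) image->columns; x++)
%      {
%        SetPixelRed(image,QuantumRange/2,q);
%        SetPixelGreen(image,QuantumRange/2,q);
%        SetPixelBlue(image,QuantumRange/2,q);
%        q+=GetPixelChannels(image);
%      }
%      if (SyncAuthenticPixels(image,exception) == MagickFalse)
%        return(MagickFalse);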
% */ MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) { pixels=cache_info->methods.queue_authentic_pixels_handler(image,x,y, columns,rows,exception); return(pixels); } assert(id < (int) cache_info->number_threads); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCacheMetacontent() reads metacontent from the specified region of % the pixel cache. % % The format of the ReadPixelCacheMetacontent() method is: % % MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the metacontent. % % o exception: return any errors or warnings in this structure. % */ static inline MagickOffsetType ReadPixelCacheRegion( const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,unsigned char *magick_restrict buffer) { MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PREAD) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PREAD) count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) MAGICK_SSIZE_MAX)); #else count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) MAGICK_SSIZE_MAX),offset+i); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType ReadPixelCacheMetacontent( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; ssize_t y; unsigned char *magick_restrict q; size_t rows; if (cache_info->metacontent_extent == 0) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width* cache_info->metacontent_extent; extent=length*nexus_info->region.height; rows=nexus_info->region.height; y=0; q=(unsigned char *) nexus_info->metacontent; switch (cache_info->type) { case MemoryCache: case MapCache: { unsigned char *magick_restrict p; /* Read meta-content from memory. 
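          When the nexus spans full cache rows the whole extent is copied in
          one pass; otherwise it is copied one row at a time.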
*/ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=(unsigned char *) cache_info->metacontent+offset* cache_info->metacontent_extent; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->metacontent_extent*cache_info->columns; q+=cache_info->metacontent_extent*nexus_info->region.width; } break; } case DiskCache: { /* Read meta content from disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent* cache_info->number_channels*sizeof(Quantum)+offset* cache_info->metacontent_extent,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; offset+=cache_info->columns; q+=cache_info->metacontent_extent*nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read metacontent from distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *) cache_info->server_info,&region,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; q+=cache_info->metacontent_extent*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToReadPixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCachePixels() reads pixels from the specified region of the pixel % cache. % % The format of the ReadPixelCachePixels() method is: % % MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the pixels. % % o exception: return any errors or warnings in this structure. 
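%
%  An illustrative note (not part of the original description): the region is
%  addressed row by row from the origin of the cache, so for a nexus at (x,y)
%  the first row starts at pixel index y*columns+x and each later row starts
%  one full cache row (columns pixels) further on, while the destination
%  advances by the nexus width.  For example, with a 640-column cache and a
%  nexus at x=10, y=3, the first row begins at pixel index 3*640+10 = 1930.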
% */ static MagickBooleanType ReadPixelCachePixels( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; Quantum *magick_restrict q; ssize_t y; size_t number_channels, rows; if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns; if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y) return(MagickFalse); offset+=nexus_info->region.x; number_channels=cache_info->number_channels; length=(MagickSizeType) number_channels*nexus_info->region.width* sizeof(Quantum); if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width) return(MagickFalse); rows=nexus_info->region.height; extent=length*rows; if ((extent == 0) || ((extent/length) != rows)) return(MagickFalse); y=0; q=nexus_info->pixels; switch (cache_info->type) { case MemoryCache: case MapCache: { Quantum *magick_restrict p; /* Read pixels from memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=cache_info->pixels+cache_info->number_channels*offset; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->number_channels*cache_info->columns; q+=cache_info->number_channels*nexus_info->region.width; } break; } case DiskCache: { /* Read pixels from disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset* cache_info->number_channels*sizeof(*q),length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; offset+=cache_info->columns; q+=cache_info->number_channels*nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read pixels from distributed cache. 
*/ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadDistributePixelCachePixels((DistributeCacheInfo *) cache_info->server_info,&region,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; q+=cache_info->number_channels*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToReadPixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e f e r e n c e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferencePixelCache() increments the reference count associated with the % pixel cache returning a pointer to the cache. % % The format of the ReferencePixelCache method is: % % Cache ReferencePixelCache(Cache cache_info) % % A description of each parameter follows: % % o cache_info: the pixel cache. % */ MagickPrivate Cache ReferencePixelCache(Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache *) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); LockSemaphoreInfo(cache_info->semaphore); cache_info->reference_count++; UnlockSemaphoreInfo(cache_info->semaphore); return(cache_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t P i x e l C a c h e C h a n n e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetPixelCacheChannels() resets the pixel cache channels. % % The format of the ResetPixelCacheChannels method is: % % void ResetPixelCacheChannels(Image *) % % A description of each parameter follows: % % o image: the image. % */ MagickPrivate void ResetPixelCacheChannels(Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); cache_info->number_channels=GetPixelChannels(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t C a c h e A n o n y m o u s M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetCacheAnonymousMemory() resets the anonymous_memory value. 
% % The format of the ResetCacheAnonymousMemory method is: % % void ResetCacheAnonymousMemory(void) % */ MagickPrivate void ResetCacheAnonymousMemory(void) { cache_anonymous_memory=0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t P i x e l C a c h e E p o c h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetPixelCacheEpoch() resets the pixel cache epoch. % % The format of the ResetPixelCacheEpoch method is: % % void ResetPixelCacheEpoch(void) % */ MagickPrivate void ResetPixelCacheEpoch(void) { cache_epoch=0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheMethods() sets the image pixel methods to the specified ones. % % The format of the SetPixelCacheMethods() method is: % % SetPixelCacheMethods(Cache *,CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache: the pixel cache. % % o cache_methods: Specifies a pointer to a CacheMethods structure. % */ MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods) { CacheInfo *magick_restrict cache_info; GetOneAuthenticPixelFromHandler get_one_authentic_pixel_from_handler; GetOneVirtualPixelFromHandler get_one_virtual_pixel_from_handler; /* Set cache pixel methods. */ assert(cache != (Cache) NULL); assert(cache_methods != (CacheMethods *) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) cache_info->methods.get_virtual_pixel_handler= cache_methods->get_virtual_pixel_handler; if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL) cache_info->methods.destroy_pixel_handler= cache_methods->destroy_pixel_handler; if (cache_methods->get_virtual_metacontent_from_handler != (GetVirtualMetacontentFromHandler) NULL) cache_info->methods.get_virtual_metacontent_from_handler= cache_methods->get_virtual_metacontent_from_handler; if (cache_methods->get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) cache_info->methods.get_authentic_pixels_handler= cache_methods->get_authentic_pixels_handler; if (cache_methods->queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) cache_info->methods.queue_authentic_pixels_handler= cache_methods->queue_authentic_pixels_handler; if (cache_methods->sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL) cache_info->methods.sync_authentic_pixels_handler= cache_methods->sync_authentic_pixels_handler; if (cache_methods->get_authentic_pixels_from_handler != (GetAuthenticPixelsFromHandler) NULL) cache_info->methods.get_authentic_pixels_from_handler= cache_methods->get_authentic_pixels_from_handler; if (cache_methods->get_authentic_metacontent_from_handler != (GetAuthenticMetacontentFromHandler) NULL) cache_info->methods.get_authentic_metacontent_from_handler= cache_methods->get_authentic_metacontent_from_handler; get_one_virtual_pixel_from_handler= cache_info->methods.get_one_virtual_pixel_from_handler; if (get_one_virtual_pixel_from_handler != (GetOneVirtualPixelFromHandler) NULL) cache_info->methods.get_one_virtual_pixel_from_handler= 
cache_methods->get_one_virtual_pixel_from_handler; get_one_authentic_pixel_from_handler= cache_methods->get_one_authentic_pixel_from_handler; if (get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) cache_info->methods.get_one_authentic_pixel_from_handler= cache_methods->get_one_authentic_pixel_from_handler; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e N e x u s P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheNexusPixels() defines the region of the cache for the % specified cache nexus. % % The format of the SetPixelCacheNexusPixels() method is: % % Quantum SetPixelCacheNexusPixels( % const CacheInfo *magick_restrict cache_info,const MapMode mode, % const ssize_t x,const ssize_t y,const size_t width,const size_t height, % const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o mode: ReadMode, WriteMode, or IOMode. % % o x,y,width,height: define the region of this particular cache nexus. % % o buffered: if true, nexus pixels are buffered. % % o nexus_info: the cache nexus to set. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType AcquireCacheNexusPixels( const CacheInfo *magick_restrict cache_info,const MagickSizeType length, NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception) { if (length != (MagickSizeType) ((size_t) length)) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"PixelCacheAllocationFailed","`%s'", cache_info->filename); return(MagickFalse); } nexus_info->length=0; nexus_info->mapped=MagickFalse; if (cache_anonymous_memory <= 0) { nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1, (size_t) length)); if (nexus_info->cache != (Quantum *) NULL) (void) memset(nexus_info->cache,0,(size_t) length); } else { nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length); if (nexus_info->cache != (Quantum *) NULL) nexus_info->mapped=MagickTrue; } if (nexus_info->cache == (Quantum *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"PixelCacheAllocationFailed","`%s'", cache_info->filename); return(MagickFalse); } nexus_info->length=length; return(MagickTrue); } static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info, const MapMode mode) { if (nexus_info->length < CACHE_LINE_SIZE) return; if (mode == ReadMode) { MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE, 0,1); return; } MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,1,1); } static inline MagickBooleanType ValidatePixelOffset(const ssize_t x, const size_t a) { if ((x >= 0) && (x >= ((ssize_t) MAGICK_SSIZE_MAX-(ssize_t) a))) return(MagickFalse); if (x <= ((ssize_t) MAGICK_SSIZE_MIN+(ssize_t) a)) return(MagickFalse); return(MagickTrue); } static Quantum *SetPixelCacheNexusPixels( const CacheInfo *magick_restrict cache_info,const MapMode mode, const ssize_t x,const ssize_t y,const size_t width,const size_t height, const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickBooleanType status; MagickSizeType length, number_pixels; assert(cache_info != (const CacheInfo *) NULL); assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == 
UndefinedCache) return((Quantum *) NULL); assert(nexus_info->signature == MagickCoreSignature); (void) memset(&nexus_info->region,0,sizeof(nexus_info->region)); if ((width == 0) || (height == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "NoPixelsDefinedInCache","`%s'",cache_info->filename); return((Quantum *) NULL); } if (((MagickSizeType) width > cache_info->width_limit) || ((MagickSizeType) height > cache_info->height_limit) || (ValidatePixelOffset(x,width) == MagickFalse) || (ValidatePixelOffset(y,height) == MagickFalse)) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "WidthOrHeightExceedsLimit","`%s'",cache_info->filename); return((Quantum *) NULL); } if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) && (buffered == MagickFalse)) { if (((x >= 0) && (y >= 0) && (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) && (((x == 0) && (width == cache_info->columns)) || ((height == 1) && (((ssize_t) width+x-1) < (ssize_t) cache_info->columns)))) { MagickOffsetType offset; /* Pixels are accessed directly from memory. */ offset=(MagickOffsetType) y*cache_info->columns+x; nexus_info->pixels=cache_info->pixels+cache_info->number_channels* offset; nexus_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) nexus_info->metacontent=(unsigned char *) cache_info->metacontent+ offset*cache_info->metacontent_extent; nexus_info->region.width=width; nexus_info->region.height=height; nexus_info->region.x=x; nexus_info->region.y=y; nexus_info->authentic_pixel_cache=MagickTrue; PrefetchPixelCacheNexusPixels(nexus_info,mode); return(nexus_info->pixels); } } /* Pixels are stored in a staging region until they are synced to the cache. */ number_pixels=(MagickSizeType) width*height; length=MagickMax(number_pixels,MagickMax(cache_info->columns, cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels); if (cache_info->metacontent_extent != 0) length+=number_pixels*cache_info->metacontent_extent; status=MagickTrue; if (nexus_info->cache == (Quantum *) NULL) status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception); else if (nexus_info->length < length) { RelinquishCacheNexusPixels(nexus_info); status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception); } if (status == MagickFalse) return((Quantum *) NULL); nexus_info->pixels=nexus_info->cache; nexus_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) nexus_info->metacontent=(void *) (nexus_info->pixels+ cache_info->number_channels*number_pixels); nexus_info->region.width=width; nexus_info->region.height=height; nexus_info->region.x=x; nexus_info->region.y=y; nexus_info->authentic_pixel_cache=cache_info->type == PingCache ? MagickTrue : MagickFalse; PrefetchPixelCacheNexusPixels(nexus_info,mode); return(nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t P i x e l C a c h e V i r t u a l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the % pixel cache and returns the previous setting. A virtual pixel is any pixel % access that is outside the boundaries of the image cache. 
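%
%  For example (illustrative; it assumes the public wrapper
%  SetImageVirtualPixelMethod(), which forwards to this routine), an
%  application selects the policy before an operation that samples outside
%  the image bounds:
%
%    (void) SetImageVirtualPixelMethod(image,EdgeVirtualPixelMethod,
%      exception);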
% % The format of the SetPixelCacheVirtualMethod() method is: % % VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image, % const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: choose the type of virtual pixel. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; CacheView *magick_restrict image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); image->alpha_trait=BlendPixelTrait; status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); /* must be virtual */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(image,alpha,q); q+=GetPixelChannels(image); } status=SyncCacheViewAuthenticPixels(image_view,exception); } image_view=DestroyCacheView(image_view); return(status); } MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image, const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; VirtualPixelMethod method; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); method=cache_info->virtual_pixel_method; cache_info->virtual_pixel_method=virtual_pixel_method; if ((image->columns != 0) && (image->rows != 0)) switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: { if ((image->background_color.alpha_trait != UndefinedPixelTrait) && (image->alpha_trait == UndefinedPixelTrait)) (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception); if ((IsPixelInfoGray(&image->background_color) == MagickFalse) && (IsGrayColorspace(image->colorspace) != MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace,exception); break; } case TransparentVirtualPixelMethod: { if (image->alpha_trait == UndefinedPixelTrait) (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception); break; } default: break; } return(method); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have % been completed and updates the host memory. 
% % The format of the SyncAuthenticOpenCLBuffer() method is: % % void SyncAuthenticOpenCLBuffer(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info) { assert(cache_info != (CacheInfo *) NULL); assert(cache_info->signature == MagickCoreSignature); if ((cache_info->type != MemoryCache) || (cache_info->opencl == (MagickCLCacheInfo) NULL)) return; /* Ensure single threaded access to OpenCL environment. */ LockSemaphoreInfo(cache_info->semaphore); cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl); UnlockSemaphoreInfo(cache_info->semaphore); } MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); cache_info=(CacheInfo *) image->cache; CopyOpenCLBuffer(cache_info); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the % in-memory or disk cache. The method returns MagickTrue if the pixel region % is synced, otherwise MagickFalse. % % The format of the SyncAuthenticPixelCacheNexus() method is: % % MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to sync. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image, NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickBooleanType status; /* Transfer pixels to the cache. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->cache == (Cache) NULL) ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return(MagickFalse); if (image->mask_trait != UpdatePixelTrait) { if (((image->channels & WriteMaskChannel) != 0) && (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse)) return(MagickFalse); if (((image->channels & CompositeMaskChannel) != 0) && (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse)) return(MagickFalse); } if (nexus_info->authentic_pixel_cache != MagickFalse) { if (image->taint == MagickFalse) image->taint=MagickTrue; return(MagickTrue); } assert(cache_info->signature == MagickCoreSignature); status=WritePixelCachePixels(cache_info,nexus_info,exception); if ((cache_info->metacontent_extent != 0) && (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse)) return(MagickFalse); if ((status != MagickFalse) && (image->taint == MagickFalse)) image->taint=MagickTrue; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory % or disk cache. The method returns MagickTrue if the pixel region is synced, % otherwise MagickFalse. 
% % The format of the SyncAuthenticPixelsCache() method is: % % MagickBooleanType SyncAuthenticPixelsCache(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType SyncAuthenticPixelsCache(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id], exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is flushed, otherwise % MagickFalse. % % The format of the SyncAuthenticPixels() method is: % % MagickBooleanType SyncAuthenticPixels(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SyncAuthenticPixels(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL) { status=cache_info->methods.sync_authentic_pixels_handler(image, exception); return(status); } assert(id < (int) cache_info->number_threads); status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id], exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImagePixelCache() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is flushed, otherwise % MagickFalse. % % The format of the SyncImagePixelCache() method is: % % MagickBooleanType SyncImagePixelCache(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(exception != (ExceptionInfo *) NULL); cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception); return(cache_info == (CacheInfo *) NULL ? 
MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + W r i t e P i x e l C a c h e M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePixelCacheMetacontent() writes the meta-content to the specified region % of the pixel cache. % % The format of the WritePixelCacheMetacontent() method is: % % MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to write the meta-content. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info, NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; const unsigned char *magick_restrict p; ssize_t y; size_t rows; if (cache_info->metacontent_extent == 0) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width* cache_info->metacontent_extent; extent=(MagickSizeType) length*nexus_info->region.height; rows=nexus_info->region.height; y=0; p=(unsigned char *) nexus_info->metacontent; switch (cache_info->type) { case MemoryCache: case MapCache: { unsigned char *magick_restrict q; /* Write associated pixels to memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } q=(unsigned char *) cache_info->metacontent+offset* cache_info->metacontent_extent; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=nexus_info->region.width*cache_info->metacontent_extent; q+=cache_info->columns*cache_info->metacontent_extent; } break; } case DiskCache: { /* Write associated pixels to disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=WritePixelCacheRegion(cache_info,cache_info->offset+extent* cache_info->number_channels*sizeof(Quantum)+offset* cache_info->metacontent_extent,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->metacontent_extent*nexus_info->region.width; offset+=cache_info->columns; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Write metacontent to distributed cache. 
*/ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *) cache_info->server_info,&region,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->metacontent_extent*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToWritePixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + W r i t e C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePixelCachePixels() writes image pixels to the specified region of the % pixel cache. % % The format of the WritePixelCachePixels() method is: % % MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to write the pixels. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WritePixelCachePixels( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; const Quantum *magick_restrict p; ssize_t y; size_t rows; if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width* sizeof(Quantum); extent=length*nexus_info->region.height; rows=nexus_info->region.height; y=0; p=nexus_info->pixels; switch (cache_info->type) { case MemoryCache: case MapCache: { Quantum *magick_restrict q; /* Write pixels to memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } q=cache_info->pixels+cache_info->number_channels*offset; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->number_channels*nexus_info->region.width; q+=cache_info->number_channels*cache_info->columns; } break; } case DiskCache: { /* Write pixels to disk. 
*/ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WritePixelCacheRegion(cache_info,cache_info->offset+offset* cache_info->number_channels*sizeof(*p),length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->number_channels*nexus_info->region.width; offset+=cache_info->columns; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Write pixels to distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WriteDistributePixelCachePixels((DistributeCacheInfo *) cache_info->server_info,&region,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->number_channels*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToWritePixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); }
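
/*
  Illustrative sketch only (not part of MagickCore): a standalone restatement
  of the byte position that ReadPixelCachePixels() and WritePixelCachePixels()
  seek to for the first pixel of a nexus row.  The helper name is
  hypothetical; it exists purely to make the addressing arithmetic explicit.
*/
static inline MagickOffsetType IllustrativePixelRowBytePosition(
  const CacheInfo *magick_restrict cache_info,const ssize_t x,const ssize_t y)
{
  MagickOffsetType
    pixel_offset;

  /*
    Row-major pixel index of (x,y) within the cache.
  */
  pixel_offset=(MagickOffsetType) y*cache_info->columns+x;
  /*
    File position: cache header offset plus pixels scaled by the channel
    count and quantum depth.
  */
  return(cache_info->offset+pixel_offset*(MagickOffsetType)
    (cache_info->number_channels*sizeof(Quantum)));
}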
convolution_3x3_packn_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_packn_fp16sa_rvv(const Mat& kernel, Mat& kernel_tm_packn, int inch, int outch, const Option& opt) { const int packn = csrr_vlenb() / 2; // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = pb-pa-inch/pa-64-outch/pb kernel_tm_packn.create(inch / packn, 64, outch / packn, (size_t)2u * packn * packn, packn * packn); for (int q = 0; q + (packn - 1) < outch; q += packn) { Mat g0 = kernel_tm_packn.channel(q / packn); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int p = 0; p + (packn - 1) < inch; p += packn) { for (int i = 0; i < packn; i++) { for (int j = 0; j < packn; j++) { const float* k00 = kernel_tm.channel(q + j).row(p + i); g00[0] = (__fp16)k00[k]; g00++; } } } } } } static void conv3x3s1_winograd64_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const __fp16* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; // bottom_blob_tm.create(tiles, 64, inch, 
elemsize, elempack, opt.workspace_allocator); bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); // NOTE c99 variable length array __fp16 tmp[8][8][packn]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * packn; for (int m = 0; m < 8; m++) { vfloat16m1_t _r00 = vle16_v_f16m1(r0, vl); vfloat16m1_t _r01 = vle16_v_f16m1(r0 + packn, vl); vfloat16m1_t _r02 = vle16_v_f16m1(r0 + packn * 2, vl); vfloat16m1_t _r03 = vle16_v_f16m1(r0 + packn * 3, vl); vfloat16m1_t _r04 = vle16_v_f16m1(r0 + packn * 4, vl); vfloat16m1_t _r05 = vle16_v_f16m1(r0 + packn * 5, vl); vfloat16m1_t _r06 = vle16_v_f16m1(r0 + packn * 6, vl); vfloat16m1_t _r07 = vle16_v_f16m1(r0 + packn * 7, vl); vfloat16m1_t _tmp0m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r00, _r06, vl), 5.25f, vfsub_vv_f16m1(_r04, _r02, vl), vl); vfloat16m1_t _tmp7m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r07, _r01, vl), 5.25f, vfsub_vv_f16m1(_r03, _r05, vl), vl); vse16_v_f16m1(tmp[0][m], _tmp0m, vl); vse16_v_f16m1(tmp[7][m], _tmp7m, vl); vfloat16m1_t _tmp12a = vfmacc_vf_f16m1(vfadd_vv_f16m1(_r02, _r06, vl), -4.25f, _r04, vl); vfloat16m1_t _tmp12b = vfmacc_vf_f16m1(vfadd_vv_f16m1(_r01, _r05, vl), -4.25f, _r03, vl); vfloat16m1_t _tmp1m = vfadd_vv_f16m1(_tmp12a, _tmp12b, vl); vfloat16m1_t _tmp2m = vfsub_vv_f16m1(_tmp12a, _tmp12b, vl); vse16_v_f16m1(tmp[1][m], _tmp1m, vl); vse16_v_f16m1(tmp[2][m], _tmp2m, vl); vfloat16m1_t _tmp34a = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_r06, 0.25f, _r02, vl), -1.25f, _r04, vl); vfloat16m1_t _tmp34b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_r01, 0.5f, vl), -2.5f, _r03, vl), 2.f, _r05, vl); vfloat16m1_t _tmp3m = vfadd_vv_f16m1(_tmp34a, _tmp34b, vl); vfloat16m1_t _tmp4m = vfsub_vv_f16m1(_tmp34a, _tmp34b, vl); vse16_v_f16m1(tmp[3][m], _tmp3m, vl); vse16_v_f16m1(tmp[4][m], _tmp4m, vl); vfloat16m1_t _tmp56a = vfmacc_vf_f16m1(_r06, 4.f, vfmacc_vf_f16m1(_r02, -1.25f, _r04, vl), vl); vfloat16m1_t _tmp56b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_r01, 2.f, vl), -2.5f, _r03, vl), 0.5f, _r05, vl); vfloat16m1_t _tmp5m = vfadd_vv_f16m1(_tmp56a, _tmp56b, vl); vfloat16m1_t _tmp6m = vfsub_vv_f16m1(_tmp56a, _tmp56b, vl); vse16_v_f16m1(tmp[5][m], _tmp5m, vl); vse16_v_f16m1(tmp[6][m], _tmp6m, vl); r0 += w * packn; 
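                    // note: r0 now points at the start of the next input row
                    // (row stride = w * packn elements in the packed layout);
                    // after all 8 rows, tmp[k][m] holds the row-wise transform
                    // of input row m, and the second pass below applies the
                    // same 1-D transform column-wise to finish the 8x8 tile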
} __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 8 + j) * packn; __fp16* r0_tm_1 = r0_tm_0 + tiles * packn; __fp16* r0_tm_2 = r0_tm_0 + tiles * packn * 2; __fp16* r0_tm_3 = r0_tm_0 + tiles * packn * 3; __fp16* r0_tm_4 = r0_tm_0 + tiles * packn * 4; __fp16* r0_tm_5 = r0_tm_0 + tiles * packn * 5; __fp16* r0_tm_6 = r0_tm_0 + tiles * packn * 6; __fp16* r0_tm_7 = r0_tm_0 + tiles * packn * 7; for (int m = 0; m < 8; m++) { vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl); vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl); vfloat16m1_t _tmp02 = vle16_v_f16m1(tmp[m][2], vl); vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl); vfloat16m1_t _tmp04 = vle16_v_f16m1(tmp[m][4], vl); vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl); vfloat16m1_t _tmp06 = vle16_v_f16m1(tmp[m][6], vl); vfloat16m1_t _tmp07 = vle16_v_f16m1(tmp[m][7], vl); vfloat16m1_t _r0tm0 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp00, _tmp06, vl), 5.25f, vfsub_vv_f16m1(_tmp04, _tmp02, vl), vl); vfloat16m1_t _r0tm7 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp07, _tmp01, vl), 5.25f, vfsub_vv_f16m1(_tmp03, _tmp05, vl), vl); vfloat16m1_t _tmp12a = vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp02, _tmp06, vl), -4.25f, _tmp04, vl); vfloat16m1_t _tmp12b = vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp01, _tmp05, vl), -4.25f, _tmp03, vl); vfloat16m1_t _r0tm1 = vfadd_vv_f16m1(_tmp12a, _tmp12b, vl); vfloat16m1_t _r0tm2 = vfsub_vv_f16m1(_tmp12a, _tmp12b, vl); vfloat16m1_t _tmp34a = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp06, 0.25f, _tmp02, vl), -1.25f, _tmp04, vl); vfloat16m1_t _tmp34b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_tmp01, 0.5f, vl), -2.5f, _tmp03, vl), 2.f, _tmp05, vl); vfloat16m1_t _r0tm3 = vfadd_vv_f16m1(_tmp34a, _tmp34b, vl); vfloat16m1_t _r0tm4 = vfsub_vv_f16m1(_tmp34a, _tmp34b, vl); vfloat16m1_t _tmp56a = vfmacc_vf_f16m1(_tmp06, 4.f, vfmacc_vf_f16m1(_tmp02, -1.25f, _tmp04, vl), vl); vfloat16m1_t _tmp56b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_tmp01, 2.f, vl), -2.5f, _tmp03, vl), 0.5f, _tmp05, vl); vfloat16m1_t _r0tm5 = vfadd_vv_f16m1(_tmp56a, _tmp56b, vl); vfloat16m1_t _r0tm6 = vfsub_vv_f16m1(_tmp56a, _tmp56b, vl); vse16_v_f16m1(r0_tm_0, _r0tm0, vl); vse16_v_f16m1(r0_tm_1, _r0tm1, vl); vse16_v_f16m1(r0_tm_2, _r0tm2, vl); vse16_v_f16m1(r0_tm_3, _r0tm3, vl); vse16_v_f16m1(r0_tm_4, _r0tm4, vl); vse16_v_f16m1(r0_tm_5, _r0tm5, vl); vse16_v_f16m1(r0_tm_6, _r0tm6, vl); vse16_v_f16m1(r0_tm_7, _r0tm7, vl); r0_tm_0 += tiles * packn * 8; r0_tm_1 += tiles * packn * 8; r0_tm_2 += tiles * packn * 8; r0_tm_3 += tiles * packn * 8; r0_tm_4 += tiles * packn * 8; r0_tm_5 += tiles * packn * 8; r0_tm_6 += tiles * packn * 8; r0_tm_7 += tiles * packn * 8; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r 
= 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { __fp16* tmpptr = tm2.row<__fp16>(i / 8); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { #if C906 for (int l = 0; l < packn; l++) { tmpptr[0] = r0[l]; tmpptr[1] = r0[l + packn]; tmpptr[2] = r0[l + packn * 2]; tmpptr[3] = r0[l + packn * 3]; tmpptr[4] = r0[l + packn * 4]; tmpptr[5] = r0[l + packn * 5]; tmpptr[6] = r0[l + packn * 6]; tmpptr[7] = r0[l + packn * 7]; tmpptr += 8; } r0 += bottom_blob_tm.cstep * packn; #else vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl); vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl); vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl); vfloat16m1_t _val4 = vle16_v_f16m1(r0 + packn * 4, vl); vfloat16m1_t _val5 = vle16_v_f16m1(r0 + packn * 5, vl); vfloat16m1_t _val6 = vle16_v_f16m1(r0 + packn * 6, vl); vfloat16m1_t _val7 = vle16_v_f16m1(r0 + packn * 7, vl); vsseg8e16_v_f16m1x8(tmpptr, vcreate_f16m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn * 8; #endif } } for (; i + 3 < tiles; i += 4) { __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { #if C906 for (int l = 0; l < packn; l++) { tmpptr[0] = r0[l]; tmpptr[1] = r0[l + packn]; tmpptr[2] = r0[l + packn * 2]; tmpptr[3] = r0[l + packn * 3]; tmpptr += 4; } r0 += bottom_blob_tm.cstep * packn; #else vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl); vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl); vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl); vsseg4e16_v_f16m1x4(tmpptr, vcreate_f16m1x4(_val0, _val1, _val2, _val3), vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn * 4; #endif } } for (; i + 1 < tiles; i += 2) { __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { #if C906 for (int l = 0; l < packn; l++) { tmpptr[0] = r0[l]; tmpptr[1] = r0[l + packn]; tmpptr += 2; } r0 += bottom_blob_tm.cstep * packn; #else vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl); vsseg2e16_v_f16m1x2(tmpptr, vcreate_f16m1x2(_val0, _val1), vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn * 2; #endif } } for (; i < tiles; i++) { __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { vfloat16m1_t _val = vle16_v_f16m1(r0, vl); vse16_v_f16m1(tmpptr, _val, vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl); 
vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum4 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum5 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum6 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum7 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *r0++; __fp16 val1 = *r0++; __fp16 val2 = *r0++; __fp16 val3 = *r0++; __fp16 val4 = *r0++; __fp16 val5 = *r0++; __fp16 val6 = *r0++; __fp16 val7 = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl); _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl); _sum4 = vfmacc_vf_f16m1(_sum4, val4, _w0, vl); _sum5 = vfmacc_vf_f16m1(_sum5, val5, _w0, vl); _sum6 = vfmacc_vf_f16m1(_sum6, val6, _w0, vl); _sum7 = vfmacc_vf_f16m1(_sum7, val7, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum0, vl); vse16_v_f16m1(output0_tm + packn, _sum1, vl); vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl); vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl); vse16_v_f16m1(output0_tm + packn * 4, _sum4, vl); vse16_v_f16m1(output0_tm + packn * 5, _sum5, vl); vse16_v_f16m1(output0_tm + packn * 6, _sum6, vl); vse16_v_f16m1(output0_tm + packn * 7, _sum7, vl); output0_tm += packn * 8; } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *r0++; __fp16 val1 = *r0++; __fp16 val2 = *r0++; __fp16 val3 = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl); _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum0, vl); vse16_v_f16m1(output0_tm + packn, _sum1, vl); vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl); vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl); output0_tm += packn * 4; } for (; i + 1 < tiles; i += 2) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *r0++; __fp16 val1 = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum0, vl); vse16_v_f16m1(output0_tm + packn, _sum1, vl); output0_tm += packn * 2; } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum, vl); output0_tm += packn; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, 
opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; vfloat16m1_t _bias0 = bias ? vle16_v_f16m1((const __fp16*)bias + p * packn, vl) : vfmv_v_f_f16m1(0.f, vl); // NOTE c99 variable length array __fp16 tmp[6][8][packn]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 8 + j) * packn; const __fp16* output0_tm_1 = output0_tm_0 + tiles * packn; const __fp16* output0_tm_2 = output0_tm_0 + tiles * packn * 2; const __fp16* output0_tm_3 = output0_tm_0 + tiles * packn * 3; const __fp16* output0_tm_4 = output0_tm_0 + tiles * packn * 4; const __fp16* output0_tm_5 = output0_tm_0 + tiles * packn * 5; const __fp16* output0_tm_6 = output0_tm_0 + tiles * packn * 6; const __fp16* output0_tm_7 = output0_tm_0 + tiles * packn * 7; __fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * packn; // TODO rvv optimize for (int m = 0; m < 8; m++) { vfloat16m1_t _out0tm0 = vle16_v_f16m1(output0_tm_0, vl); vfloat16m1_t _out0tm1 = vle16_v_f16m1(output0_tm_1, vl); vfloat16m1_t _out0tm2 = vle16_v_f16m1(output0_tm_2, vl); vfloat16m1_t _out0tm3 = vle16_v_f16m1(output0_tm_3, vl); vfloat16m1_t _out0tm4 = vle16_v_f16m1(output0_tm_4, vl); vfloat16m1_t _out0tm5 = vle16_v_f16m1(output0_tm_5, vl); vfloat16m1_t _out0tm6 = vle16_v_f16m1(output0_tm_6, vl); vfloat16m1_t _out0tm7 = vle16_v_f16m1(output0_tm_7, vl); vfloat16m1_t _tmp024a = vfadd_vv_f16m1(_out0tm1, _out0tm2, vl); vfloat16m1_t _tmp135a = vfsub_vv_f16m1(_out0tm1, _out0tm2, vl); vfloat16m1_t _tmp024b = vfadd_vv_f16m1(_out0tm3, _out0tm4, vl); vfloat16m1_t _tmp135b = vfsub_vv_f16m1(_out0tm3, _out0tm4, vl); vfloat16m1_t _tmp024c = vfadd_vv_f16m1(_out0tm5, _out0tm6, vl); vfloat16m1_t _tmp135c = vfsub_vv_f16m1(_out0tm5, _out0tm6, vl); vfloat16m1_t _tmp0m = vfadd_vv_f16m1(vfadd_vv_f16m1(_out0tm0, _tmp024a, vl), vfmacc_vf_f16m1(_tmp024b, 32.f, _tmp024c, vl), vl); vfloat16m1_t _tmp2m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl); vfloat16m1_t _tmp4m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl); vse16_v_f16m1(tmp[0][m], _tmp0m, vl); vse16_v_f16m1(tmp[2][m], _tmp2m, vl); vse16_v_f16m1(tmp[4][m], _tmp4m, vl); vfloat16m1_t _tmp1m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl); vfloat16m1_t _tmp3m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl); vfloat16m1_t _tmp5m = vfadd_vv_f16m1(vfadd_vv_f16m1(_out0tm7, _tmp135a, vl), vfmacc_vf_f16m1(_tmp135c, 32.f, 
_tmp135b, vl), vl); vse16_v_f16m1(tmp[1][m], _tmp1m, vl); vse16_v_f16m1(tmp[3][m], _tmp3m, vl); vse16_v_f16m1(tmp[5][m], _tmp5m, vl); output0_tm_0 += tiles * packn * 8; output0_tm_1 += tiles * packn * 8; output0_tm_2 += tiles * packn * 8; output0_tm_3 += tiles * packn * 8; output0_tm_4 += tiles * packn * 8; output0_tm_5 += tiles * packn * 8; output0_tm_6 += tiles * packn * 8; output0_tm_7 += tiles * packn * 8; } for (int m = 0; m < 6; m++) { vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl); vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl); vfloat16m1_t _tmp02 = vle16_v_f16m1(tmp[m][2], vl); vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl); vfloat16m1_t _tmp04 = vle16_v_f16m1(tmp[m][4], vl); vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl); vfloat16m1_t _tmp06 = vle16_v_f16m1(tmp[m][6], vl); vfloat16m1_t _tmp07 = vle16_v_f16m1(tmp[m][7], vl); vfloat16m1_t _tmp024a = vfadd_vv_f16m1(_tmp01, _tmp02, vl); vfloat16m1_t _tmp135a = vfsub_vv_f16m1(_tmp01, _tmp02, vl); vfloat16m1_t _tmp024b = vfadd_vv_f16m1(_tmp03, _tmp04, vl); vfloat16m1_t _tmp135b = vfsub_vv_f16m1(_tmp03, _tmp04, vl); vfloat16m1_t _tmp024c = vfadd_vv_f16m1(_tmp05, _tmp06, vl); vfloat16m1_t _tmp135c = vfsub_vv_f16m1(_tmp05, _tmp06, vl); vfloat16m1_t _out00 = vfadd_vv_f16m1(_bias0, vfadd_vv_f16m1(vfadd_vv_f16m1(_tmp00, _tmp024a, vl), vfmacc_vf_f16m1(_tmp024b, 32.f, _tmp024c, vl), vl), vl); vfloat16m1_t _out02 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl), vl); vfloat16m1_t _out04 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl), vl); vse16_v_f16m1(output0, _out00, vl); vse16_v_f16m1(output0 + packn * 2, _out02, vl); vse16_v_f16m1(output0 + packn * 4, _out04, vl); vfloat16m1_t _out01 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl), vl); vfloat16m1_t _out03 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl), vl); vfloat16m1_t _out05 = vfadd_vv_f16m1(_bias0, vfadd_vv_f16m1(vfadd_vv_f16m1(_tmp07, _tmp135a, vl), vfmacc_vf_f16m1(_tmp135c, 32.f, _tmp135b, vl), vl), vl); vse16_v_f16m1(output0 + packn, _out01, vl); vse16_v_f16m1(output0 + packn * 3, _out03, vl); vse16_v_f16m1(output0 + packn * 5, _out05, vl); output0 += outw * packn; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_winograd42_transform_kernel_packn_fp16sa_rvv(const Mat& kernel, Mat& kernel_tm_packn, int inch, int outch, const Option& opt) { const int packn = csrr_vlenb() / 2; // winograd42 transform kernel Mat kernel_tm(6 * 6, inch, outch); const float ktm[6][3] = { {1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6}, {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[6][3]; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = 
k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 36-inch-outch // dst = pb-pa-inch/pa-36-outch/pb kernel_tm_packn.create(inch / packn, 36, outch / packn, (size_t)2u * packn * packn, packn * packn); for (int q = 0; q + (packn - 1) < outch; q += packn) { Mat g0 = kernel_tm_packn.channel(q / packn); for (int k = 0; k < 36; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int p = 0; p + (packn - 1) < inch; p += packn) { for (int i = 0; i < packn; i++) { for (int j = 0; j < packn; j++) { const float* k00 = kernel_tm.channel(q + j).row(p + i); g00[0] = (__fp16)k00[k]; g00++; } } } } } } static void conv3x3s1_winograd42_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 4n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const __fp16* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); // NOTE c99 variable length array __fp16 tmp[6][6][packn]; // tile for (int i = 0; i < h_tm / 6; i++) { for (int j = 0; j < w_tm / 6; j++) { const __fp16* r0 = img0.row<const __fp16>(i * 4) + (j * 4) * packn; for (int m = 0; m < 6; m++) { vfloat16m1_t _r00 = vle16_v_f16m1(r0, vl); vfloat16m1_t _r01 = vle16_v_f16m1(r0 + packn, vl); vfloat16m1_t _r02 = vle16_v_f16m1(r0 + packn * 2, vl); vfloat16m1_t _r03 = vle16_v_f16m1(r0 + packn * 3, vl); vfloat16m1_t _r04 = vle16_v_f16m1(r0 + packn * 4, vl); vfloat16m1_t _r05 = vle16_v_f16m1(r0 + packn * 5, vl); vfloat16m1_t _tmp0m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_r04, 4.f, _r00, vl), -5.f, _r02, vl); vfloat16m1_t _tmp1m = vfmacc_vf_f16m1(vfadd_vv_f16m1(_r04, _r03, vl), -4.f, vfadd_vv_f16m1(_r01, _r02, vl), vl); vfloat16m1_t _tmp2m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r04, _r03, vl), 4.f, vfsub_vv_f16m1(_r01, _r02, vl), vl); vfloat16m1_t _tmp3m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r04, _r02, vl), -2.f, vfsub_vv_f16m1(_r01, _r03, vl), vl); vfloat16m1_t _tmp4m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r04, _r02, vl), 2.f, vfsub_vv_f16m1(_r01, _r03, vl), vl); vfloat16m1_t _tmp5m = 
vfmacc_vf_f16m1(vfmacc_vf_f16m1(_r05, 4.f, _r01, vl), -5.f, _r03, vl); vse16_v_f16m1(tmp[0][m], _tmp0m, vl); vse16_v_f16m1(tmp[1][m], _tmp1m, vl); vse16_v_f16m1(tmp[2][m], _tmp2m, vl); vse16_v_f16m1(tmp[3][m], _tmp3m, vl); vse16_v_f16m1(tmp[4][m], _tmp4m, vl); vse16_v_f16m1(tmp[5][m], _tmp5m, vl); r0 += w * packn; } __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 6 + j) * packn; __fp16* r0_tm_1 = r0_tm_0 + tiles * packn; __fp16* r0_tm_2 = r0_tm_0 + tiles * packn * 2; __fp16* r0_tm_3 = r0_tm_0 + tiles * packn * 3; __fp16* r0_tm_4 = r0_tm_0 + tiles * packn * 4; __fp16* r0_tm_5 = r0_tm_0 + tiles * packn * 5; for (int m = 0; m < 6; m++) { vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl); vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl); vfloat16m1_t _tmp02 = vle16_v_f16m1(tmp[m][2], vl); vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl); vfloat16m1_t _tmp04 = vle16_v_f16m1(tmp[m][4], vl); vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl); vfloat16m1_t _r0tm0 = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp04, 4.f, _tmp00, vl), -5.f, _tmp02, vl); vfloat16m1_t _r0tm1 = vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp04, _tmp03, vl), -4.f, vfadd_vv_f16m1(_tmp01, _tmp02, vl), vl); vfloat16m1_t _r0tm2 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp04, _tmp03, vl), 4.f, vfsub_vv_f16m1(_tmp01, _tmp02, vl), vl); vfloat16m1_t _r0tm3 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp04, _tmp02, vl), -2.f, vfsub_vv_f16m1(_tmp01, _tmp03, vl), vl); vfloat16m1_t _r0tm4 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp04, _tmp02, vl), 2.f, vfsub_vv_f16m1(_tmp01, _tmp03, vl), vl); vfloat16m1_t _r0tm5 = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp05, 4.f, _tmp01, vl), -5.f, _tmp03, vl); vse16_v_f16m1(r0_tm_0, _r0tm0, vl); vse16_v_f16m1(r0_tm_1, _r0tm1, vl); vse16_v_f16m1(r0_tm_2, _r0tm2, vl); vse16_v_f16m1(r0_tm_3, _r0tm3, vl); vse16_v_f16m1(r0_tm_4, _r0tm4, vl); vse16_v_f16m1(r0_tm_5, _r0tm5, vl); r0_tm_0 += tiles * packn * 6; r0_tm_1 += tiles * packn * 6; r0_tm_2 += tiles * packn * 6; r0_tm_3 += tiles * packn * 6; r0_tm_4 += tiles * packn * 6; r0_tm_5 += tiles * packn * 6; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { __fp16* tmpptr = tm2.row<__fp16>(i / 8); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { #if C906 for (int l = 0; l < packn; l++) { tmpptr[0] = r0[l]; tmpptr[1] = r0[l + packn]; tmpptr[2] = r0[l + packn * 2]; tmpptr[3] = r0[l + packn * 3]; tmpptr[4] = r0[l + packn * 4]; tmpptr[5] = r0[l + packn * 5]; tmpptr[6] = r0[l + packn * 6]; tmpptr[7] = r0[l + packn * 7]; tmpptr += 8; } r0 += bottom_blob_tm.cstep * packn; #else 
vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl); vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl); vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl); vfloat16m1_t _val4 = vle16_v_f16m1(r0 + packn * 4, vl); vfloat16m1_t _val5 = vle16_v_f16m1(r0 + packn * 5, vl); vfloat16m1_t _val6 = vle16_v_f16m1(r0 + packn * 6, vl); vfloat16m1_t _val7 = vle16_v_f16m1(r0 + packn * 7, vl); vsseg8e16_v_f16m1x8(tmpptr, vcreate_f16m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn * 8; #endif } } for (; i + 3 < tiles; i += 4) { __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { #if C906 for (int l = 0; l < packn; l++) { tmpptr[0] = r0[l]; tmpptr[1] = r0[l + packn]; tmpptr[2] = r0[l + packn * 2]; tmpptr[3] = r0[l + packn * 3]; tmpptr += 4; } r0 += bottom_blob_tm.cstep * packn; #else vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl); vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl); vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl); vsseg4e16_v_f16m1x4(tmpptr, vcreate_f16m1x4(_val0, _val1, _val2, _val3), vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn * 4; #endif } } for (; i + 1 < tiles; i += 2) { __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { #if C906 for (int l = 0; l < packn; l++) { tmpptr[0] = r0[l]; tmpptr[1] = r0[l + packn]; tmpptr += 2; } r0 += bottom_blob_tm.cstep * packn; #else vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl); vsseg2e16_v_f16m1x2(tmpptr, vcreate_f16m1x2(_val0, _val1), vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn * 2; #endif } } for (; i < tiles; i++) { __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { vfloat16m1_t _val = vle16_v_f16m1(r0, vl); vse16_v_f16m1(tmpptr, _val, vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum4 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum5 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum6 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum7 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *r0++; __fp16 val1 = *r0++; __fp16 val2 = *r0++; __fp16 val3 = *r0++; __fp16 val4 = *r0++; __fp16 val5 = *r0++; __fp16 val6 = *r0++; __fp16 val7 = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); _sum2 = 
vfmacc_vf_f16m1(_sum2, val2, _w0, vl); _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl); _sum4 = vfmacc_vf_f16m1(_sum4, val4, _w0, vl); _sum5 = vfmacc_vf_f16m1(_sum5, val5, _w0, vl); _sum6 = vfmacc_vf_f16m1(_sum6, val6, _w0, vl); _sum7 = vfmacc_vf_f16m1(_sum7, val7, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum0, vl); vse16_v_f16m1(output0_tm + packn, _sum1, vl); vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl); vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl); vse16_v_f16m1(output0_tm + packn * 4, _sum4, vl); vse16_v_f16m1(output0_tm + packn * 5, _sum5, vl); vse16_v_f16m1(output0_tm + packn * 6, _sum6, vl); vse16_v_f16m1(output0_tm + packn * 7, _sum7, vl); output0_tm += packn * 8; } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *r0++; __fp16 val1 = *r0++; __fp16 val2 = *r0++; __fp16 val3 = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl); _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum0, vl); vse16_v_f16m1(output0_tm + packn, _sum1, vl); vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl); vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl); output0_tm += packn * 4; } for (; i + 1 < tiles; i += 2) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *r0++; __fp16 val1 = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum0, vl); vse16_v_f16m1(output0_tm + packn, _sum1, vl); output0_tm += packn * 2; } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum, vl); output0_tm += packn; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat 
out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; vfloat16m1_t _bias0 = bias ? vle16_v_f16m1((const __fp16*)bias + p * packn, vl) : vfmv_v_f_f16m1(0.f, vl); // NOTE variable length array __fp16 tmp[4][6][packn]; // tile for (int i = 0; i < outh / 4; i++) { for (int j = 0; j < outw / 4; j++) { // top_blob_tm.create(tiles, 36, outch, elemsize, elempack); const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 6 + j) * packn; const __fp16* output0_tm_1 = output0_tm_0 + tiles * packn; const __fp16* output0_tm_2 = output0_tm_0 + tiles * packn * 2; const __fp16* output0_tm_3 = output0_tm_0 + tiles * packn * 3; const __fp16* output0_tm_4 = output0_tm_0 + tiles * packn * 4; const __fp16* output0_tm_5 = output0_tm_0 + tiles * packn * 5; __fp16* output0 = out0.row<__fp16>(i * 4) + (j * 4) * packn; // TODO rvv optimize for (int m = 0; m < 6; m++) { vfloat16m1_t _out0tm0 = vle16_v_f16m1(output0_tm_0, vl); vfloat16m1_t _out0tm1 = vle16_v_f16m1(output0_tm_1, vl); vfloat16m1_t _out0tm2 = vle16_v_f16m1(output0_tm_2, vl); vfloat16m1_t _out0tm3 = vle16_v_f16m1(output0_tm_3, vl); vfloat16m1_t _out0tm4 = vle16_v_f16m1(output0_tm_4, vl); vfloat16m1_t _out0tm5 = vle16_v_f16m1(output0_tm_5, vl); vfloat16m1_t _tmp02a = vfadd_vv_f16m1(_out0tm1, _out0tm2, vl); vfloat16m1_t _tmp13a = vfsub_vv_f16m1(_out0tm1, _out0tm2, vl); vfloat16m1_t _tmp02b = vfadd_vv_f16m1(_out0tm3, _out0tm4, vl); vfloat16m1_t _tmp13b = vfsub_vv_f16m1(_out0tm3, _out0tm4, vl); vfloat16m1_t _tmp0m = vfadd_vv_f16m1(vfadd_vv_f16m1(_out0tm0, _tmp02a, vl), _tmp02b, vl); vfloat16m1_t _tmp1m = vfmacc_vf_f16m1(_tmp13a, 2.f, _tmp13b, vl); vfloat16m1_t _tmp2m = vfmacc_vf_f16m1(_tmp02a, 4.f, _tmp02b, vl); vfloat16m1_t _tmp3m = vfmacc_vf_f16m1(vfadd_vv_f16m1(_out0tm5, _tmp13a, vl), 8.f, _tmp13b, vl); vse16_v_f16m1(tmp[0][m], _tmp0m, vl); vse16_v_f16m1(tmp[1][m], _tmp1m, vl); vse16_v_f16m1(tmp[2][m], _tmp2m, vl); vse16_v_f16m1(tmp[3][m], _tmp3m, vl); output0_tm_0 += tiles * packn * 6; output0_tm_1 += tiles * packn * 6; output0_tm_2 += tiles * packn * 6; output0_tm_3 += tiles * packn * 6; output0_tm_4 += tiles * packn * 6; output0_tm_5 += tiles * packn * 6; } for (int m = 0; m < 4; m++) { vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl); vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl); vfloat16m1_t _tmp02 = vle16_v_f16m1(tmp[m][2], vl); vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl); vfloat16m1_t _tmp04 = vle16_v_f16m1(tmp[m][4], vl); vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl); vfloat16m1_t _tmp02a = vfadd_vv_f16m1(_tmp01, _tmp02, vl); vfloat16m1_t _tmp13a = vfsub_vv_f16m1(_tmp01, _tmp02, vl); vfloat16m1_t _tmp02b = vfadd_vv_f16m1(_tmp03, _tmp04, vl); vfloat16m1_t _tmp13b = vfsub_vv_f16m1(_tmp03, _tmp04, vl); vfloat16m1_t _out00 = vfadd_vv_f16m1(_bias0, vfadd_vv_f16m1(vfadd_vv_f16m1(_tmp00, _tmp02a, vl), _tmp02b, vl), vl); vfloat16m1_t _out01 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(_tmp13a, 2.f, _tmp13b, vl), vl); vfloat16m1_t _out02 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(_tmp02a, 4.f, _tmp02b, vl), vl); vfloat16m1_t _out03 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp05, _tmp13a, vl), 8.f, _tmp13b, vl), vl); vse16_v_f16m1(output0, _out00, vl); vse16_v_f16m1(output0 + packn, _out01, vl); vse16_v_f16m1(output0 + packn * 2, _out02, vl); vse16_v_f16m1(output0 + packn * 3, _out03, vl); output0 += outw * packn; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, 
top_blob_bordered.w - top_blob.w, opt); }
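/*
 * Illustrative scalar sketch (not part of ncnn): the F(4x4, 3x3) Winograd
 * output transform that the packed RVV loops above apply to `packn`
 * channels at once, written out in plain float for a single 6x6 tile of
 * transform coefficients. The function name, the row-major tile layout and
 * the separate `bias` argument are assumptions made for this sketch only.
 */
static void winograd43_output_transform_tile_ref(const float in[6][6], float out[4][4], float bias)
{
    float tmp[4][6];

    /* first pass: combine the 6 coefficients of each row m into 4 values,
     * following the otm matrix quoted in the comment above:
     *   0 = r0 + (r1 + r2) + (r3 + r4)
     *   1 = (r1 - r2) + (r3 - r4) * 2
     *   2 = (r1 + r2) + (r3 + r4) * 4
     *   3 = r5 + (r1 - r2) + (r3 - r4) * 8 */
    for (int m = 0; m < 6; m++)
    {
        float r0 = in[m][0], r1 = in[m][1], r2 = in[m][2];
        float r3 = in[m][3], r4 = in[m][4], r5 = in[m][5];

        float a = r1 + r2, b = r1 - r2;
        float c = r3 + r4, d = r3 - r4;

        tmp[0][m] = r0 + a + c;
        tmp[1][m] = b + d * 2.f;
        tmp[2][m] = a + c * 4.f;
        tmp[3][m] = r5 + b + d * 8.f;
    }

    /* second pass: the same combination along the other axis, plus the bias,
     * yields the 4x4 spatial output tile */
    for (int m = 0; m < 4; m++)
    {
        float r0 = tmp[m][0], r1 = tmp[m][1], r2 = tmp[m][2];
        float r3 = tmp[m][3], r4 = tmp[m][4], r5 = tmp[m][5];

        float a = r1 + r2, b = r1 - r2;
        float c = r3 + r4, d = r3 - r4;

        out[m][0] = bias + r0 + a + c;
        out[m][1] = bias + b + d * 2.f;
        out[m][2] = bias + a + c * 4.f;
        out[m][3] = bias + r5 + b + d * 8.f;
    }
}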
MatrixMXN.h
#pragma once

#include "VectorND.h"

#include <fstream>
#include <iostream> // needed for std::cout used in cout()

template<class T>
class MatrixMN
{
public:
    int num_rows_; // m_
    int num_cols_; // n_
    T *values_;

    MatrixMN()
        : num_rows_(0), num_cols_(0), values_(nullptr)
    {}

    // Sizing constructor: allocate and zero the m x n storage immediately.
    MatrixMN(const int& _m, const int& _n)
        : num_rows_(0), num_cols_(0), values_(nullptr)
    {
        initialize(_m, _n);
    }

    void initialize(const int& _m, const int& _n)
    {
        num_rows_ = _m;
        num_cols_ = _n;

        SAFE_DELETE_ARRAY(values_);

        const int num_all = num_rows_ * num_cols_;
        {
            values_ = new T[num_all];

            for (int i = 0; i < num_all; i++)
                values_[i] = (T)0;
        }
    }

    // Row-major accessor used by cout().
    T getValue(const int& row, const int& col) const
    {
        return values_[row * num_cols_ + col];
    }

    void cout()
    {
        for (int row = 0; row < num_rows_; row++)
        {
            for (int col = 0; col < num_cols_; col++)
            {
                std::cout << getValue(row, col) << " ";
            }

            std::cout << std::endl;
        }
    }

    void writeTXT(std::ofstream& of) const
    {
        of << num_rows_ << " " << num_cols_ << std::endl;

        for (int i = 0; i < num_rows_ * num_cols_; i++)
        {
            of << values_[i];
            if (i != num_rows_ * num_cols_ - 1) of << " ";
        }

        of << std::endl;
    }

    // Row-parallel matrix-vector product: each thread owns whole rows of the
    // result, so no two threads ever write the same entry.
    void multiply(const VectorND<T>& vector, VectorND<T>& result) const
    {
#pragma omp parallel for
        for (int row = 0; row < num_rows_; row++)
        {
            result.values_[row] = (T)0;

            int ix = row * num_cols_;
            T temp;

            for (int col = 0; col < num_cols_; col++, ix++)
            {
                temp = values_[ix];
                temp *= vector.values_[col];
                result.values_[row] += temp;
            }
        }
    }
};
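/*
 * Plain-C illustration (not part of MatrixMXN.h) of the partitioning used by
 * MatrixMN::multiply above: the OpenMP loop runs over rows, so each thread
 * writes a disjoint set of result entries and no reduction or critical
 * section is needed. The row-major layout matches values_; names are
 * illustrative only.
 */
static void matvec_row_parallel(const double *A, const double *x, double *y,
                                int num_rows, int num_cols)
{
#pragma omp parallel for
    for (int row = 0; row < num_rows; row++)
    {
        double sum = 0.0;
        const double *a_row = A + (long)row * num_cols; /* row-major stride */

        for (int col = 0; col < num_cols; col++)
            sum += a_row[col] * x[col];

        y[row] = sum; /* only this thread ever touches y[row] */
    }
}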
rdf.c
/******************************************************************************* ** C extension to calculate the radial distribution function *******************************************************************************/ #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #include <Python.h> // includes stdio.h, string.h, errno.h, stdlib.h #include <numpy/arrayobject.h> #include <math.h> #include "visclibs/boxeslib.h" #include "visclibs/utilities.h" #include "visclibs/array_utils.h" #include "visclibs/constants.h" #include "gui/preferences.h" #if PY_MAJOR_VERSION >= 3 #define MOD_ERROR_VAL NULL #define MOD_SUCCESS_VAL(val) val #define MOD_INIT(name) PyMODINIT_FUNC PyInit_##name(void) #define MOD_DEF(ob, name, doc, methods) \ static struct PyModuleDef moduledef = { \ PyModuleDef_HEAD_INIT, name, doc, -1, methods, }; \ ob = PyModule_Create(&moduledef); #else #define MOD_ERROR_VAL #define MOD_SUCCESS_VAL(val) #define MOD_INIT(name) void init##name(void) #define MOD_DEF(ob, name, doc, methods) \ ob = Py_InitModule3(name, methods, doc); #endif static PyObject* calculateRDF(PyObject*, PyObject*); static int computeHistogram(int, int, int*, double*, int*, double*, int*, int*, double, double, double, double*); static void normaliseRDF(int, int, int, int, double, double, double*, double*); /******************************************************************************* ** List of python methods available in this module *******************************************************************************/ static struct PyMethodDef module_methods[] = { {"calculateRDF", calculateRDF, METH_VARARGS, "Calculate the RDF for the selected atoms"}, {NULL, NULL, 0, NULL} }; /******************************************************************************* ** Module initialisation function *******************************************************************************/ MOD_INIT(_rdf) { PyObject *mod; MOD_DEF(mod, "_rdf", "RDF calculation module", module_methods) if (mod == NULL) return MOD_ERROR_VAL; import_array(); return MOD_SUCCESS_VAL(mod); } /******************************************************************************* ** Calculate the radial distribution function for the given selections of ** visible atoms. 
** ** Inputs are: ** - visibleAtoms: indices of atoms that are to be used for the calculation ** - specie: array containing the species index for each atom ** - pos: array containing the positions of the atoms ** - specieID1: the species of the first selection of atoms ** - specieID2: the species of the second selection of atoms ** - cellDims: the size of the simulation cell ** - pbc: periodic boundary conditions ** - start: minimum separation to use when constructing the histogram ** - finish: maximum separation to use when constructing the histogram ** - interval: the interval between histogram bins ** - numBins: the number of histogram bins ** - rdf: the result is returned in this array *******************************************************************************/ static PyObject* calculateRDF(PyObject *self, PyObject *args) { int numVisible, *visibleAtoms, *specie, specieID1, specieID2, *pbc, numBins; int numAtoms; double *pos, *cellDims, start, finish, *rdf; PyArrayObject *visibleAtomsIn=NULL; PyArrayObject *specieIn=NULL; PyArrayObject *pbcIn=NULL; PyArrayObject *posIn=NULL; PyArrayObject *cellDimsIn=NULL; PyArrayObject *rdfIn=NULL; int i, status, *sel1, *sel2, sel1cnt, sel2cnt, duplicates; double interval; /* parse and check arguments from Python */ if (!PyArg_ParseTuple(args, "O!O!O!iiO!O!dddiO!", &PyArray_Type, &visibleAtomsIn, &PyArray_Type, &specieIn, &PyArray_Type, &posIn, &specieID1, &specieID2, &PyArray_Type, &cellDimsIn, &PyArray_Type, &pbcIn, &start, &finish, &interval, &numBins, &PyArray_Type, &rdfIn)) return NULL; if (not_intVector(visibleAtomsIn)) return NULL; visibleAtoms = pyvector_to_Cptr_int(visibleAtomsIn); numVisible = (int) PyArray_DIM(visibleAtomsIn, 0); if (not_intVector(specieIn)) return NULL; specie = pyvector_to_Cptr_int(specieIn); numAtoms = (int) PyArray_DIM(specieIn, 0); if (not_doubleVector(posIn)) return NULL; pos = pyvector_to_Cptr_double(posIn); if (not_doubleVector(rdfIn)) return NULL; rdf = pyvector_to_Cptr_double(rdfIn); if (not_doubleVector(cellDimsIn)) return NULL; cellDims = pyvector_to_Cptr_double(cellDimsIn); if (not_intVector(pbcIn)) return NULL; pbc = pyvector_to_Cptr_int(pbcIn); /* initialise result array to zero */ for (i = 0; i < numBins; i++) rdf[i] = 0.0; /* create the selections of atoms and check for number of duplicates */ sel1 = malloc(numVisible * sizeof(int)); if (sel1 == NULL) { PyErr_SetString(PyExc_MemoryError, "Could not allocate sel1"); return NULL; } sel2 = malloc(numVisible * sizeof(int)); if (sel2 == NULL) { PyErr_SetString(PyExc_MemoryError, "Could not allocate sel2"); free(sel1); return NULL; } sel1cnt = 0; sel2cnt = 0; duplicates = 0; for (i = 0; i < numVisible; i++) { int index = visibleAtoms[i]; /* check if this atom is in the first selection (negative means all species) */ if (specieID1 < 0 || specie[index] == specieID1) { sel1[i] = 1; sel1cnt++; } else sel1[i] = 0; /* check if this atom is in the second selection (negative means all species) */ if (specieID2 < 0 || specie[index] == specieID2) { sel2[i] = 1; sel2cnt++; } else sel2[i] = 0; /* count the number of atoms that are in both selections */ if (sel1[i] && sel2[i]) duplicates++; } /* compute the histogram for the RDF */ status = computeHistogram(numAtoms, numVisible, visibleAtoms, pos, pbc, cellDims, sel1, sel2, start, finish, interval, rdf); /* free memory used for selections */ free(sel1); free(sel2); /* return if there was an error */ if (status) return NULL; /* normalise the rdf */ normaliseRDF(numBins, sel1cnt, sel2cnt, duplicates, start, interval, 
cellDims, rdf); /* return None */ Py_INCREF(Py_None); return Py_None; } /******************************************************************************* ** Compute the histogram for the RDF *******************************************************************************/ static int computeHistogram(int NAtoms, int NVisible, int *visibleAtoms, double *pos, int *PBC, double *cellDims, int *sel1, int *sel2, double start, double finish, double interval, double *hist) { int i, errorCount, boxstat; double *visiblePos, approxBoxWidth; const double start2 = start * start; const double finish2 = finish * finish; struct Boxes *boxes; /* positions of visible atoms */ if (NAtoms == NVisible) visiblePos = pos; else { visiblePos = malloc(3 * NVisible * sizeof(double)); if (visiblePos == NULL) { PyErr_SetString(PyExc_MemoryError, "Could not allocate visiblePos"); return 1; } for (i = 0; i < NVisible; i++) { int index = visibleAtoms[i]; int i3 = 3 * i; int ind3 = 3 * index; visiblePos[i3 ] = pos[ind3 ]; visiblePos[i3 + 1] = pos[ind3 + 1]; visiblePos[i3 + 2] = pos[ind3 + 2]; } } /* spatial decomposition - box width must be at least `finish` */ approxBoxWidth = finish; boxes = setupBoxes(approxBoxWidth, PBC, cellDims); if (boxes == NULL) { if (NAtoms != NVisible) free(visiblePos); return 2; } boxstat = putAtomsInBoxes(NVisible, visiblePos, boxes); if (NAtoms != NVisible) free(visiblePos); if (boxstat) return 3; /* loop over visible atoms */ errorCount = 0; #pragma omp parallel for reduction(+: errorCount) num_threads(prefs_numThreads) for (i = 0; i < NVisible; i++) { int j, index, ind3, boxIndex, boxNebList[27], boxNebListSize; double rxa, rya, rza; /* skip if this atom is not in the first selection */ if (!sel1[i]) continue; /* the index of this atom in the pos array */ index = visibleAtoms[i]; /* position of this atom and its box index */ ind3 = index * 3; rxa = pos[ind3 ]; rya = pos[ind3 + 1]; rza = pos[ind3 + 2]; boxIndex = boxIndexOfAtom(rxa, rya, rza, boxes); if (boxIndex < 0) errorCount++; if (!errorCount) { /* find neighbouring boxes */ boxNebListSize = getBoxNeighbourhood(boxIndex, boxNebList, boxes); /* loop over the box neighbourhood */ for (j = 0; j < boxNebListSize; j++) { int k; int checkBox = boxNebList[j]; for (k = 0; k < boxes->boxNAtoms[checkBox]; k++) { int visIndex, index2, ind23; double sep2; /* the index of this atom in the visibleAtoms array */ visIndex = boxes->boxAtoms[checkBox][k]; /* skip if this atom is not in the second selection */ if (!sel2[visIndex]) continue; /* atom index */ index2 = visibleAtoms[visIndex]; /* skip if same atom */ if (index == index2) continue; /* atomic separation */ ind23 = index2 * 3; sep2 = atomicSeparation2(rxa, rya, rza, pos[ind23], pos[ind23 + 1], pos[ind23 + 2], cellDims[0], cellDims[1], cellDims[2], PBC[0], PBC[1], PBC[2]); /* put in bin */ if (sep2 >= start2 && sep2 < finish2) { int binIndex; double sep; sep = sqrt(sep2); binIndex = (int) ((sep - start) / interval); #pragma omp atomic hist[binIndex]++; } } } } } /* free memory */ freeBoxes(boxes); /* raise an exception if there were any errors */ if (errorCount) { PyErr_SetString(PyExc_RuntimeError, "computeHistogram failed; probably box index error (check stderr)"); return 4; } return 0; } /******************************************************************************* ** Normalise the RDF *******************************************************************************/ static void normaliseRDF(int numBins, int sel1cnt, int sel2cnt, int duplicates, double start, double interval, double *cellDims, 
double *rdf) { int i; double pair_dens; const double fourThirdsPi = 4.0 / 3.0 * CONST_PI; /* compute inverse of pair density (volume / number of pairs) */ pair_dens = cellDims[0] * cellDims[1] * cellDims[2]; pair_dens /= ((double)sel1cnt * (double)sel2cnt - (double)duplicates); /* loop over histogram bins */ for (i = 0; i < numBins; i++) { double rInner, rOuter, norm, shellVolume; if (rdf[i] != 0.0) { /* calculate the volume of this shell */ rInner = interval * i + start; rOuter = interval * (i + 1) + start; shellVolume = fourThirdsPi * (pow(rOuter, 3.0) - pow(rInner, 3.0)); /* normalisation factor is 1 / (pair_density * shellVolume) */ norm = pair_dens / shellVolume; rdf[i] = rdf[i] * norm; } } }
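/*
 * Worked example (illustrative only, not part of the _rdf module): the
 * normalisation performed by normaliseRDF above. A bin counting pairs with
 * separation in [rInner, rOuter) is divided by pair_density * shellVolume,
 * so a uniform, ideal-gas-like distribution comes out as g(r) ~= 1. The
 * histogram counts and box size below are made up for the sketch.
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
    const double PI = 3.14159265358979323846; /* stands in for CONST_PI */
    double cellDims[3] = {20.0, 20.0, 20.0};  /* box size */
    int sel1cnt = 100, sel2cnt = 100, duplicates = 100; /* same selection twice */
    double start = 0.0, interval = 0.5;
    int numBins = 4;
    double rdf[4] = {0.0, 12.0, 52.0, 118.0}; /* pretend histogram counts */

    /* inverse pair density: volume / number of counted pairs (self-pairs removed) */
    double volume = cellDims[0] * cellDims[1] * cellDims[2];
    double pair_dens = volume / ((double)sel1cnt * (double)sel2cnt - (double)duplicates);

    for (int i = 0; i < numBins; i++)
    {
        double rInner = start + interval * i;
        double rOuter = start + interval * (i + 1);
        double shellVolume = 4.0 / 3.0 * PI * (pow(rOuter, 3.0) - pow(rInner, 3.0));
        double g = rdf[i] * pair_dens / shellVolume;
        printf("bin %d: r in [%.2f, %.2f)  g(r) = %.3f\n", i, rInner, rOuter, g);
    }

    return 0;
}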
selection_sort.c
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <time.h>
#include <omp.h>

#include "sorting_algorithms.h"

#define BILLION 1000000000L

/** Sorts an input array using selection sort
 * Returns the time taken to sort the array, in nanoseconds
 */
uint64_t selection_sort (int *a, int n)
{
    struct timespec start, end;
    uint64_t diff;

    clock_gettime(CLOCK_MONOTONIC, &start);

    int i, j, m, t;
    for (i = 0; i < n; i++) {
        /* find the index of the smallest remaining element */
        for (j = i, m = i; j < n; j++) {
            if (a[j] < a[m]) {
                m = j;
            }
        }

        /* swap it into position i */
        t = a[i];
        a[i] = a[m];
        a[m] = t;
    }

    clock_gettime(CLOCK_MONOTONIC, &end);
    diff = BILLION * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec;
    return diff;
}

/** Sorts an input array using selection sort and parallel processing
 * Returns the time taken to sort the array, in nanoseconds
 */
uint64_t selection_sort_parallel (int *a, int n)
{
    struct timespec start, end;
    uint64_t diff;

    clock_gettime(CLOCK_MONOTONIC, &start);

    int i, j, m, t;
    for (i = 0; i < n; i++) {
        /* the shared minimum index must be set before the worksharing loop;
         * OpenMP requires the canonical init form "j = i" here, so the
         * combined "j = m = i" initialiser is split out */
        m = i;

#pragma omp parallel for
        for (j = i; j < n; j++) {
            /* the critical section keeps the shared read/update of m free of
             * data races, but it also serialises every comparison, so this
             * variant demonstrates the pragmas rather than a speedup */
#pragma omp critical
            {
                if (a[j] < a[m]) {
                    m = j;
                }
            }
        }

        t = a[i];
        a[i] = a[m];
        a[m] = t;
    }

    clock_gettime(CLOCK_MONOTONIC, &end);
    diff = BILLION * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec;
    return diff;
}
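/*
 * Sketch of an alternative inner search (not part of sorting_algorithms.h):
 * instead of entering a critical section on every comparison as in
 * selection_sort_parallel above, each thread scans its share of the range
 * with a private minimum index and the per-thread results are merged once.
 * Assumes the same int array `a` and the half-open range [i, n).
 */
static int parallel_min_index(const int *a, int i, int n)
{
    int m = i; /* shared best index */

#pragma omp parallel
    {
        int local_m = i; /* private best index for this thread */

#pragma omp for nowait
        for (int j = i + 1; j < n; j++) {
            if (a[j] < a[local_m])
                local_m = j;
        }

        /* one critical section per thread, not per element */
#pragma omp critical
        {
            if (a[local_m] < a[m])
                m = local_m;
        }
    }

    return m;
}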
openmp_kernels.c
#include "openmp_kernels.h" #define SIMD 4 void sg_omp( sgData_t* restrict target, long* restrict ti, sgData_t* restrict source, long* restrict si, size_t n) { #pragma omp parallel for simd safelen(SIMD) #pragma prefervector for(long i = 0; i < n; i++){ target[ti[i]] = source[si[i]]; } } void scatter_omp( sgData_t* restrict target, long* restrict ti, sgData_t* restrict source, long* restrict si, size_t n) { #pragma omp parallel for simd safelen(SIMD) #pragma prefervector for(long i = 0; i < n; i++){ target[ti[i]] = source[i]; } } void gather_omp( sgData_t* restrict target, long* restrict ti, sgData_t* restrict source, long* restrict si, size_t n) { //Users may want to set a specific safelen value like 32 #pragma omp parallel for simd safelen(SIMD) #pragma prefervector for(long i = 0; i < n; i++){ target[i] = source[si[i]]; } } void sg_accum_omp( sgData_t* restrict target, long* restrict ti, sgData_t* restrict source, long* restrict si, size_t n) { #pragma omp parallel for schedule(runtime) for(long i = 0; i < n; i++){ target[ti[i]] += source[si[i]]; } } void scatter_accum_omp( sgData_t* restrict target, long* restrict ti, sgData_t* restrict source, long* restrict si, size_t n) { #pragma omp parallel for schedule(runtime) for(long i = 0; i < n; i++){ target[ti[i]] += source[i]; } } void gather_accum_omp( sgData_t* restrict target, long* restrict ti, sgData_t* restrict source, long* restrict si, size_t n) { #pragma omp parallel for schedule(runtime) for(long i = 0; i < n; i++){ target[i] += source[si[i]]; } }
plex.c
/* * compute the duplex structure of two RNA strands, * allowing only inter-strand base pairs. * see cofold() for computing hybrid structures without * restriction. * Ivo Hofacker * Vienna RNA package * */ /* * library containing the function used in rnaplex * the program rnaplex uses the following function * Lduplexfold: finds high scoring segments * it stores the end-position of these segments in an array * and call then for each of these positions the duplexfold function * which allows one to make backtracking for each of the high scoring position * It allows one to find suboptimal partially overlapping (depends on a a parameter) * duplexes between a long RNA and a shorter one. * Contrarly to RNAduplex, the energy model is not in E~log(N), * where N is the length of an interial loop but used an affine model, * where the extension and begin parameter are fitted to the energy * parameter used by RNAduplex. This allows one to check for duplex between a short RNA(20nt) * and a long one at the speed of 1Mnt/s. At this speed the whole genome (3Gnt) can be analyzed for one siRNA * in about 50 minutes. * The algorithm is based on an idea by Durbin and Eddy:when the alginment reach a value larger than a * given threshold this value is stored in an array. When the alignment score goes * then under this threshold, the alignemnent begin from this value, in that way the backtracking allow us * to find all non-overlapping high-scoring segments. * For more information check "durbin, biological sequence analysis" */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <math.h> #include <ctype.h> #include <string.h> #include "ViennaRNA/utils/basic.h" #include "ViennaRNA/params/default.h" #include "ViennaRNA/fold_vars.h" #include "ViennaRNA/fold.h" #include "ViennaRNA/pair_mat.h" #include "ViennaRNA/params/basic.h" #include "ViennaRNA/plex.h" #include "ViennaRNA/ali_plex.h" #include "ViennaRNA/loops/all.h" /* #################SIMD############### */ /* int subopt_sorted=0; */ #define PUBLIC #define PRIVATE static #define STACK_BULGE1 1 /* stacking energies for bulges of size 1 */ #define NEW_NINIO 1 /* new asymetry penalty */ #define ARRAY 32 /*array size*/ #define UNIT 100 #define MINPSCORE -2 * UNIT /** *** Macro that define indices for the Single Array approach defined in FLduplexfold_XS->gain of 20% in runtime *** so that everything is done in a 1D array. *** input is idx for i, j for j and the length of the query RNA *** 1D is divided in 6 subarrays, one for each number of allowed state *** The length of each subarray is 5*L. 
5 the maximal stored distance on the target sequence, *** L is the length of the query sequence **/ #define LCI(i, j, l) ((i) * l + j) #define LINI(i, j, l) ((i + 5) * l + j) #define LBXI(i, j, l) ((i + 10) * l + j) #define LBYI(i, j, l) ((i + 15) * l + j) #define LINIX(i, j, l) ((i + 20) * l + j) #define LINIY(i, j, l) ((i + 25) * l + j) PRIVATE void encode_seqs(const char *s1, const char *s2); PRIVATE short * encode_seq(const char *seq); PRIVATE void update_dfold_params(void); /** *** duplexfold(_XS)/backtrack(_XS) computes duplex interaction with standard energy and considers extension_cost *** find_max(_XS)/plot_max(_XS) find suboptimals and MFE *** fduplexfold(_XS) computes duplex in a plex way **/ PRIVATE duplexT duplexfold(const char *s1, const char *s2, const int extension_cost); PRIVATE char * backtrack(int i, int j, const int extension_cost); PRIVATE void find_max(const int *position, const int *position_j, const int delta, const int threshold, const int length, const char *s1, const char *s2, const int extension_cost, const int fast, const int il_a, const int il_b, const int b_a, const int b_b); PRIVATE void plot_max(const int max, const int max_pos, const int max_pos_j, const int alignment_length, const char *s1, const char *s2, const int extension_cost, const int fast, const int il_a, const int il_b, const int b_a, const int b_b); /* PRIVATE duplexT duplexfold_XS(const char *s1, const char *s2,const int **access_s1, const int **access_s2, const int i_pos, const int j_pos, const int threshold); */ PRIVATE duplexT duplexfold_XS(const char *s1, const char *s2, const int **access_s1, const int **access_s2, const int i_pos, const int j_pos, const int threshold, const int i_flag, const int j_flag); /* PRIVATE char * backtrack_XS(int i, int j, const int** access_s1, const int** access_s2); */ PRIVATE char * backtrack_XS(int i, int j, const int **access_s1, const int **access_s2, const int i_flag, const int j_flag); PRIVATE void find_max_XS(const int *position, const int *position_j, const int delta, const int threshold, const int alignment_length, const char *s1, const char *s2, const int **access_s1, const int **access_s2, const int fast, const int il_a, const int il_b, const int b_a, const int b_b); PRIVATE void plot_max_XS(const int max, const int max_pos, const int max_pos_j, const int alignment_length, const char *s1, const char *s2, const int **access_s1, const int **access_s2, const int fast, const int il_a, const int il_b, const int b_a, const int b_b); PRIVATE duplexT fduplexfold(const char *s1, const char *s2, const int extension_cost, const int il_a, const int il_b, const int b_a, const int b_b); PRIVATE char * fbacktrack(int i, int j, const int extension_cost, const int il_a, const int il_b, const int b_a, const int b_b, int *dG); PRIVATE duplexT fduplexfold_XS(const char *s1, const char *s2, const int **access_s1, const int **access_s2, const int i_pos, const int j_pos, const int threshold, const int il_a, const int il_b, const int b_a, const int b_b); PRIVATE char * fbacktrack_XS(int i, int j, const int **access_s1, const int **access_s2, const int i_pos, const int j_pos, const int il_a, const int il_b, const int b_a, const int b_b, int *dGe, int *dGeplex, int *dGx, int *dGy); /*@unused@*/ #define MAXSECTORS 500 /* dimension for a backtrack array */ #define LOCALITY 0. 
/* locality parameter for base-pairs */ PRIVATE vrna_param_t *P = NULL; /** *** energy array used in fduplexfold and fduplexfold_XS *** We do not use the 1D array here as it is not time critical *** It also makes the code more readable *** c -> stack;in -> interior loop;bx/by->bulge;inx/iny->1xn loops **/ PRIVATE int **c = NULL, **in = NULL, **bx = NULL, **by = NULL, **inx = NULL, **iny = NULL; /** *** S1, SS1, ... contains the encoded sequence for target and query *** n1, n2, n3, n4 contains target and query length **/ PRIVATE short *S1 = NULL, *SS1 = NULL, *S2 = NULL, *SS2 = NULL; /*contains the sequences*/ PRIVATE int n1, n2; /* sequence lengths */ PRIVATE int n3, n4; /*sequence length for the duplex*/; /*-----------------------------------------------------------------------duplexfold_XS---------------------------------------------------------------------------*/ /** *** duplexfold_XS is the pendant to the duplex function as defined in duplex.c *** but takes the accessibility into account. It is similar to the MFE version of RNAup *** The only approximation made is that target 3' end - query 5' end base pair is known *** s1,s2 are the query and target sequence; access_s1, access_s2 are the accessibility *** profiles, i_pos, j_pos are the coordinates of the closing pair. **/ PRIVATE duplexT duplexfold_XS(const char *s1, const char *s2, const int **access_s1, const int **access_s2, const int i_pos, const int j_pos, const int threshold, const int i_flag, const int j_flag) { int i, j, p, q, Emin = INF, l_min = 0, k_min = 0; char *struc; vrna_md_t md; struc = NULL; duplexT mfe; n3 = (int)strlen(s1); n4 = (int)strlen(s2); set_model_details(&md); if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) { update_fold_params(); if (P) free(P); P = vrna_params(&md); make_pair_matrix(); } c = (int **)vrna_alloc(sizeof(int *) * (n3 + 1)); for (i = 0; i <= n3; i++) c[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1)); for (i = 0; i <= n3; i++) for (j = 0; j <= n4; j++) c[i][j] = INF; encode_seqs(s1, s2); int type, type2, type3, E, k, l; i = n3 - i_flag; j = 1 + j_flag; type = pair[S1[i]][S2[j]]; if (!type) { printf("Error during initialization of the duplex in duplexfold_XS\n"); mfe.structure = NULL; mfe.energy = INF; return mfe; } c[i][j] = P->DuplexInit; /** if (type>2) c[i][j] += P->TerminalAU; *** c[i][j]+=P->dangle3[rtype[type]][SS1[i+1]]; *** c[i][j]+=P->dangle5[rtype[type]][SS2[j-1]]; *** The three above lines are replaced by the line below **/ c[i][j] += vrna_E_ext_stem(rtype[type], (j_flag ? SS2[j - 1] : -1), (i_flag ? SS1[i + 1] : -1), P); /* * if(j_flag ==0 && i_flag==0){ * c[i][j] += vrna_E_ext_stem(rtype[type], -1 , -1 , P); * }else if(j_flag ==0 && i_flag==1){ * c[i][j] += vrna_E_ext_stem(rtype[type], -1 , SS1[i+1], P); * }else if(j_flag ==1 && i_flag==0){ * c[i][j] += vrna_E_ext_stem(rtype[type], SS2[j-1] , -1, P); * }else { * c[i][j] += vrna_E_ext_stem(rtype[type], SS2[j-1] , SS1[i+1], P); * } * Just in case we have only one bp, we initialize ... 
* k_min, l_min and Emin */ k_min = i; l_min = j; Emin = c[i][j]; for (k = i; k > 1; k--) { if (k < i) c[k + 1][0] = INF; for (l = j; l <= n4 - 1; l++) { if (!(k == i && l == j)) c[k][l] = INF; type2 = pair[S1[k]][S2[l]]; if (!type2) continue; for (p = k + 1; p <= n3 - i_flag && p < k + MAXLOOP - 1; p++) { for (q = l - 1; q >= 1 + j_flag; q--) { if (p - k + l - q - 2 > MAXLOOP) break; type3 = pair[S1[p]][S2[q]]; if (!type3) continue; E = E_IntLoop(p - k - 1, l - q - 1, type2, rtype[type3], SS1[k + 1], SS2[l - 1], SS1[p - 1], SS2[q + 1], P); c[k][l] = MIN2(c[k][l], c[p][q] + E); } } E = c[k][l]; E += access_s1[i - k + 1][i_pos] + access_s2[l - 1][j_pos + (l - 1) - 1]; /**if (type2>2) E += P->TerminalAU; ***if (k>1) E += P->dangle5[type2][SS1[k-1]]; ***if (l<n4) E += P->dangle3[type2][SS2[l+1]]; *** Replaced by the line below **/ E += vrna_E_ext_stem(type2, (k > 1) ? SS1[k - 1] : -1, (l < n4) ? SS2[l + 1] : -1, P); if (E < Emin) { Emin = E; k_min = k; l_min = l; } } } if (Emin > threshold) { mfe.energy = INF; mfe.ddG = INF; mfe.structure = NULL; for (i = 0; i <= n3; i++) free(c[i]); free(c); free(S1); free(S2); free(SS1); free(SS2); return mfe; } else { struc = backtrack_XS(k_min, l_min, access_s1, access_s2, i_flag, j_flag); } /** *** find best dangles combination **/ int dx_5, dx_3, dy_5, dy_3, dGx, dGy, bonus_x; dx_5 = 0; dx_3 = 0; dy_5 = 0; dy_3 = 0; dGx = 0; dGy = 0; bonus_x = 0; /* * x--------x * |||||||| * x--------x */ dGx = access_s1[i - k_min + 1][i_pos]; dx_3 = 0; dx_5 = 0; bonus_x = 0; dGy = access_s2[l_min - j + 1][j_pos + (l_min - 1)]; mfe.tb = i_pos - 9 - i + k_min - 1 - dx_5; mfe.te = i_pos - 9 - 1 + dx_3; mfe.qb = j_pos - 9 - 1 - dy_5; mfe.qe = j_pos + l_min - 3 - 9 + dy_3; mfe.ddG = (double)Emin * 0.01; mfe.dG1 = (double)dGx * 0.01; mfe.dG2 = (double)dGy * 0.01; mfe.energy = mfe.ddG - mfe.dG1 - mfe.dG2; mfe.structure = struc; for (i = 0; i <= n3; i++) free(c[i]); free(c); free(S1); free(S2); free(SS1); free(SS2); return mfe; } PRIVATE char * backtrack_XS(int i, int j, const int **access_s1, const int **access_s2, const int i_flag, const int j_flag) { /* backtrack structure going backwards from i, and forwards from j * return structure in bracket notation with & as separator */ int k, l, type, type2, E, traced, i0, j0; char *st1, *st2, *struc; st1 = (char *)vrna_alloc(sizeof(char) * (n3 + 1)); st2 = (char *)vrna_alloc(sizeof(char) * (n4 + 1)); i0 = i; /*MAX2(i-1,1);*/ j0 = j;/*MIN2(j+1,n4);*/ while (i <= n3 - i_flag && j >= 1 + j_flag) { E = c[i][j]; traced = 0; st1[i - 1] = '('; st2[j - 1] = ')'; type = pair[S1[i]][S2[j]]; if (!type) vrna_message_error("backtrack failed in fold duplex bli"); for (k = i + 1; k <= n3 && k > i - MAXLOOP - 2; k++) { for (l = j - 1; l >= 1; l--) { int LE; if (i - k + l - j - 2 > MAXLOOP) break; type2 = pair[S1[k]][S2[l]]; if (!type2) continue; LE = E_IntLoop(k - i - 1, j - l - 1, type, rtype[type2], SS1[i + 1], SS2[j - 1], SS1[k - 1], SS2[l + 1], P); if (E == c[k][l] + LE) { traced = 1; i = k; j = l; break; } } if (traced) break; } if (!traced) { #if 0 if (i < n3) E -= P->dangle3[rtype[type]][SS1[i + 1]]; /* +access_s1[1][i+1]; */ if (j > 1) E -= P->dangle5[rtype[type]][SS2[j - 1]]; /* +access_s2[1][j+1]; */ if (type > 2) E -= P->TerminalAU; #endif E -= vrna_E_ext_stem(rtype[type], SS2[j - 1], SS1[i + 1], P); break; if (E != P->DuplexInit) vrna_message_error("backtrack failed in fold duplex bal"); else break; } } /* * if (i<n3) i++; * if (j>1) j--; */ struc = (char *)vrna_alloc(i - i0 + 1 + j0 - j + 1 + 2); for (k = MAX2(i0, 1); k <= i; k++) if 
(!st1[k - 1]) st1[k - 1] = '.'; for (k = j; k <= j0; k++) if (!st2[k - 1]) st2[k - 1] = '.'; strcpy(struc, st1 + MAX2(i0 - 1, 0)); strcat(struc, "&"); strcat(struc, st2 + j - 1); free(st1); free(st2); return struc; } /** *** fduplexfold(_XS) computes the interaction based on the plex energy model. *** Faster than duplex approach, but not standard model compliant *** We use the standard matrix (c, in, etc..., because we backtrack) **/ PRIVATE duplexT fduplexfold_XS(const char *s1, const char *s2, const int **access_s1, const int **access_s2, const int i_pos, const int j_pos, const int threshold, const int il_a, const int il_b, const int b_a, const int b_b) { /** *** i,j recursion index *** Emin, i_min, j_min MFE position and energy *** mfe struc duplex structure **/ int i, j, Emin, i_min, j_min, l1; duplexT mfe; char *struc; /** *** bext=b_a bulge extension parameter for linear model *** iopen=il_b interior opening for linear model *** iext_s=2*il_a asymmetric extension for interior loop *** iext_ass=60+il_a symmetric extension for interior loop *** min_colonne=INF; max score of a row *** i_length; *** max_pos; position of best hit during recursion on target *** max_pos_j; position of best hit during recursion on query *** temp; temp variable for min_colonne *** min_j_colonne; position of the minimum on query in row j *** max=INF; absolute MFE *** n3,n4 length of target and query *** DJ contains the accessibility penalty for the query sequence *** maxPenalty contains the maximum penalty **/ int bopen = b_b; int bext = b_a; int iopen = il_b; int iext_s = 2 * il_a; /* iext_s 2 nt nucleotide extension of interior loop, on i and j side */ int iext_ass = 50 + il_a; /* iext_ass assymetric extension of interior loop, either on i or on j side. */ int min_colonne = INF; /* enthaelt das maximum einer kolonne */ int i_length; int max_pos; /* get position of the best hit */ int max_pos_j; int temp = INF; int min_j_colonne; int max = INF; int **DJ; int maxPenalty[4]; vrna_md_t md; /** *** variable initialization **/ n3 = (int)strlen(s1); n4 = (int)strlen(s2); set_model_details(&md); if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) { update_fold_params(); if (P) free(P); P = vrna_params(&md); make_pair_matrix(); } /** *** array initialization **/ c = (int **)vrna_alloc(sizeof(int *) * (n3 + 1)); in = (int **)vrna_alloc(sizeof(int *) * (n3 + 1)); bx = (int **)vrna_alloc(sizeof(int *) * (n3 + 1)); by = (int **)vrna_alloc(sizeof(int *) * (n3 + 1)); inx = (int **)vrna_alloc(sizeof(int *) * (n3 + 1)); iny = (int **)vrna_alloc(sizeof(int *) * (n3 + 1)); /* #pragma omp parallel for */ for (i = 0; i <= n3; i++) { c[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1)); in[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1)); bx[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1)); by[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1)); inx[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1)); iny[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1)); } for (i = 0; i < n3; i++) { for (j = 0; j < n4; j++) { in[i][j] = INF; /* no in before 1 */ c[i][j] = INF; /* no bulge and no in before n2 */ bx[i][j] = INF; /* no bulge before 1 */ by[i][j] = INF; inx[i][j] = INF; /* no bulge before 1 */ iny[i][j] = INF; } } /** *** sequence encoding **/ encode_seqs(s1, s2); /** *** Compute max accessibility penalty for the query only once **/ maxPenalty[0] = (int)-1 * P->stack[2][2] / 2; maxPenalty[1] = (int)-1 * P->stack[2][2]; maxPenalty[2] = (int)-3 * P->stack[2][2] / 2; maxPenalty[3] = (int)-2 * P->stack[2][2]; DJ = (int **)vrna_alloc(4 * sizeof(int *)); 
DJ[0] = (int *)vrna_alloc((1 + n4) * sizeof(int)); DJ[1] = (int *)vrna_alloc((1 + n4) * sizeof(int)); DJ[2] = (int *)vrna_alloc((1 + n4) * sizeof(int)); DJ[3] = (int *)vrna_alloc((1 + n4) * sizeof(int)); j = n4 - 9; while (--j > 9) { int jdiff = j_pos + j - 11; /** *** Depending in which direction (i:1->n vs j:m->1) the accessibility is computed we get slightly different results. *** We reduce the discrepancies by taking the average of d^i_k and d^j_l **/ DJ[0][j] = 0.5 * (access_s2[5][jdiff + 4] - access_s2[4][jdiff + 4] + access_s2[5][jdiff] - access_s2[4][jdiff - 1]); DJ[1][j] = 0.5 * (access_s2[5][jdiff + 5] - access_s2[4][jdiff + 5] + access_s2[5][jdiff + 1] - access_s2[4][jdiff]) + DJ[0][j]; DJ[2][j] = 0.5 * (access_s2[5][jdiff + 6] - access_s2[4][jdiff + 6] + access_s2[5][jdiff + 2] - access_s2[4][jdiff + 1]) + DJ[1][j]; DJ[3][j] = 0.5 * (access_s2[5][jdiff + 7] - access_s2[4][jdiff + 7] + access_s2[5][jdiff + 3] - access_s2[4][jdiff + 2]) + DJ[2][j]; /* * DJ[0][j] = access_s2[5][jdiff+4] - access_s2[4][jdiff+4] ; * DJ[1][j] = access_s2[5][jdiff+5] - access_s2[4][jdiff+5] + DJ[0][j]; * DJ[2][j] = access_s2[5][jdiff+6] - access_s2[4][jdiff+6] + DJ[1][j]; * DJ[3][j] = access_s2[5][jdiff+7] - access_s2[4][jdiff+7] + DJ[2][j]; * DJ[0][j] = MIN2(DJ[0][j],maxPenalty[0]); * DJ[1][j] = MIN2(DJ[1][j],maxPenalty[1]); * DJ[2][j] = MIN2(DJ[2][j],maxPenalty[2]); * DJ[3][j] = MIN2(DJ[3][j],maxPenalty[3]); */ } /** *** Start of the recursion *** first and last 10 nucleotides on target and query are dummy nucleotides *** allow to reduce number of if test **/ i = 11; i_length = n3 - 9; while (i < i_length) { int di1, di2, di3, di4; int idiff = i_pos - (n3 - 10 - i); di1 = 0.5 * (access_s1[5][idiff + 4] - access_s1[4][idiff + 4] + access_s1[5][idiff] - access_s1[4][idiff - 1]); di2 = 0.5 * (access_s1[5][idiff + 3] - access_s1[4][idiff + 3] + access_s1[5][idiff - 1] - access_s1[4][idiff - 2]) + di1; di3 = 0.5 * (access_s1[5][idiff + 2] - access_s1[4][idiff + 2] + access_s1[5][idiff - 2] - access_s1[4][idiff - 3]) + di2; di4 = 0.5 * (access_s1[5][idiff + 1] - access_s1[4][idiff + 1] + access_s1[5][idiff - 3] - access_s1[4][idiff - 4]) + di3; /* * di1 = access_s1[5][idiff] - access_s1[4][idiff-1]; * di2 = access_s1[5][idiff-1] - access_s1[4][idiff-2] + di1; * di3 = access_s1[5][idiff-2] - access_s1[4][idiff-3] + di2; * di4 = access_s1[5][idiff-3] - access_s1[4][idiff-4] + di3; * di1=MIN2(di1,maxPenalty[0]); * di2=MIN2(di2,maxPenalty[1]); * di3=MIN2(di3,maxPenalty[2]); * di4=MIN2(di4,maxPenalty[3]); */ j = n4 - 9; min_colonne = INF; while (10 < --j) { int dj1, dj2, dj3, dj4; int jdiff = j_pos + j - 11; dj1 = DJ[0][j]; dj2 = DJ[1][j]; dj3 = DJ[2][j]; dj4 = DJ[3][j]; int type, type2; type = pair[S1[i]][S2[j]]; /** *** Start duplex **/ /* * c[i][j]=type ? P->DuplexInit + access_s1[1][idiff]+access_s2[1][jdiff] : INF; */ c[i][j] = type ? 
P->DuplexInit : INF; /** *** update lin bx by linx liny matrix **/ type2 = pair[S2[j + 1]][S1[i - 1]]; /** *** start/extend interior loop **/ in[i][j] = MIN2( c[i - 1][j + 1] + P->mismatchI[type2][SS2[j]][SS1[i]] + iopen + iext_s + di1 + dj1, in[i - 1][j] + iext_ass + di1); /** *** start/extend nx1 target *** use same type2 as for in **/ inx[i][j] = MIN2( c[i - 1][j + 1] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s + di1 + dj1, inx[i - 1][j] + iext_ass + di1); /** *** start/extend 1xn target *** use same type2 as for in **/ iny[i][j] = MIN2( c[i - 1][j + 1] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s + di1 + dj1, iny[i][j + 1] + iext_ass + dj1); /** *** extend interior loop **/ in[i][j] = MIN2(in[i][j], in[i][j + 1] + iext_ass + dj1); in[i][j] = MIN2(in[i][j], in[i - 1][j + 1] + iext_s + di1 + dj1); /** *** start/extend bulge target **/ type2 = pair[S2[j]][S1[i - 1]]; bx[i][j] = MIN2(bx[i - 1][j] + bext + di1, c[i - 1][j] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0) + di1); /** *** start/extend bulge query **/ type2 = pair[S2[j + 1]][S1[i]]; by[i][j] = MIN2(by[i][j + 1] + bext + dj1, c[i][j + 1] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0) + dj1); /** ***end update recursion ***######################## Start stack extension############################## **/ if (!type) continue; c[i][j] += vrna_E_ext_stem(type, SS1[i - 1], SS2[j + 1], P); /** *** stack extension **/ if ((type2 = pair[S1[i - 1]][S2[j + 1]])) c[i][j] = MIN2(c[i - 1][j + 1] + P->stack[rtype[type]][type2] + di1 + dj1, c[i][j]); /** *** 1x0 / 0x1 stack extension **/ if ((type2 = pair[S1[i - 1]][S2[j + 2]])) c[i][j] = MIN2(c[i - 1][j + 2] + P->bulge[1] + P->stack[rtype[type]][type2] + di1 + dj2, c[i][j]); if ((type2 = pair[S1[i - 2]][S2[j + 1]])) c[i][j] = MIN2(c[i - 2][j + 1] + P->bulge[1] + P->stack[type2][rtype[type]] + di2 + dj1, c[i][j]); /** *** 1x1 / 2x2 stack extension **/ if ((type2 = pair[S1[i - 2]][S2[j + 2]])) c[i][j] = MIN2( c[i - 2][j + 2] + P->int11[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]] + di2 + dj2, c[i][j]); if ((type2 = pair[S1[i - 3]][S2[j + 3]])) { c[i][j] = MIN2(c[i - 3][j + 3] + P->int22[type2][rtype[type]][SS1[i - 2]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + di3 + dj3, c[i][j]); } /** *** 1x2 / 2x1 stack extension *** E_IntLoop(1,2,type2, rtype[type],SS1[i-1], SS2[j+2], SS1[i-1], SS2[j+1], P) corresponds to *** P->int21[rtype[type]][type2][SS2[j+2]][SS1[i-1]][SS1[i-1]] **/ if ((type2 = pair[S1[i - 3]][S2[j + 2]])) { c[i][j] = MIN2( c[i - 3][j + 2] + P->int21[rtype[type]][type2][SS2[j + 1]][SS1[i - 2]][SS1[i - 1]] + di3 + dj2, c[i][j]); } if ((type2 = pair[S1[i - 2]][S2[j + 3]])) { c[i][j] = MIN2( c[i - 2][j + 3] + P->int21[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + di2 + dj3, c[i][j]); } /** *** 2x3 / 3x2 stack extension **/ if ((type2 = pair[S1[i - 4]][S2[j + 3]])) c[i][j] = MIN2(c[i - 4][j + 3] + P->internal_loop[5] + P->ninio[2] + P->mismatch23I[type2][SS1[i - 3]][SS2[j + 2]] + P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + di4 + dj3, c[i][j]); if ((type2 = pair[S1[i - 3]][S2[j + 4]])) c[i][j] = MIN2(c[i - 3][j + 4] + P->internal_loop[5] + P->ninio[2] + P->mismatch23I[type2][SS1[i - 2]][SS2[j + 3]] + P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + di3 + dj4, c[i][j]); /** *** So now we have to handle 1x3, 3x1, 3x3, and mxn m,n > 3 **/ /** *** 3x3 or more **/ c[i][j] = MIN2( in[i - 3][j + 3] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + 2 * iext_s + di3 + dj3, c[i][j]); /** *** 2xn or more **/ c[i][j] = MIN2( in[i - 4][j + 2] + 
P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass + di4 + dj2, c[i][j]); /** *** nx2 or more **/ c[i][j] = MIN2( in[i - 2][j + 4] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass + di2 + dj4, c[i][j]); /** *** nx1 n>2 **/ c[i][j] = MIN2( inx[i - 3][j + 1] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + di3 + dj1, c[i][j]); /** *** 1xn n>2 **/ c[i][j] = MIN2( iny[i - 1][j + 3] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + dj3 + di1, c[i][j]); /** *** nx0 n>1 **/ int bAU; bAU = (type > 2 ? P->TerminalAU : 0); c[i][j] = MIN2(bx[i - 2][j + 1] + di2 + dj1 + bext + bAU, c[i][j]); /** *** 0xn n>1 **/ c[i][j] = MIN2(by[i - 1][j + 2] + di1 + dj2 + bext + bAU, c[i][j]); /* * remove this line printf("%d\t",c[i][j]); */ temp = min_colonne; min_colonne = MIN2(c[i][j] + vrna_E_ext_stem(rtype[type], SS2[j - 1], SS1[i + 1], P), min_colonne); if (temp > min_colonne) min_j_colonne = j; /* ---------------------------------------------------------------------end update */ } if (max >= min_colonne) { max = min_colonne; max_pos = i; max_pos_j = min_j_colonne; } i++; /* * remove this line printf("\n"); */ } Emin = max; if (Emin > threshold) { free(S1); free(S2); free(SS1); free(SS2); for (i = 0; i <= n3; i++) { free(c[i]); free(in[i]); free(bx[i]); free(by[i]); free(inx[i]); free(iny[i]); } for (i = 0; i <= 3; i++) free(DJ[i]); free(c); free(in); free(bx); free(by); free(inx); free(iny); free(DJ); mfe.energy = 0; mfe.structure = NULL; return mfe; } i_min = max_pos; j_min = max_pos_j; int dGe, dGeplex, dGx, dGy; dGe = dGeplex = dGx = dGy = 0; /* printf("MAX fduplexfold_XS %d\n",Emin); */ struc = fbacktrack_XS(i_min, j_min, access_s1, access_s2, i_pos, j_pos, il_a, il_b, b_a, b_b, &dGe, &dGeplex, &dGx, &dGy); l1 = strchr(struc, '&') - struc; int size; size = strlen(struc) - 1; int lengthx; int endx; int lengthy; int endy; lengthx = l1; lengthx -= (struc[0] == '.' ? 1 : 0); lengthx -= (struc[l1 - 1] == '.' ? 1 : 0); endx = (i_pos - (n3 - i_min)); lengthy = size - l1; lengthy -= (struc[size] == '.' ? 1 : 0); lengthy -= (struc[l1 + 1] == '.' ? 
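/* The backtracked structure is a bracket string with '&' separating the
 * target part from the query part, e.g. "((((&))))." with l1 = 4.
 * lengthx / lengthy are the lengths of the interacting stretches after
 * trimming a possible unpaired ('.') position at either end of each part,
 * and endx / endy map the hit back to coordinates in the full, un-excised
 * sequences. */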
1 : 0); endy = j_pos + j_min + lengthy - 22; if (i_min < n3 - 10) i_min++; if (j_min > 11) j_min--; mfe.i = i_min; mfe.j = j_min; mfe.ddG = (double)Emin * 0.01; mfe.structure = struc; mfe.energy_backtrack = (double)dGe * 0.01; mfe.energy = (double)dGeplex * 0.01; mfe.opening_backtrack_x = (double)dGx * 0.01; mfe.opening_backtrack_y = (double)dGy * 0.01; mfe.dG1 = 0; /* !remove access to complete access array (double) access_s1[lengthx][endx+10] * 0.01; */ mfe.dG2 = 0; /* !remove access to complete access array (double) access_s2[lengthy][endy+10] * 0.01; */ free(S1); free(S2); free(SS1); free(SS2); for (i = 0; i <= n3; i++) { free(c[i]); free(in[i]); free(bx[i]); free(by[i]); free(inx[i]); free(iny[i]); } for (i = 0; i <= 3; i++) free(DJ[i]); free(DJ); free(c); free(in); free(bx); free(by); free(iny); free(inx); return mfe; } PRIVATE char * fbacktrack_XS(int i, int j, const int **access_s1, const int **access_s2, const int i_pos, const int j_pos, const int il_a, const int il_b, const int b_a, const int b_b, int *dG, int *dGplex, int *dGx, int *dGy) { /* backtrack structure going backwards from i, and forwards from j * return structure in bracket notation with & as separator */ int k, l, type, type2, E, traced, i0, j0; char *st1, *st2, *struc; int bopen = b_b; int bext = b_a; int iopen = il_b; int iext_s = 2 * il_a; /* iext_s 2 nt nucleotide extension of interior loop, on i and j side */ int iext_ass = 50 + il_a; /* iext_ass assymetric extension of interior loop, either on i or on j side. */ st1 = (char *)vrna_alloc(sizeof(char) * (n3 + 1)); st2 = (char *)vrna_alloc(sizeof(char) * (n4 + 1)); i0 = MIN2(i + 1, n3 - 10); j0 = MAX2(j - 1, 11); int state; state = 1; /* we start backtracking from a a pair , i.e. c-matrix */ /* state 1 -> base pair, c * state 2 -> interior loop, in * state 3 -> bx loop, bx * state 4 -> by loop, by */ traced = 1; k = i; l = j; /* stores the i,j information for subsequence usage see * */ int idiff, jdiff; /** *** (type>2?P->TerminalAU:0)+P->dangle3[rtype[type]][SS1[i+1]]+P->dangle5[rtype[type]][SS2[j-1]]; **/ int maxPenalty[4]; vrna_md_t md; set_model_details(&md); if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) { update_dfold_params(); if (P) free(P); P = vrna_params(&md); make_pair_matrix(); } maxPenalty[0] = (int)-1 * P->stack[2][2] / 2; maxPenalty[1] = (int)-1 * P->stack[2][2]; maxPenalty[2] = (int)-3 * P->stack[2][2] / 2; maxPenalty[3] = (int)-2 * P->stack[2][2]; type = pair[S1[i]][S2[j]]; *dG += vrna_E_ext_stem(rtype[type], SS2[j - 1], SS1[i + 1], P); *dGplex = *dG; while (i > 10 && j <= n4 - 9 && traced) { int di1, di2, di3, di4; idiff = i_pos - (n3 - 10 - i); di1 = 0.5 * (access_s1[5][idiff + 4] - access_s1[4][idiff + 4] + access_s1[5][idiff] - access_s1[4][idiff - 1]); di2 = 0.5 * (access_s1[5][idiff + 3] - access_s1[4][idiff + 3] + access_s1[5][idiff - 1] - access_s1[4][idiff - 2]) + di1; di3 = 0.5 * (access_s1[5][idiff + 2] - access_s1[4][idiff + 2] + access_s1[5][idiff - 2] - access_s1[4][idiff - 3]) + di2; di4 = 0.5 * (access_s1[5][idiff + 1] - access_s1[4][idiff + 1] + access_s1[5][idiff - 3] - access_s1[4][idiff - 4]) + di3; /* * di1 = access_s1[5][idiff] - access_s1[4][idiff-1]; * di2 = access_s1[5][idiff-1] - access_s1[4][idiff-2] + di1; * di3 = access_s1[5][idiff-2] - access_s1[4][idiff-3] + di2; * di4 = access_s1[5][idiff-3] - access_s1[4][idiff-4] + di3; * di1=MIN2(di1,maxPenalty[0]); * di2=MIN2(di2,maxPenalty[1]); * di3=MIN2(di3,maxPenalty[2]); * di4=MIN2(di4,maxPenalty[3]); */ int dj1, dj2, dj3, dj4; jdiff = j_pos + j - 11; dj1 = 0.5 * 
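/* dj1..dj4, like di1..di4 above, are the cumulative costs of opening one to
 * four additional nucleotides around the current query position, again
 * averaged over the two directions in which the accessibility profiles were
 * filled; the commented-out block below shows the single-direction variant
 * capped by maxPenalty. */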
(access_s2[5][jdiff + 4] - access_s2[4][jdiff + 4] + access_s2[5][jdiff] - access_s2[4][jdiff - 1]); dj2 = 0.5 * (access_s2[5][jdiff + 5] - access_s2[4][jdiff + 5] + access_s2[5][jdiff + 1] - access_s2[4][jdiff]) + dj1; dj3 = 0.5 * (access_s2[5][jdiff + 6] - access_s2[4][jdiff + 6] + access_s2[5][jdiff + 2] - access_s2[4][jdiff + 1]) + dj2; dj4 = 0.5 * (access_s2[5][jdiff + 7] - access_s2[4][jdiff + 7] + access_s2[5][jdiff + 3] - access_s2[4][jdiff + 2]) + dj3; /* * dj1 = access_s2[5][jdiff+4] - access_s2[4][jdiff+4]; * dj2 = access_s2[5][jdiff+5] - access_s2[4][jdiff+5] + dj1; * dj3 = access_s2[5][jdiff+6] - access_s2[4][jdiff+6] + dj2; * dj4 = access_s2[5][jdiff+7] - access_s2[4][jdiff+7] + dj3; * dj1=MIN2(dj1,maxPenalty[0]); * dj2=MIN2(dj2,maxPenalty[1]); * dj3=MIN2(dj3,maxPenalty[2]); * dj4=MIN2(dj4,maxPenalty[3]); */ traced = 0; switch (state) { case 1: type = pair[S1[i]][S2[j]]; int bAU; bAU = (type > 2 ? P->TerminalAU : 0); if (!type) vrna_message_error("backtrack failed in fold duplex"); type2 = pair[S1[i - 1]][S2[j + 1]]; if (type2 && c[i][j] == (c[i - 1][j + 1] + P->stack[rtype[type]][type2] + di1 + dj1)) { k = i - 1; l = j + 1; (*dG) += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGplex += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGx += di1; *dGy += dj1; st1[i - 1] = '('; st2[j - 1] = ')'; i = k; j = l; state = 1; traced = 1; break; } type2 = pair[S1[i - 1]][S2[j + 2]]; if (type2 && c[i][j] == (c[i - 1][j + 2] + P->bulge[1] + P->stack[rtype[type]][type2] + di1 + dj2)) { k = i - 1; l = j + 2; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGplex += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGx += di1; *dGy += dj2; st1[i - 1] = '('; st2[j - 1] = ')'; i = k; j = l; state = 1; traced = 1; break; } type2 = pair[S1[i - 2]][S2[j + 1]]; if (type2 && c[i][j] == (c[i - 2][j + 1] + P->bulge[1] + P->stack[type2][rtype[type]] + di2 + dj1)) { k = i - 2; l = j + 1; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGplex += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGx += di2; *dGy += dj1; st1[i - 1] = '('; st2[j - 1] = ')'; i = k; j = l; state = 1; traced = 1; break; } type2 = pair[S1[i - 2]][S2[j + 2]]; if (type2 && c[i][j] == (c[i - 2][j + 2] + P->int11[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]] + di2 + dj2)) { k = i - 2; l = j + 2; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGplex += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGx += di2; *dGy += dj2; st1[i - 1] = '('; st2[j - 1] = ')'; i = k; j = l; state = 1; traced = 1; break; } type2 = pair[S1[i - 3]][S2[j + 3]]; if (type2 && c[i][j] == (c[i - 3][j + 3] + P->int22[type2][rtype[type]][SS1[i - 2]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + di3 + dj3)) { k = i - 3; l = j + 3; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGplex += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGx += di3; *dGy += dj3; st1[i - 1] = '('; st2[j - 1] = ')'; i = k; j = l; state = 1; traced = 1; break; } type2 = pair[S1[i - 3]][S2[j + 2]]; if 
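/* Backtracking recomputes every candidate extension with exactly the same
 * energy expression used in the fill stage and follows the first one whose
 * value matches c[i][j]; here the 2x1 / 1x2 interior-loop extensions are
 * re-tested via the int21 tables. */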
(type2 && c[i][j] == (c[i - 3][j + 2] + P->int21[rtype[type]][type2][SS2[j + 1]][SS1[i - 2]][SS1[i - 1]] + di3 + dj2)) { k = i - 3; l = j + 2; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGplex += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGx += di3; *dGy += dj2; st1[i - 1] = '('; st2[j - 1] = ')'; i = k; j = l; state = 1; traced = 1; break; } type2 = pair[S1[i - 2]][S2[j + 3]]; if (type2 && c[i][j] == (c[i - 2][j + 3] + P->int21[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + di2 + dj3)) { k = i - 2; l = j + 3; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGplex += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGx += di2; *dGy += dj3; st1[i - 1] = '('; st2[j - 1] = ')'; i = k; j = l; state = 1; traced = 1; break; } type2 = pair[S1[i - 4]][S2[j + 3]]; if (type2 && c[i][j] == (c[i - 4][j + 3] + P->internal_loop[5] + P->ninio[2] + P->mismatch23I[type2][SS1[i - 3]][SS2[j + 2]] + P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + di4 + dj3)) { k = i - 4; l = j + 3; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGplex += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGx += di2; *dGy += dj3; st1[i - 1] = '('; st2[j - 1] = ')'; i = k; j = l; state = 1; traced = 1; break; } type2 = pair[S1[i - 3]][S2[j + 4]]; if (type2 && c[i][j] == (c[i - 3][j + 4] + P->internal_loop[5] + P->ninio[2] + P->mismatch23I[type2][SS1[i - 2]][SS2[j + 3]] + P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + di3 + dj4)) { k = i - 3; l = j + 4; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGplex += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGx += di2; *dGy += dj3; st1[i - 1] = '('; st2[j - 1] = ')'; i = k; j = l; state = 1; traced = 1; break; } if (c[i][j] == (in[i - 3][j + 3] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + di3 + dj3 + 2 * iext_s)) { k = i; l = j; *dGplex += P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + 2 * iext_s; *dGx += di3; *dGy += dj3; st1[i - 1] = '('; st2[j - 1] = ')'; i = i - 3; j = j + 3; state = 2; traced = 1; break; } if (c[i][j] == (in[i - 4][j + 2] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + di4 + dj2 + iext_s + 2 * iext_ass)) { k = i; l = j; *dGplex += P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass; *dGx += di4; *dGy += dj2; st1[i - 1] = '('; st2[j - 1] = ')'; i = i - 4; j = j + 2; state = 2; traced = 1; break; } if (c[i][j] == (in[i - 2][j + 4] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + di2 + dj4 + iext_s + 2 * iext_ass)) { k = i; l = j; *dGplex += P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass; *dGx += di2; *dGy += dj4; st1[i - 1] = '('; st2[j - 1] = ')'; i = i - 2; j = j + 4; state = 2; traced = 1; break; } if (c[i][j] == (inx[i - 3][j + 1] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + di3 + dj1)) { k = i; l = j; *dGplex += P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + di3 + dj1; *dGx += di3; *dGy += dj1; st1[i - 1] = '('; st2[j - 1] = ')'; i = i - 3; j = j + 1; state = 5; traced = 1; break; } if (c[i][j] == (iny[i - 1][j + 
3] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + di1 + dj3)) { k = i; l = j; *dGplex += P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + di1 + dj3; *dGx += di1; *dGy += dj3; st1[i - 1] = '('; st2[j - 1] = ')'; i = i - 1; j = j + 3; state = 6; traced = 1; break; } if (c[i][j] == (bx[i - 2][j + 1] + di2 + dj1 + bext + bAU)) { k = i; l = j; st1[i - 1] = '('; st2[j - 1] = ')'; *dGplex += bext + bAU; *dGx += di2; *dGy += dj1; i = i - 2; j = j + 1; state = 3; traced = 1; break; } if (c[i][j] == (by[i - 1][j + 2] + di1 + dj2 + bext + bAU)) { k = i; l = j; *dGplex += bext + bAU; *dGx += di1; *dGy += dj2; st1[i - 1] = '('; st2[j - 1] = ')'; i = i - 1; j = j + 2; state = 4; traced = 1; break; } break; case 2: if (in[i][j] == (in[i - 1][j + 1] + iext_s + di1 + dj1)) { i--; j++; *dGplex += iext_s; *dGx += di1; *dGy += dj1; state = 2; traced = 1; break; } if (in[i][j] == (in[i - 1][j] + iext_ass + di1)) { i = i - 1; *dGplex += iext_ass; *dGx += di1; state = 2; traced = 1; break; } if (in[i][j] == (in[i][j + 1] + iext_ass + dj1)) { j++; state = 2; *dGy += dj1; *dGplex += iext_ass; traced = 1; break; } type2 = pair[SS2[j + 1]][SS1[i - 1]]; if (type2 && in[i][j] == (c[i - 1][j + 1] + P->mismatchI[type2][SS2[j]][SS1[i]] + iopen + iext_s + di1 + dj1)) { *dGplex += P->mismatchI[type2][SS2[j]][SS1[i]] + iopen + iext_s; int temp; temp = k; k = i - 1; i = temp; temp = l; l = j + 1; j = temp; type = pair[S1[i]][S2[j]]; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGx += di1; *dGy += dj1; i = k; j = l; state = 1; traced = 1; break; } case 3: if (bx[i][j] == (bx[i - 1][j] + bext + di1)) { i--; *dGplex += bext; *dGx += di1; state = 3; traced = 1; break; } type2 = pair[S2[j]][S1[i - 1]]; if (type2 && bx[i][j] == (c[i - 1][j] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0) + di1)) { int temp; temp = k; k = i - 1; i = temp; temp = l; l = j; j = temp; type = pair[S1[i]][S2[j]]; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGplex += bopen + bext + (type2 > 2 ? P->TerminalAU : 0); *dGx += di1; i = k; j = l; state = 1; traced = 1; break; } case 4: if (by[i][j] == (by[i][j + 1] + bext + dj1)) { j++; *dGplex += bext; state = 4; traced = 1; break; } type2 = pair[S2[j + 1]][S1[i]]; if (type2 && by[i][j] == (c[i][j + 1] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0) + dj1)) { int temp; temp = k; k = i; i = temp; temp = l; l = j + 1; j = temp; type = pair[S1[i]][S2[j]]; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGplex += bopen + bext + (type2 > 2 ? 
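/* Tracing back through the query-side bulge matrix (by): either the bulge
 * grew by one query nucleotide (bext + dj1), or it was opened directly next
 * to a closed pair, paying bopen + bext plus a terminal-AU penalty when the
 * inner closing pair (type2) is anything other than CG/GC. */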
P->TerminalAU : 0); *dGy += dj1; i = k; j = l; state = 1; traced = 1; break; } case 5: if (inx[i][j] == (inx[i - 1][j] + iext_ass + di1)) { i--; *dGplex += iext_ass; *dGx += di1; state = 5; traced = 1; break; } type2 = pair[S2[j + 1]][S1[i - 1]]; if (type2 && inx[i][j] == (c[i - 1][j + 1] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s + di1 + dj1)) { *dGplex += P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s; int temp; temp = k; k = i - 1; i = temp; temp = l; l = j + 1; j = temp; type = pair[S1[i]][S2[j]]; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGx += di1; *dGy += dj1; i = k; j = l; state = 1; traced = 1; break; } case 6: if (iny[i][j] == (iny[i][j + 1] + iext_ass + dj1)) { j++; *dGplex += iext_ass; *dGx += dj1; state = 6; traced = 1; break; } type2 = pair[S2[j + 1]][S1[i - 1]]; if (type2 && iny[i][j] == (c[i - 1][j + 1] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s + di1 + dj1)) { *dGplex += P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s; int temp; temp = k; k = i - 1; i = temp; temp = l; l = j + 1; j = temp; type = pair[S1[i]][S2[j]]; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); *dGx += di1; *dGy += dj1; i = k; j = l; state = 1; traced = 1; break; } } } if (!traced) { idiff = i_pos - (n3 - 10 - i); jdiff = j_pos + j - 11; E = c[i][j]; /** *** if (i>1) {E -= P->dangle5[type][SS1[i-1]]; *dG+=P->dangle5[type][SS1[i-1]];*dGplex+=P->dangle5[type][SS1[i-1]];} *** if (j<n4){E -= P->dangle3[type][SS2[j+1]]; *dG+=P->dangle3[type][SS2[j+1]];*dGplex+=P->dangle3[type][SS2[j+1]];} *** if (type>2) {E -= P->TerminalAU; *dG+=P->TerminalAU;*dGplex+=P->TerminalAU;} **/ int correction; correction = vrna_E_ext_stem(type, (i > 1) ? SS1[i - 1] : -1, (j < n4) ? 
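/* End of the trace: the remaining entry must reduce to the bare duplex
 * initiation penalty once the terminal stem contribution (dangling ends and
 * terminal-AU penalty, cf. the commented-out dangle5/dangle3 terms above;
 * -1 means "no neighbouring nucleotide") has been removed, otherwise the
 * backtrack is inconsistent with the fill stage and an error is raised. */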
SS2[j + 1] : -1, P); *dG += correction; *dGplex += correction; E -= correction; /* * if (E != P->DuplexInit+access_s1[1][idiff]+access_s2[1][jdiff]) { * vrna_message_error("backtrack failed in second fold duplex"); * } */ if (E != P->DuplexInit) { vrna_message_error("backtrack failed in second fold duplex"); } else { *dG += P->DuplexInit; *dGplex += P->DuplexInit; *dGx += 0; /* access_s1[1][idiff]; */ *dGy += 0; /* access_s2[1][jdiff]; */ st1[i - 1] = '('; st2[j - 1] = ')'; } } if (i > 11) i--; if (j < n4 - 10) j++; struc = (char *)vrna_alloc(i0 - i + 1 + j - j0 + 1 + 2); for (k = MAX2(i, 1); k <= i0; k++) if (!st1[k - 1]) st1[k - 1] = '.'; for (k = j0; k <= j; k++) if (!st2[k - 1]) st2[k - 1] = '.'; strcpy(struc, st1 + MAX2(i - 1, 0)); strcat(struc, "&"); strcat(struc, st2 + j0 - 1); /* printf("%s %3d,%-3d : %3d,%-3d\n", struc, i,i0,j0,j); */ free(st1); free(st2); return struc; } duplexT ** Lduplexfold_XS(const char *s1, const char *s2, const int **access_s1, const int **access_s2, const int threshold, const int alignment_length, const int delta, const int fast, const int il_a, const int il_b, const int b_a, const int b_b) { /** *** See variable definition in fduplexfold_XS **/ int i, j; int bopen = b_b; int bext = b_a; int iopen = il_b; int iext_s = 2 * il_a; int iext_ass = 50 + il_a; int min_colonne = INF; int i_length; int max_pos; int max_pos_j; int min_j_colonne; int max = INF; int *position; int *position_j; int maxPenalty[4]; int **DJ; /** *** 1D array corresponding to the standard 2d recursion matrix *** Makes the computation 20% faster **/ int *SA; vrna_md_t md; /** *** variable initialization **/ n1 = (int)strlen(s1); n2 = (int)strlen(s2); /** *** Sequence encoding **/ set_model_details(&md); if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) { update_dfold_params(); if (P) free(P); P = vrna_params(&md); make_pair_matrix(); } encode_seqs(s1, s2); /** *** Position of the high score on the target and query sequence **/ position = (int *)vrna_alloc((delta + n1 + 3 + delta) * sizeof(int)); position_j = (int *)vrna_alloc((delta + n1 + 3 + delta) * sizeof(int)); /** *** extension penalty, computed only once, further reduce the computation time **/ maxPenalty[0] = (int)-1 * P->stack[2][2] / 2; maxPenalty[1] = (int)-1 * P->stack[2][2]; maxPenalty[2] = (int)-3 * P->stack[2][2] / 2; maxPenalty[3] = (int)-2 * P->stack[2][2]; DJ = (int **)vrna_alloc(4 * sizeof(int *)); DJ[0] = (int *)vrna_alloc(n2 * sizeof(int)); DJ[1] = (int *)vrna_alloc(n2 * sizeof(int)); DJ[2] = (int *)vrna_alloc(n2 * sizeof(int)); DJ[3] = (int *)vrna_alloc(n2 * sizeof(int)); j = n2 - 9; while (--j > 10) { DJ[0][j] = 0.5 * (access_s2[5][j + 4] - access_s2[4][j + 4] + access_s2[5][j] - access_s2[4][j - 1]); DJ[1][j] = 0.5 * (access_s2[5][j + 5] - access_s2[4][j + 5] + access_s2[5][j + 1] - access_s2[4][j]) + DJ[0][j]; DJ[2][j] = 0.5 * (access_s2[5][j + 6] - access_s2[4][j + 6] + access_s2[5][j + 2] - access_s2[4][j + 1]) + DJ[1][j]; DJ[3][j] = 0.5 * (access_s2[5][j + 7] - access_s2[4][j + 7] + access_s2[5][j + 3] - access_s2[4][j + 2]) + DJ[2][j]; /* * DJ[0][j] = access_s2[5][j+4] - access_s2[4][j+4] ; * DJ[1][j] = access_s2[5][j+5] - access_s2[4][j+5] + DJ[0][j]; * DJ[2][j] = access_s2[5][j+6] - access_s2[4][j+6] + DJ[1][j]; * DJ[3][j] = access_s2[5][j+7] - access_s2[4][j+7] + DJ[2][j]; * DJ[0][j] = MIN2(DJ[0][j],maxPenalty[0]); * DJ[1][j] = MIN2(DJ[1][j],maxPenalty[1]); * DJ[2][j] = MIN2(DJ[2][j],maxPenalty[2]); * DJ[3][j] = MIN2(DJ[3][j],maxPenalty[3]); */ } /** *** instead of having 4 2-dim arrays we use a 
unique 1-dim array *** The mapping 2d -> 1D is done based ont the macro *** LCI(i,j,l) ((i )*l + j) *** LINI(i,j,l) ((i + 5)*l + j) *** LBXI(i,j,l) ((i + 10)*l + j) *** LBYI(i,j,l) ((i + 15)*l + j) *** LINIX(i,j,l) ((i + 20)*l + j) *** LINIY(i,j,l) ((i + 25)*l + j) *** *** SA has a length of 5 (number of columns we look back) * *** * 6 (number of structures we look at) * *** * length of the sequence **/ SA = (int *)vrna_alloc(sizeof(int) * 5 * 6 * (n2 + 5)); for (j = n2 + 4; j >= 0; j--) { SA[(j * 30)] = SA[(j * 30) + 1] = SA[(j * 30) + 2] = SA[(j * 30) + 3] = SA[(j * 30) + 4] = INF; SA[(j * 30) + 5] = SA[(j * 30) + 1 + 5] = SA[(j * 30) + 2 + 5] = SA[(j * 30) + 3 + 5] = SA[(j * 30) + 4 + 5] = INF; SA[(j * 30) + 10] = SA[(j * 30) + 1 + 10] = SA[(j * 30) + 2 + 10] = SA[(j * 30) + 3 + 10] = SA[(j * 30) + 4 + 10] = INF; SA[(j * 30) + 15] = SA[(j * 30) + 1 + 15] = SA[(j * 30) + 2 + 15] = SA[(j * 30) + 3 + 15] = SA[(j * 30) + 4 + 15] = INF; SA[(j * 30) + 20] = SA[(j * 30) + 1 + 20] = SA[(j * 30) + 2 + 20] = SA[(j * 30) + 3 + 20] = SA[(j * 30) + 4 + 20] = INF; SA[(j * 30) + 25] = SA[(j * 30) + 1 + 25] = SA[(j * 30) + 2 + 25] = SA[(j * 30) + 3 + 25] = SA[(j * 30) + 4 + 25] = INF; } i = 10; i_length = n1 - 9; while (i < i_length) { int di1, di2, di3, di4; int idx = i % 5; int idx_1 = (i - 1) % 5; int idx_2 = (i - 2) % 5; int idx_3 = (i - 3) % 5; int idx_4 = (i - 4) % 5; di1 = 0.5 * (access_s1[5][i + 4] - access_s1[4][i + 4] + access_s1[5][i] - access_s1[4][i - 1]); di2 = 0.5 * (access_s1[5][i + 3] - access_s1[4][i + 3] + access_s1[5][i - 1] - access_s1[4][i - 2]) + di1; di3 = 0.5 * (access_s1[5][i + 2] - access_s1[4][i + 2] + access_s1[5][i - 2] - access_s1[4][i - 3]) + di2; di4 = 0.5 * (access_s1[5][i + 1] - access_s1[4][i + 1] + access_s1[5][i - 3] - access_s1[4][i - 4]) + di3; /* * di1 = access_s1[5][i] - access_s1[4][i-1]; * di2 = access_s1[5][i-1] - access_s1[4][i-2] + di1; * di3 = access_s1[5][i-2] - access_s1[4][i-3] + di2; * di4 = access_s1[5][i-3] - access_s1[4][i-4] + di3; * di1=MIN2(di1,maxPenalty[0]); * di2=MIN2(di2,maxPenalty[1]); * di3=MIN2(di3,maxPenalty[2]); * di4=MIN2(di4,maxPenalty[3]); */ j = n2 - 9; while (--j > 9) { int dj1, dj2, dj3, dj4; dj1 = DJ[0][j]; dj2 = DJ[1][j]; dj3 = DJ[2][j]; dj4 = DJ[3][j]; int type2, type, temp; type = pair[S1[i]][S2[j]]; /** *** Start duplex **/ /* SA[LCI(idx,j,n2)] = type ? P->DuplexInit + access_s1[1][i] + access_s2[1][j] : INF; */ SA[LCI(idx, j, n2)] = type ? 
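/* The 1-D array SA replaces the six O(n^2) matrices of fduplexfold_XS.
 * Only the last five rows (i-4 .. i) are live, addressed through
 * idx_k = (i - k) % 5 and the offset macros quoted in the comment above;
 * written out as a sketch (they are defined elsewhere in this file or its
 * headers, not reproduced here):
 *
 *   #define LCI(i, j, l)    ((i     ) * (l) + (j))    -- c    (duplex)
 *   #define LINI(i, j, l)   ((i +  5) * (l) + (j))    -- in   (interior loop)
 *   #define LBXI(i, j, l)   ((i + 10) * (l) + (j))    -- bx   (target bulge)
 *   #define LBYI(i, j, l)   ((i + 15) * (l) + (j))    -- by   (query bulge)
 *   #define LINIX(i, j, l)  ((i + 20) * (l) + (j))    -- inx  (nx1 loop)
 *   #define LINIY(i, j, l)  ((i + 25) * (l) + (j))    -- iny  (1xn loop)
 *
 * The initialisation loop above simply floods the whole 5 * 6 * (n2 + 5)
 * block with INF before the scan starts. */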
P->DuplexInit : INF; /** *** update lin bx by linx liny matrix **/ type2 = pair[S2[j + 1]][S1[i - 1]]; /** *** start/extend interior loop **/ SA[LINI(idx, j, n2)] = MIN2(SA[LCI(idx_1, j + 1, n2)] + P->mismatchI[type2][SS2[j]][SS1[i]] + di1 + dj1 + iopen + iext_s, SA[LINI(idx_1, j, n2)] + iext_ass + di1); /** *** start/extend nx1 target *** use same type2 as for in **/ SA[LINIX(idx, j, n2)] = MIN2(SA[LCI(idx_1, j + 1, n2)] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + di1 + dj1 + iopen + iext_s, SA[LINIX(idx_1, j, n2)] + iext_ass + di1); /** *** start/extend 1xn target *** use same type2 as for in **/ SA[LINIY(idx, j, n2)] = MIN2(SA[LCI(idx_1, j + 1, n2)] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + di1 + dj1 + iopen + iext_s, SA[LINIY(idx, j + 1, n2)] + iext_ass + dj1); /** *** extend interior loop **/ SA[LINI(idx, j, n2)] = MIN2(SA[LINI(idx, j, n2)], SA[LINI(idx, j + 1, n2)] + iext_ass + dj1); SA[LINI(idx, j, n2)] = MIN2(SA[LINI(idx, j, n2)], SA[LINI(idx_1, j + 1, n2)] + iext_s + di1 + dj1); /** *** start/extend bulge target **/ type2 = pair[S2[j]][S1[i - 1]]; SA[LBXI(idx, j, n2)] = MIN2(SA[LBXI(idx_1, j, n2)] + bext + di1, SA[LCI(idx_1, j, n2)] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0) + di1); /** *** start/extend bulge query **/ type2 = pair[S2[j + 1]][S1[i]]; SA[LBYI(idx, j, n2)] = MIN2(SA[LBYI(idx, j + 1, n2)] + bext + dj1, SA[LCI(idx, j + 1, n2)] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0) + dj1); /** ***end update recursion **/ if (!type) continue; /** *** stack extension **/ SA[LCI(idx, j, n2)] += vrna_E_ext_stem(type, SS1[i - 1], SS2[j + 1], P); /** *** stack extension **/ if ((type2 = pair[S1[i - 1]][S2[j + 1]])) SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_1, j + 1, n2)] + P->stack[rtype[type]][type2] + di1 + dj1, SA[LCI(idx, j, n2)]); /** *** 1x0 / 0x1 stack extension **/ if ((type2 = pair[S1[i - 1]][S2[j + 2]])) { SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_1, j + 2, n2)] + P->bulge[1] + P->stack[rtype[type]][type2] + di1 + dj2, SA[LCI(idx, j, n2)]); } if ((type2 = pair[S1[i - 2]][S2[j + 1]])) { SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_2, j + 1, n2)] + P->bulge[1] + P->stack[type2][rtype[type]] + di2 + dj1, SA[LCI(idx, j, n2)]); } /** *** 1x1 / 2x2 stack extension **/ if ((type2 = pair[S1[i - 2]][S2[j + 2]])) { SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_2, j + 2, n2)] + P->int11[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]] + di2 + dj2, SA[LCI(idx, j, n2)]); } if ((type2 = pair[S1[i - 3]][S2[j + 3]])) { SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_3, j + 3, n2)] + P->int22[type2][rtype[type]][SS1[i - 2]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + di3 + dj3, SA[LCI(idx, j, n2)]); } /** *** 1x2 / 2x1 stack extension *** E_IntLoop(1,2,type2, rtype[type],SS1[i-1], SS2[j+2], SS1[i-1], SS2[j+1], P) corresponds to *** P->int21[rtype[type]][type2][SS2[j+2]][SS1[i-1]][SS1[i-1]] **/ if ((type2 = pair[S1[i - 3]][S2[j + 2]])) { SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_3, j + 2, n2)] + P->int21[rtype[type]][type2][SS2[j + 1]][SS1[i - 2]][SS1[i - 1]] + di3 + dj2, SA[LCI(idx, j, n2)]); } if ((type2 = pair[S1[i - 2]][S2[j + 3]])) { SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_2, j + 3, n2)] + P->int21[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + di2 + dj3, SA[LCI(idx, j, n2)]); } /** *** 2x3 / 3x2 stack extension **/ if ((type2 = pair[S1[i - 4]][S2[j + 3]])) { SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_4, j + 3, n2)] + P->internal_loop[5] + P->ninio[2] + P->mismatch23I[type2][SS1[i - 3]][SS2[j + 2]] + P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + di4 + dj3, SA[LCI(idx, j, n2)]); } if ((type2 = pair[S1[i - 3]][S2[j + 
4]])) { SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_3, j + 4, n2)] + P->internal_loop[5] + P->ninio[2] + P->mismatch23I[type2][SS1[i - 2]][SS2[j + 3]] + P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + di3 + dj4, SA[LCI(idx, j, n2)]); } /** *** So now we have to handle 1x3, 3x1, 3x3, and mxn m,n > 3 **/ /** *** 3x3 or more **/ SA[LCI(idx, j, n2)] = MIN2(SA[LINI(idx_3, j + 3, n2)] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + 2 * iext_s + di3 + dj3, SA[LCI(idx, j, n2)]); /** *** 2xn or more **/ SA[LCI(idx, j, n2)] = MIN2(SA[LINI(idx_4, j + 2, n2)] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass + di4 + dj2, SA[LCI(idx, j, n2)]); /** *** nx2 or more **/ SA[LCI(idx, j, n2)] = MIN2(SA[LINI(idx_2, j + 4, n2)] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass + di2 + dj4, SA[LCI(idx, j, n2)]); /** *** nx1 n>2 **/ SA[LCI(idx, j, n2)] = MIN2(SA[LINIX(idx_3, j + 1, n2)] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + di3 + dj1, SA[LCI(idx, j, n2)]); /** *** 1xn n>2 **/ SA[LCI(idx, j, n2)] = MIN2(SA[LINIY(idx_1, j + 3, n2)] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + dj3 + di1, SA[LCI(idx, j, n2)]); /** *** nx0 n>1 **/ int bAU; bAU = (type > 2 ? P->TerminalAU : 0); SA[LCI(idx, j, n2)] = MIN2(SA[LBXI(idx_2, j + 1, n2)] + di2 + dj1 + bext + bAU, SA[LCI(idx, j, n2)]); /** *** 0xn n>1 **/ SA[LCI(idx, j, n2)] = MIN2(SA[LBYI(idx_1, j + 2, n2)] + di1 + dj2 + bext + bAU, SA[LCI(idx, j, n2)]); temp = min_colonne; /** *** (type>2?P->TerminalAU:0)+ *** P->dangle3[rtype[type]][SS1[i+1]]+ *** P->dangle5[rtype[type]][SS2[j-1]], **/ /* * remove this line printf("LCI %d:%d %d\t",i,j,SA[LCI(idx,j,n2)]); * remove this line printf("LI %d:%d %d\t",i,j, SA[LINI(idx,j,n2)]); */ min_colonne = MIN2(SA[LCI(idx, j, n2)] + vrna_E_ext_stem(rtype[type], SS2[j - 1], SS1[i + 1], P), min_colonne); if (temp > min_colonne) min_j_colonne = j; /* ---------------------------------------------------------------------end update */ } if (max >= min_colonne) { max = min_colonne; max_pos = i; max_pos_j = min_j_colonne; } position[i + delta] = min_colonne; min_colonne = INF; position_j[i + delta] = min_j_colonne; /* remove this line printf("\n"); */ i++; } /* printf("MAX: %d",max); */ free(S1); free(S2); free(SS1); free(SS2); free(SA); if (max < threshold) { find_max_XS(position, position_j, delta, threshold, alignment_length, s1, s2, access_s1, access_s2, fast, il_a, il_b, b_a, b_b); } if (max < INF) { plot_max_XS(max, max_pos, max_pos_j, alignment_length, s1, s2, access_s1, access_s2, fast, il_a, il_b, b_a, b_b); } for (i = 0; i <= 3; i++) free(DJ[i]); free(DJ); free(position); free(position_j); return NULL; } PRIVATE void find_max_XS(const int *position, const int *position_j, const int delta, const int threshold, const int alignment_length, const char *s1, const char *s2, const int **access_s1, const int **access_s2, const int fast, const int il_a, const int il_b, const int b_a, const int b_b) { int pos = n1 - 9; if (fast == 1) { while (10 < pos--) { int temp_min = 0; if (position[pos + delta] < (threshold)) { int search_range; search_range = delta + 1; while (--search_range) if (position[pos + delta - search_range] <= position[pos + delta - temp_min]) temp_min = search_range; pos -= temp_min; int max_pos_j; max_pos_j = position_j[pos + delta]; int max; max = position[pos + delta]; printf("target upper bound %d: query lower bound %d (%5.2f) \n", pos - 10, max_pos_j - 10, ((double)max) / 100); pos = MAX2(10, pos + 
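/* pos + temp_min is the row that triggered the report; scanning resumes
 * delta rows below it (never below 10, the dummy border), so the same
 * interaction window is not reported twice.  position[] holds the best
 * per-row score recorded during the fill, position_j[] the matching query
 * column. */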
temp_min - delta); } } } else if (fast == 2) { pos = n1 - 9; while (10 < pos--) { int temp_min = 0; if (position[pos + delta] < (threshold)) { int search_range; search_range = delta + 1; while (--search_range) if (position[pos + delta - search_range] <= position[pos + delta - temp_min]) temp_min = search_range; pos -= temp_min; int max_pos_j; max_pos_j = position_j[pos + delta]; /* max_pos_j und pos entsprechen die realen position * in der erweiterten sequenz. * pos=1 -> position 1 in the sequence (and not 0 like in C) * max_pos_j -> position 1 in the sequence ( not 0 like in C) */ int alignment_length2; alignment_length2 = MIN2(n1, n2); int begin_t = MAX2(11, pos - alignment_length2 + 1); /* 10 */ int end_t = MIN2(n1 - 10, pos + 1); int begin_q = MAX2(11, max_pos_j - 1); /* 10 */ int end_q = MIN2(n2 - 10, max_pos_j + alignment_length2 - 1); char *s3 = (char *)vrna_alloc(sizeof(char) * (end_t - begin_t + 2 + 20)); char *s4 = (char *)vrna_alloc(sizeof(char) * (end_q - begin_q + 2 + 20)); strcpy(s3, "NNNNNNNNNN"); strcpy(s4, "NNNNNNNNNN"); strncat(s3, (s1 + begin_t - 1), end_t - begin_t + 1); strncat(s4, (s2 + begin_q - 1), end_q - begin_q + 1); strcat(s3, "NNNNNNNNNN"); strcat(s4, "NNNNNNNNNN"); s3[end_t - begin_t + 1 + 20] = '\0'; s4[end_q - begin_q + 1 + 20] = '\0'; duplexT test; test = fduplexfold_XS(s3, s4, access_s1, access_s2, end_t, begin_q, threshold, il_a, il_b, b_a, b_b); if (test.energy * 100 < threshold) { int l1 = strchr(test.structure, '&') - test.structure; printf( " %s %3d,%-3d : %3d,%-3d (%5.2f = %5.2f + %5.2f + %5.2f) [%5.2f] i:%d,j:%d <%5.2f>\n", test.structure, begin_t - 10 + test.i - l1 - 10, begin_t - 10 + test.i - 1 - 10, begin_q - 10 + test.j - 1 - 10, (begin_q - 11) + test.j + (int)strlen(test.structure) - l1 - 2 - 10, test.ddG, test.energy, test.opening_backtrack_x, test.opening_backtrack_y, test.energy_backtrack, pos - 10, max_pos_j - 10, ((double)position[pos + delta]) / 100); pos = MAX2(10, pos + temp_min - delta); free(test.structure); } free(s3); free(s4); } } } else { pos = n1 - 9; while (pos-- > 10) { int temp_min = 0; if (position[pos + delta] < (threshold)) { int search_range; search_range = delta + 1; while (--search_range) if (position[pos + delta - search_range] <= position[pos + delta - temp_min]) temp_min = search_range; pos -= temp_min; /* position on i */ int max_pos_j; max_pos_j = position_j[pos + delta]; /* position on j */ int begin_t = MAX2(11, pos - alignment_length); int end_t = MIN2(n1 - 10, pos + 1); int begin_q = MAX2(11, max_pos_j - 1); int end_q = MIN2(n2 - 10, max_pos_j + alignment_length - 1); int i_flag; int j_flag; i_flag = (end_t == pos + 1 ? 1 : 0); j_flag = (begin_q == max_pos_j - 1 ? 
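/* i_flag / j_flag record whether the excised target window really ends one
 * position past the hit (end_t == pos + 1) and whether the query window
 * starts one position before it (begin_q == max_pos_j - 1); both flags are
 * passed on to duplexfold_XS together with the original hit coordinates. */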
1 : 0); char *s3 = (char *)vrna_alloc(sizeof(char) * (end_t - begin_t + 2)); char *s4 = (char *)vrna_alloc(sizeof(char) * (end_q - begin_q + 2)); strncpy(s3, (s1 + begin_t), end_t - begin_t + 1); strncpy(s4, (s2 + begin_q), end_q - begin_q + 1); s3[end_t - begin_t + 1] = '\0'; s4[end_q - begin_q + 1] = '\0'; duplexT test; test = duplexfold_XS(s3, s4, access_s1, access_s2, pos, max_pos_j, threshold, i_flag, j_flag); if (test.energy * 100 < threshold) { printf("%s %3d,%-3d : %3d,%-3d (%5.2f = %5.2f + %5.2f + %5.2f) i:%d,j:%d <%5.2f>\n", test.structure, test.tb, test.te, test.qb, test.qe, test.ddG, test.energy, test.dG1, test.dG2, pos - 10, max_pos_j - 10, ((double)position[pos + delta]) / 100); pos = MAX2(10, pos + temp_min - delta); } free(s3); free(s4); free(test.structure); } } } } #if 0 PRIVATE int compare(const void *sub1, const void *sub2) { int d; if (((duplexT *)sub1)->ddG > ((duplexT *)sub2)->ddG) return 1; if (((duplexT *)sub1)->ddG < ((duplexT *)sub2)->ddG) return -1; d = ((duplexT *)sub1)->i - ((duplexT *)sub2)->i; if (d != 0) return d; return ((duplexT *)sub1)->j - ((duplexT *)sub2)->j; } #endif PRIVATE void plot_max_XS(const int max, const int max_pos, const int max_pos_j, const int alignment_length, const char *s1, const char *s2, const int **access_s1, const int **access_s2, const int fast, const int il_a, const int il_b, const int b_a, const int b_b) { if (fast == 1) { printf("target upper bound %d: query lower bound %d (%5.2f)\n", max_pos - 3, max_pos_j, ((double)max) / 100); } else if (fast == 2) { int alignment_length2; alignment_length2 = MIN2(n1, n2); int begin_t = MAX2(11, max_pos - alignment_length2 + 1); /* 10 */ int end_t = MIN2(n1 - 10, max_pos + 1); int begin_q = MAX2(11, max_pos_j - 1); /* 10 */ int end_q = MIN2(n2 - 10, max_pos_j + alignment_length2 - 1); char *s3 = (char *)vrna_alloc(sizeof(char) * (end_t - begin_t + 2 + 20)); char *s4 = (char *)vrna_alloc(sizeof(char) * (end_q - begin_q + 2 + 20)); strcpy(s3, "NNNNNNNNNN"); strcpy(s4, "NNNNNNNNNN"); strncat(s3, (s1 + begin_t - 1), end_t - begin_t + 1); strncat(s4, (s2 + begin_q - 1), end_q - begin_q + 1); strcat(s3, "NNNNNNNNNN"); strcat(s4, "NNNNNNNNNN"); s3[end_t - begin_t + 1 + 20] = '\0'; s4[end_q - begin_q + 1 + 20] = '\0'; duplexT test; test = fduplexfold_XS(s3, s4, access_s1, access_s2, end_t, begin_q, INF, il_a, il_b, b_a, b_b); int l1 = strchr(test.structure, '&') - test.structure; printf("%s %3d,%-3d : %3d,%-3d (%5.2f = %5.2f + %5.2f + %5.2f) [%5.2f] i:%d,j:%d <%5.2f>\n", test.structure, begin_t - 10 + test.i - l1 - 10, begin_t - 10 + test.i - 1 - 10, begin_q - 10 + test.j - 1 - 10, (begin_q - 11) + test.j + (int)strlen(test.structure) - l1 - 2 - 10, test.ddG, test.energy, test.opening_backtrack_x, test.opening_backtrack_y, test.energy_backtrack, max_pos - 10, max_pos_j - 10, (double)max / 100); free(s3); free(s4); free(test.structure); } else { int begin_t = MAX2(11, max_pos - alignment_length); int end_t = MIN2(n1 - 10, max_pos + 1); int begin_q = MAX2(11, max_pos_j - 1); int end_q = MIN2(n2 - 10, max_pos_j + alignment_length - 1); int i_flag; int j_flag; i_flag = (end_t == max_pos + 1 ? 1 : 0); j_flag = (begin_q == max_pos_j - 1 ? 
1 : 0); char *s3 = (char *)vrna_alloc(sizeof(char) * (end_t - begin_t + 2)); /* +1 for \0 +1 for distance */ char *s4 = (char *)vrna_alloc(sizeof(char) * (end_q - begin_q + 2)); strncpy(s3, (s1 + begin_t - 1), end_t - begin_t + 1); /* -1 to go from */ strncpy(s4, (s2 + begin_q - 1), end_q - begin_q + 1); /* -1 to go from */ s3[end_t - begin_t + 1] = '\0'; /* */ s4[end_q - begin_q + 1] = '\0'; duplexT test; test = duplexfold_XS(s3, s4, access_s1, access_s2, max_pos, max_pos_j, INF, i_flag, j_flag); printf("%s %3d,%-3d : %3d,%-3d (%5.2f = %5.2f + %5.2f + %5.2f) i:%d,j:%d <%5.2f>\n", test.structure, test.tb, test.te, test.qb, test.qe, test.ddG, test.energy, test.dG1, test.dG2, max_pos - 10, max_pos_j - 10, (double)max / 100); free(s3); free(s4); free(test.structure); } } /*---------------------------------------------------------duplexfold----------------------------------------------------------------------------------*/ PRIVATE duplexT duplexfold(const char *s1, const char *s2, const int extension_cost) { int i, j, l1, Emin = INF, i_min = 0, j_min = 0; char *struc; duplexT mfe; vrna_md_t md; n3 = (int)strlen(s1); n4 = (int)strlen(s2); set_model_details(&md); if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) { update_fold_params(); if (P) free(P); P = vrna_params(&md); make_pair_matrix(); } c = (int **)vrna_alloc(sizeof(int *) * (n3 + 1)); for (i = 0; i <= n3; i++) c[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1)); encode_seqs(s1, s2); for (i = 1; i <= n3; i++) { for (j = n4; j > 0; j--) { int type, type2, E, k, l; type = pair[S1[i]][S2[j]]; c[i][j] = type ? P->DuplexInit + 2 * extension_cost : INF; if (!type) continue; /** *** if (i>1) c[i][j] += P->dangle5[type][SS1[i-1]]+ extension_cost; *** if (j<n4) c[i][j] += P->dangle3[type][SS2[j+1]]+ extension_cost; *** if (type>2) c[i][j] += P->TerminalAU; **/ c[i][j] += vrna_E_ext_stem(type, (i > 1) ? SS1[i - 1] : -1, (j < n4) ? SS2[j + 1] : -1, P); for (k = i - 1; k > 0 && k > i - MAXLOOP - 2; k--) { for (l = j + 1; l <= n4; l++) { if (i - k + l - j - 2 > MAXLOOP) break; type2 = pair[S1[k]][S2[l]]; if (!type2) continue; E = E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P) + (i - k + l - j) * extension_cost; c[i][j] = MIN2(c[i][j], c[k][l] + E); } } E = c[i][j]; /** *** if (i<n3) E += P->dangle3[rtype[type]][SS1[i+1]]+extension_cost; *** if (j>1) E += P->dangle5[rtype[type]][SS2[j-1]]+extension_cost; *** if (type>2) E += P->TerminalAU; *** **/ E += vrna_E_ext_stem(rtype[type], (j > 1) ? SS2[j - 1] : -1, (i < n3) ? 
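/* duplexfold enumerates interior loops explicitly: c[i][j] is the best
 * duplex ending in pair (i,j), extended from any previous pair (k,l) with at
 * most MAXLOOP unpaired nucleotides in between (E_IntLoop), plus
 * extension_cost per nucleotide pulled into the duplex.  Before comparing
 * against Emin, the contribution of the open end of pair (i,j) (dangles and
 * terminal AU) is added here. */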
SS1[i + 1] : -1, P); if (E < Emin) { Emin = E; i_min = i; j_min = j; } } } struc = backtrack(i_min, j_min, extension_cost); if (i_min < n3) i_min++; if (j_min > 1) j_min--; l1 = strchr(struc, '&') - struc; int size; size = strlen(struc) - 1; Emin -= size * (extension_cost); mfe.i = i_min; mfe.j = j_min; mfe.energy = (double)Emin / 100.; mfe.structure = struc; for (i = 0; i <= n3; i++) free(c[i]); free(c); free(S1); free(S2); free(SS1); free(SS2); return mfe; } PRIVATE char * backtrack(int i, int j, const int extension_cost) { /* backtrack structure going backwards from i, and forwards from j * return structure in bracket notation with & as separator */ int k, l, type, type2, E, traced, i0, j0; char *st1, *st2, *struc; st1 = (char *)vrna_alloc(sizeof(char) * (n3 + 1)); st2 = (char *)vrna_alloc(sizeof(char) * (n4 + 1)); i0 = MIN2(i + 1, n3); j0 = MAX2(j - 1, 1); while (i > 0 && j <= n4) { E = c[i][j]; traced = 0; st1[i - 1] = '('; st2[j - 1] = ')'; type = pair[S1[i]][S2[j]]; if (!type) vrna_message_error("backtrack failed in fold duplex"); for (k = i - 1; k > 0 && k > i - MAXLOOP - 2; k--) { for (l = j + 1; l <= n4; l++) { int LE; if (i - k + l - j - 2 > MAXLOOP) break; type2 = pair[S1[k]][S2[l]]; if (!type2) continue; LE = E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P) + (i - k + l - j) * extension_cost; if (E == c[k][l] + LE) { traced = 1; i = k; j = l; break; } } if (traced) break; } if (!traced) { E -= vrna_E_ext_stem(type, (i > 1) ? SS1[i - 1] : -1, (j < n4) ? SS2[j + 1] : -1, P); /** *** if (i>1) E -= P->dangle5[type][SS1[i-1]]+extension_cost; *** if (j<n4) E -= P->dangle3[type][SS2[j+1]]+extension_cost; *** if (type>2) E -= P->TerminalAU; **/ if (E != P->DuplexInit + 2 * extension_cost) vrna_message_error("backtrack failed in fold duplex"); else break; } } if (i > 1) i--; if (j < n4) j++; struc = (char *)vrna_alloc(i0 - i + 1 + j - j0 + 1 + 2); for (k = MAX2(i, 1); k <= i0; k++) if (!st1[k - 1]) st1[k - 1] = '.'; for (k = j0; k <= j; k++) if (!st2[k - 1]) st2[k - 1] = '.'; strcpy(struc, st1 + MAX2(i - 1, 0)); strcat(struc, "&"); strcat(struc, st2 + j0 - 1); /* printf("%s %3d,%-3d : %3d,%-3d\n", struc, i,i0,j0,j); */ free(st1); free(st2); return struc; } PRIVATE duplexT fduplexfold(const char *s1, const char *s2, const int extension_cost, const int il_a, const int il_b, const int b_a, const int b_b) { int i, j, Emin, i_min, j_min, l1; duplexT mfe; char *struc; int bopen = b_b; int bext = b_a + extension_cost; int iopen = il_b; int iext_s = 2 * (il_a + extension_cost); /* iext_s 2 nt nucleotide extension of interior loop, on i and j side */ int iext_ass = 50 + il_a + extension_cost; /* iext_ass assymetric extension of interior loop, either on i or on j side. 
*/ int min_colonne = INF; /* enthaelt das maximum einer kolonne */ int i_length; int max_pos; /* get position of the best hit */ int max_pos_j; int temp = INF; int min_j_colonne; int max = INF; vrna_md_t md; /* FOLLOWING NEXT 4 LINE DEFINES AN ARRAY CONTAINING POSITION OF THE SUBOPT IN S1 */ n3 = (int)strlen(s1); n4 = (int)strlen(s2); /* * delta_check is the minimal distance allowed for two hits to be accepted * if both hits are closer, reject the smaller ( in term of position) hits * i want to implement a function that, given a position in a long sequence and a small sequence, * duplexfold them at this position and report the result at the command line * for this i first need to rewrite backtrack in order to remove the printf functio * END OF DEFINITION FOR NEEDED SUBOPT DATA */ set_model_details(&md); if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) { update_fold_params(); if (P) free(P); P = vrna_params(&md); make_pair_matrix(); } /*local c array initialization---------------------------------------------*/ c = (int **)vrna_alloc(sizeof(int *) * (n3 + 1)); in = (int **)vrna_alloc(sizeof(int *) * (n3 + 1)); bx = (int **)vrna_alloc(sizeof(int *) * (n3 + 1)); by = (int **)vrna_alloc(sizeof(int *) * (n3 + 1)); inx = (int **)vrna_alloc(sizeof(int *) * (n3 + 1)); iny = (int **)vrna_alloc(sizeof(int *) * (n3 + 1)); for (i = 0; i <= n3; i++) { c[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1)); in[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1)); bx[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1)); by[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1)); inx[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1)); iny[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1)); } /* * ------------------------------------------------------------------------- * end of array initialisation---------------------------------- *maybe int *** would be better */ encode_seqs(s1, s2); /* ------------------------------------------matrix initialisierung */ for (i = 0; i < n3; i++) { for (j = 0; j < n4; j++) { in[i][j] = INF; /* no in before 1 */ c[i][j] = INF; /* no bulge and no in before n2 */ bx[i][j] = INF; /* no bulge before 1 */ by[i][j] = INF; inx[i][j] = INF; /* no bulge before 1 */ iny[i][j] = INF; } } /*--------------------------------------------------------local array*/ /* -------------------------------------------------------------matrix initialisierung */ i = 11; i_length = n3 - 9; while (i < i_length) { j = n4 - 9; min_colonne = INF; while (10 < --j) { int type, type2; type = pair[S1[i]][S2[j]]; /** *** Start duplex **/ c[i][j] = type ? P->DuplexInit + 2 * extension_cost : INF; /** *** update lin bx by linx liny matrix **/ type2 = pair[S2[j + 1]][S1[i - 1]]; /** *** start/extend interior loop **/ in[i][j] = MIN2(c[i - 1][j + 1] + P->mismatchI[type2][SS2[j]][SS1[i]] + iopen + iext_s, in[i - 1][j] + iext_ass); /** *** start/extend nx1 target *** use same type2 as for in **/ inx[i][j] = MIN2(c[i - 1][j + 1] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s, inx[i - 1][j] + iext_ass); /** *** start/extend 1xn target *** use same type2 as for in **/ iny[i][j] = MIN2(c[i - 1][j + 1] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s, iny[i][j + 1] + iext_ass); /** *** extend interior loop **/ in[i][j] = MIN2(in[i][j], in[i][j + 1] + iext_ass); in[i][j] = MIN2(in[i][j], in[i - 1][j + 1] + iext_s); /** *** start/extend bulge target **/ type2 = pair[S2[j]][S1[i - 1]]; bx[i][j] = MIN2(bx[i - 1][j] + bext, c[i - 1][j] + bopen + bext + (type2 > 2 ? 
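/* Target-side bulge (bx): either an existing bulge grows by one target
 * nucleotide (bext, which in this variant already includes extension_cost),
 * or a new bulge is opened next to a closed pair, paying bopen + bext plus
 * the terminal-AU penalty when the inner closing pair is not CG/GC. */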
P->TerminalAU : 0)); /** *** start/extend bulge query **/ type2 = pair[S2[j + 1]][S1[i]]; by[i][j] = MIN2(by[i][j + 1] + bext, c[i][j + 1] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0)); /** ***end update recursion ***######################## Start stack extension############################## **/ if (!type) continue; c[i][j] += vrna_E_ext_stem(type, SS1[i - 1], SS2[j + 1], P) + 2 * extension_cost; /** *** stack extension **/ if ((type2 = pair[S1[i - 1]][S2[j + 1]])) c[i][j] = MIN2(c[i - 1][j + 1] + P->stack[rtype[type]][type2] + 2 * extension_cost, c[i][j]); /** *** 1x0 / 0x1 stack extension **/ type2 = pair[S1[i - 1]][S2[j + 2]]; c[i][j] = MIN2( c[i - 1][j + 2] + P->bulge[1] + P->stack[rtype[type]][type2] + 3 * extension_cost, c[i][j]); type2 = pair[S1[i - 2]][S2[j + 1]]; c[i][j] = MIN2( c[i - 2][j + 1] + P->bulge[1] + P->stack[type2][rtype[type]] + 3 * extension_cost, c[i][j]); /** *** 1x1 / 2x2 stack extension **/ type2 = pair[S1[i - 2]][S2[j + 2]]; c[i][j] = MIN2( c[i - 2][j + 2] + P->int11[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]] + 4 * extension_cost, c[i][j]); type2 = pair[S1[i - 3]][S2[j + 3]]; c[i][j] = MIN2(c[i - 3][j + 3] + P->int22[type2][rtype[type]][SS1[i - 2]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + 6 * extension_cost, c[i][j]); /** *** 1x2 / 2x1 stack extension *** E_IntLoop(1,2,type2, rtype[type],SS1[i-1], SS2[j+2], SS1[i-1], SS2[j+1], P) corresponds to *** P->int21[rtype[type]][type2][SS2[j+2]][SS1[i-1]][SS1[i-1]] **/ type2 = pair[S1[i - 3]][S2[j + 2]]; c[i][j] = MIN2( c[i - 3][j + 2] + P->int21[rtype[type]][type2][SS2[j + 1]][SS1[i - 2]][SS1[i - 1]] + 5 * extension_cost, c[i][j]); type2 = pair[S1[i - 2]][S2[j + 3]]; c[i][j] = MIN2( c[i - 2][j + 3] + P->int21[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + 5 * extension_cost, c[i][j]); /** *** 2x3 / 3x2 stack extension **/ if ((type2 = pair[S1[i - 4]][S2[j + 3]])) { c[i][j] = MIN2(c[i - 4][j + 3] + P->internal_loop[5] + P->ninio[2] + P->mismatch23I[type2][SS1[i - 3]][SS2[j + 2]] + P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + 7 * extension_cost, c[i][j]); } if ((type2 = pair[S1[i - 3]][S2[j + 4]])) { c[i][j] = MIN2(c[i - 3][j + 4] + P->internal_loop[5] + P->ninio[2] + P->mismatch23I[type2][SS1[i - 2]][SS2[j + 3]] + P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + 7 * extension_cost, c[i][j]); } /** *** So now we have to handle 1x3, 3x1, 3x3, and mxn m,n > 3 **/ /** *** 3x3 or more **/ c[i][j] = MIN2( in[i - 3][j + 3] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + 2 * iext_s + 2 * extension_cost, c[i][j]); /** *** 2xn or more **/ c[i][j] = MIN2( in[i - 4][j + 2] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass + 2 * extension_cost, c[i][j]); /** *** nx2 or more **/ c[i][j] = MIN2( in[i - 2][j + 4] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass + 2 * extension_cost, c[i][j]); /** *** nx1 n>2 **/ c[i][j] = MIN2( inx[i - 3][j + 1] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + 2 * extension_cost, c[i][j]); /** *** 1xn n>2 **/ c[i][j] = MIN2( iny[i - 1][j + 3] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + 2 * extension_cost, c[i][j]); /** *** nx0 n>1 **/ int bAU; bAU = (type > 2 ? 
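/* Every recursion term in fduplexfold carries one extension_cost unit per
 * nucleotide added to the duplex; after backtracking, Emin is reduced again
 * by size * extension_cost (the length of the reported structure), so the
 * charge is only applied during the scan.  bAU is the terminal-AU penalty
 * for a closing pair (i,j) other than CG/GC. */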
P->TerminalAU : 0); c[i][j] = MIN2(bx[i - 2][j + 1] + 2 * extension_cost + bext + bAU, c[i][j]); /** *** 0xn n>1 **/ c[i][j] = MIN2(by[i - 1][j + 2] + 2 * extension_cost + bext + bAU, c[i][j]); temp = min_colonne; min_colonne = MIN2(c[i][j] + vrna_E_ext_stem(rtype[type], SS2[j - 1], SS1[i + 1], P) + 2 * extension_cost, min_colonne); if (temp > min_colonne) min_j_colonne = j; /* ---------------------------------------------------------------------end update */ } if (max >= min_colonne) { max = min_colonne; max_pos = i; max_pos_j = min_j_colonne; } i++; } Emin = max; i_min = max_pos; j_min = max_pos_j; int dGe; dGe = 0; struc = fbacktrack(i_min, j_min, extension_cost, il_a, il_b, b_a, b_b, &dGe); if (i_min < n3 - 10) i_min++; if (j_min > 11) j_min--; l1 = strchr(struc, '&') - struc; int size; size = strlen(struc) - 1; Emin -= size * (extension_cost); mfe.i = i_min; mfe.j = j_min; mfe.energy = (double)Emin / 100.; mfe.energy_backtrack = (double)dGe / 100.; mfe.structure = struc; free(S1); free(S2); free(SS1); free(SS2); for (i = 0; i <= n3; i++) { free(c[i]); free(in[i]); free(bx[i]); free(by[i]); free(inx[i]); free(iny[i]); } free(c); free(in); free(bx); free(by); free(inx); free(iny); return mfe; } PRIVATE char * fbacktrack(int i, int j, const int extension_cost, const int il_a, const int il_b, const int b_a, const int b_b, int *dG) { /* backtrack structure going backwards from i, and forwards from j * return structure in bracket notation with & as separator */ int k, l, type, type2, E, traced, i0, j0; char *st1, *st2, *struc; int bopen = b_b; int bext = b_a + extension_cost; int iopen = il_b; int iext_s = 2 * (il_a + extension_cost); /* iext_s 2 nt nucleotide extension of interior loop, on i and j side */ int iext_ass = 50 + il_a + extension_cost; /* iext_ass assymetric extension of interior loop, either on i or on j side. */ st1 = (char *)vrna_alloc(sizeof(char) * (n3 + 1)); st2 = (char *)vrna_alloc(sizeof(char) * (n4 + 1)); i0 = MIN2(i + 1, n3 - 10); j0 = MAX2(j - 1, 11); int state; state = 1; /* we start backtracking from a a pair , i.e. c-matrix */ /* state 1 -> base pair, c * state 2 -> interior loop, in * state 3 -> bx loop, bx * state 4 -> by loop, by */ traced = 1; k = i; l = j; type = pair[S1[i]][S2[j]]; *dG += vrna_E_ext_stem(rtype[type], SS2[j - 1], SS1[i + 1], P); /* (type>2?P->TerminalAU:0)+P->dangle3[rtype[type]][SS1[i+1]]+P->dangle5[rtype[type]][SS2[j-1]]; */ while (i > 10 && j <= n4 - 9 && traced) { traced = 0; switch (state) { case 1: type = pair[S1[i]][S2[j]]; int bAU; bAU = (type > 2 ? 
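/* The trace-back state machine of fbacktrack mirrors fbacktrack_XS: besides
 * the states 1-4 listed above, state 5 follows the nx1 matrix (inx) and
 * state 6 the 1xn matrix (iny).  Case 1 re-tests the stacked and tabulated
 * interior-loop extensions first, then the helper matrices (in, inx, iny,
 * bx, by). */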
P->TerminalAU : 0); if (!type) vrna_message_error("backtrack failed in fold duplex"); type2 = pair[S1[i - 1]][S2[j + 1]]; if (type2 && c[i][j] == (c[i - 1][j + 1] + P->stack[rtype[type]][type2] + 2 * extension_cost)) { k = i - 1; l = j + 1; (*dG) += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); st1[i - 1] = '('; st2[j - 1] = ')'; i = k; j = l; state = 1; traced = 1; break; } type2 = pair[S1[i - 1]][S2[j + 2]]; if (type2 && c[i][j] == (c[i - 1][j + 2] + P->bulge[1] + P->stack[rtype[type]][type2] + 3 * extension_cost)) { k = i - 1; l = j + 2; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); st1[i - 1] = '('; st2[j - 1] = ')'; i = k; j = l; state = 1; traced = 1; break; } type2 = pair[S1[i - 2]][S2[j + 1]]; if (type2 && c[i][j] == (c[i - 2][j + 1] + P->bulge[1] + P->stack[type2][rtype[type]] + 3 * extension_cost)) { k = i - 2; l = j + 1; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); st1[i - 1] = '('; st2[j - 1] = ')'; i = k; j = l; state = 1; traced = 1; break; } type2 = pair[S1[i - 2]][S2[j + 2]]; if (type2 && c[i][j] == (c[i - 2][j + 2] + P->int11[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]] + 4 * extension_cost)) { k = i - 2; l = j + 2; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); st1[i - 1] = '('; st2[j - 1] = ')'; i = k; j = l; state = 1; traced = 1; break; } type2 = pair[S1[i - 3]][S2[j + 3]]; if (type2 && c[i][j] == (c[i - 3][j + 3] + P->int22[type2][rtype[type]][SS1[i - 2]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + 6 * extension_cost)) { k = i - 3; l = j + 3; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); st1[i - 1] = '('; st2[j - 1] = ')'; i = k; j = l; state = 1; traced = 1; break; } type2 = pair[S1[i - 3]][S2[j + 2]]; if (type2 && c[i][j] == (c[i - 3][j + 2] + P->int21[rtype[type]][type2][SS2[j + 1]][SS1[i - 2]][SS1[i - 1]] + 5 * extension_cost)) { k = i - 3; l = j + 2; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); st1[i - 1] = '('; st2[j - 1] = ')'; i = k; j = l; state = 1; traced = 1; break; } type2 = pair[S1[i - 2]][S2[j + 3]]; if (type2 && c[i][j] == (c[i - 2][j + 3] + P->int21[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + 5 * extension_cost)) { k = i - 2; l = j + 3; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); st1[i - 1] = '('; st2[j - 1] = ')'; i = k; j = l; state = 1; traced = 1; break; } type2 = pair[S1[i - 4]][S2[j + 3]]; if (type2 && c[i][j] == (c[i - 4][j + 3] + P->internal_loop[5] + P->ninio[2] + P->mismatch23I[type2][SS1[i - 3]][SS2[j + 2]] + P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + 7 * extension_cost)) { k = i - 4; l = j + 3; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); st1[i - 1] = '('; st2[j - 1] = ')'; i = k; j = l; state = 1; traced = 1; break; } type2 = pair[S1[i - 3]][S2[j + 4]]; if (type2 && c[i][j] == (c[i - 3][j + 4] + P->internal_loop[5] + P->ninio[2] + P->mismatch23I[type2][SS1[i - 2]][SS2[j + 3]] + P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + 7 * extension_cost)) { k = i - 3; l = j + 4; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); st1[i - 1] = '('; st2[j - 1] = 
')'; i = k; j = l; state = 1; traced = 1; break; } if (c[i][j] == (in[i - 3][j + 3] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + 2 * extension_cost + 2 * iext_s)) { k = i; l = j; st1[i - 1] = '('; st2[j - 1] = ')'; i = i - 3; j = j + 3; state = 2; traced = 1; break; } if (c[i][j] == (in[i - 4][j + 2] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass + 2 * extension_cost)) { k = i; l = j; st1[i - 1] = '('; st2[j - 1] = ')'; i = i - 4; j = j + 2; state = 2; traced = 1; break; } if (c[i][j] == (in[i - 2][j + 4] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass + 2 * extension_cost)) { k = i; l = j; st1[i - 1] = '('; st2[j - 1] = ')'; i = i - 2; j = j + 4; state = 2; traced = 1; break; } if (c[i][j] == (inx[i - 3][j + 1] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + 2 * extension_cost)) { k = i; l = j; st1[i - 1] = '('; st2[j - 1] = ')'; i = i - 3; j = j + 1; state = 5; traced = 1; break; } if (c[i][j] == (iny[i - 1][j + 3] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + 2 * extension_cost)) { k = i; l = j; st1[i - 1] = '('; st2[j - 1] = ')'; i = i - 1; j = j + 3; state = 6; traced = 1; break; } if (c[i][j] == (bx[i - 2][j + 1] + 2 * extension_cost + bext + bAU)) { k = i; l = j; st1[i - 1] = '('; st2[j - 1] = ')'; i = i - 2; j = j + 1; state = 3; traced = 1; break; } if (c[i][j] == (by[i - 1][j + 2] + 2 * extension_cost + bext + bAU)) { k = i; l = j; st1[i - 1] = '('; st2[j - 1] = ')'; i = i - 1; j = j + 2; state = 4; traced = 1; break; } break; case 2: if (in[i][j] == (in[i - 1][j + 1] + iext_s)) { i--; j++; state = 2; traced = 1; break; } if (in[i][j] == (in[i - 1][j] + iext_ass)) { i = i - 1; state = 2; traced = 1; break; } if (in[i][j] == (in[i][j + 1] + iext_ass)) { j++; state = 2; traced = 1; break; } type2 = pair[S2[j + 1]][S1[i - 1]]; if (type2 && in[i][j] == (c[i - 1][j + 1] + P->mismatchI[type2][SS2[j]][SS1[i]] + iopen + iext_s)) { int temp; temp = k; k = i - 1; i = temp; temp = l; l = j + 1; j = temp; type = pair[S1[i]][S2[j]]; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); i = k; j = l; state = 1; traced = 1; break; } case 3: if (bx[i][j] == (bx[i - 1][j] + bext)) { i--; state = 3; traced = 1; break; } type2 = pair[S2[j]][S1[i - 1]]; if (type2 && bx[i][j] == (c[i - 1][j] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0))) { int temp; temp = k; k = i - 1; i = temp; temp = l; l = j; j = temp; type = pair[S1[i]][S2[j]]; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); i = k; j = l; state = 1; traced = 1; break; } case 4: if (by[i][j] == (by[i][j + 1] + bext)) { j++; state = 4; traced = 1; break; } type2 = pair[S2[j + 1]][S1[i]]; if (type2 && by[i][j] == (c[i][j + 1] + bopen + bext + (type2 > 2 ? 
P->TerminalAU : 0))) { int temp; temp = k; k = i; i = temp; temp = l; l = j + 1; j = temp; type = pair[S1[i]][S2[j]]; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); i = k; j = l; state = 1; traced = 1; break; } case 5: if (inx[i][j] == (inx[i - 1][j] + iext_ass)) { i--; state = 5; traced = 1; break; } type2 = pair[S2[j + 1]][S1[i - 1]]; if (type2 && inx[i][j] == (c[i - 1][j + 1] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s)) { int temp; temp = k; k = i - 1; i = temp; temp = l; l = j + 1; j = temp; type = pair[S1[i]][S2[j]]; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); i = k; j = l; state = 1; traced = 1; break; } case 6: if (iny[i][j] == (iny[i][j + 1] + iext_ass)) { j++; state = 6; traced = 1; break; } type2 = pair[S2[j + 1]][S1[i - 1]]; if (type2 && iny[i][j] == (c[i - 1][j + 1] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s)) { int temp; temp = k; k = i - 1; i = temp; temp = l; l = j + 1; j = temp; type = pair[S1[i]][S2[j]]; *dG += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); i = k; j = l; state = 1; traced = 1; break; } } } if (!traced) { E = c[i][j]; /** *** if (i>1) {E -= P->dangle5[type][SS1[i-1]]+extension_cost; *dG+=P->dangle5[type][SS1[i-1]];} *** if (j<n4){E -= P->dangle3[type][SS2[j+1]]+extension_cost; *dG+=P->dangle3[type][SS2[j+1]];} *** if (type>2) {E -= P->TerminalAU; *dG+=P->TerminalAU;} **/ int correction; correction = vrna_E_ext_stem(type, (i > 1) ? SS1[i - 1] : -1, (j < n4) ? SS2[j + 1] : -1, P); *dG += correction; E -= correction + 2 * extension_cost; if (E != P->DuplexInit + 2 * extension_cost) { vrna_message_error("backtrack failed in second fold duplex"); } else { *dG += P->DuplexInit; st1[i - 1] = '('; st2[j - 1] = ')'; } } if (i > 11) i--; if (j < n4 - 10) j++; struc = (char *)vrna_alloc(i0 - i + 1 + j - j0 + 1 + 2); for (k = MAX2(i, 1); k <= i0; k++) if (!st1[k - 1]) st1[k - 1] = '.'; for (k = j0; k <= j; k++) if (!st2[k - 1]) st2[k - 1] = '.'; strcpy(struc, st1 + MAX2(i - 1, 0)); strcat(struc, "&"); strcat(struc, st2 + j0 - 1); /* printf("%s %3d,%-3d : %3d,%-3d\n", struc, i,i0,j0,j); */ free(st1); free(st2); return struc; } duplexT ** Lduplexfold(const char *s1, const char *s2, const int threshold, const int extension_cost, const int alignment_length, const int delta, const int fast, const int il_a, const int il_b, const int b_a, const int b_b) { /** *** See variable definition in fduplexfold_XS **/ int i, j; int bopen = b_b; int bext = b_a + extension_cost; int iopen = il_b; int iext_s = 2 * (il_a + extension_cost); /* iext_s 2 nt nucleotide extension of interior loop, on i and j side */ int iext_ass = 50 + il_a + extension_cost; /* iext_ass assymetric extension of interior loop, either on i or on j side. 
*/ int min_colonne = INF; /* holds the minimum (best) energy of the current column */ int i_length; int max_pos; /* get position of the best hit */ int max_pos_j; int temp = INF; int min_j_colonne; int max = INF; int *position; /* per-position column minima, used to report hits below the threshold */ int *position_j; /** *** 1D array corresponding to the standard 2d recursion matrix *** Makes the computation 20% faster **/ int *SA; vrna_md_t md; /** *** variable initialization **/ n1 = (int)strlen(s1); n2 = (int)strlen(s2); /** *** Sequence encoding **/ set_model_details(&md); if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) { update_fold_params(); if (P) free(P); P = vrna_params(&md); make_pair_matrix(); } encode_seqs(s1, s2); /** *** Position of the high score on the target and query sequence **/ position = (int *)vrna_alloc((delta + n1 + 3 + delta) * sizeof(int)); position_j = (int *)vrna_alloc((delta + n1 + 3 + delta) * sizeof(int)); /** *** instead of having six 2-dim arrays we use a unique 1-dim array *** The mapping 2D -> 1D is done based on the macros *** LCI(i,j,l) ((i )*l + j) *** LINI(i,j,l) ((i + 5)*l + j) *** LBXI(i,j,l) ((i + 10)*l + j) *** LBYI(i,j,l) ((i + 15)*l + j) *** LINIX(i,j,l) ((i + 20)*l + j) *** LINIY(i,j,l) ((i + 25)*l + j) *** *** SA has a length of 5 (number of columns we look back) * *** * 6 (number of structures we look at) * *** * length of the sequence **/ SA = (int *)vrna_alloc(sizeof(int) * 5 * 6 * (n2 + 5)); for (j = n2 + 4; j >= 0; j--) { SA[(j * 30)] = SA[(j * 30) + 1] = SA[(j * 30) + 2] = SA[(j * 30) + 3] = SA[(j * 30) + 4] = INF; SA[(j * 30) + 5] = SA[(j * 30) + 1 + 5] = SA[(j * 30) + 2 + 5] = SA[(j * 30) + 3 + 5] = SA[(j * 30) + 4 + 5] = INF; SA[(j * 30) + 10] = SA[(j * 30) + 1 + 10] = SA[(j * 30) + 2 + 10] = SA[(j * 30) + 3 + 10] = SA[(j * 30) + 4 + 10] = INF; SA[(j * 30) + 15] = SA[(j * 30) + 1 + 15] = SA[(j * 30) + 2 + 15] = SA[(j * 30) + 3 + 15] = SA[(j * 30) + 4 + 15] = INF; SA[(j * 30) + 20] = SA[(j * 30) + 1 + 20] = SA[(j * 30) + 2 + 20] = SA[(j * 30) + 3 + 20] = SA[(j * 30) + 4 + 20] = INF; SA[(j * 30) + 25] = SA[(j * 30) + 1 + 25] = SA[(j * 30) + 2 + 25] = SA[(j * 30) + 3 + 25] = SA[(j * 30) + 4 + 25] = INF; } i = 10; i_length = n1 - 9; while (i < i_length) { int idx = i % 5; int idx_1 = (i - 1) % 5; int idx_2 = (i - 2) % 5; int idx_3 = (i - 3) % 5; int idx_4 = (i - 4) % 5; j = n2 - 9; while (9 < --j) { int type, type2; type = pair[S1[i]][S2[j]]; /** *** Start duplex **/ SA[LCI(idx, j, n2)] = type ? 
P->DuplexInit + 2 * extension_cost : INF; /** *** update lin bx by linx liny matrix **/ type2 = pair[S2[j + 1]][S1[i - 1]]; /** *** start/extend interior loop **/ SA[LINI(idx, j, n2)] = MIN2(SA[LCI(idx_1, j + 1, n2)] + P->mismatchI[type2][SS2[j]][SS1[i]] + iopen + iext_s, SA[LINI(idx_1, j, n2)] + iext_ass); /** *** start/extend nx1 target *** use same type2 as for in **/ SA[LINIX(idx, j, n2)] = MIN2(SA[LCI(idx_1, j + 1, n2)] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s, SA[LINIX(idx_1, j, n2)] + iext_ass); /** *** start/extend 1xn target *** use same type2 as for in **/ SA[LINIY(idx, j, n2)] = MIN2(SA[LCI(idx_1, j + 1, n2)] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s, SA[LINIY(idx, j + 1, n2)] + iext_ass); /** *** extend interior loop **/ SA[LINI(idx, j, n2)] = MIN2(SA[LINI(idx, j, n2)], SA[LINI(idx, j + 1, n2)] + iext_ass); SA[LINI(idx, j, n2)] = MIN2(SA[LINI(idx, j, n2)], SA[LINI(idx_1, j + 1, n2)] + iext_s); /** *** start/extend bulge target **/ type2 = pair[S2[j]][S1[i - 1]]; SA[LBXI(idx, j, n2)] = MIN2(SA[LBXI(idx_1, j, n2)] + bext, SA[LCI(idx_1, j, n2)] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0)); /** *** start/extend bulge query **/ type2 = pair[S2[j + 1]][S1[i]]; SA[LBYI(idx, j, n2)] = MIN2(SA[LBYI(idx, j + 1, n2)] + bext, SA[LCI(idx, j + 1, n2)] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0)); /** ***end update recursion ***##################### Start stack extension ###################### **/ if (!type) continue; /** *** stack extension **/ SA[LCI(idx, j, n2)] += vrna_E_ext_stem(type, SS1[i - 1], SS2[j + 1], P) + 2 * extension_cost; /** *** stack extension **/ if ((type2 = pair[S1[i - 1]][S2[j + 1]])) SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_1, j + 1, n2)] + P->stack[rtype[type]][type2] + 2 * extension_cost, SA[LCI(idx, j, n2)]); /** *** 1x0 / 0x1 stack extension **/ if ((type2 = pair[S1[i - 1]][S2[j + 2]])) { SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_1, j + 2, n2)] + P->bulge[1] + P->stack[rtype[type]][type2] + 3 * extension_cost, SA[LCI(idx, j, n2)]); } if ((type2 = pair[S1[i - 2]][S2[j + 1]])) { SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_2, j + 1, n2)] + P->bulge[1] + P->stack[type2][rtype[type]] + 3 * extension_cost, SA[LCI(idx, j, n2)]); } /** *** 1x1 / 2x2 stack extension **/ if ((type2 = pair[S1[i - 2]][S2[j + 2]])) { SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_2, j + 2, n2)] + P->int11[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]] + 4 * extension_cost, SA[LCI(idx, j, n2)]); } if ((type2 = pair[S1[i - 3]][S2[j + 3]])) { SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_3, j + 3, n2)] + P->int22[type2][rtype[type]][SS1[i - 2]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + 6 * extension_cost, SA[LCI(idx, j, n2)]); } /** *** 1x2 / 2x1 stack extension *** E_IntLoop(1,2,type2, rtype[type],SS1[i-1], SS2[j+2], SS1[i-1], SS2[j+1], P) corresponds to *** P->int21[rtype[type]][type2][SS2[j+2]][SS1[i-1]][SS1[i-1]] **/ if ((type2 = pair[S1[i - 3]][S2[j + 2]])) { SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_3, j + 2, n2)] + P->int21[rtype[type]][type2][SS2[j + 1]][SS1[i - 2]][SS1[i - 1]] + 5 * extension_cost, SA[LCI(idx, j, n2)]); } if ((type2 = pair[S1[i - 2]][S2[j + 3]])) { SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_2, j + 3, n2)] + P->int21[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + 5 * extension_cost, SA[LCI(idx, j, n2)]); } /** *** 2x3 / 3x2 stack extension **/ if ((type2 = pair[S1[i - 4]][S2[j + 3]])) { SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_4, j + 3, n2)] + P->internal_loop[5] + P->ninio[2] + P->mismatch23I[type2][SS1[i - 3]][SS2[j + 2]] + P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i 
- 1]] + 7 * extension_cost, SA[LCI(idx, j, n2)]); } if ((type2 = pair[S1[i - 3]][S2[j + 4]])) { SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_3, j + 4, n2)] + P->internal_loop[5] + P->ninio[2] + P->mismatch23I[type2][SS1[i - 2]][SS2[j + 3]] + P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + 7 * extension_cost, SA[LCI(idx, j, n2)]); } /** *** So now we have to handle 1x3, 3x1, 3x3, and mxn m,n > 3 **/ /** *** 3x3 or more **/ SA[LCI(idx, j, n2)] = MIN2(SA[LINI(idx_3, j + 3, n2)] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + 2 * iext_s + 2 * extension_cost, SA[LCI(idx, j, n2)]); /** *** 2xn or more **/ SA[LCI(idx, j, n2)] = MIN2(SA[LINI(idx_4, j + 2, n2)] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass + 2 * extension_cost, SA[LCI(idx, j, n2)]); /** *** nx2 or more **/ SA[LCI(idx, j, n2)] = MIN2(SA[LINI(idx_2, j + 4, n2)] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass + 2 * extension_cost, SA[LCI(idx, j, n2)]); /** *** nx1 n>2 **/ SA[LCI(idx, j, n2)] = MIN2(SA[LINIX(idx_3, j + 1, n2)] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + 2 * extension_cost, SA[LCI(idx, j, n2)]); /** *** 1xn n>2 **/ SA[LCI(idx, j, n2)] = MIN2(SA[LINIY(idx_1, j + 3, n2)] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + 2 * extension_cost, SA[LCI(idx, j, n2)]); /** *** nx0 n>1 **/ int bAU; bAU = (type > 2 ? P->TerminalAU : 0); SA[LCI(idx, j, n2)] = MIN2(SA[LBXI(idx_2, j + 1, n2)] + 2 * extension_cost + bext + bAU, SA[LCI(idx, j, n2)]); /** *** 0xn n>1 **/ SA[LCI(idx, j, n2)] = MIN2(SA[LBYI(idx_1, j + 2, n2)] + 2 * extension_cost + bext + bAU, SA[LCI(idx, j, n2)]); temp = min_colonne; min_colonne = MIN2(SA[LCI(idx, j, n2)] + vrna_E_ext_stem(rtype[type], SS2[j - 1], SS1[i + 1], P) + 2 * extension_cost, min_colonne); if (temp > min_colonne) min_j_colonne = j; } if (max >= min_colonne) { max = min_colonne; max_pos = i; max_pos_j = min_j_colonne; } position[i + delta] = min_colonne; min_colonne = INF; position_j[i + delta] = min_j_colonne; i++; } /* printf("MAX: %d",max); */ free(S1); free(S2); free(SS1); free(SS2); if (max < threshold) { find_max(position, position_j, delta, threshold, alignment_length, s1, s2, extension_cost, fast, il_a, il_b, b_a, b_b); } if (max < INF) { plot_max(max, max_pos, max_pos_j, alignment_length, s1, s2, extension_cost, fast, il_a, il_b, b_a, b_b); } free(SA); free(position); free(position_j); return NULL; } PRIVATE void find_max(const int *position, const int *position_j, const int delta, const int threshold, const int alignment_length, const char *s1, const char *s2, const int extension_cost, const int fast, const int il_a, const int il_b, const int b_a, const int b_b) { int pos = n1 - 9; if (fast == 1) { while (10 < pos--) { int temp_min = 0; if (position[pos + delta] < (threshold)) { int search_range; search_range = delta + 1; while (--search_range) if (position[pos + delta - search_range] <= position[pos + delta - temp_min]) temp_min = search_range; pos -= temp_min; int max_pos_j; max_pos_j = position_j[pos + delta]; int max; max = position[pos + delta]; printf("target upper bound %d: query lower bound %d (%5.2f) \n", pos - 10, max_pos_j - 10, ((double)max) / 100); pos = MAX2(10, pos + temp_min - delta); } } } else if (fast == 2) { pos = n1 - 9; while (10 < pos--) { int temp_min = 0; if (position[pos + delta] < (threshold)) { int search_range; search_range = delta + 1; while (--search_range) if (position[pos + delta - search_range] <= position[pos + delta - temp_min]) 
temp_min = search_range; pos -= temp_min; int max_pos_j; max_pos_j = position_j[pos + delta]; /* max_pos_j and pos correspond to the real positions * in the extended sequence. * pos=1 -> position 1 in the sequence (and not 0 like in C) * max_pos_j -> position 1 in the sequence ( not 0 like in C) */ int alignment_length2; alignment_length2 = MIN2(n1, n2); int begin_t = MAX2(11, pos - alignment_length2 + 1); /* 10 */ int end_t = MIN2(n1 - 10, pos + 1); int begin_q = MAX2(11, max_pos_j - 1); /* 10 */ int end_q = MIN2(n2 - 10, max_pos_j + alignment_length2 - 1); char *s3 = (char *)vrna_alloc(sizeof(char) * (end_t - begin_t + 2 + 20)); char *s4 = (char *)vrna_alloc(sizeof(char) * (end_q - begin_q + 2 + 20)); strcpy(s3, "NNNNNNNNNN"); strcpy(s4, "NNNNNNNNNN"); strncat(s3, (s1 + begin_t - 1), end_t - begin_t + 1); strncat(s4, (s2 + begin_q - 1), end_q - begin_q + 1); strcat(s3, "NNNNNNNNNN"); strcat(s4, "NNNNNNNNNN"); s3[end_t - begin_t + 1 + 20] = '\0'; s4[end_q - begin_q + 1 + 20] = '\0'; duplexT test; test = fduplexfold(s3, s4, extension_cost, il_a, il_b, b_a, b_b); if (test.energy * 100 < threshold) { int l1 = strchr(test.structure, '&') - test.structure; printf("%s %3d,%-3d : %3d,%-3d (%5.2f) [%5.2f] i:%d,j:%d <%5.2f>\n", test.structure, begin_t - 10 + test.i - l1 - 10, begin_t - 10 + test.i - 1 - 10, begin_q - 10 + test.j - 1 - 10, (begin_q - 11) + test.j + (int)strlen(test.structure) - l1 - 2 - 10, test.energy, test.energy_backtrack, pos - 10, max_pos_j - 10, ((double)position[pos + delta]) / 100); pos = MAX2(10, pos + temp_min - delta); } free(s3); free(s4); free(test.structure); } } } #if 0 else if (fast == 3) { pos = n1 - 9; while (10 < pos--) { int temp_min = 0; if (position[pos + delta] < (threshold)) { int search_range; search_range = delta + 1; while (--search_range) if (position[pos + delta - search_range] <= position[pos + delta - temp_min]) temp_min = search_range; pos -= temp_min; int max_pos_j; max_pos_j = position_j[pos + delta]; /* max_pos_j and pos correspond to the real positions * in the extended sequence. * pos=1 -> position 1 in the sequence (and not 0 like in C) * max_pos_j -> position 1 in the sequence ( not 0 like in C) */ //Here we can start the reverse recursion. //Starting from the reported pos / max_pos_j we start the recursion. //We have to be careful with the fact that all energies are inverted.
int alignment_length2; //Select the smallest interaction length in order to define the new interaction length alignment_length2 = MIN2(n1 - pos + 1, max_pos_j - 1 + 1); // int begin_t = MAX2(11, pos - alignment_length2 + 1); /* 10 */ int end_t = MIN2(n1 - 10, pos + 1); int begin_q = MAX2(11, max_pos_j - 1); /* 10 */ int end_q = MIN2(n2 - 10, max_pos_j + alignment_length2 - 1); char *s3 = (char *)vrna_alloc(sizeof(char) * (end_t - begin_t + 2 + 20)); char *s4 = (char *)vrna_alloc(sizeof(char) * (end_q - begin_q + 2 + 20)); strcpy(s3, "NNNNNNNNNN"); strcpy(s4, "NNNNNNNNNN"); strncat(s3, (s1 + begin_t - 1), end_t - begin_t + 1); strncat(s4, (s2 + begin_q - 1), end_q - begin_q + 1); strcat(s3, "NNNNNNNNNN"); strcat(s4, "NNNNNNNNNN"); s3[end_t - begin_t + 1 + 20] = '\0'; s4[end_q - begin_q + 1 + 20] = '\0'; duplexT test; test = fduplexfold(s4, s3, extension_cost, il_a, il_b, b_a, b_b); if (test.energy * 100 < threshold) { int structureLength = strlen(test.structure); int l1 = strchr(test.structure, '&') - test.structure; int start_t, end_t, start_q, end_q; /*reverse structure string*/ char *reverseStructure = (char *)vrna_alloc(sizeof(char) * (structureLength + 1)); int posStructure; for (posStructure = l1 + 1; posStructure < structureLength; posStructure++) { if (test.structure[posStructure] == ')') reverseStructure[posStructure - l1 - 1] = '('; else reverseStructure[posStructure - l1 - 1] = test.structure[posStructure]; } reverseStructure[structureLength - 1 - l1] = '&'; for (posStructure = 0; posStructure < l1; posStructure++) { if (test.structure[posStructure] == '(') reverseStructure[structureLength + posStructure - l1] = ')'; else reverseStructure[structureLength + posStructure - l1] = test.structure[posStructure]; } reverseStructure[structureLength] = '\0'; // l1=strchr(reverse.structure, '&')-test.structure; printf("%s %3d,%-3d : %3d,%-3d (%5.2f) [%5.2f] i:%d,j:%d <%5.2f>\n", reverseStructure, begin_t - 10 + test.j - 1 - 10, (begin_t - 11) + test.j + strlen(test.structure) - l1 - 2 - 10, begin_q - 10 + test.i - l1 - 10, begin_q - 10 + test.i - 1 - 10, test.energy, test.energy_backtrack, pos, max_pos_j, ((double)position[pos + delta]) / 100); pos = MAX2(10, pos + temp_min - delta); } free(s3); free(s4); free(test.structure); } } } #endif else { pos = n1 - 9; while (10 < pos--) { int temp_min = 0; if (position[pos + delta] < (threshold)) { int search_range; search_range = delta + 1; while (--search_range) if (position[pos + delta - search_range] <= position[pos + delta - temp_min]) temp_min = search_range; pos -= temp_min; int max_pos_j; max_pos_j = position_j[pos + delta]; /* max_pos_j and pos correspond to the real positions * in the extended sequence. 
* pos=1 -> position 1 in the sequence (and not 0 like in C) * max_pos_j -> position 1 in the sequence ( not 0 like in C) */ int alignment_length2; alignment_length2 = MIN2(n1, n2); int begin_t = MAX2(11, pos - alignment_length2 + 1); /* 10 */ int end_t = MIN2(n1 - 10, pos + 1); int begin_q = MAX2(11, max_pos_j - 1); /* 10 */ int end_q = MIN2(n2 - 10, max_pos_j + alignment_length2 - 1); char *s3 = (char *)vrna_alloc(sizeof(char) * (end_t - begin_t + 2)); char *s4 = (char *)vrna_alloc(sizeof(char) * (end_q - begin_q + 2)); strncpy(s3, (s1 + begin_t - 1), end_t - begin_t + 1); strncpy(s4, (s2 + begin_q - 1), end_q - begin_q + 1); s3[end_t - begin_t + 1] = '\0'; s4[end_q - begin_q + 1] = '\0'; duplexT test; test = duplexfold(s3, s4, extension_cost); if (test.energy * 100 < threshold) { int l1 = strchr(test.structure, '&') - test.structure; printf("%s %3d,%-3d : %3d,%-3d (%5.2f) i:%d,j:%d <%5.2f>\n", test.structure, begin_t - 10 + test.i - l1, begin_t - 10 + test.i - 1, begin_q - 10 + test.j - 1, (begin_q - 11) + test.j + (int)strlen(test.structure) - l1 - 2, test.energy, pos - 10, max_pos_j - 10, ((double)position[pos + delta]) / 100); pos = MAX2(10, pos + temp_min - delta); } free(s3); free(s4); free(test.structure); } } } } PRIVATE void plot_max(const int max, const int max_pos, const int max_pos_j, const int alignment_length, const char *s1, const char *s2, const int extension_cost, const int fast, const int il_a, const int il_b, const int b_a, const int b_b) { if (fast == 1) { printf("target upper bound %d: query lower bound %d (%5.2f)\n", max_pos - 10, max_pos_j - 10, ((double)max) / 100); } else if (fast == 2) { int alignment_length2; alignment_length2 = MIN2(n1, n2); int begin_t = MAX2(11, max_pos - alignment_length2 + 1); /* 10 */ int end_t = MIN2(n1 - 10, max_pos + 1); int begin_q = MAX2(11, max_pos_j - 1); /* 10 */ int end_q = MIN2(n2 - 10, max_pos_j + alignment_length2 - 1); char *s3 = (char *)vrna_alloc(sizeof(char) * (end_t - begin_t + 2 + 20)); char *s4 = (char *)vrna_alloc(sizeof(char) * (end_q - begin_q + 2 + 20)); strcpy(s3, "NNNNNNNNNN"); strcpy(s4, "NNNNNNNNNN"); strncat(s3, (s1 + begin_t - 1), end_t - begin_t + 1); strncat(s4, (s2 + begin_q - 1), end_q - begin_q + 1); strcat(s3, "NNNNNNNNNN"); strcat(s4, "NNNNNNNNNN"); s3[end_t - begin_t + 1 + 20] = '\0'; s4[end_q - begin_q + 1 + 20] = '\0'; duplexT test; test = fduplexfold(s3, s4, extension_cost, il_a, il_b, b_a, b_b); int l1 = strchr(test.structure, '&') - test.structure; printf("%s %3d,%-3d : %3d,%-3d (%5.2f) [%5.2f] i:%d,j:%d <%5.2f>\n", test.structure, begin_t - 10 + test.i - l1 - 10, begin_t - 10 + test.i - 1 - 10, begin_q - 10 + test.j - 1 - 10, (begin_q - 11) + test.j + (int)strlen(test.structure) - l1 - 2 - 10, test.energy, test.energy_backtrack, max_pos - 10, max_pos_j - 10, ((double)max) / 100); free(s3); free(s4); free(test.structure); } else { duplexT test; int alignment_length2; alignment_length2 = MIN2(n1, n2); int begin_t = MAX2(11, max_pos - alignment_length2 + 1); int end_t = MIN2(n1 - 10, max_pos + 1); int begin_q = MAX2(11, max_pos_j - 1); int end_q = MIN2(n2 - 10, max_pos_j + alignment_length2 - 1); char *s3 = (char *)vrna_alloc(sizeof(char) * (end_t - begin_t + 2)); char *s4 = (char *)vrna_alloc(sizeof(char) * (end_q - begin_q + 2)); strncpy(s3, (s1 + begin_t - 1), end_t - begin_t + 1); strncpy(s4, (s2 + begin_q - 1), end_q - begin_q + 1); s3[end_t - begin_t + 1] = '\0'; s4[end_q - begin_q + 1] = '\0'; test = duplexfold(s3, s4, extension_cost); int l1 = strchr(test.structure, '&') - test.structure; 
printf("%s %3d,%-3d : %3d,%-3d (%5.2f) i:%d,j:%d <%5.2f>\n", test.structure, begin_t - 10 + test.i - l1, begin_t - 10 + test.i - 1, begin_q - 10 + test.j - 1, (begin_q - 11) + test.j + (int)strlen(test.structure) - l1 - 2, test.energy, max_pos - 10, max_pos_j - 10, ((double)max) / 100); free(s3); free(s4); free(test.structure); } } PRIVATE void update_dfold_params(void) { vrna_md_t md; if (P) free(P); set_model_details(&md); P = vrna_params(&md); make_pair_matrix(); } PRIVATE void encode_seqs(const char *s1, const char *s2) { unsigned int i, l; l = strlen(s1); S1 = encode_seq(s1); SS1 = (short *)vrna_alloc(sizeof(short) * (l + 1)); /* SS1 exists only for the special X K and I bases and energy_set!=0 */ for (i = 1; i <= l; i++) /* make numerical encoding of sequence */ SS1[i] = alias[S1[i]]; /* for mismatches of nostandard bases */ l = strlen(s2); S2 = encode_seq(s2); SS2 = (short *)vrna_alloc(sizeof(short) * (l + 1)); /* SS2 exists only for the special X K and I bases and energy_set!=0 */ for (i = 1; i <= l; i++) /* make numerical encoding of sequence */ SS2[i] = alias[S2[i]]; /* for mismatches of nostandard bases */ } PRIVATE short * encode_seq(const char *sequence) { unsigned int i, l; short *S; l = strlen(sequence); S = (short *)vrna_alloc(sizeof(short) * (l + 2)); S[0] = (short)l; /* make numerical encoding of sequence */ for (i = 1; i <= l; i++) S[i] = (short)encode_char(toupper(sequence[i - 1])); /* for circular folding add first base at position n+1 */ S[l + 1] = S[1]; return S; } int arraySize(duplexT **array) { int site_count = 0; while (array[site_count] != NULL) site_count++; return site_count; } void freeDuplexT(duplexT **array) { int size = arraySize(array); while (--size) { free(array[size]->structure); free(array[size]); } free(array[0]->structure); free(array); }
move.h
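/*
 * move.h -- Monte Carlo moves for Faunus: the Movebase interface, atomic and
 * molecular translation/rotation, quadrant jumps, volume moves, speciation
 * (reaction) moves, cluster moves, pivot moves, optional MPI parallel
 * tempering, the weighted move Propagator, and the MCSimulation driver.
 */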
#pragma once #include "core.h" #include "energy.h" #include "average.h" //#include "analysis.h" #include "potentials.h" #include "mpi.h" namespace Faunus { namespace Move { class Movebase { private: virtual void _move(Change&)=0; //!< Perform move and modify change object virtual void _accept(Change&) {}; //!< Call after move is accepted virtual void _reject(Change&) {}; //!< Call after move is rejected virtual void _to_json(json &j) const=0; //!< Extra info for report if needed virtual void _from_json(const json &j)=0; //!< Extra info for report if needed TimeRelativeOfTotal<std::chrono::microseconds> timer; protected: unsigned long cnt=0; unsigned long accepted=0; unsigned long rejected=0; public: static Random slump; //!< Shared for all moves std::string name; //!< Name of move std::string cite; //!< Reference int repeat=1; //!< How many times the move should be repeated per sweep inline void from_json(const json &j) { auto it = j.find("repeat"); if (it!=j.end()) { if (it->is_number()) repeat = it->get<double>(); else if (it->is_string()) if (it->get<std::string>()=="N") repeat = -1; } _from_json(j); if (repeat<0) repeat=0; } inline void to_json(json &j) const { _to_json(j); j["relative time"] = timer.result(); j["acceptance"] = double(accepted)/cnt; j["repeat"] = repeat; j["moves"] = cnt; if (!cite.empty()) j["cite"] = cite; _roundjson(j, 3); } //!< JSON report w. statistics, output etc. inline void move(Change &change) { timer.start(); cnt++; change.clear(); _move(change); if (change.empty()) timer.stop(); } //!< Perform move and modify given change object inline void accept(Change &c) { accepted++; _accept(c); timer.stop(); } inline void reject(Change &c) { rejected++; _reject(c); timer.stop(); } inline virtual double bias(Change &c, double uold, double unew) { return 0; // du } //!< adds extra energy change not captured by the Hamiltonian }; Random Movebase::slump; // static instance of Random (shared for all moves) inline void from_json(const json &j, Movebase &m) { m.from_json( j ); } //!< Configure any move via json inline void to_json(json &j, const Movebase &m) { assert( !m.name.empty() ); m.to_json(j[m.name]); } /** * @brief Swap the charge of a single atom */ template<typename Tspace> class AtomicSwapCharge : public Movebase { private: typedef typename Tspace::Tpvec Tpvec; typedef typename Tspace::Tparticle Tparticle; Tspace& spc; // Space to operate on int molid=-1; double ln10 = log(10); double pKa, pH; Average<double> msqd; // mean squared displacement double _sqd, _bias; // squared displament std::string molname; // name of molecule to operate on Change::data cdata; void _to_json(json &j) const override { j = { {"pH", pH}, {"pka", pKa}, {"molid", molid}, {u8::rootof + u8::bracket("r" + u8::squared), std::sqrt(msqd.avg())}, {"molecule", molname} }; _roundjson(j,3); } void _from_json(const json &j) override { assert(!molecules<Tpvec>.empty()); try { molname = j.at("molecule"); auto it = findName(molecules<Tpvec>, molname); if (it == molecules<Tpvec>.end()) throw std::runtime_error("unknown molecule '" + molname + "'"); molid = it->id(); pH = j.at("pH").get<double>(); pKa = j.at("pKa").get<double>(); if (repeat<0) { auto v = spc.findMolecules(molid); repeat = std::distance(v.begin(), v.end()); // repeat for each molecule... 
if (repeat>0) repeat = repeat * v.front().size(); // ...and for each atom } } catch (std::exception &e) { std::cerr << name << ": " << e.what(); throw; } } //!< Configure via json object typename Tpvec::iterator randomAtom() { assert(molid>=0); auto mollist = spc.findMolecules( molid ); // all `molid` groups if (size(mollist)>0) { auto git = slump.sample( mollist.begin(), mollist.end() ); // random molecule iterator if (!git->empty()) { auto p = slump.sample( git->begin(), git->end() ); // random particle iterator cdata.index = Faunus::distance( spc.groups.begin(), git ); // integer *index* of moved group cdata.atoms[0] = std::distance(git->begin(), p); // index of particle rel. to group return p; } } return spc.p.end(); } void _move(Change &change) override { auto p = randomAtom(); if (p!=spc.p.end()) { auto& g = spc.groups[cdata.index]; double oldcharge = p->charge; p->charge = fabs(oldcharge - 1); _sqd = fabs(oldcharge - 1) - oldcharge; change.groups.push_back( cdata ); // add to list of moved groups _bias = _sqd*(pH-pKa)*ln10; // one may add bias here... } else std::cerr << name << ": no atoms found" << std::endl; } double bias(Change &change, double uold, double unew) override { return _bias; } //!< adds extra energy change not captured by the Hamiltonian void _accept(Change &change) override { msqd += _sqd; } void _reject(Change &change) override { msqd += 0; } public: AtomicSwapCharge(Tspace &spc) : spc(spc) { name = "swapcharge"; repeat = -1; // meaning repeat N times cdata.atoms.resize(1); cdata.internal=true; } }; /** * @brief Translate and rotate a molecular group */ template<typename Tspace> class AtomicTranslateRotate : public Movebase { private: typedef typename Tspace::Tpvec Tpvec; typedef typename Tspace::Tparticle Tparticle; Tspace& spc; // Space to operate on int molid=-1; Point dir={1,1,1}; Average<double> msqd; // mean squared displacement double _sqd; // squared displament std::string molname; // name of molecule to operate on Change::data cdata; void _to_json(json &j) const override { j = { {"dir", dir}, {"molid", molid}, {u8::rootof + u8::bracket("r" + u8::squared), std::sqrt(msqd.avg())}, {"molecule", molname} }; _roundjson(j,3); } void _from_json(const json &j) override { assert(!molecules<Tpvec>.empty()); try { molname = j.at("molecule"); auto it = findName(molecules<Tpvec>, molname); if (it == molecules<Tpvec>.end()) throw std::runtime_error("unknown molecule '" + molname + "'"); molid = it->id(); dir = j.value("dir", Point(1,1,1)); if (repeat<0) { auto v = spc.findMolecules(molid, Tspace::ALL ); repeat = std::distance(v.begin(), v.end()); // repeat for each molecule... if (repeat>0) repeat = repeat * v.front().size(); // ...and for each atom } } catch (std::exception &e) { std::cerr << name << ": " << e.what(); throw; } } //!< Configure via json object typename Tpvec::iterator randomAtom() { assert(molid>=0); //std::cout<<"molid "<<molid<<std::endl; auto mollist = spc.findMolecules( molid, Tspace::ALL ); // all `molid` groups if (size(mollist)>0) { //std::cout<<"looking for atoms"<<std::endl; auto git = slump.sample( mollist.begin(), mollist.end() ); // random molecule iterator if (!git->empty()) { //std::cout<<"found molecule"<<std::endl; auto p = slump.sample( git->begin(), git->end() ); // random particle iterator cdata.index = Faunus::distance( spc.groups.begin(), git ); // integer *index* of moved group cdata.atoms[0] = std::distance(git->begin(), p); // index of particle rel. 
to group return p; } } return spc.p.end(); } void _move(Change &change) override { auto p = randomAtom(); if (p!=spc.p.end()) { double dp = atoms<Tparticle>.at(p->id).dp; double dprot = atoms<Tparticle>.at(p->id).dprot; auto& g = spc.groups[cdata.index]; if (dp>0) { // translate Point oldpos = p->pos; p->pos += 0.5 * dp * ranunit(slump).cwiseProduct(dir); spc.geo.boundaryFunc(p->pos); _sqd = spc.geo.sqdist(oldpos, p->pos); // squared displacement if (!g.atomic) g.cm = Geometry::massCenter(g.begin(), g.end(), spc.geo.boundaryFunc, -g.cm); } if (dprot>0) { // rotate Point u = ranunit(slump); double angle = dprot * (slump()-0.5); Eigen::Quaterniond Q( Eigen::AngleAxisd(angle, u) ); p->rotate(Q, Q.toRotationMatrix()); } if (dp>0 || dprot>0) change.groups.push_back( cdata ); // add to list of moved groups } else std::cerr << name << ": no atoms found" << std::endl; } void _accept(Change &change) override { msqd += _sqd; } void _reject(Change &change) override { msqd += 0; } public: AtomicTranslateRotate(Tspace &spc) : spc(spc) { name = "transrot"; repeat = -1; // meaning repeat N times cdata.atoms.resize(1); cdata.internal=true; } }; /** * @brief Translate and rotate a molecular group */ template<typename Tspace> class TranslateRotate : public Movebase { private: typedef typename Tspace::Tpvec Tpvec; Tspace& spc; // Space to operate on int molid=-1; double dptrans=0; double dprot=0; Point dir={1,1,1}; double _sqd; // squared displacement Average<double> msqd; // mean squared displacement void _to_json(json &j) const override { j = { {"dir", dir}, {"dp", dptrans}, {"dprot", dprot}, {"molid", molid}, {u8::rootof + u8::bracket("r" + u8::squared), std::sqrt(msqd.avg())}, {"molecule", molecules<Tpvec>[molid].name} }; _roundjson(j,3); } void _from_json(const json &j) override { assert(!molecules<Tpvec>.empty()); try { std::string molname = j.at("molecule"); auto it = findName(molecules<Tpvec>, molname); if (it == molecules<Tpvec>.end()) throw std::runtime_error("unknown molecule '" + molname + "'"); molid = it->id(); dir = j.value("dir", Point(1,1,1)); dprot = j.at("dprot"); dptrans = j.at("dp"); if (repeat<0) { auto v = spc.findMolecules(molid); repeat = std::distance(v.begin(), v.end()); } } catch (std::exception &e) { throw std::runtime_error(name+": " + e.what()); } } //!< Configure via json object void _move(Change &change) override { assert(molid>=0); assert(!spc.groups.empty()); assert(spc.geo.getVolume()>0); // pick random group from the system matching molecule type // TODO: This can be slow -- implement look-up-table in Space auto mollist = spc.findMolecules( molid, Tspace::ACTIVE ); // list of molecules w. 
'molid' if (size(mollist)>0) { auto it = slump.sample( mollist.begin(), mollist.end() ); if (!it->empty()) { assert(it->id==molid); if (dptrans>0) { // translate Point oldcm = it->cm; Point dp = 0.5*ranunit(slump).cwiseProduct(dir) * dptrans; it->translate( dp, spc.geo.boundaryFunc ); _sqd = spc.geo.sqdist(oldcm, it->cm); // squared displacement } if (dprot>0) { // rotate Point u = ranunit(slump); double angle = dprot * (slump()-0.5); Eigen::Quaterniond Q( Eigen::AngleAxisd(angle, u) ); it->rotate(Q, spc.geo.boundaryFunc); } if (dptrans>0||dprot>0) { // define changes Change::data d; d.index = Faunus::distance( spc.groups.begin(), it ); // integer *index* of moved group d.all = true; // *all* atoms in group were moved change.groups.push_back( d ); // add to list of moved groups } assert( spc.geo.sqdist( it->cm, Geometry::massCenter(it->begin(),it->end(),spc.geo.boundaryFunc,-it->cm) ) < 1e-9 ); } } else std::cerr << name << ": no molecules found" << std::endl; } void _accept(Change &change) override { msqd += _sqd; } void _reject(Change &change) override { msqd += 0; } public: TranslateRotate(Tspace &spc) : spc(spc) { name = "moltransrot"; repeat = -1; // meaning repeat N times } }; #ifdef DOCTEST_LIBRARY_INCLUDED TEST_CASE("[Faunus] TranslateRotate") { typedef Particle<Radius, Charge, Dipole, Cigar> Tparticle; typedef Space<Geometry::Cuboid, Tparticle> Tspace; typedef typename Tspace::Tpvec Tpvec; CHECK( !atoms<Tparticle>.empty() ); // set in a previous test CHECK( !molecules<Tpvec>.empty() ); // set in a previous test Tspace spc; TranslateRotate<Tspace> mv(spc); json j = R"( {"molecule":"B", "dp":1.0, "dprot":0.5, "dir":[0,1,0], "repeat":2 })"_json; mv.from_json(j); j = json(mv).at(mv.name); CHECK( j.at("molecule") == "B"); CHECK( j.at("dir") == Point(0,1,0) ); CHECK( j.at("dp") == 1.0 ); CHECK( j.at("repeat") == 2 ); CHECK( j.at("dprot") == 0.5 ); } #endif /** * @brief QuadrantJump translates a molecule to another quadrant * considering as the origin the center of the box or the center of mass of a range of atomic indexes * specified by "index": [start:stop]. */ template<typename Tspace> class QuadrantJump : public Movebase { private: typedef typename Tspace::Tpvec Tpvec; typedef typename Tspace::Tparticle Tparticle; Tspace& spc; // Space to operate on int molid=-1; Point dir={1,1,1}; std::vector<size_t> index; double _sqd; // squared displacement Average<double> msqd; // mean squared displacement void _to_json(json &j) const override { j = { {"dir", dir}, {"molid", molid}, {u8::rootof + u8::bracket("r" + u8::squared), std::sqrt(msqd.avg())}, {"molecule", molecules<Tpvec>[molid].name} }; _roundjson(j,3); } void _from_json(const json &j) override { assert(!molecules<Tpvec>.empty()); try { std::string molname = j.at("molecule"); auto it = findName(molecules<Tpvec>, molname); if (it == molecules<Tpvec>.end()) throw std::runtime_error("unknown molecule '" + molname + "'"); molid = it->id(); dir = j.value("dir", Point(1,1,1)); index = j.value("index", decltype(index)()); if (repeat<0) { auto v = spc.findMolecules(molid); repeat = std::distance(v.begin(), v.end()); } } catch (std::exception &e) { throw std::runtime_error(name+": " + e.what()); } } //!< Configure via json object void _move(Change &change) override { assert(molid>=0); assert(!spc.groups.empty()); assert(spc.geo.getVolume()>0); // pick random group from the system matching molecule type // TODO: This can be slow -- implement look-up-table in Space auto mollist = spc.findMolecules( molid, Tspace::ACTIVE ); // list of molecules w. 
'molid' if (size(mollist)>0) { auto it = slump.sample( mollist.begin(), mollist.end() ); if (not it->empty()) { assert(it->id==molid); Point oldcm = it->cm; if (index.size()==2) { Group<Tparticle> g(spc.p.begin(), spc.p.end()); auto cm_O = Geometry::massCenter(g.begin()+index[0], g.begin()+index[1], spc.geo.boundaryFunc ); it->translate( -2*spc.geo.vdist(oldcm, cm_O).cwiseProduct(dir.cast<double>()), spc.geo.boundaryFunc ); } else { it->translate( -2*oldcm.cwiseProduct(dir.cast<double>()), spc.geo.boundaryFunc ); } _sqd = spc.geo.sqdist(oldcm, it->cm); // squared displacement Change::data d; d.index = Faunus::distance( spc.groups.begin(), it ); // integer *index* of moved group d.all = true; // *all* atoms in group were moved change.groups.push_back( d ); // add to list of moved groups assert( spc.geo.sqdist( it->cm, Geometry::massCenter(it->begin(),it->end(),spc.geo.boundaryFunc,-it->cm) ) < 1e-9 ); } } else std::cerr << name << ": no molecules found" << std::endl; } void _accept(Change &change) override { msqd += _sqd; } void _reject(Change &change) override { msqd += 0; } public: QuadrantJump(Tspace &spc) : spc(spc) { name = "quadrantjump"; repeat = -1; // meaning repeat N times } }; template<typename Tspace> class VolumeMove : public Movebase { private: const std::map<std::string, Geometry::VolumeMethod> methods = { {"xy", Geometry::XY}, {"isotropic", Geometry::ISOTROPIC}, {"isochoric", Geometry::ISOCHORIC} }; typename decltype(methods)::const_iterator method; typedef typename Tspace::Tpvec Tpvec; Tspace& spc; Average<double> msqd; // mean squared displacement double dV=0, deltaV=0, Vnew=0, Vold=0; void _to_json(json &j) const override { using namespace u8; j = { {"dV", dV}, {"method", method->first}, {rootof + bracket(Delta + "V" + squared), std::sqrt(msqd.avg())}, {cuberoot + rootof + bracket(Delta + "V" + squared), std::cbrt(std::sqrt(msqd.avg()))} }; _roundjson(j,3); } void _from_json(const json &j) override { method = methods.find( j.value("method", "isotropic") ); if (method==methods.end()) throw std::runtime_error("unknown volume change method"); dV = j.at("dV"); } void _move(Change &change) override { if (dV>0) { change.dV=true; change.all=true; Vold = spc.geo.getVolume(); if (method->second == Geometry::ISOCHORIC) Vold = std::pow(Vold,1.0/3.0); // volume is constant Vnew = std::exp(std::log(Vold) + (slump()-0.5) * dV); deltaV = Vnew-Vold; spc.scaleVolume(Vnew, method->second); } else deltaV=0; } void _accept(Change &change) override { msqd += deltaV*deltaV; } void _reject(Change &change) override { msqd += 0; } public: VolumeMove(Tspace &spc) : spc(spc) { name = "volume"; repeat = 1; } }; // end of VolumeMove /* * @brief Establishes chemical equilibrium of matter between all species * * Consider the dissociation process AX=A+X. This class will locate * all species of type AX and A and make a MC swap move between them. * X is implicit, meaning that it enters only with its chemical potential * (activity). The titrating species, their dissociation constants * and the chemical potential of the titrant are read from a * `processes` JSON object. 
* For example, proton titration of phosphate at pH 7.0 is set up by * listing the corresponding reactions, their equilibrium constants and the * titrant activity in such a `processes` object. * * @todo * Implement classification of reactions to group weight in * mc sweep {reference : prob(reference)} * */ template<typename Tspace> class SpeciationMove : public Movebase { private: typedef typename Tspace::Tpvec Tpvec; Tspace& spc; Tspace *otherspc; ReactionData<Tpvec> *trialprocess; std::map<std::string, Average<double>> accmap; double log_k; bool forward; std::vector<int> molDel; // index of groups to delete std::vector<int> atomDel; // atom index to delete std::map<int, int> molcnt_ins, atomcnt_ins, molcnt_del, atomcnt_del, molcnt, atomcnt; // id's and number of inserted/deleted mols and atoms std::multimap<int, Tpvec> pmap; // coordinates of mols and atoms to be inserted unsigned int Ndeleted, Ninserted; // Number of accepted deletions and insertions void _to_json(json &j) const override { j = { // { "replicas", mpi.nproc() }, // { "datasize", pt.getFormat() } }; json &_j = j["reactions"]; _j = json::object(); for (auto &m : accmap) _j[m.first] = { {"attempts", m.second.cnt}, {"acceptance", m.second.avg()} }; } void _from_json(const json &j) override { //j["speciation"] = "speciation"; } public: SpeciationMove(Tspace &spc) : spc(spc) { name = "speciation"; repeat = 1; } void setOther(Tspace &ospc) { otherspc = &ospc; } double energy(); //!< Returns intrinsic energy of the process void _move(Change &change) override { if ( reactions<Tpvec>.size()>0 ) { auto rit = slump.sample( reactions<Tpvec>.begin(), reactions<Tpvec>.end() ); log_k = rit->log_k; forward = (bool)slump.range(0,1); // random boolean trialprocess = &(*rit); if ( rit->empty(forward) ) // Enforce canonic constraint if invoked return; //Out of material, slip out the back door for (auto &m : rit->Molecules2Add( !forward )) { // Delete checks auto mollist = spc.findMolecules( m.first, Tspace::ALL); if ( molecules<Tpvec>[m.first].atomic ) { if( size(mollist)!=1 ) // There can be only one throw std::runtime_error("Bad definition: One group per atomic molecule!"); auto git = mollist.begin(); if ( git->size() < m.second ) // make sure there are enough atoms in the group return; } else { mollist = spc.findMolecules( m.first, Tspace::ACTIVE); if ( size(mollist) < m.second ) return; // Not possible to perform change, escape through the back door } } for (auto &m : rit->Molecules2Add( forward )) { // Addition checks auto mollist = spc.findMolecules( m.first, Tspace::ALL); if ( molecules<Tpvec>[m.first].atomic ) { if( size(mollist)!=1 ) // There can be only one throw std::runtime_error("Bad definition: One group per atomic molecule!"); auto git = mollist.begin(); if ( (git->size() + m.second) > git->capacity() ) // make sure the group has capacity for the new atoms return; // if not slip out the back door } else { mollist = spc.findMolecules( m.first, Tspace::INACTIVE); if ( size(mollist) < m.second ) return; // Not possible to perform change, escape through the back door } } //The move is doable, raise flag change.dNpart=true; for (auto &m : rit->Molecules2Add( !forward )) { // Delete auto mollist = spc.findMolecules( m.first, Tspace::ALL); if ( molecules<Tpvec>[m.first].atomic ) { if( size(mollist)!=1 ) // There can be only one throw std::runtime_error("Bad definition: One group per atomic molecule!"); Change::data d; auto git = mollist.begin(); auto othermollist = otherspc->findMolecules(m.first, Tspace::ALL); // implies that new and old are in sync auto othergit=othermollist.begin(); d.index = Faunus::distance( 
spc.groups.begin(), git ); // integer *index* of moved group d.internal = true; d.dNpart = true; for ( int N=0; N<m.second; N++ ) { // deactivate m.second m.first atoms auto ait = slump.sample( git->begin(), git->end()); // iterator to random atom // Shuffle back to end, both in trial and new auto nait = git->end()-1; //iterator to last atom int dist = Faunus::distance( ait, git->end() ); // distance to random atom from end if ( Faunus::distance( ait, nait) > 1 ) { std::iter_swap(ait, nait); std::iter_swap(othergit->end()-dist-N, othergit->end() - (1+N) ); } d.atoms.push_back ( Faunus::distance(git->begin(), nait) ); git->deactivate( nait, git->end()); } std::sort( d.atoms.begin(), d.atoms.end() ); change.groups.push_back( d ); // add to list of moved groups } else { mollist = spc.findMolecules( m.first, Tspace::ACTIVE); for ( int N=0; N <m.second; N++ ) { Change::data d; auto git = slump.sample(mollist.begin(), mollist.end()); git->deactivate( git->begin(), git->end()); d.index = Faunus::distance( spc.groups.begin(), git ); // integer *index* of moved group d.all = true; // *all* atoms in group were moved change.groups.push_back( d ); // add to list of moved groups mollist = spc.findMolecules( m.first , Tspace::ACTIVE); // Activate/deactivate all? simply move end to front? } } } for (auto &m : rit->Molecules2Add( forward )) { // Add auto mollist = spc.findMolecules( m.first, Tspace::ALL); if ( molecules<Tpvec>[m.first].atomic ) { Change::data d; auto git = mollist.begin(); d.index = Faunus::distance( spc.groups.begin(), git); d.internal = true; d.dNpart = true; for ( int N=0; N<m.second; N++ ) { // activate m.second m.first atoms git->activate( git->end(), git->end() + 1); auto ait = git->end()-1; spc.geo.randompos(ait->pos, slump); spc.geo.boundaryFunc(ait->pos); d.atoms.push_back( Faunus::distance(git->begin(), ait) ); // index of particle rel. to group } std::sort( d.atoms.begin(), d.atoms.end()); change.groups.push_back( d ); // add to list of moved groups } else { mollist = spc.findMolecules( m.first, Tspace::INACTIVE); if ( size(mollist) < m.second ) { change.dNpart=false; return; // Not possible to perform change, escape through the back door } for ( int N=0; N <m.second; N++ ) { Change::data d; auto git = slump.sample(mollist.begin(), mollist.end()); git->activate( git->inactive().begin(), git->inactive().end()); Point oldcm = git->cm; spc.geo.randompos(oldcm, random); git->translate( oldcm, spc.geo.boundaryFunc ); oldcm = ranunit(slump); Eigen::Quaterniond Q( Eigen::AngleAxisd(2*pc::pi*random(), oldcm) ); git->rotate(Q, spc.geo.boundaryFunc); d.index = Faunus::distance( spc.groups.begin(), git ); // integer *index* of moved group d.all = true; // *all* atoms in group were moved change.groups.push_back( d ); // add to list of moved groups mollist = spc.findMolecules( m.first , Tspace::INACTIVE); } } } std::sort(change.groups.begin(), change.groups.end() ); } else throw std::runtime_error("No reactions in list, disable speciation or add reactions"); } double bias(Change &change, double uold, double unew) override { if (forward) return -log_k*std::log(10); return log_k*std::log(10); } //!< adds extra energy change not captured by the Hamiltonian void _accept(Change &change) override { accmap[ trialprocess->name ] += 1; trialprocess->N_reservoir += (forward == true) ? 
-1 : 1; if( trialprocess->N_reservoir < 0 && trialprocess->canonic == true ) throw std::runtime_error("There are no negative number of molecules"); } void _reject(Change &change) override { accmap[ trialprocess->name ] += 0; } }; // End of class SpeciationMove template<typename Tspace> class Cluster : public Movebase { private: typedef typename Tspace::Tpvec Tpvec; Tspace& spc; Average<double> msqd, msqd_angle, N; double thresholdsq=0, dptrans=0, dprot=0, angle=0, _bias=0; Point dir={1,1,1}, dp; std::vector<std::string> names; std::vector<int> ids; std::vector<size_t> index; // all possible molecules to move void _to_json(json &j) const override { using namespace u8; j = { {"threshold", std::sqrt(thresholdsq)}, {"dir", dir}, {"dp", dptrans}, {"dprot", dprot}, {rootof + bracket("r" + squared), std::sqrt(msqd.avg())}, {rootof + bracket(theta + squared) + "/" + degrees, std::sqrt(msqd_angle.avg()) / 1.0_deg}, {bracket("N"), N.avg()} }; _roundjson(j,3); } void _from_json(const json &j) override { dptrans = j.at("dp"); dir = j.value("dir", Point(1,1,1)); dprot = j.at("dprot"); thresholdsq = std::pow(j.at("threshold").get<double>(), 2); names = j.at("molecules").get<decltype(names)>(); // molecule names ids = names2ids(molecules<Tpvec>, names); // names --> molids index.clear(); for (auto &g : spc.groups) if (!g.atomic) if (std::find(ids.begin(), ids.end(), g.id)!=ids.end() ) index.push_back( &g-&spc.groups.front() ); if (repeat<0) repeat = index.size(); } void findCluster(Tspace &spc, size_t first, std::set<size_t>& cluster) { std::set<size_t> pool(index.begin(), index.end()); cluster.clear(); cluster.insert(first); pool.erase(first); size_t n; do { // find cluster (not very clever...) n = cluster.size(); for (size_t i : cluster) if (!spc.groups[i].empty()) // check if group is inactive for (size_t j : pool) if (!spc.groups[j].empty()) // check if group is inactive if (i!=j) if (spc.geo.sqdist(spc.groups[i].cm, spc.groups[j].cm)<=thresholdsq) { cluster.insert(j); pool.erase(j); } } while (cluster.size()!=n); // check if cluster is too large double max = spc.geo.getLength().minCoeff()/2; for (auto i : cluster) for (auto j : cluster) if (j>i) if (spc.geo.sqdist(spc.groups[i].cm, spc.groups[j].cm)>=max*max) throw std::runtime_error(name+": cluster larger than half box length"); } void _move(Change &change) override { if (thresholdsq>0 && !index.empty()) { std::set<size_t> cluster; // all group index in cluster size_t first = *slump.sample(index.begin(), index.end()); // random molecule (nuclei) findCluster(spc, first, cluster); // find cluster around first N += cluster.size(); // average cluster size Change::data d; d.all=true; dp = 0.5*ranunit(slump).cwiseProduct(dir) * dptrans; angle = dprot * (slump()-0.5); Point COM = Geometry::trigoCom(spc, cluster); // cluster center Eigen::Quaterniond Q; Q = Eigen::AngleAxisd(angle, ranunit(slump)); // quaternion for (auto i : cluster) { // loop over molecules in cluster auto &g = spc.groups[i]; Geometry::rotate(g.begin(), g.end(), Q, spc.geo.boundaryFunc, -COM); g.cm = g.cm-COM; spc.geo.boundary(g.cm); g.cm = Q*g.cm+COM; spc.geo.boundary(g.cm); g.translate( dp, spc.geo.boundaryFunc ); d.index=i; change.groups.push_back(d); } _bias += 0; // one may add bias here... 
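// Sanity check below (debug builds only): after rotating every member about the
// cluster's trigonometric centre and applying the common translation dp, the
// centre itself should have moved by |dp|, so the printed deviation is expected
// to stay near zero.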
#ifndef NDEBUG Point newCOM = Geometry::trigoCom(spc, cluster); double _zero = std::sqrt( spc.geo.sqdist(COM,newCOM) ) - dp.norm(); if (fabs(_zero)>1) std::cerr << _zero << " "; #endif } } double bias(Change &change, double uold, double unew) override { return _bias; } //!< adds extra energy change not captured by the Hamiltonian void _reject(Change &change) override { msqd += 0; msqd_angle += 0; } void _accept(Change &change) override { msqd += dp.squaredNorm(); msqd_angle += angle*angle; } public: Cluster(Tspace &spc) : spc(spc) { cite = "doi:10/cj9gnn"; name = "cluster"; repeat = -1; // meaning repeat N times } }; template<typename Tspace> class Pivot : public Movebase { private: typedef typename Tspace::Tpvec Tpvec; std::vector<std::reference_wrapper<const Potential::BondData>> bonds; std::vector<int> index; // atom index to rotate Tspace& spc; std::string molname; int molid; double dprot; double d2; // cm movement, squared Average<double> msqd; // cm mean squared displacement void _to_json(json &j) const override { using namespace u8; j = { {"molecule", molname}, {"dprot", dprot}, {u8::rootof + u8::bracket("r_cm" + u8::squared), std::sqrt(msqd.avg())} }; _roundjson(j,3); } void _from_json(const json &j) override { dprot = j.at("dprot"); molname = j.at("molecule"); auto it = findName(molecules<Tpvec>, molname); if (it == molecules<Tpvec>.end()) throw std::runtime_error("unknown molecule '" + molname + "'"); molid = it->id(); bonds = Potential::filterBonds( molecules<Tpvec>[molid].bonds, Potential::BondData::harmonic); if (repeat<0) { auto v = spc.findMolecules(molid); repeat = std::distance(v.begin(), v.end()); // repeat for each molecule... if (repeat>0) repeat *= bonds.size(); } } void _move(Change &change) override { d2=0; if (std::fabs(dprot)>1e-9) { auto it = spc.randomMolecule(molid, slump); if (it!=spc.groups.end()) if (it->size()>2) { auto b = slump.sample(bonds.begin(), bonds.end()); // random harmonic bond if (b != bonds.end()) { int i1 = b->get().index.at(0); int i2 = b->get().index.at(1); int offset = std::distance( spc.p.begin(), it->begin() ); index.clear(); if (slump()>0.0) for (size_t i=i2+1; i<it->size(); i++) index.push_back(i+offset); else for (int i=0; i<i1; i++) index.push_back(i+offset); i1+=offset; i2+=offset; if (!index.empty()) { Point oldcm = it->cm; it->unwrap(spc.geo.distanceFunc); // remove pbc Point u = (spc.p[i1].pos - spc.p[i2].pos).normalized(); double angle = dprot * (slump()-0.5); Eigen::Quaterniond Q( Eigen::AngleAxisd(angle, u) ); auto M = Q.toRotationMatrix(); for (auto i : index) { spc.p[i].rotate(Q, M); // internal rot. spc.p[i].pos = Q * ( spc.p[i].pos - spc.p[i1].pos) + spc.p[i1].pos; // positional rot. 
} it->cm = Geometry::massCenter(it->begin(), it->end()); it->wrap(spc.geo.boundaryFunc); // re-apply pbc d2 = spc.geo.sqdist(it->cm, oldcm); // CM movement Change::data d; d.index = Faunus::distance( spc.groups.begin(), it ); // integer *index* of moved group d.all = true; // *all* atoms in group were moved change.groups.push_back( d ); // add to list of moved groups } } } } } void _accept(Change &change) override { msqd += d2; } void _reject(Change &change) override { msqd += 0; } public: Pivot(Tspace &spc) : spc(spc) { name = "pivot"; repeat = -1; // --> repeat=N } }; //!< Pivot move around random harmonic bond axis #ifdef ENABLE_MPI /** * @brief Class for parallel tempering (aka replica exchange) using MPI * * Although not completely correct, the recommended way of performing a temper move * is to do `N` Monte Carlo passes with regular moves and then do a tempering move. * This is because the MPI nodes must be in sync and if you have a system where * the random number generator calls are influenced by the Hamiltonian we could * end up in a deadlock. * * @date Lund 2012, 2018 */ template<class Tspace> class ParallelTempering : public Movebase { private: typedef typename Tspace::Tpvec Tpvec; typedef typename Tspace::Tparticle Tparticle; Tspace& spc; // Space to operate on MPI::MPIController& mpi; int partner; //!< Exchange replica (partner) enum extradata {VOLUME=0}; //!< Structure of extra data to send std::map<std::string, Average<double>> accmap; MPI::FloatTransmitter ft; //!< Class for transmitting floats over MPI MPI::ParticleTransmitter<Tpvec> pt;//!< Class for transmitting particles over MPI void findPartner() { int dr=0; partner = mpi.rank(); (mpi.random()>0.5) ? dr++ : dr--; (mpi.rank() % 2 == 0) ? partner+=dr : partner-=dr; } //!< Find replica to exchange with bool goodPartner() { assert(partner!=mpi.rank() && "Selfpartner! This is not supposed to happen."); if (partner>=0) if ( partner<mpi.nproc() ) if ( partner!=mpi.rank() ) return true; return false; } //!< Is partner valid? 
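// Note on the pairing scheme: findPartner() assumes that mpi.random() returns the
// same value on every rank (the class documentation above stresses that the MPI
// nodes must stay in sync). Even ranks then shift their rank by +/-1 while odd
// ranks apply the opposite sign, so both members of a pair agree on who exchanges
// with whom; goodPartner() filters out edge ranks whose tentative partner falls
// outside [0, nproc).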
void _to_json(json &j) const override { j = { { "replicas", mpi.nproc() }, { "datasize", pt.getFormat() } }; json &_j = j["exchange"]; _j = json::object(); for (auto &m : accmap) _j[m.first] = { {"attempts", m.second.cnt}, {"acceptance", m.second.avg()} }; } void _move(Change &change) override { double Vold = spc.geo.getVolume(); findPartner(); Tpvec p; // temperary storage p.resize(spc.p.size()); if (goodPartner()) { change.all=true; pt.sendExtra[VOLUME]=Vold; // copy current volume for sending pt.recv(mpi, partner, p); // receive particles pt.send(mpi, spc.p, partner); // send everything pt.waitrecv(); pt.waitsend(); double Vnew = pt.recvExtra[VOLUME]; if (Vnew<1e-9 || spc.p.size() != p.size()) MPI_Abort(mpi.comm, 1); if (std::fabs(Vnew-Vold)>1e-9) change.dV=true; spc.p = p; spc.geo.setVolume(Vnew); // update mass centers for (auto& g : spc.groups) if (g.atomic==false) g.cm = Geometry::massCenter(g.begin(), g.end(), spc.geo.boundaryFunc, -g.begin()->pos); } } double exchangeEnergy(double mydu) { std::vector<MPI::FloatTransmitter::floatp> duSelf(1), duPartner; duSelf[0]=mydu; duPartner = ft.swapf(mpi, duSelf, partner); return duPartner.at(0); // return partner energy change } //!< Exchange energy with partner double bias(Change &change, double uold, double unew) override { return exchangeEnergy(unew-uold); // Exchange dU with partner (MPI) } std::string id() { std::ostringstream o; if (mpi.rank() < partner) o << mpi.rank() << " <-> " << partner; else o << partner << " <-> " << mpi.rank(); return o.str(); } //!< Unique string to identify set of partners void _accept(Change &change) override { if ( goodPartner() ) accmap[ id() ] += 1; } void _reject(Change &change) override { if ( goodPartner() ) accmap[ id() ] += 0; } void _from_json(const json &j) override { pt.setFormat( j.value("format", std::string("XYZQI") ) ); } public: ParallelTempering(Tspace &spc, MPI::MPIController &mpi ) : spc(spc), mpi(mpi) { name="temper"; partner=-1; pt.recvExtra.resize(1); pt.sendExtra.resize(1); } }; #endif template<typename Tspace> class Propagator : public BasePointerVector<Movebase> { private: int _repeat; std::discrete_distribution<> dist; std::vector<double> w; // list of weights for each move void addWeight(double weight=1) { w.push_back(weight); dist = std::discrete_distribution<>(w.begin(), w.end()); _repeat = int(std::accumulate(w.begin(), w.end(), 0.0)); } public: using BasePointerVector<Movebase>::vec; inline Propagator() {} inline Propagator(const json &j, Tspace &spc, MPI::MPIController &mpi) { if (j.count("random")==1) Movebase::slump = j["random"]; // slump is static --> shared for all moves for (auto &m : j.at("moves")) {// loop over move list size_t oldsize = vec.size(); for (auto it=m.begin(); it!=m.end(); ++it) { try { #ifdef ENABLE_MPI if (it.key()=="temper") this->template push_back<Move::ParallelTempering<Tspace>>(spc, mpi); #endif if (it.key()=="moltransrot") this->template push_back<Move::TranslateRotate<Tspace>>(spc); if (it.key()=="quadrantjump") this->template push_back<Move::QuadrantJump<Tspace>>(spc); if (it.key()=="transrot") this->template push_back<Move::AtomicTranslateRotate<Tspace>>(spc); if (it.key()=="pivot") this->template push_back<Move::Pivot<Tspace>>(spc); if (it.key()=="volume") this->template push_back<Move::VolumeMove<Tspace>>(spc); if (it.key()=="speciation") this->template push_back<Move::SpeciationMove<Tspace>>(spc); if (it.key()=="cluster") this->template push_back<Move::Cluster<Tspace>>(spc); if (vec.size()==oldsize+1) { vec.back()->from_json( it.value() ); 
addWeight(vec.back()->repeat); } else std::cerr << "warning: ignoring unknown move '" << it.key() << "'" << endl; } catch (std::exception &e) { throw std::runtime_error("Error adding move '" + it.key() + "': " + e.what()); } } } } int repeat() { return _repeat; } auto sample() { if (!vec.empty()) { assert(w.size() == vec.size()); return vec.begin() + dist( Move::Movebase::slump.engine ); } return vec.end(); } //!< Pick move from a weighted, random distribution }; }//Move namespace template<class Tgeometry, class Tparticle> class MCSimulation { private: typedef Space<Tgeometry, Tparticle> Tspace; typedef typename Tspace::Tpvec Tpvec; bool metropolis(double du) const { if (std::isnan(du)) return false; if (du<0) return true; return ( Move::Movebase::slump() > std::exp(-du)) ? false : true; } //!< Metropolis criterion (true=accept) struct State { Tspace spc; Energy::Hamiltonian<Tspace> pot; State(const json &j) : spc(j), pot(spc,j) {} void sync(State &other, Change &change) { spc.sync( other.spc, change ); pot.sync( &other.pot, change ); } }; //!< Contains everything to describe a state State state1, // old state state2; // new state (trial); double uinit=0, dusum=0; Average<double> uavg; void init() { state1.pot.key = Energy::Energybase::OLD; // this is the old energy (current) state2.pot.key = Energy::Energybase::NEW; // this is the new energy (trial) state1.pot.init(); state2.pot.init(); dusum=0; Change c; c.all=true; state2.sync(state1, c); uinit = state1.pot.energy(c); // Hack in reference to state1 in speciation for (auto base : moves.vec) { auto derived = std::dynamic_pointer_cast<Move::SpeciationMove<Tspace>>(base); if (derived) derived->setOther(state1.spc); } assert(state1.pot.energy(c) == state2.pot.energy(c)); } public: Move::Propagator<Tspace> moves; auto& pot() { return state1.pot; } auto& space() { return state1.spc; } const auto& pot() const { return state1.pot; } const auto& space() const { return state1.spc; } const auto& geometry() const { return state1.spc.geo; } const auto& particles() const { return state1.spc.p; } double drift() { Change c; c.all=true; double ufinal = state1.pot.energy(c); return ( ufinal-(uinit+dusum) ) / uinit; } //!< Calculates the relative energy drift from initial configuration MCSimulation(const json &j, MPI::MPIController &mpi) : state1(j), state2(j), moves(j, state2.spc, mpi) { init(); } void store(json &j) const { j = state1.spc; j["random-move"] = Move::Movebase::slump; j["random-global"] = Faunus::random; } // store system to json object void restore(const json &j) { state1.spc = j; state2.spc = j; Move::Movebase::slump = j["random-move"]; // restore move random number generator Faunus::random = j["random-global"]; // restore global random number generator //reactions<Tpvec> = j.at("reactionlist").get<decltype(reactions<Tpvec>)>(); // should be handled by space init(); } //!< restore system from previously store json object void move() { Change change; for (int i=0; i<moves.repeat(); i++) { auto mv = moves.sample(); // pick random move if (mv != moves.end() ) { change.clear(); (**mv).move(change); if (!change.empty()) { double unew, uold, du; #pragma omp parallel sections { #pragma omp section { unew = state2.pot.energy(change); } #pragma omp section { uold = state1.pot.energy(change); } } du = unew - uold; double bias = (**mv).bias(change, uold, unew) + Nchem( state2.spc, state1.spc , change); if ( metropolis(du + bias) ) { // accept move state1.sync( state2, change ); (**mv).accept(change); } else { // reject move state2.sync( state1, 
change ); (**mv).reject(change); du=0; } dusum+=du; // sum of all energy changes } } } } void to_json(json &j) { j = state1.spc.info(); j["temperature"] = pc::temperature / 1.0_K; j["moves"] = moves; j["energy"].push_back(state1.pot); } }; template<class Tgeometry, class Tparticle> void to_json(json &j, MCSimulation<Tgeometry,Tparticle> &mc) { mc.to_json(j); } /** * @brief add documentation..... * * @f[ * \beta U = \ln ( \sum N_o!/N_n! \exp([N_n - N_o]\beta \mu) V^{N_n - N_o} ) * @f] * * @todo * - Rename to something more descriptive * - use exception message to suggest how to fix the problem */ template<typename Tspace> double Nchem( Tspace &spc_n, Tspace &spc_o, const Change &change) { double NoverO=0; if ( change.dNpart ) {// Have the number of any molecules changed for ( auto &m : change.groups ) { int N_o = 0; int N_n = 0; if ( !m.dNpart && !molecules<std::vector<typename Tspace::Tparticle>>[ spc_n.groups[m.index].id ].atomic) { // Molecular species auto mollist_n = spc_n.findMolecules(m.index, Tspace::ACTIVE); auto mollist_o = spc_o.findMolecules(m.index, Tspace::ACTIVE); N_n=size(mollist_n); N_o=size(mollist_o); } if ( m.dNpart ) { auto mollist_n = spc_n.findMolecules(spc_n.groups[m.index].id, Tspace::ALL); if ( size(mollist_n) > 1 ) throw std::runtime_error("Bad definition: One group per atomic molecule!"); auto mollist_o = spc_o.findMolecules(spc_o.groups[m.index].id, Tspace::ALL); if ( size(mollist_o) > 1 ) throw std::runtime_error("Bad definition: One group per atomic molecule!"); // Below is safe due to the catches above // add consistency criteria with m.atoms.size() == N N_n = mollist_n.begin()->size(); N_o = mollist_o.begin()->size(); } int dN = N_n - N_o; if (dN!=0) { double V_n = spc_n.geo.getVolume(); double V_o = spc_o.geo.getVolume(); double betamu = molecules<std::vector<typename Tspace::Tparticle>>[ spc_n.groups[m.index].id ].activity; if (std::fabs(betamu) < 1e-20) betamu = std::log( betamu / 1.0_molar ); if (dN>0) for (int n=0; n < dN; n++) NoverO += -std::log( (N_o + 1 + n) / ( V_n * 1.0_molar )) + betamu; else if (dN<0) for (int n=0; n < (-dN); n++) NoverO += std::log( (N_o - n) / ( V_n * 1.0_molar )) - betamu; } } } return -NoverO; // negative sign since Pref exp{-beta(dU)} = exp{-beta(dU -ln(Pref)} } }//Faunus namespace
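/* Hedged reading of the ideal-gas bias computed by Nchem() above (illustration only, not
 * part of the original source): for a single molecular insertion (dN = +1) the returned
 * bias is
 *
 *     bias = ln( (N_o + 1) / (V * 1.0_molar) ) - betamu
 *
 * and MCSimulation::move() evaluates metropolis(du + bias), so the acceptance probability
 * becomes
 *
 *     min( 1, (V * 1.0_molar) / (N_o + 1) * exp(betamu) * exp(-du) )
 *
 * which is the familiar grand-canonical insertion rule; a deletion (dN = -1) yields the
 * reciprocal prefactor N_o / (V * 1.0_molar) * exp(-betamu). */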
NonlinearSolver_Lambda_Base.h
/* +-----------------------------------+ | | | *** Lambda nonlinear solver *** | | | | Copyright (c) -tHE SWINe- 2015 | | | | NonlinearSolver_Lambda_Base.h | | | +-----------------------------------+ */ #pragma once #ifndef __NONLINEAR_SOLVER_LAMBDA_UTILS #define __NONLINEAR_SOLVER_LAMBDA_UTILS /** * @file include/slam/NonlinearSolver_Lambda_Base.h * @brief utilitites for nonlinear solvers working above the lambda matrix * @author -tHE SWINe- * @date 2015-06-25 */ #include "slam/BlockMatrix.h" //#include <numeric> /** \addtogroup nlsolve * @{ */ /** * @def __NONLINEAR_SOLVER_LAMBDA_DUMP_CHI2 * @brief enables writes of chi2 errors at each step */ //#define __NONLINEAR_SOLVER_LAMBDA_DUMP_CHI2 /** * @def __NONLINEAR_SOLVER_LAMBDA_DUMP_ICRA2013_ANIMATION_DATA * @brief dump ICRA 2013 slam race data */ //#define __NONLINEAR_SOLVER_LAMBDA_DUMP_ICRA2013_ANIMATION_DATA /** * @def __NONLINEAR_SOLVER_LAMBDA_DUMP_RSS2013_PRESENTATION_ANIMATION_DATA * @brief dump RSS 2013 matrix animation data */ //#define __NONLINEAR_SOLVER_LAMBDA_DUMP_RSS2013_PRESENTATION_ANIMATION_DATA /** * @brief utilities for lambda solvers */ namespace lambda_utils { /** * @brief static assertion helper * @brief b_expression is expression being asserted */ template <bool b_expression> class CReductionPlanAssert { public: typedef void BLOCK_SIZE_NOT_PRESENT_IN_THE_LIST; /**< @brief static assertion tag */ }; /** * @brief static assertion helper (specialization for assertion failed) */ template <> class CReductionPlanAssert<false> {}; /** * @brief calculates std::set memory allocation size estimate * @tparam P is set payload type * @param[in] r_set is the set to estimate memory size of * @return Returns the approximate size of the given set, in bytes. */ template <class P> static size_t n_Set_Allocation_Size(const std::set<P> &r_set) { return (sizeof(P) + sizeof(int) + 2 * sizeof(void*)) * // size of a node (payload, red/black, children) ((r_set.size() * 2) / 3) + sizeof(std::set<P>); // number of leafs + other nodes + size of the struct } /** * @brief calculates std::map memory allocation size estimate * * @tparam K is map key type * @tparam P is map payload type * * @param[in] r_map is the map to estimate memory size of * * @return Returns the approximate size of the given map, in bytes. */ template <class K, class P> static size_t n_Map_Allocation_Size(const std::map<K, P> &r_map) { return (sizeof(K) + sizeof(P) + sizeof(int) + 2 * sizeof(void*)) * // size of a node (key, payload, red/black, children) ((r_map.size() * 2) / 3) + sizeof(std::map<K, P>); // number of leafs + other nodes + size of the struct } // todo - consider not storing the reductions in a pool, instead store them in the map and keep a vector of pointers for parallel processing? /** * @brief right hand side vector reduction plan * * This takes care of summing up right hand side (residual) vector contributions from different edges. * In the v1 reduction plan this was done by the vertex class, each vertex had to keep a list of referencing * edges and each edge contained a vector for the r.h.s. contribution. This does not improve efficiency but * seems like a slightly cleaner solution. * * @tparam CDimsList is list of block sizes in lambda matrix */ template <class CDimsList> // todo - need clear! class CVectorReductionPlan { // t_odo - could we possibly move this to nl solver lambda.h? 
// seems like we did public: typedef typename CTransformTypelist<CDimsList, fbs_ut::CEigenToDimension>::_TyResult _TyBlockSizeList; /**< @brief list of block sizes in lambda */ typedef typename CUniqueTypelist<typename CFilterTypelist1<_TyBlockSizeList, fbs_ut::CIsSquare>::_TyResult>::_TyResult _TyVertDimsList; /**< @brief list of square block sizes (corresponding to vertices) */ typedef typename CTransformTypelist<_TyVertDimsList, fbs_ut::CTransformDimensionColumnsToSize>::_TyResult _TyDimensionList; /**< @brief list of vertex dimensions (as fbs_ut::CCTSize, not 2D) */ /** * @brief reduction plan parameters, stored as enum */ enum { n_reductor_num = CTypelistLength<_TyDimensionList>::n_result, /**< @brief number of different reduction sizes */ n_pool_page_size = 4096, /**< @brief reduction pool page size */ n_pool_memory_align = 0 /**< @brief reduction pool memory alignment */ // see no benefit in alignment right now, this stores the TReduction elements, those do not need to be aligned }; protected: /** * @brief reduction description * @note This does not store reduction dimension; that is given by the index of the pool that this is found in. */ struct TReduction { size_t n_offset; /**< @brief offset in the right hand side vector */ std::vector<const double*> src_list; /**< @brief list of sources */ }; typedef forward_allocated_pool<TReduction, n_pool_page_size, n_pool_memory_align> _TyPool; /**< @brief pool for storing reduction info */ // provides random access and const pointers typedef std::map<size_t, TReduction*> _TyReductionMap; /**< @brief map of reductions, ordered by dest block address */ // note that the dest block address is duplicated here, maybe could use std::set with an appropriate comparison _TyPool m_p_reduction_pool[n_reductor_num]; /**< @brief list of reduction pools, one per each vertex dimension */ CUberBlockMatrix m_p_matrix[n_reductor_num]; /**< @brief list of data stores for the per-edge contributions, one per each vertex dimension */ // storage only (could we do the same with just a pool and an allocator from CUberBlockMatrix, that would be better, p_Get_DenseStorage() could be protected again) _TyReductionMap m_p_reduction[n_reductor_num]; /**< @brief list of reductions, one per each vertex dimension */ // need to have them sorted by size for loop unrolling /** * @brief reduction function object * @note This is used to calculate reduction of all the vertices' r.h.s. vectors at once. */ class CReduce { protected: const CVectorReductionPlan<CDimsList> *m_p_this; /**< @brief pointer to the parent reduction plan */ Eigen::VectorXd &m_r_dest; /**< @brief destination r.h.s. vector */ public: /** * @brief default constructor * * @param[in] p_this is pointer to the parent reduction plan * @param[out] r_dest is destination r.h.s. 
vector (filled once the function operator is invoked) */ inline CReduce(const CVectorReductionPlan<CDimsList> *p_this, Eigen::VectorXd &r_dest) :m_p_this(p_this), m_r_dest(r_dest) {} /** * @brief function operator; performs all the reductions of one vertex dimension * @tparam C1DSize is vertex size (a specialization of fbs_ut::CCTSize) */ template <class C1DSize> inline void operator ()() { typedef CFindTypelistItem<_TyDimensionList, C1DSize> CSearch; // look for the size typedef typename CReductionPlanAssert<CSearch::b_result>::BLOCK_SIZE_NOT_PRESENT_IN_THE_LIST _TyAssert0; // make sure it is there // the search happens at compile-time, each call to t_GetTempBlock is already linked to use a specific index typedef typename Eigen::VectorBlock<Eigen::VectorXd, C1DSize::n_size> CDestMap; typedef typename CUberBlockMatrix::CMakeMatrixRef<C1DSize::n_size, 1>::_TyConst CSrcMap; const _TyPool &reduction_pool = m_p_this->m_p_reduction_pool[CSearch::n_index]; size_t _n = reduction_pool.size(); _ASSERTE(_n <= INT_MAX); int n = int(_n); #pragma omp parallel for if(n > 50) // todo - dynamic schedule and below as well for(int i = 0; i < n; ++ i) { const TReduction &r_red = reduction_pool[i]; CDestMap dest_map = m_r_dest.segment<C1DSize::n_size>(r_red.n_offset); CSrcMap src0_map(r_red.src_list.front()); dest_map = src0_map; // can this be used? can the first block still use the original block inside the matrix, without having to use a temporary? probably not. todo for(size_t j = 1, m = r_red.src_list.size(); j < m; ++ j) dest_map += CSrcMap(r_red.src_list[j]); // reduce } } }; /** * @brief reduction function object * @note This is used to calculate reduction of a subset of vertices. */ class CReduceRange { protected: const CVectorReductionPlan<CDimsList> *m_p_this; /**< @brief pointer to the parent reduction plan */ Eigen::VectorXd &m_r_dest; /**< @brief destination r.h.s. vector */ size_t m_n_begin; /**< @brief zero-based index of the first element of the r.h.s. vector to calculate (not vertex id) */ size_t m_n_end; /**< @brief zero-based index of one past the last element of the r.h.s. vector to calculate (not vertex id) */ public: /** * @brief default constructor * * @param[in] p_this is pointer to the parent reduction plan * @param[out] r_dest is destination r.h.s. vector (filled once the function operator is invoked) * @param[in] n_begin is zero-based index of the first element of the r.h.s. vector to calculate (not vertex id) * @param[in] n_end is zero-based index of one past the last element of the r.h.s. 
vector to calculate (not vertex id) */ inline CReduceRange(const CVectorReductionPlan<CDimsList> *p_this, Eigen::VectorXd &r_dest, size_t n_begin, size_t n_end) :m_p_this(p_this), m_r_dest(r_dest), m_n_begin(n_begin), m_n_end(n_end) {} /** * @brief function operator; performs the selected reductions of one vertex dimension * @tparam C1DSize is vertex size (a specialization of fbs_ut::CCTSize) */ template <class C1DSize> inline void operator ()() { typedef CFindTypelistItem<_TyDimensionList, C1DSize> CSearch; // look for the size typedef typename CReductionPlanAssert<CSearch::b_result>::BLOCK_SIZE_NOT_PRESENT_IN_THE_LIST _TyAssert0; // make sure it is there // the search happens at compile-time, each call to t_GetTempBlock is already linked to use a specific index typedef typename Eigen::VectorBlock<Eigen::VectorXd, C1DSize::n_size> CDestMap; typedef typename CUberBlockMatrix::CMakeMatrixRef<C1DSize::n_size, 1>::_TyConst CSrcMap; const _TyReductionMap &reduction_map = m_p_this->m_p_reduction[CSearch::n_index]; typename _TyReductionMap::const_iterator p_begin_it = reduction_map.lower_bound(m_n_begin); typename _TyReductionMap::const_iterator p_end_it = reduction_map.upper_bound(m_n_end); // find range of reductions of the selected size to perform for(; p_begin_it != p_end_it; ++ p_begin_it) { const TReduction &r_red = *(*p_begin_it).second; CDestMap dest_map = m_r_dest.segment<C1DSize::n_size>(r_red.n_offset); CSrcMap src0_map(r_red.src_list.front()); dest_map = src0_map; // can this be used? can the first block still use the original block inside the matrix, without having to use a temporary? probably not. // not sure what was wrong with it, this works. for(size_t j = 1, m = r_red.src_list.size(); j < m; ++ j) dest_map += CSrcMap(r_red.src_list[j]); // reduce } // can't do this easily in parallel } }; public: /** * @brief destructor; performs consistency checks in debug */ ~CVectorReductionPlan() { #ifdef _DEBUG int p_dims_list[n_reductor_num]; fbs_ut::Copy_CTSizes_to_Array<_TyDimensionList>(p_dims_list); // convert the typelist to an array so that we can index it at runtime std::vector<std::pair<size_t, int> > allocated_segments; for(int i = 0; i < n_reductor_num; ++ i) { const int n_dim = p_dims_list[i]; //_ASSERTE(!i || n_dim > p_dims_list[i - 1]); // it is not sorted, only unique, would have to check differently const _TyReductionMap &reduction_map = m_p_reduction[i]; for(typename _TyReductionMap::const_iterator p_begin_it = reduction_map.begin(), p_end_it = reduction_map.end(); p_begin_it != p_end_it; ++ p_begin_it) { const TReduction &r_red = *(*p_begin_it).second; std::pair<size_t, int> segment(r_red.n_offset, n_dim); allocated_segments.push_back(segment); } } // collect allocated segments as (offset, dimension) pairs std::sort(allocated_segments.begin(), allocated_segments.end()); // sort the segments (they are sorted for each dimension, merging them explicitly // would be more efficient but also error prone; this is debug, correctness is paramount) _ASSERTE(std::unique(allocated_segments.begin(), allocated_segments.end()) == allocated_segments.end()); // make sure there are no duplicates _ASSERTE(allocated_segments.empty() || !allocated_segments.front().first); // make sure that the first segment starts at zero for(size_t i = 1, n = allocated_segments.size(); i < n; ++ i) _ASSERTE(allocated_segments[i].first == allocated_segments[i - 1].first + allocated_segments[i - 1].second); // if this triggers, most likely there are vertices in the system which are not observed; please, if you 
have unobserved vertices, do not put them in the system just yet; add them in the system once there are corresponding observations // todo - make an earlier assert which makes sure that a more verbose message is shown to the user // make sure that the next segment starts where the previous one ends #endif // _DEBUG } /** * @brief gets the maximum reduced dimension * @return Returns the maximum reduced dimension, in elements. * @note This is also the expected size of the r.h.s. vector. */ size_t n_Max_Dimension() const { int p_dims_list[n_reductor_num]; fbs_ut::Copy_CTSizes_to_Array<_TyDimensionList>(p_dims_list); // convert the typelist to an array so that we can index it at runtime size_t n_max = 0; for(int i = 0; i < n_reductor_num; ++ i) { if(!m_p_reduction[i].empty()) { typename _TyReductionMap::const_iterator p_back_it = -- m_p_reduction[i].end(); // no .back() on this thing const TReduction &r_red = *(*p_back_it).second; size_t n_last = r_red.n_offset + p_dims_list[i]; if(n_max < n_last) n_max = n_last; } } // find the one past the last element that will be written return n_max; } /** * @brief calculates the size of this object in memory * @return Returns the size of this object (and of all associated * arrays or buffers) in memory, in bytes. */ size_t n_Allocation_Size() const { size_t n_size = sizeof(CVectorReductionPlan<CDimsList>); size_t n_data_size = 0, n_maps_size = 0, n_pools_size = 0, n_vectors_size = 0, n_vectors_slack = 0; for(int i = 0; i < n_reductor_num; ++ i) { n_data_size += m_p_matrix[i].n_Allocation_Size() - sizeof(CUberBlockMatrix); n_maps_size += n_Map_Allocation_Size(m_p_reduction[i]) - sizeof(m_p_reduction[i]); n_pools_size += m_p_reduction_pool[i].capacity() * sizeof(TReduction) + m_p_reduction_pool[i].page_num() * sizeof(TReduction*); for(size_t j = 0, m = m_p_reduction_pool[i].size(); j < m; ++ j) { const std::vector<const double*> src_list = m_p_reduction_pool[i][j].src_list; n_vectors_size += src_list.capacity() * sizeof(const double*); n_vectors_slack += (src_list.capacity() - src_list.size()) * sizeof(const double*); } } return n_size + n_data_size + n_maps_size + n_pools_size + n_vectors_size; } /** * @brief gets a temporary vector assigned for reduction * * @tparam n_dimension is size of the requested vector (must match one of vertex dimensions) * * @param[in] n_vector_offset is offset in the r.h.s. vector, in elements * * @return Returns pointer to the assigned memory (guaranteed not to change, * deleted at the end of the lifetime of this object). * * @note Note that the reduction conflicts are unchecked here (could have multiple block * sizes reducing in the same destination or could have overlapping blocks). * @note This function throws std::bad_alloc. 
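	 *
	 * A minimal usage sketch (illustration only; the 3D vertex size, the offsets and the
	 * typelist name are assumptions of this example, not taken from the surrounding code):
	 *
	 * @code
	 *	CVectorReductionPlan<MyDimsList> rhs_plan; // MyDimsList = hypothetical lambda block size list
	 *	double *p_contrib_a = rhs_plan.p_Get_ReductionBlock<3>(0); // two edges share the same
	 *	double *p_contrib_b = rhs_plan.p_Get_ReductionBlock<3>(0); // 3D vertex at offset 0
	 *	// ... each edge writes its 3-element r.h.s. contribution into its own block ...
	 *	Eigen::VectorXd v_b(rhs_plan.n_Max_Dimension());
	 *	rhs_plan.ReduceAll(v_b); // v_b.segment<3>(0) becomes the sum of the two contributions
	 * @endcode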
*/ template <const int n_dimension> double *p_Get_ReductionBlock(size_t n_vector_offset) // throw(std::bad_alloc) { typedef fbs_ut::CCTSize<n_dimension> C1DBlockSize; typedef CFindTypelistItem<_TyDimensionList, C1DBlockSize> CSearch; // look for the size typedef typename CReductionPlanAssert<CSearch::b_result>::BLOCK_SIZE_NOT_PRESENT_IN_THE_LIST _TyAssert0; // make sure it is there // the search happens at compile-time, each call to p_GetTempBlock is already linked to use a specific index _TyReductionMap &r_reduction_list = m_p_reduction[CSearch::n_index]; _TyPool &r_pool = m_p_reduction_pool[CSearch::n_index]; typename _TyReductionMap::iterator p_red_it = r_reduction_list.find(n_vector_offset); if(p_red_it == r_reduction_list.end()) { r_pool.resize(r_pool.size() + 1); TReduction *p_red = &(*(r_pool.end() - 1)); p_red->n_offset = n_vector_offset; // remember _ASSERTE(p_red->src_list.empty()); // should be initialized and empty p_red_it = r_reduction_list.insert(std::pair<const size_t, TReduction*>(n_vector_offset, p_red)).first; } TReduction &r_red = *(*p_red_it).second; //r_red.src_list.reserve(r_red.src_list.size() + 1); // this is a major bottleneck!! CUberBlockMatrix &r_matrix = m_p_matrix[CSearch::n_index]; double *p_block = r_matrix.p_Get_DenseStorage(n_dimension); r_red.src_list.push_back(p_block); // might throw, then the block is orphaned, but we quit anyway return p_block; // store result for reduction here } /** * @brief reduce all the blocks (runs in parallel, where available) * @param[out] r_dest is the destination vector (must be allocated by the caller) * @note This does not check the integrity of the reduction; if initialized * incorrectly, some parts of the vector can be left uninitialized. */ void ReduceAll(Eigen::VectorXd &r_dest) const { _ASSERTE(r_dest.rows() == n_Max_Dimension()); // check the size of the vector CTypelistForEach<_TyDimensionList, CReduce>::Run(CReduce(this, r_dest)); } /** * @brief reduce all the blocks (runs in parallel, where available) * * @param[out] r_dest is the destination vector (must be allocated by the caller) * @param[in] n_begin is zero-based index of the first element of the r.h.s. vector to calculate (not vertex id) * @param[in] n_end is zero-based index of one past the last element of the r.h.s. vector to calculate (not vertex id) * * @note This does not check if the begin / end boundaries match vertex boundaries. Upper bound function * is used to find the nearest conservative boundaries (slightly more elements are updated if the * begin / end is misaligned). * @note This does not check the integrity of the reduction; if initialized * incorrectly, some parts of the vector can be left uninitialized. */ void ReduceRange(Eigen::VectorXd &r_dest, size_t n_begin, size_t n_end) const // ReduceSingle() would be easier to implement and could run in parallel { _ASSERTE(r_dest.rows() == n_Max_Dimension()); // check the size of the vector CTypelistForEach<_TyDimensionList, CReduceRange>::Run(CReduceRange(this, r_dest, n_begin, n_end)); } /** * @brief reduce a single block * * @tparam n_dimension is size of the requested vector (must match one of vertex dimensions) * * @param[out] r_dest is the destination vector (must be allocated by the caller) * @param[in] n_order is order of the vertex to reduce (offset in the r.h.s. vector, in elements) * * @note This assumes that a vertex with the given order is in the reduction plan and that the * dimension is correct (only checked in debug). 
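	 * @note A hedged example (the vertex size and offset are made up for illustration): after
	 *	an incremental update that only touched the 3D vertex stored at offset 6 of the r.h.s.
	 *	vector, calling Reduce_Single<3>(v_b, 6) refreshes just v_b.segment<3>(6) instead of
	 *	re-reducing the whole vector with ReduceAll().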
*/ template <const int n_dimension> void Reduce_Single(Eigen::VectorXd &r_dest, size_t n_order) const { _ASSERTE(r_dest.rows() == n_Max_Dimension()); // check the size of the vector typedef fbs_ut::CCTSize<n_dimension> C1DBlockSize; typedef CFindTypelistItem<_TyDimensionList, C1DBlockSize> CSearch; // look for the size typedef typename CReductionPlanAssert<CSearch::b_result>::BLOCK_SIZE_NOT_PRESENT_IN_THE_LIST _TyAssert0; // make sure it is there // the search happens at compile-time, each call to p_GetTempBlock is already linked to use a specific index const _TyReductionMap &r_reduction_list = m_p_reduction[CSearch::n_index]; typename _TyReductionMap::const_iterator p_it = r_reduction_list.find(n_order); _ASSERTE(p_it != r_reduction_list.end()); // it should not typedef typename Eigen::VectorBlock<Eigen::VectorXd, n_dimension> CDestMap; typedef typename CUberBlockMatrix::CMakeMatrixRef<n_dimension, 1>::_TyConst CSrcMap; const TReduction &r_red = *(*p_it).second; CDestMap dest_map = r_dest.segment<n_dimension>(r_red.n_offset); CSrcMap src0_map(r_red.src_list.front()); dest_map = src0_map; // can this be used? can the first block still use the original block inside the matrix, without having to use a temporary? probably not. todo for(size_t j = 1, m = r_red.src_list.size(); j < m; ++ j) dest_map += CSrcMap(r_red.src_list[j]); // reduce } }; /** * @brief matrix reduction key type selection and traits * * For reduced block identification, one can use either pointer to the original * block in lambda or its coordinates. Using coordinates has minor advantages for * the edge system, as most of the edges do not have the pointer to the original * block, instead they store a pointer to a reduced block and getting the original * then requiers <tt>O(log n)</tt> lookup. * * @tparam b_use_block_coord_as_reduction_key is key type selector */ template <bool b_use_block_coord_as_reduction_key> class CMatrixReductionKey_Traits { public: typedef const double *TKey; /**< @brief key type */ /** * @brief utility function; distills key from block description * * @param[in] n_row is zero-based block row (unused) * @param[in] n_col is zero-based block column (unused) * @param[in] p_address is block address * * @return Returns key of the given block. */ static TKey t_MakeKey(size_t UNUSED(n_row), size_t UNUSED(n_col), const double *p_address) { return p_address; } }; /** * @brief matrix reduction key type selection and traits (specialization for coordinate-based keys) */ template <> class CMatrixReductionKey_Traits<true> { public: typedef std::pair<size_t, size_t> TKey; /**< @brief key type */ /** * @brief utility function; distills key from block description * * @param[in] n_row is zero-based block row * @param[in] n_col is zero-based block column * @param[in] p_address is block address (unused) * * @return Returns key of the given block. */ static TKey t_MakeKey(size_t n_row, size_t n_col, const double *UNUSED(p_address)) { return std::make_pair(n_row, n_col); } }; /** * @brief parallel reduction plan for efficiently calculating and updating the hessian matrix * * @tparam CDimsList is list of hessian matrix block dimensions, as fbs_ut::CCTSize2D * * @note This uses block dimensions to differentiate between blocks, assumes that there will * be no conflicts between blocks of different dimensions (does not check for that). 
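 *
 * A rough usage sketch (illustration only; the block size, the coordinates, the typelist
 * name and the two destination pointers are assumptions of this example):
 *
 * @code
 *	typedef fbs_ut::CCTSize2D<3, 3> C33; // a hypothetical 3x3 hessian block
 *	CMatrixReductionPlan<MyDimsList> plan; // MyDimsList = hypothetical block size list
 *	// p_diag_dest / p_offdiag_dest point at blocks inside the lambda matrix (obtained elsewhere)
 *	double *p_tmp = plan.p_Diagonal_GetTempBlock<C33>(0, 0, p_diag_dest);
 *	// conflicts are anticipated on the diagonal, so every edge gets its own temp block
 *	double *p_own; // the first edge on an off-diagonal block keeps writing directly to lambda
 *	p_own = plan.p_OffDiagonal_GetTempBlock<C33>(p_offdiag_dest, &p_own);
 *	// a second edge hitting the same block redirects p_own to a temp block and gets another one
 *	double *p_tmp2 = plan.p_OffDiagonal_GetTempBlock<C33>(0, 3, p_offdiag_dest);
 *	// ... the edges fill their blocks with hessian contributions ...
 *	plan.ReduceAll(); // sums the temporaries into the destination blocks in lambda
 * @endcode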
* @todo Redesign the pointers to be objects that wrap the pointer and remove the illusion of * being constant (the shared pointers may change upon block conflict without the knowledge * of the object owning it). */ template <class CDimsList> class CMatrixReductionPlan { // todo - need clear! public: typedef typename CTransformTypelist<CDimsList, fbs_ut::CEigenToDimension>::_TyResult _TyDimensionList; /**< @brief list of hessian matrix block dimensions, as fbs_ut::CCTSize2D */ /** * @brief parameters, stored as enum */ enum { n_reductor_num = CTypelistLength<_TyDimensionList>::n_result, /**< @brief number of different reduction sizes */ n_pool_page_size = 4096, /**< @brief reduction pool page size */ n_pool_memory_align = 0, /**< @brief reduction pool memory alignment */ // see no benefit in alignment right now, this stores the TReduction elements, those do not need to be aligned b_use_block_coord_keys = 1 /**< @brief if set, use <tt>(row, col)</tt> coordinates instead of pointers to identify the blocks */ }; typedef typename CMatrixReductionKey_Traits<b_use_block_coord_keys != 0>::TKey TKey; /**< @brief block key type */ /** * @brief reduction description * @note This does not store block dimensions; that is given by the index of the pool that this is found in. */ struct TReduction { double *p_dest; /**< @brief destination block in the lambda matrix */ std::vector<const double*> src_list; /**< @brief list of reduced blocks */ }; typedef forward_allocated_pool<TReduction, n_pool_page_size, n_pool_memory_align> _TyPool; /**< @brief pool for storing reduction info */ // provides random access and const pointers typedef std::map<TKey, TReduction*> _TyReductionMap; /**< @brief map of reductions, ordered by dest block address / coords */ // sorted by dest block address / coords typedef std::map<const double*, double**> _TyOwnerLookup; /**< @brief reverse block lookup */ // sorted by dest block address protected: _TyOwnerLookup m_p_owner_lookup[n_reductor_num]; /**< @brief list of reverse block lookups, one per each vertex dimension @note This only contains records for the un-conflicted owners; it eliminates copying the Jacobian contribution from per-edge memory to the matrix (size 1 reduction). @note Only the off-diagonal block owners are stored here. The diagonal ones where conflicts are anticipated do not have size 1 reduction elimination. @note In SLAM, there can be some conflicts in the off-diagonal blocks (multiple edges between the same vertices, e.g. from different sensors, re-observation, etc.). In BA, there typically aren't. 
*/ _TyPool m_p_reduction_pool[n_reductor_num]; /**< @brief list of reduction pools, one per each vertex dimension */ // actually need that for parallel processing _TyReductionMap m_p_reduction_list[n_reductor_num]; /**< @brief list of reductions, one per each vertex dimension */ CUberBlockMatrix m_p_matrix[n_reductor_num]; /**< @brief list of data stores for the per-edge contributions, one per each vertex dimension */ // storage only (could we do the same with just a pool and an allocator from CUberBlockMatrix, that would be better, p_Get_DenseStorage() could be protected again) /** * @brief reduction function object */ class CReduce { protected: const CMatrixReductionPlan<CDimsList> *m_p_this; /**< @brief pointer to the parent reduction plan */ public: /** * @brief default constructor * @param[in] p_this is pointer to the parent reduction plan */ inline CReduce(const CMatrixReductionPlan<CDimsList> *p_this) :m_p_this(p_this) {} /** * @brief function operator; performs all the reductions of a single Jacobian dimension * @tparam C2DSize is Jacobian size (a specialization of fbs_ut::CCTSize2D) */ template <class C2DSize> inline void operator ()() { typedef CFindTypelistItem<_TyDimensionList, C2DSize> CSearch; // look for the size typedef typename CReductionPlanAssert<CSearch::b_result>::BLOCK_SIZE_NOT_PRESENT_IN_THE_LIST _TyAssert0; // make sure it is there // the search happens at compile-time, each call to t_GetTempBlock is already linked to use a specific index typedef typename CUberBlockMatrix::CMakeMatrixRef<C2DSize::n_row_num, C2DSize::n_column_num>::_Ty CDestMap; typedef typename CUberBlockMatrix::CMakeMatrixRef<C2DSize::n_row_num, C2DSize::n_column_num>::_TyConst CSrcMap; const _TyPool &reduction_pool = m_p_this->m_p_reduction_pool[CSearch::n_index]; size_t _n = reduction_pool.size(); _ASSERTE(_n <= INT_MAX); int n = int(_n); #pragma omp parallel for if(n > 50) // todo - export the parallel thresh as an arg, maybe consider dynamic schedule for(int i = 0; i < n; ++ i) { const TReduction &r_red = reduction_pool[i]; CDestMap dest_map((double*)r_red.p_dest); CSrcMap src0_map(r_red.src_list.front()); dest_map = src0_map; // can this be used? can the first block still use the original block inside the matrix, without having to use a temporary? probably not. todo for(size_t j = 1, m = r_red.src_list.size(); j < m; ++ j) dest_map += CSrcMap(r_red.src_list[j]); // reduce } } }; public: /** * @brief calculates the size of this object in memory * @return Returns the size of this object (and of all associated * arrays or buffers) in memory, in bytes. 
*/ size_t n_Allocation_Size() const { size_t n_size = sizeof(CMatrixReductionPlan<CDimsList>); size_t n_data_size = 0, n_maps_size = 0, n_pools_size = 0, n_vectors_size = 0, n_vectors_slack = 0; for(int i = 0; i < n_reductor_num; ++ i) { n_data_size += m_p_matrix[i].n_Allocation_Size() - sizeof(CUberBlockMatrix); n_maps_size += n_Map_Allocation_Size(m_p_owner_lookup[i]) - sizeof(m_p_owner_lookup[i]); n_maps_size += n_Map_Allocation_Size(m_p_reduction_list[i]) - sizeof(m_p_reduction_list[i]); n_pools_size += m_p_reduction_pool[i].capacity() * sizeof(TReduction) + m_p_reduction_pool[i].page_num() * sizeof(TReduction*); for(size_t j = 0, m = m_p_reduction_pool[i].size(); j < m; ++ j) { const std::vector<const double*> src_list = m_p_reduction_pool[i][j].src_list; n_vectors_size += src_list.capacity() * sizeof(const double*);; n_vectors_slack += (src_list.capacity() - src_list.size()) * sizeof(const double*); } } return n_size + n_data_size + n_maps_size + n_pools_size + n_vectors_size; } /** * @brief gets a single reduction block in case we know it is most likely going to be conflicted (like the blocks at the diagonal) * * @tparam C2DSize is block size as fbs_ut::CCTSize2D * * @param[in] n_row is zero-based block row * @param[in] n_col is zero-based block column * @param[in] p_reduction_dest is pointer to the first element of the block inside the matrix * * @return Returns pointer to a temporary block from which the values will be reduced to the specified destination. * * @note This function throws std::bad_alloc. */ template <class C2DSize> inline double *p_Diagonal_GetTempBlock(size_t n_row, size_t n_col, double *p_reduction_dest) // throw(std::bad_alloc) { //_ASSERTE(n_row == n_col); // it does not have to be diagonal, it is just a type of block where collision is anticipated in most of the blocks TKey t_key = t_MakeKey(n_row, n_col, p_reduction_dest); typedef CFindTypelistItem<_TyDimensionList, C2DSize> CSearch; // look for the size typedef typename CReductionPlanAssert<CSearch::b_result>::BLOCK_SIZE_NOT_PRESENT_IN_THE_LIST _TyAssert0; // make sure it is there // the search happens at compile-time, each call to p_GetTempBlock is already linked to use a specific index _TyReductionMap &r_reduction_list = m_p_reduction_list[CSearch::n_index]; CUberBlockMatrix &r_storage = m_p_matrix[CSearch::n_index]; _TyPool &r_pool = m_p_reduction_pool[CSearch::n_index]; return p_GetSingle(r_reduction_list, r_storage, r_pool, C2DSize::n_row_num * C2DSize::n_column_num, t_key, p_reduction_dest); // just push those three on the stack and go } /** * @brief gets a single reduction block in case we know it is most likely going to be unique (like the off-diagonal blocks) * * @tparam C2DSize is block size as fbs_ut::CCTSize2D * * @param[in] p_reduction_dest is pointer to the first element of the block inside the matrix * @param[in] p_owner_storage is pointer to the pointer to address of the block inside the * owner object (is liable to be changed upon conflict) * * @return Returns pointer to the original block (no conflict occurred yet). * * @note This should be used in case the block did not exist in the matrix (<tt>b_uninitialized</tt> is set). * @note This assumes that there is only a single pointer to the block stored, which can be replaced * by the pointer to a pointer, passed as the second argument. In case this does not apply, this * reductor cannot be used. * @note This function throws std::bad_alloc. 
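	 * @note No temporary block is allocated here: the edge keeps writing straight into the
	 *	matrix block and only the address of its block pointer is recorded (see Set_BlockOwner()).
	 *	If another edge later claims the same block through the other overload, that recorded
	 *	pointer is redirected to a freshly allocated temporary and the block becomes an
	 *	ordinary two-source reduction.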
*/ template <class C2DSize> inline double *p_OffDiagonal_GetTempBlock(double *p_reduction_dest, double **p_owner_storage) // throw(std::bad_alloc) { #ifdef _DEBUG typedef CFindTypelistItem<_TyDimensionList, C2DSize> CSearch; // look for the size typedef typename CReductionPlanAssert<CSearch::b_result>::BLOCK_SIZE_NOT_PRESENT_IN_THE_LIST _TyAssert0; // make sure it is there // the search happens at compile-time, each call to p_GetTempBlock is already linked to use a specific index _TyReductionMap &r_reduction_list = m_p_reduction_list[CSearch::n_index]; _ASSERTE(b_use_block_coord_keys || r_reduction_list.find(t_MakeKey(0, 0, p_reduction_dest)) == r_reduction_list.end()); // if b_use_block_coord_keys is set, we can't verify anything // make sure the block is not there #endif // _DEBUG Set_BlockOwner<C2DSize>(p_reduction_dest, p_owner_storage); return p_reduction_dest; } /** * @brief gets a single reduction block in case we know it is most likely going to be unique (like the off-diagonal blocks) * * @tparam C2DSize is block size as fbs_ut::CCTSize2D * * @param[in] n_row is zero-based block row * @param[in] n_col is zero-based block column * @param[in] p_reduction_dest is pointer to the first element of the block inside the matrix * * @return Returns pointer to a temporary block from which the values will be reduced to the specified destination. * * @note This should be used in case the block did already exist in the matrix (<tt>b_uninitialized</tt> is not set). * @note This function throws std::bad_alloc. */ template <class C2DSize> inline double *p_OffDiagonal_GetTempBlock(size_t n_row, size_t n_col, double *p_reduction_dest) { TKey t_key = t_MakeKey(n_row, n_col, p_reduction_dest); std::pair<double*, double*> t_storage = t_GetTempBlock<C2DSize>(t_key, p_reduction_dest); if(t_storage.second) { double **p_owner_variable = p_Get_BlockOwner<C2DSize>(p_reduction_dest, true); _ASSERTE(p_owner_variable != 0); // we are replacing the owner, there should be one registered *p_owner_variable = t_storage.first; // the original owner should use the first temp block memcpy(t_storage.first, p_reduction_dest, C2DSize::n_row_num * C2DSize::n_column_num * sizeof(double)); // this block might already have significant value, need to store it in the temp block return t_storage.second; // the second reducer should use the second temp block } else { _ASSERTE(!p_Get_BlockOwner<C2DSize>(p_reduction_dest, false)); // there is no owner anymore, we already replaced it return t_storage.first; // nothing special, just another block } } /** * @brief reduce all the blocks (runs in parallel, where available) */ void ReduceAll() const { /*eigen::MatrixXd::identity(); for(size_t i = 0; i < n_reductor_num; ++ i) { // todo - do typelist_foreach first, to have fixed block size ;) (will probably lose for loop scheduling though) _TyReductionMap &r_reduction_list = m_p_reduction_list[i]; _TyReductionMap::iterator p_it = r_reduction_list.begin(); _TyReductionMap::iterator p_end_it = r_reduction_list.end(); for(; p_it != p_end_it; ++ p_it) { // duh; how to parallelize that? can't. } }*/ // can do everything in parallel, need to see which strategy is the fastest CTypelistForEach<_TyDimensionList, CReduce>::Run(CReduce(this)); } /** * @brief reduce a single block * * Reduce a single block address, this is callable by an edge, probably does * good enough job in incremental updates where a single edge / a couple edges * is added. It will recalculate a couple of vertex' sums, but it should still * be less than all of them, as it is now. 
* * @tparam C2DSize is block size as fbs_ut::CCTSize2D * @param[in] t_key is block key (address / row col coordinate) * @note Note that this can not run in parallel over the edges. to do it in parallel, * one would need to collect the *unique* block addresses and then run in parallel. */ template <class C2DSize> void ReduceSingle(TKey t_key) const { typedef CFindTypelistItem<_TyDimensionList, C2DSize> CSearch; // look for the size typedef typename CReductionPlanAssert<CSearch::b_result>::BLOCK_SIZE_NOT_PRESENT_IN_THE_LIST _TyAssert0; // make sure it is there // the search happens at compile-time, each call to p_GetTempBlock is already linked to use a specific index const _TyReductionMap &r_reduction_list = m_p_reduction_list[CSearch::n_index]; // get the list typename _TyReductionMap::const_iterator p_red_it = r_reduction_list.find(t_key); if(p_red_it == r_reduction_list.end()) return; // the block is not there (may happen with off-diagonal blocks, which do not have a conflict yet and do not need to be reduced) const TReduction &r_red = *(*p_red_it).second; _ASSERTE(!r_red.src_list.empty()); // should never be (but there can be 1, in case of a lonely vertex where collision was expected but did not occur (yet)) // get the particular reduction typename CUberBlockMatrix::CMakeMatrixRef<C2DSize::n_row_num, C2DSize::n_column_num>::_Ty dest_map((double*)r_red.p_dest); typename CUberBlockMatrix::CMakeMatrixRef<C2DSize::n_row_num, C2DSize::n_column_num>::_TyConst src0_map(r_red.src_list.front()); dest_map = src0_map; for(size_t i = 1, n = r_red.src_list.size(); i < n; ++ i) { typename CUberBlockMatrix::CMakeMatrixRef<C2DSize::n_row_num, C2DSize::n_column_num>::_TyConst src_map(r_red.src_list[i]); dest_map += src_map; } // reduce } /** * @brief utility function for making key * * @param[in] n_row is zero-based block row * @param[in] n_col is zero-based block column * @param[in] p_address is block address * * @return Returns key of the given block. * * @note Depending on which type of key is used, some of the arguments are ignored. */ static inline TKey t_MakeKey(size_t n_row, size_t n_col, const double *p_address) { return CMatrixReductionKey_Traits<b_use_block_coord_keys != 0>::t_MakeKey(n_row, n_col, p_address); } protected: /** * @brief gets one or two blocks, based on whether there would be a conflict * * Get a temp block, and in case p_reduction_dest was not present in the list yet, * alloc also the second block for the original owner of p_reduction_dest (which * wrote directly to the matrix, bypassing the reduction). * * @tparam C2DSize is block size as fbs_ut::CCTSize2D * * @param[in] t_key is block key (address / row col coordinate) * @param[in] p_reduction_dest is pointer to the first element of the block inside the matrix * * @return Returns a pair of reduced block storages, first is always a valid pointer and * second may be set to null if the block was already shared before. * * @note This function throws std::bad_alloc. 
*/ template <class C2DSize> inline std::pair<double*, double*> t_GetTempBlock(TKey t_key, double *p_reduction_dest) // throw(std::bad_alloc) { typedef CFindTypelistItem<_TyDimensionList, C2DSize> CSearch; // look for the size typedef typename CReductionPlanAssert<CSearch::b_result>::BLOCK_SIZE_NOT_PRESENT_IN_THE_LIST _TyAssert0; // make sure it is there // the search happens at compile-time, each call to t_GetTempBlock is already linked to use a specific index _TyReductionMap &r_reduction_list = m_p_reduction_list[CSearch::n_index]; CUberBlockMatrix &r_storage = m_p_matrix[CSearch::n_index]; _TyPool &r_pool = m_p_reduction_pool[CSearch::n_index]; return t_GetPair(r_reduction_list, r_storage, r_pool, C2DSize::n_row_num * C2DSize::n_column_num, t_key, p_reduction_dest); // just push those three on the stack and go //_ASSERTE(t_pair.second && (m_p_owner_lookup[CSearch::n_index].find(p_reduction_dest) != // m_p_owner_lookup[CSearch::n_index].end())); // if there is second, there was a reduction conflict and the block should be owned (but not vice versa) //return t_pair; } /** * @brief sets pointer to the block owner pointer storage * * This is used to bypass size 1 reductions in the blocks where the conflicts are unlikely. * This adds a record of the original pointer in the matrix, and a pointer to the pointer * to this block in the edge class, so that when a conflict occurs, the pointer in the original * owner edge can be modified. This mandates that pointers to the blocks be stored only in * a single instance (as multiple pointer instances can't be modified like this). * * @tparam C2DSize is block size as fbs_ut::CCTSize2D * * @param[in] p_block_addr is pointer to the first element of the block inside the matrix * @param[in] p_owner is pointer to the pointer to address of the block inside the * owner object (is liable to be changed upon conflict) * * @note This should be used in case the block did not exist in the matrix (<tt>b_uninitialized</tt> is set). * @note This assumes that there is only a single pointer to the block stored, which can be replaced * by the pointer to a pointer, passed as the second argument. In case this does not apply, this * reductor cannot be used. * @note This function throws std::bad_alloc. */ template <class C2DSize> void Set_BlockOwner(const double *p_block_addr, double **p_owner) // throw(std::bad_alloc) { typedef CFindTypelistItem<_TyDimensionList, C2DSize> CSearch; // look for the size typedef typename CReductionPlanAssert<CSearch::b_result>::BLOCK_SIZE_NOT_PRESENT_IN_THE_LIST _TyAssert0; // make sure it is there // the search happens at compile-time, each call to t_GetTempBlock is already linked to use a specific index _TyOwnerLookup &r_lookup_map = m_p_owner_lookup[CSearch::n_index]; _ASSERTE(r_lookup_map.find(p_block_addr) == r_lookup_map.end()); // make sure that there is no owner so far r_lookup_map[p_block_addr] = p_owner; // stores the original block woner } /** * @brief gets a pointer to block owner pointer * * @tparam C2DSize is block size as fbs_ut::CCTSize2D * * @param[in] p_block_addr is pointer to the first element of the block inside the matrix * @param[in] b_clear_owner is clear owner flag (if set, and the order exists, it is ) * * @return Returns pointer to the pointer inside the owner of the specified block, or null if the * block does not have an owner (or has multiple owners and the first owner was cleared). 
*/ template <class C2DSize> double **p_Get_BlockOwner(const double *p_block_addr, bool b_clear_owner = true) { typedef CFindTypelistItem<_TyDimensionList, C2DSize> CSearch; // look for the size typedef typename CReductionPlanAssert<CSearch::b_result>::BLOCK_SIZE_NOT_PRESENT_IN_THE_LIST _TyAssert0; // make sure it is there // the search happens at compile-time, each call to t_GetTempBlock is already linked to use a specific index _TyOwnerLookup &r_lookup_map = m_p_owner_lookup[CSearch::n_index]; _TyOwnerLookup::iterator p_it = r_lookup_map.find(p_block_addr); if(p_it == r_lookup_map.end()) return 0; // no such owner double **p_owner = (*p_it).second; if(b_clear_owner) r_lookup_map.erase(p_it); // remove, now all edges will use the reductor anyway return p_owner; // find the owner } /** * @brief gets a single reduced block * * @param[in,out] r_reduction_list is reduction map * @param[in,out] r_storage is matrix where the reduced blocks are allocated * @param[in,out] r_pool is a pool for reduction description objects * @param[in] n_size is size of the requested block, in elements * @param[in] t_key is block key (address / row col coordinate) * @param[in] p_reduction_dest is pointer to the first element of the block inside the matrix * * @return Returns a pointer to reduced block storage. * * @note This function throws std::bad_alloc. */ double *p_GetSingle(_TyReductionMap &r_reduction_list, CUberBlockMatrix &r_storage, _TyPool &r_pool, int n_size, TKey t_key, double *p_reduction_dest) // throw(std::bad_alloc) { typename _TyReductionMap::iterator p_red_it = r_reduction_list.find(t_key); if(p_red_it == r_reduction_list.end()) { r_pool.resize(r_pool.size() + 1); TReduction *p_red = &(*(r_pool.end() - 1)); p_red->p_dest = p_reduction_dest; // remember _ASSERTE(p_red->src_list.empty()); // should be initialized and empty p_red_it = r_reduction_list.insert(std::pair<const TKey, TReduction*>(t_key, p_red)).first; } std::vector<const double*> &r_list = (*p_red_it).second->src_list; double *p_ptr = r_storage.p_Get_DenseStorage(n_size); r_list.push_back(p_ptr); return p_ptr; } /** * @brief gets one or two blocks, based on whether there would be a conflict * * @param[in,out] r_reduction_list is reduction map * @param[in,out] r_storage is matrix where the reduced blocks are allocated * @param[in,out] r_pool is a pool for reduction description objects * @param[in] n_size is size of the requested block, in elements * @param[in] t_key is block key (address / row col coordinate) * @param[in] p_reduction_dest is pointer to the first element of the block inside the matrix * * @return Returns a pair of reduced block storages, first is always a valid pointer and * second may be set to null if the block was already shared before. * * @note This function throws std::bad_alloc. 
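	 * @note On the first conflict the source list is still empty because the original owner
	 *	was writing directly into the matrix, so two temporaries are allocated at once: one is
	 *	handed back to the original owner (the caller copies the current block value into it)
	 *	and the other to the newly arrived edge. Any further conflict on the same block only
	 *	allocates one more temporary.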
*/ std::pair<double*, double*> t_GetPair(_TyReductionMap &r_reduction_list, CUberBlockMatrix &r_storage, _TyPool &r_pool, int n_size, TKey t_key, double *p_reduction_dest) // throw(std::bad_alloc) { typename _TyReductionMap::iterator p_red_it = r_reduction_list.find(t_key); if(p_red_it == r_reduction_list.end()) { r_pool.resize(r_pool.size() + 1); TReduction *p_red = &(*(r_pool.end() - 1)); p_red->p_dest = p_reduction_dest; // remember _ASSERTE(p_red->src_list.empty()); // should be initialized and empty p_red_it = r_reduction_list.insert(std::pair<const TKey, TReduction*>(t_key, p_red)).first; } std::vector<const double*> &r_list = (*p_red_it).second->src_list; if(!r_list.empty()) { double *p_ptr = r_storage.p_Get_DenseStorage(n_size); r_list.push_back(p_ptr); return std::make_pair(p_ptr, (double*)0); // just another block } else { double *p_ptr0 = r_storage.p_Get_DenseStorage(n_size); double *p_ptr1 = r_storage.p_Get_DenseStorage(n_size); // must call twice, want each of them aligned r_list.push_back(p_ptr0); r_list.push_back(p_ptr1); return std::make_pair(p_ptr0, p_ptr1); // the first time around: alloc two blocks } } }; /** * @brief wrapper reduction plans for lambda and the right-hand-side vector * * @tparam CDimsList is list of hessian matrix block dimensions, as fbs_ut::CCTSize2D */ template <class CDimsList> class CLambdaReductionPlan { public: typedef CVectorReductionPlan<CDimsList> CRHSReductor; /**< @brief right hand side vector reduction plan type */ typedef CMatrixReductionPlan<CDimsList> CLambdaReductor; /**< @brief lambda reduction plan type */ protected: CVectorReductionPlan<CDimsList> m_vec_plan; /**< @brief right hand side vector reduction plan */ CMatrixReductionPlan<CDimsList> m_mat_plan; /**< @brief lambda reduction plan */ public: /** * @brief gets right hand side vector reduction plan * @return Returns a reference to the right hand side vector reduction plan. */ inline CRHSReductor &r_RHS_ReductionPlan() { return m_vec_plan; } /** * @brief gets lambda reduction plan * @return Returns a reference to the lambda reduction plan. */ inline CLambdaReductor &r_Lambda_ReductionPlan() { return m_mat_plan; } /** * @brief gets right hand side vector reduction plan * @return Returns a const reference to the right hand side vector reduction plan. */ inline const CRHSReductor &r_RHS_ReductionPlan() const { return m_vec_plan; } /** * @brief gets lambda reduction plan * @return Returns a const reference to the lambda reduction plan. */ inline const CLambdaReductor &r_Lambda_ReductionPlan() const { return m_mat_plan; } /** * @brief calculates the size of this object in memory * @return Returns the size of this object (and of all associated * arrays or buffers) in memory, in bytes. 
*/ size_t n_Allocation_Size() const { return m_vec_plan.n_Allocation_Size() + m_mat_plan.n_Allocation_Size(); } }; /** * @brief v1 lambda solver utility function * @tparam CDimsList is list of lambda matrix block sizes, as fbs_ut::CCTSize2D */ template <class CDimsList> class CLambdaOps : public nonlinear_detail::CSolverOps_Base { public: typedef CDimsList _TyLambdaMatrixBlockSizes; /**< @brief list of lambda matrix block sizes, as fbs_ut::CCTSize2D */ struct _TyReductionPlan {}; /**< @brief reduction plan type (an empty class) */ // the v1 lambda did not need reduction plan, it was stored in the edges / vertices /** * @brief function object that calls lambda hessian block allocation for all edges */ class CAlloc_HessianBlocks { protected: CUberBlockMatrix &m_r_lambda; /**< @brief reference to the lambda matrix (out) */ public: /** * @brief default constructor * @param[in] r_lambda is reference to the lambda matrix */ inline CAlloc_HessianBlocks(CUberBlockMatrix &r_lambda) :m_r_lambda(r_lambda) {} /** * @brief function operator * @tparam _TyVertexOrEdge is vertex or edge type * @param[in,out] r_vertex_or_edge is vertex or edge to have hessian blocks allocated in lambda * @note This function throws std::bad_alloc. */ template <class _TyVertexOrEdge> inline void operator ()(_TyVertexOrEdge &r_vertex_or_edge) // throw(std::bad_alloc) { r_vertex_or_edge.Alloc_HessianBlocks(m_r_lambda); } }; /** * @brief function object that calculates hessians in all the edges */ class CCalculate_Hessians { public: /** * @brief function operator * @tparam _TyVertexOrEdge is vertex or edge type * @param[in] r_t_vertex_or_edge is vertex or edge to update it's hessians */ template <class _TyVertexOrEdge> inline void operator ()(_TyVertexOrEdge &r_t_vertex_or_edge) const { r_t_vertex_or_edge.Calculate_Hessians(); } }; /** * @brief function object that calls b vector calculation for all edges */ class CCollect_RightHandSide_Vector { protected: Eigen::VectorXd &m_r_b; /**< @brief reference to the right-hand side vector (out) */ public: /** * @brief default constructor * @param[in] r_b is reference to the right-hand side vector */ inline CCollect_RightHandSide_Vector(Eigen::VectorXd &r_b) :m_r_b(r_b) {} /** * @brief function operator * @tparam _TyVertex is vertex type * @param[in,out] r_t_vertex is a vertex to output its part R error vector */ template <class _TyVertex> inline void operator ()(const _TyVertex &r_t_vertex) // throw(std::bad_alloc) { r_t_vertex.Get_RightHandSide_Vector(m_r_b); } }; public: /** * @brief incrementally updates the lambda matrix structure (can be empty) * * @tparam CSystem is optimized system type * * @param[in,out] r_system is optimized system * @param[in] r_reduction_plan is reduction plan (unused in v1) * @param[in,out] r_lambda is reference to the lambda matrix * @param[in] n_vertices_already_in_lambda is number of vertices which are already in the matrix * @param[in] n_edges_already_in_lambda is number of edges which are already in the matrix * * @note This function throws std::bad_alloc. 
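	 * @note A hedged sketch of the intended call order in a solver step (the system, lambda
	 *	and the two "already in lambda" counters are the solver's own bookkeeping, shown here
	 *	only for illustration):
	 * @code
	 *	_TyReductionPlan rp; // empty in v1; the hessians live in the edges / vertices
	 *	Extend_Lambda(system, rp, lambda, n_verts_already_in, n_edges_already_in);
	 *	Refresh_Lambda(system, rp, lambda); // recalculate the edge / vertex hessians
	 *	Eigen::VectorXd v_b(lambda.n_Column_Num());
	 *	Collect_RightHandSide_Vector(system, rp, v_b); // gather the r.h.s.
	 * @endcode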
*/ template <class CSystem> static inline void Extend_Lambda(CSystem &r_system, _TyReductionPlan &UNUSED(r_reduction_plan), CUberBlockMatrix &r_lambda, size_t n_vertices_already_in_lambda, size_t n_edges_already_in_lambda) // throw(std::bad_alloc) { if(!n_vertices_already_in_lambda && !n_edges_already_in_lambda) AddEntriesInSparseSystem(r_system, r_lambda); // works for empty else UpdateSparseSystem(r_system, r_lambda, n_vertices_already_in_lambda, n_edges_already_in_lambda); // does not work for empty // create block matrix lambda #ifdef _DEBUG /*{ CUberBlockMatrix A; const Eigen::MatrixXd &r_t_uf = r_system.r_t_Unary_Factor(); if(!A.Append_Block(r_t_uf, 0, 0)) throw std::bad_alloc(); // add unary factor r_system.r_Edge_Pool().For_Each(CAlloc_JacobianBlocks(A)); // add all the hessian blocks CUberBlockMatrix lambda_ref; A.PreMultiplyWithSelfTransposeTo(lambda_ref, true); // only upper diag! // calculate lambda = AtA if(!r_lambda.b_EqualStructure(lambda_ref)) { lambda_ref.Rasterize("lambda1_reference_structure.tga"); r_lambda.Rasterize("lambda0_structure.tga"); } _ASSERTE(r_lambda.b_EqualStructure(lambda_ref)); // make sure the matrix has the same structure }*/ #endif // _DEBUG } /** * @brief refreshes the lambda matrix by recalculating edge hessians * * @tparam CSystem is optimized system type * * @param[in,out] r_system is optimized system * @param[in] r_reduction_plan is reduction plan (unused in v1) * @param[in,out] r_lambda is reference to the lambda matrix * @param[in] n_referesh_from_vertex is zero-based index of the first vertex to refresh * @param[in] n_refresh_from_edge is zero-based index of the first edge to refresh * * @note This throws std::bad_alloc and std::runtime_error; */ template <class CSystem> static inline void Refresh_Lambda(CSystem &r_system, _TyReductionPlan &UNUSED(r_reduction_plan), CUberBlockMatrix &r_lambda, size_t n_referesh_from_vertex = 0, size_t n_refresh_from_edge = 0) // throw(std::bad_alloc, std::runtime_error) { if(n_refresh_from_edge) { r_system.r_Edge_Pool().For_Each_Parallel(n_refresh_from_edge, r_system.r_Edge_Pool().n_Size(), CCalculate_Hessians()); } else { r_system.r_Edge_Pool().For_Each_Parallel(CCalculate_Hessians()); } if(n_referesh_from_vertex) { r_system.r_Vertex_Pool().For_Each_Parallel(n_referesh_from_vertex, r_system.r_Vertex_Pool().n_Size(), CCalculate_Hessians()); } else { r_system.r_Vertex_Pool().For_Each_Parallel(CCalculate_Hessians()); } // can do this in parallel if(!CSystem::null_UnaryFactor && !n_referesh_from_vertex) { #ifdef __AUTO_UNARY_FACTOR_ON_VERTEX_ZERO size_t n_first_vertex_id = 0; // simple #else // __AUTO_UNARY_FACTOR_ON_VERTEX_ZERO _ASSERTE(!r_system.r_Edge_Pool().b_Empty()); size_t n_first_vertex_id = r_system.r_Edge_Pool()[0].n_Vertex_Id(0); #endif // __AUTO_UNARY_FACTOR_ON_VERTEX_ZERO _ASSERTE(!r_system.r_Vertex_Pool()[n_first_vertex_id].b_IsConstant()); // this one must not be constant size_t n_first_vertex_order = r_system.r_Vertex_Pool()[n_first_vertex_id].n_Order(); // get id of the first vertex (usually zero) const Eigen::MatrixXd &r_t_uf = r_system.r_t_Unary_Factor(); if(!r_t_uf.cols()) throw std::runtime_error("system matrix assembled but unary factor not initialized yet"); // if this triggers, consider sorting your dataset or using an explicit CUnaryFactor r_lambda.t_FindBlock(n_first_vertex_order, n_first_vertex_order).noalias() += r_t_uf.transpose() * r_t_uf; } // add unary factor (gets overwritten by the first vertex' block) #ifdef _DEBUG /*{ CUberBlockMatrix A; const Eigen::MatrixXd &r_t_uf = 
r_system.r_t_Unary_Factor(); if(!A.Append_Block(r_t_uf, 0, 0)) throw std::bad_alloc(); // add unary factor r_system.r_Edge_Pool().For_Each(CAlloc_JacobianBlocks(A)); // add all the hessian blocks r_system.r_Edge_Pool().For_Each_Parallel(CCalculate_Jacobians()); // calculate the values as well CUberBlockMatrix lambda_ref; A.PreMultiplyWithSelfTransposeTo(lambda_ref, true); // only upper diag! // calculate lambda = AtA if(!r_lambda.b_Equal(lambda_ref, 1e-3)) { r_lambda.Rasterize("lambda2_values.tga"); lambda_ref.Rasterize("lambda3_reference_values.tga"); CUberBlockMatrix &diff = lambda_ref; r_lambda.AddTo(diff, -1); diff.Rasterize("lambda4_diff_values.tga"); fprintf(stderr, "error: lambda and it's reference have different value\n"); exit(-1); } _ASSERTE(r_lambda.b_EqualStructure(lambda_ref)); // make sure the matrix has the same structure }*/ #endif // _DEBUG } /** * @brief calculates the right-hand side vector * * @tparam CSystem is optimized system type * * @param[in] r_system is optimized system * @param[in] r_reduction_plan is reduction plan (unused in v1) * @param[in,out] r_v_b is reference to the r.h.s. vector (needs to be * allocated by the caller to the appropriate dimension) */ template <class CSystem> static inline void Collect_RightHandSide_Vector(const CSystem &r_system, const _TyReductionPlan &UNUSED(r_reduction_plan), Eigen::VectorXd &r_v_b) { r_system.r_Vertex_Pool().For_Each_Parallel(CCollect_RightHandSide_Vector(r_v_b)); // can do this in parallel // collect b } /** * @brief calculates a segment of the right-hand side vector, corresponding to a range of vertices * * @tparam CSystem is optimized system type * * @param[in] r_system is optimized system * @param[in] r_reduction_plan is reduction plan (unused in v1) * @param[in,out] r_v_b is reference to the r.h.s. vector (needs to be * allocated by the caller to the appropriate dimension) * @param[in] n_begin is zero-based index of the first vertex to calculate the r.h.s. for * @param[in] n_end is zero-based index of one past the last vertex to calculate the r.h.s. for */ template <class CSystem> static inline void Collect_RightHandSide_Vector(const CSystem &r_system, const _TyReductionPlan &UNUSED(r_reduction_plan), Eigen::VectorXd &r_v_b, size_t n_begin, size_t n_end) { r_system.r_Vertex_Pool().For_Each_Parallel(n_begin, n_end, CCollect_RightHandSide_Vector(r_v_b)); // can do this in parallel // collect b } /** * @brief calculates a segment of the right-hand side vector, corresponding to a single vertex * * @tparam CSystem is optimized system type * * @param[in] r_system is optimized system * @param[in] r_reduction_plan is reduction plan (unused in v1) * @param[in,out] r_v_b is reference to the r.h.s. vector (needs to be * allocated by the caller to the appropriate dimension) * @param[in] n_vertex is zero-based index of the vertex to calculate the r.h.s. for */ template <class CSystem> static inline void Collect_RightHandSide_Vector(const CSystem &r_system, const _TyReductionPlan &r_reduction_plan, Eigen::VectorXd &r_v_b, size_t n_vertex) { r_system.r_Vertex_Pool().For_Each(n_vertex, n_vertex + 1, CCollect_RightHandSide_Vector(r_v_b)); // this may not be the most efficient way // collect b } protected: /** * @brief creates the lambda matrix from scratch * * @tparam CSystem is optimized system type * * @param[in,out] r_system is optimized system * @param[in,out] r_lambda is reference to the lambda matrix * * @note This function throws std::bad_alloc and std::runtime_error. 
*/ template <class CSystem> static inline void AddEntriesInSparseSystem(CSystem &r_system, CUberBlockMatrix &r_lambda) // throw(std::bad_alloc, std::runtime_error) { #if 0 if(r_system.r_Edge_Pool().n_Size() > 1000) { // wins 2.42237 - 2.48938 = .06701 seconds on 10k.graph, likely more on larger graphs //printf("building large matrix from scratch ...\n"); // debug std::vector<size_t> row_cumsum_list(r_system.r_Edge_Pool().n_Size()); /*std::vector<size_t>::iterator p_end_it =*/ r_system.r_Edge_Pool().For_Each(CGetCumsums(row_cumsum_list)); //_ASSERTE(p_end_it == row_cumsum_list.end()); // collect cumsums CUberBlockMatrix tmp(row_cumsum_list.begin(), row_cumsum_list.end(), r_system.r_Vertex_Pool().n_Size()); r_lambda.Swap(tmp); // use this one instead // todo - see if there are some row_reindex on 100k, fix it by collecting // cumsums and building matrix with that (proven to be faster before) } else #endif // 0 // todo - need to write function that gets cumsums from vertices (it's not difficult) { //printf("building small matrix from scratch ...\n"); // debug r_lambda.Clear(); // ... } if(!CSystem::null_UnaryFactor) { #ifdef __AUTO_UNARY_FACTOR_ON_VERTEX_ZERO size_t n_first_vertex_id = 0; // simple #else // __AUTO_UNARY_FACTOR_ON_VERTEX_ZERO _ASSERTE(!r_system.r_Edge_Pool().b_Empty()); size_t n_first_vertex_id = r_system.r_Edge_Pool()[0].n_Vertex_Id(0); #endif // __AUTO_UNARY_FACTOR_ON_VERTEX_ZERO _ASSERTE(!r_system.r_Vertex_Pool()[n_first_vertex_id].b_IsConstant()); // this one must not be constant size_t n_first_vertex_order = r_system.r_Vertex_Pool()[n_first_vertex_id].n_Order(); // get id of the first vertex (usually zero) const Eigen::MatrixXd &r_t_uf = r_system.r_t_Unary_Factor(); if(!r_t_uf.cols()) throw std::runtime_error("system matrix assembled but unary factor not initialized yet"); // if this triggers, consider sorting your dataset or using an explicit CUnaryFactor if(!r_lambda.Append_Block(Eigen::MatrixXd(r_t_uf.transpose() * r_t_uf), n_first_vertex_order, n_first_vertex_order)) throw std::bad_alloc(); } // add unary factor (actually UF^T * UF, it was square-rooted before) // note that the unary error cannot be easily added without introducing a dummy // edge that would add itself as a reference to vertex 0 if(r_system.r_v_Unary_Error().squaredNorm() > 0) throw std::runtime_error("unary error is not supported by the v1 reduction plan"); // this is slightly obsolete so we will not support it for now r_system.r_Edge_Pool().For_Each(CAlloc_HessianBlocks(r_lambda)); r_system.r_Vertex_Pool().For_Each(CAlloc_HessianBlocks(r_lambda)); // add all the hessian blocks //printf("building lambda from scratch finished\n"); // debug } /** * @brief incrementally updates the lambda matrix structure (must not be empty) */ template <class CSystem> static inline void UpdateSparseSystem(CSystem &r_system, CUberBlockMatrix &r_lambda, size_t n_skip_vertices, size_t n_skip_edges) // throw(std::bad_alloc) { _ASSERTE(r_lambda.n_Row_Num() > 0 && r_lambda.n_Column_Num() == r_lambda.n_Row_Num()); // make sure lambda is not empty r_system.r_Edge_Pool().For_Each(n_skip_edges, r_system.r_Edge_Pool().n_Size(), CAlloc_HessianBlocks(r_lambda)); r_system.r_Vertex_Pool().For_Each(n_skip_vertices, r_system.r_Vertex_Pool().n_Size(), CAlloc_HessianBlocks(r_lambda)); // add the hessian blocks of the new edges } }; /** * @brief v2 lambda solver utility function * @tparam CDimsList is list of lambda matrix block sizes, as fbs_ut::CCTSize2D */ template <class CDimsList> class CLambdaOps2 : public 
nonlinear_detail::CSolverOps_Base { public: typedef CDimsList _TyLambdaMatrixBlockSizes; /**< @brief list of lambda matrix block sizes, as fbs_ut::CCTSize2D */ typedef CLambdaReductionPlan<CDimsList> _TyReductionPlan; /**< @brief reduction plan type */ /** * @brief function object that calls lambda hessian block allocation for all edges */ class CAlloc_HessianBlocks_v2 { protected: CUberBlockMatrix &m_r_lambda; /**< @brief reference to the lambda matrix (out) */ _TyReductionPlan &m_r_redplan; /**< @brief reference to the reduction plan */ public: /** * @brief default constructor * * @param[in,out] r_lambda is reference to the lambda matrix (modified once the function operator is invoked) * @param[in,out] r_redplan is reference to the reduction plan (modified once the function operator is invoked) */ inline CAlloc_HessianBlocks_v2(CUberBlockMatrix &r_lambda, _TyReductionPlan &r_redplan) :m_r_lambda(r_lambda), m_r_redplan(r_redplan) {} /** * @brief function operator * @tparam _TyEdge is edge type * @param[in,out] r_t_edge is edge to have hessian blocks allocated in lambda * @note This function throws std::bad_alloc. */ template <class _TyEdge> inline void operator ()(_TyEdge &r_t_edge) // throw(std::bad_alloc) { r_t_edge.Alloc_HessianBlocks_v2(m_r_lambda, m_r_redplan); } }; /** * @brief function object that calculates hessians in all the edges */ class CCalculate_Hessians_v2 { public: /** * @brief function operator * @tparam _TyEdge is edge type * @param[in] r_t_edge is edge to update it's hessians */ template <class _TyEdge> inline void operator ()(_TyEdge &r_t_edge) const { r_t_edge.Calculate_Hessians_v2(); } }; /** * @brief function object that calculates hessians in the selected edges */ class CUpdate_Hessians_v2 { protected: const CUberBlockMatrix &m_r_lambda; /**< @brief reference to the lambda matrix */ _TyReductionPlan &m_r_redplan; /**< @brief reference to the reduction plan */ bool m_b_recalc; /**< @brief Jacobian recalculation flag */ public: /** * @brief default constructor * * @param[in] r_lambda is reference to the lambda matrix * @param[in,out] r_redplan is reference to the reduction plan (executed once the function operator is invoked) * @param[in] b_recalc is Jacobian recalculation flag (if set, the edge Jacobians are recalculated prior to the reduction) */ inline CUpdate_Hessians_v2(const CUberBlockMatrix &r_lambda, _TyReductionPlan &r_redplan, bool b_recalc) :m_r_lambda(r_lambda), m_r_redplan(r_redplan), m_b_recalc(b_recalc) {} /** * @brief function operator * @tparam _TyEdge is edge type * @param[in] r_t_edge is edge to update it's hessians */ template <class _TyEdge> inline void operator ()(_TyEdge &r_t_edge) const { if(m_b_recalc) // may choose to do that in parallel earlier r_t_edge.Calculate_Hessians_v2(); // calculate r_t_edge.Reduce_Hessians_v2(m_r_lambda, m_r_redplan); // reduce (may overwrite an earlier reduce // of a shared jacobian, but that one was incomplete because r_t_edge.Calculate_Hessians_v2() // was not called yet on this edge) } }; /** * @brief unary factor helper * * The size of UF depends on the dimension of the first vertex (unknown), * it needs to be found at runtime in the block size typelist. This is a * callback for CTypelistItemBFind. 
*/ class CAddUnaryFactor { protected: _TyReductionPlan &m_r_rp; /**< @brief reference to lambda and RHS reductor */ const Eigen::MatrixXd &m_r_t_uf; /**< @brief const reference to the unary factor */ const Eigen::VectorXd &m_r_t_uerr; /**< @brief const reference to the unary error */ double *m_p_uf_block; /**< @brief pointer to the hessian block of the first vertex in lambda */ size_t m_n_vertes_id; /**< @brief id of the anchor vertex the unary factor will be applied to */ public: /** * @brief default constructor * * @param[in] r_rp is reference to lambda / RHS reductors * @param[in] r_t_uf is const reference to the unary factor * @param[in] r_t_uerr is const reference to the unary error * @param[in] p_uf_block is pointer to the hessian block of the first vertex in lambda * @param[in] n_vertes_id is id of the anchor vertex the unary factor will be applied to */ inline CAddUnaryFactor(_TyReductionPlan &r_rp, const Eigen::MatrixXd &r_t_uf, const Eigen::VectorXd &r_t_uerr, double *p_uf_block, size_t n_vertes_id) :m_r_rp(r_rp), m_r_t_uf(r_t_uf), m_r_t_uerr(r_t_uerr), m_p_uf_block(p_uf_block), m_n_vertes_id(n_vertes_id) { _ASSERTE(p_uf_block); } /** * @brief callback operator; adds the unary factor ro the reduction plan * @tparam C2DSize is size of the unary factor */ template <class C2DSize> void operator ()() { double *p_temp = m_r_rp.r_Lambda_ReductionPlan().template p_Diagonal_GetTempBlock<C2DSize>(m_n_vertes_id, m_n_vertes_id, m_p_uf_block); // get a temp reduction block for the unary factor typename CUberBlockMatrix::CMakeMatrixRef<C2DSize::n_row_num, C2DSize::n_column_num>::_Ty dest(p_temp); dest.noalias() = m_r_t_uf.transpose() * m_r_t_uf; // copy UF if(m_r_t_uerr.squaredNorm() > 0) { double *p_temp_vec = m_r_rp.r_RHS_ReductionPlan().template p_Get_ReductionBlock<C2DSize::n_row_num>(m_n_vertes_id); // get a temp reduction block for the unary error typename CUberBlockMatrix::CMakeMatrixRef<C2DSize::n_row_num, 1>::_Ty vec(p_temp_vec); vec = m_r_t_uerr; // copy the error } // if norm of the unary error is nonzero (only special applications, it usualy is zero) } /** * @brief adds unary factor to the reduction plan * * @param[in] r_rp is reference to lambda reductor * @param[in] r_t_uf is const reference to the unary factor * @param[in] r_t_uerr is const reference to the unary error * @param[in] r_lambda is reference to lambda * @param[in] n_vertes_id is id of the anchor vertex the unary factor will be applied to * * @note This can only be used when the structure of lambda is fully allocated. 
*/ static void Add_UnaryFactor(_TyReductionPlan &r_rp, const Eigen::MatrixXd &r_t_uf, const Eigen::VectorXd &r_t_uerr, CUberBlockMatrix &r_lambda, size_t n_vertes_id) { double *p_UF_block = r_lambda.p_GetBlock_Log(n_vertes_id, n_vertes_id, r_t_uf.rows(), r_t_uf.cols(), true, false); CAddUnaryFactor add_uf(r_rp, r_t_uf, r_t_uerr, p_UF_block, n_vertes_id); CTypelistItemBFind<typename CSortTypelist<_TyLambdaMatrixBlockSizes, fbs_ut::CCompareSize2D_Less>::_TyResult, fbs_ut::CRuntimeCompareSize2D, std::pair<size_t, size_t>, CAddUnaryFactor>::FindExisting(std::make_pair(r_t_uf.rows(), r_t_uf.cols()), add_uf); } /** * @brief adds unary factor to the reduction plan * * @param[in] r_rp is reference to lambda reductor * @param[in] r_t_uf is const reference to the unary factor * @param[in] r_t_uerr is const reference to the unary error * @param[in] r_lambda is reference to lambda * @param[in] n_vertes_id is id of the anchor vertex the unary factor will be applied to * @param[in] n_vertes_order is order of the vertex with id n_vertes_id */ static void Add_UnaryFactor(_TyReductionPlan &r_rp, const Eigen::MatrixXd &r_t_uf, const Eigen::VectorXd &r_t_uerr, CUberBlockMatrix &r_lambda, size_t n_vertes_id, size_t n_vertes_order) { double *p_UF_block = r_lambda.p_FindBlock(n_vertes_order, n_vertes_order, r_t_uf.rows(), r_t_uf.cols(), true, false); CAddUnaryFactor add_uf(r_rp, r_t_uf, r_t_uerr, p_UF_block, n_vertes_id); CTypelistItemBFind<typename CSortTypelist<_TyLambdaMatrixBlockSizes, fbs_ut::CCompareSize2D_Less>::_TyResult, fbs_ut::CRuntimeCompareSize2D, std::pair<size_t, size_t>, CAddUnaryFactor>::FindExisting(std::make_pair(r_t_uf.rows(), r_t_uf.cols()), add_uf); } }; public: /** * @brief incrementally updates the lambda matrix structure (can be empty) * * @tparam CSystem is optimized system type * * @param[in,out] r_system is optimized system * @param[in] r_reduction_plan is reduction plan * @param[in,out] r_lambda is reference to the lambda matrix * @param[in] n_vertices_already_in_lambda is number of vertices which are already in the matrix * @param[in] n_edges_already_in_lambda is number of edges which are already in the matrix * * @note This function throws std::bad_alloc. 
*/ template <class CSystem> static inline void Extend_Lambda(CSystem &r_system, _TyReductionPlan &r_reduction_plan, CUberBlockMatrix &r_lambda, size_t n_vertices_already_in_lambda, size_t n_edges_already_in_lambda) // throw(std::bad_alloc) { if(!n_vertices_already_in_lambda && !n_edges_already_in_lambda) AddEntriesInSparseSystem(r_system, r_reduction_plan, r_lambda); // works for empty else { UpdateSparseSystem(r_system, r_reduction_plan, r_lambda, n_vertices_already_in_lambda, n_edges_already_in_lambda); // does not work for empty } // create block matrix lambda } /** * @brief refreshes the lambda matrix by recalculating edge hessians * * @tparam CSystem is optimized system type * * @param[in,out] r_system is optimized system * @param[in] r_reduction_plan is reduction plan * @param[in,out] r_lambda is reference to the lambda matrix * @param[in] n_refresh_from_vertex is zero-based index of the first vertex to refresh (unused) * @param[in] n_refresh_from_edge is zero-based index of the first edge to refresh */ template <class CSystem> static inline void Refresh_Lambda(CSystem &r_system, _TyReductionPlan &r_reduction_plan, CUberBlockMatrix &r_lambda, size_t UNUSED(n_refresh_from_vertex) = 0, size_t n_refresh_from_edge = 0) { size_t n_edge_num = r_system.r_Edge_Pool().n_Size(); size_t n_new_edge_num = n_edge_num - n_refresh_from_edge; const size_t n_parallel_thresh = 50; if(n_refresh_from_edge) { if(n_new_edge_num > n_parallel_thresh) r_system.r_Edge_Pool().For_Each_Parallel(n_refresh_from_edge, n_edge_num, CCalculate_Hessians_v2(), 0); // always run in parallel } else r_system.r_Edge_Pool().For_Each_Parallel(CCalculate_Hessians_v2()); // can do this in parallel if(n_refresh_from_edge) { if(n_new_edge_num > n_parallel_thresh) { r_system.r_Edge_Pool().For_Each/*_Parallel*/(n_refresh_from_edge, r_system.r_Edge_Pool().n_Size(), CUpdate_Hessians_v2(r_lambda, r_reduction_plan, false)/*, 0*/); // always run in parallel, the hessians are already calculated // reduce only, can't do this in parallel since the reductions do not use temporaries // and although they would have the same results, they will not have the same results // if reducing in parallel into the same variable. would have to use a temporary. but // we already have the hessians precalculated so at least that is saved. } else { r_system.r_Edge_Pool().For_Each(n_refresh_from_edge, r_system.r_Edge_Pool().n_Size(), CUpdate_Hessians_v2(r_lambda, r_reduction_plan, true)); // not in parallel, recalculate the hessians as well // calculate and reduce, *not* in parallel } } else r_reduction_plan.r_Lambda_ReductionPlan().ReduceAll(); // simple, parallel // run the reduction plan } /** * @brief calculates the right-hand side vector * * @tparam CSystem is optimized system type * * @param[in] r_system is optimized system (unused) * @param[in] r_reduction_plan is reduction plan * @param[in,out] r_v_b is reference to the r.h.s. vector (needs to be * allocated by the caller to the appropriate dimension) */ template <class CSystem> static inline void Collect_RightHandSide_Vector(const CSystem &UNUSED(r_system), const _TyReductionPlan &r_reduction_plan, Eigen::VectorXd &r_v_b) { r_reduction_plan.r_RHS_ReductionPlan().ReduceAll(r_v_b); // collect b } /** * @brief calculates a segment of the right-hand side vector, corresponding to a range of vertices * * @tparam CSystem is optimized system type * * @param[in] r_system is optimized system * @param[in] r_reduction_plan is reduction plan * @param[in,out] r_v_b is reference to the r.h.s. 
vector (needs to be * allocated by the caller to the appropriate dimension) * @param[in] n_begin is zero-based index of the first vertex to calculate the r.h.s. for * @param[in] n_end is zero-based index of one past the last vertex to calculate the r.h.s. for */ template <class CSystem> static inline void Collect_RightHandSide_Vector(const CSystem &r_system, const _TyReductionPlan &r_reduction_plan, Eigen::VectorXd &r_v_b, size_t n_begin, size_t n_end) { /*if(n_end - n_begin > 50) { // t_odo - parallel implementation using ReduceSingle // can't, would have to use a reduce to temporary and then overwrite the result }*/ n_begin = r_system.r_Vertex_Pool()[n_begin].n_Order(); typename CSystem::_TyConstVertexRef last = r_system.r_Vertex_Pool()[n_end - 1]; n_end = last.n_Order() + last.n_Dimension(); // need to convert from vertex indices to element indices r_reduction_plan.r_RHS_ReductionPlan().ReduceRange(r_v_b, n_begin, n_end); // collect b } protected: /** * @brief single vertex r.h.s. reduction functor */ class CSingleRHSReducer { protected: const _TyReductionPlan &r_reduction_plan; /**< @brief reference to the reduction plan */ Eigen::VectorXd &r_v_b; /**< @brief reference to the r.h.s. vector */ size_t n_order; /**< @brief order of the reduced vertex */ public: /** * @brief default constructor * * @param[in] _r_reduction_plan is reference to the reduction plan * @param[in] _r_v_b is reference to the r.h.s. vector * @param[in] _n_order is order of the reduced vertex */ inline CSingleRHSReducer(const _TyReductionPlan &_r_reduction_plan, Eigen::VectorXd &_r_v_b, size_t _n_order) :r_reduction_plan(_r_reduction_plan), r_v_b(_r_v_b), n_order(_n_order) {} /** * @brief performs a single vertex r.h.s. reduction */ template <class CBlockSize> inline void operator ()() { r_reduction_plan.r_RHS_ReductionPlan().template Reduce_Single<CBlockSize::n_size>(r_v_b, n_order); } }; public: /** * @brief calculates a segment of the right-hand side vector, corresponding to a single vertex * * @tparam CSystem is optimized system type * * @param[in] r_system is optimized system * @param[in] r_reduction_plan is reduction plan * @param[in,out] r_v_b is reference to the r.h.s. vector (needs to be * allocated by the caller to the appropriate dimension) * @param[in] n_vertex is zero-based index of the vertex to calculate the r.h.s.
for */ template <class CSystem> static inline void Collect_RightHandSide_Vector(const CSystem &r_system, const _TyReductionPlan &r_reduction_plan, Eigen::VectorXd &r_v_b, size_t n_vertex) { typename CSystem::_TyConstVertexRef vertex = r_system.r_Vertex_Pool()[n_vertex]; fbs_ut::CWrap3<>::template In_ScalarSize_DecisionTree<typename _TyReductionPlan::CRHSReductor::_TyDimensionList>(vertex.n_Dimension(), CSingleRHSReducer(r_reduction_plan, r_v_b, vertex.n_Order())); } protected: /*class CSumEdgeDims { // profiling protected: size_t &m_r_n_sum; public: CSumEdgeDims(size_t &r_n_dim_sum) :m_r_n_sum(r_n_dim_sum) {} template <class _TyEdge> inline void operator ()(const _TyEdge &UNUSED(r_edge)) { m_r_n_sum += _TyEdge::n_residual_dimension; } };*/ public: /** * @brief functor which copies a cumulative sum of variable dimensions to an array */ class CCopyVariableDims { protected: std::vector<size_t>::iterator m_p_dest_it; /**< @brief iterator to write the next vertex dimension */ size_t m_n_prev_value; /**< @brief running sum of the vertex dimensions written so far */ public: /** * @brief default constructor * @param[in] r_dest is a reference to a vector to fill with vertex dimensions (must be allocated) */ CCopyVariableDims(std::vector<size_t> &r_dest) :m_p_dest_it(r_dest.begin()), m_n_prev_value(0) {} /** * @brief function operator; stores the variable dimension * @tparam _TyVertex is vertex type * @param[in] r_vertex is reference to the vertex (unused) */ template <class _TyVertex> inline void operator ()(const _TyVertex &UNUSED(r_vertex)) { *m_p_dest_it = m_n_prev_value += _TyVertex::n_dimension; // hopefully the compiler can optimize away the decision tree and just act on the types ++ m_p_dest_it; } }; protected: /** * @brief creates the lambda matrix from scratch * * @param[in,out] r_system is optimized system * @param[in] r_reduction_plan is reduction plan * @param[in,out] r_lambda is reference to the lambda matrix * * @note This function throws std::bad_alloc and std::runtime_error. */ template <class CSystem> static inline void AddEntriesInSparseSystem(CSystem &r_system, _TyReductionPlan &r_reduction_plan, CUberBlockMatrix &r_lambda) // throw(std::bad_alloc, std::runtime_error) { /*CDeltaTimer t; printf("one: %f\n", t.f_Time());*/ // profiling #if 0 r_lambda.Clear(); // ...
#else // 0 { std::vector<size_t> row_col_cumsums(r_system.r_Vertex_Pool().n_Size()); #if 1 r_system.r_Vertex_Pool().For_Each(CCopyVariableDims(row_col_cumsums)); // serial, using type info, 0.000393 sec (on windows) // faster #else _ASSERTE(r_system.r_Vertex_Pool().n_Size() <= INT_MAX); #pragma omp parallel if(r_system.r_Vertex_Pool().n_Size() > 50) for(int i = 0, n = int(r_system.r_Vertex_Pool().n_Size()); i < n; ++ i) row_col_cumsums[i] = r_system.r_Vertex_Pool()[i].n_Dimension(); // parallel, using facades // 0.001340 sec (on windows) std::partial_sum(row_col_cumsums.begin(), row_col_cumsums.end(), row_col_cumsums.begin()); // inclusive #endif // get inclusive sum of the dimensions of the vertices //printf("two: %f\n", t.f_Time()); // profiling CUberBlockMatrix empty(row_col_cumsums.begin(), row_col_cumsums.end(), row_col_cumsums.begin(), row_col_cumsums.end()); r_lambda.Swap(empty); } // initialize the block rows and columns of the matrix, so that the order in which the edges are inserted // does not cause row reindexing (this is usually not a problem in SLAM, but it gets worse in BA) #endif // 0 /*printf("three: %f\n", t.f_Time()); { std::vector<size_t> edge_dims(r_system.r_Edge_Pool().n_Size()); size_t n_dim_sum = 0; r_system.r_Edge_Pool().For_Each(CSumEdgeDims(n_dim_sum)); printf("three.5: %f (" PRIsize ")\n", t.f_Time(), n_dim_sum); } n_dummy_param = 0; r_lambda.Reset_Perfcounters();*/ // profiling r_system.r_Edge_Pool().For_Each(CAlloc_HessianBlocks_v2(r_lambda, r_reduction_plan)); // add all the hessian blocks /*printf("four: %f (" PRIsize" / " PRIsize ")\n", t.f_Time(), n_dummy_param, r_lambda.n_Block_Num()); r_lambda.Dump_PerfCounters();*/ // profiling if(!CSystem::null_UnaryFactor) { #ifdef __AUTO_UNARY_FACTOR_ON_VERTEX_ZERO size_t n_first_vertex_id = 0; // simple #else // __AUTO_UNARY_FACTOR_ON_VERTEX_ZERO _ASSERTE(!r_system.r_Edge_Pool().b_Empty()); size_t n_first_vertex_id = r_system.r_Edge_Pool()[0].n_Vertex_Id(0); #endif // __AUTO_UNARY_FACTOR_ON_VERTEX_ZERO _ASSERTE(!r_system.r_Vertex_Pool()[n_first_vertex_id].b_IsConstant()); // this one must not be constant size_t n_first_vertex_order = r_system.r_Vertex_Pool()[n_first_vertex_id].n_Order(); // get id of the first vertex (usually zero) const Eigen::MatrixXd &r_t_uf = r_system.r_t_Unary_Factor(); if(!r_t_uf.cols()) throw std::runtime_error("system matrix assembled but unary factor not initialized yet"); // if this triggers, consider sorting your dataset or using an explicit CUnaryFactor /*if(!r_lambda.Append_Block_Log(Eigen::MatrixXd(r_t_uf.transpose() * r_t_uf), n_first_vertex_id, n_first_vertex_id)) throw std::bad_alloc();*/ // no need to do this anymore CAddUnaryFactor::Add_UnaryFactor(r_reduction_plan, r_t_uf, r_system.r_v_Unary_Error(), r_lambda, n_first_vertex_id, n_first_vertex_order); // add unary factor (actually UF^T * UF, it was square-rooted before) } // add unary factor to the reductor so that we don't have to care about ir anymore // do this after the edges, as we are using Append_Block_Log() and hence we need // the row / column with n_first_vertex_id to exist (and if n_first_vertex_id > 0, // then it might not) //printf("five: %f\n", t.f_Time()); // profiling } /** * @brief incrementally updates the lambda matrix structure (must not be empty) */ template <class CSystem> static inline void UpdateSparseSystem(CSystem &r_system, _TyReductionPlan &r_reduction_plan, CUberBlockMatrix &r_lambda, size_t n_skip_vertices, size_t n_skip_edges) // throw(std::bad_alloc) { _ASSERTE(r_lambda.n_Row_Num() > 0 && 
r_lambda.n_Column_Num() == r_lambda.n_Row_Num()); // make sure lambda is not empty r_system.r_Edge_Pool().For_Each(n_skip_edges, r_system.r_Edge_Pool().n_Size(), CAlloc_HessianBlocks_v2(r_lambda, r_reduction_plan)); // add the hessian blocks of the new edges } }; } // ~lambda_utils /** @} */ // end of group #endif // !__NONLINEAR_SOLVER_LAMBDA_UTILS
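For orientation, a minimal usage sketch of the ops classes defined above follows; it is not part of the header. The Extend_Lambda / Refresh_Lambda / Collect_RightHandSide_Vector calls, CUberBlockMatrix and Eigen::VectorXd come from this file, while the CSystem type, the reduction plan instance and the final linear solve are assumed placeholders.

// illustrative sketch only (not part of SLAM++): one simplified Gauss-Newton
// style update driven through the lambda ops; CLambdaOpsType may be either
// CLambdaOps<...> (v1) or CLambdaOps2<...> (v2)
template <class CSystem, class CLambdaOpsType>
void Example_LambdaStep(CSystem &r_system,
	typename CLambdaOpsType::_TyReductionPlan &r_reduction_plan,
	CUberBlockMatrix &r_lambda)
{
	CLambdaOpsType::Extend_Lambda(r_system, r_reduction_plan, r_lambda, 0, 0);
	// allocate the block structure of lambda (the zeros mean "build from scratch")

	CLambdaOpsType::Refresh_Lambda(r_system, r_reduction_plan, r_lambda);
	// recalculate the edge hessians and reduce them into lambda

	Eigen::VectorXd v_b(r_lambda.n_Column_Num());
	CLambdaOpsType::Collect_RightHandSide_Vector(r_system, r_reduction_plan, v_b);
	// assemble the right-hand side vector

	// a real solver would now solve lambda * dx = v_b (e.g. with a block
	// Cholesky factorization) and push dx back into the vertices; that part
	// is outside the scope of this header and is deliberately omitted here
}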
hello_omp.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
  int tid;

  /* each thread gets its own private copy of tid and prints its id */
  #pragma omp parallel private(tid)
  {
    tid = omp_get_thread_num();
    printf("Hello World from thread = %d\n", tid);
  }
  return 0;
}
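A small companion sketch (not part of hello_omp.c above), using only standard OpenMP: it reports the team size from a single thread and then sums 1..100 with a reduction clause.

#include <omp.h>
#include <stdio.h>

int main(void)
{
  int sum = 0;

  #pragma omp parallel
  {
    /* only one thread of the team prints the team size */
    #pragma omp single
    printf("Team size = %d\n", omp_get_num_threads());
  }

  /* each thread accumulates a private partial sum, combined on exit */
  #pragma omp parallel for reduction(+:sum)
  for (int i = 1; i <= 100; ++i)
    sum += i;

  printf("Sum of 1..100 = %d\n", sum); /* prints 5050 */
  return 0;
}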
threshold.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* Define declarations. */ #define ThresholdsFilename "thresholds.xml" /* Typedef declarations. */ struct _ThresholdMap { char *map_id, *description; size_t width, height; ssize_t divisor, *levels; }; /* Static declarations. 
*/ #if MAGICKCORE_ZERO_CONFIGURATION_SUPPORT #include "MagickCore/threshold-map.h" #else static const char *const BuiltinMap= "<?xml version=\"1.0\"?>" "<thresholds>" " <threshold map=\"threshold\" alias=\"1x1\">" " <description>Threshold 1x1 (non-dither)</description>" " <levels width=\"1\" height=\"1\" divisor=\"2\">" " 1" " </levels>" " </threshold>" " <threshold map=\"checks\" alias=\"2x1\">" " <description>Checkerboard 2x1 (dither)</description>" " <levels width=\"2\" height=\"2\" divisor=\"3\">" " 1 2" " 2 1" " </levels>" " </threshold>" "</thresholds>"; #endif /* Forward declarations. */ static ThresholdMap *GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveThresholdImage() selects an individual threshold for each pixel % based on the range of intensity values in its local neighborhood. This % allows for thresholding of an image whose global intensity histogram % doesn't contain distinctive peaks. % % The format of the AdaptiveThresholdImage method is: % % Image *AdaptiveThresholdImage(const Image *image,const size_t width, % const size_t height,const double bias,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the local neighborhood. % % o height: the height of the local neighborhood. % % o bias: the mean bias. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveThresholdImage(const Image *image, const size_t width,const size_t height,const double bias, ExceptionInfo *exception) { #define AdaptiveThresholdImageTag "AdaptiveThreshold/Image" CacheView *image_view, *threshold_view; Image *threshold_image; MagickBooleanType status; MagickOffsetType progress; MagickSizeType number_pixels; ssize_t y; /* Initialize threshold image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); threshold_image=CloneImage(image,0,0,MagickTrue,exception); if (threshold_image == (Image *) NULL) return((Image *) NULL); if ((width == 0) || (height == 0)) return(threshold_image); status=SetImageStorageClass(threshold_image,DirectClass,exception); if (status == MagickFalse) { threshold_image=DestroyImage(threshold_image); return((Image *) NULL); } /* Threshold image. 
*/ status=MagickTrue; progress=0; number_pixels=(MagickSizeType) width*height; image_view=AcquireVirtualCacheView(image,exception); threshold_view=AcquireAuthenticCacheView(threshold_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,threshold_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_bias[MaxPixelChannels], channel_sum[MaxPixelChannels]; const Quantum *magick_restrict p, *magick_restrict pixels; Quantum *magick_restrict q; ssize_t i, x; ssize_t center, u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (height/2L),image->columns+width,height,exception); q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+ GetPixelChannels(image)*(width/2); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image, channel); if ((traits == UndefinedPixelTrait) || (threshold_traits == UndefinedPixelTrait)) continue; if ((threshold_traits & CopyPixelTrait) != 0) { SetPixelChannel(threshold_image,channel,p[center+i],q); continue; } pixels=p; channel_bias[channel]=0.0; channel_sum[channel]=0.0; for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { if (u == (ssize_t) (width-1)) channel_bias[channel]+=pixels[i]; channel_sum[channel]+=pixels[i]; pixels+=GetPixelChannels(image); } pixels+=GetPixelChannels(image)*image->columns; } } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double mean; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image, channel); if ((traits == UndefinedPixelTrait) || (threshold_traits == UndefinedPixelTrait)) continue; if ((threshold_traits & CopyPixelTrait) != 0) { SetPixelChannel(threshold_image,channel,p[center+i],q); continue; } channel_sum[channel]-=channel_bias[channel]; channel_bias[channel]=0.0; pixels=p; for (v=0; v < (ssize_t) height; v++) { channel_bias[channel]+=pixels[i]; pixels+=(width-1)*GetPixelChannels(image); channel_sum[channel]+=pixels[i]; pixels+=GetPixelChannels(image)*(image->columns+1); } mean=(double) (channel_sum[channel]/number_pixels+bias); SetPixelChannel(threshold_image,channel,(Quantum) ((double) p[center+i] <= mean ? 
0 : QuantumRange),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(threshold_image); } if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } threshold_image->type=image->type; threshold_view=DestroyCacheView(threshold_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) threshold_image=DestroyImage(threshold_image); return(threshold_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoThresholdImage() automatically performs image thresholding % dependent on which method you specify. % % The format of the AutoThresholdImage method is: % % MagickBooleanType AutoThresholdImage(Image *image, % const AutoThresholdMethod method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-threshold. % % o method: choose from Kapur, OTSU, or Triangle. % % o exception: return any errors or warnings in this structure. % */ static double KapurThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { #define MaxIntensity 255 double *black_entropy, *cumulative_histogram, entropy, epsilon, maximum_entropy, *white_entropy; ssize_t i, j; size_t threshold; /* Compute optimal threshold from the entopy of the histogram. */ cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*cumulative_histogram)); black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*black_entropy)); white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*white_entropy)); if ((cumulative_histogram == (double *) NULL) || (black_entropy == (double *) NULL) || (white_entropy == (double *) NULL)) { if (white_entropy != (double *) NULL) white_entropy=(double *) RelinquishMagickMemory(white_entropy); if (black_entropy != (double *) NULL) black_entropy=(double *) RelinquishMagickMemory(black_entropy); if (cumulative_histogram != (double *) NULL) cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Entropy for black and white parts of the histogram. */ cumulative_histogram[0]=histogram[0]; for (i=1; i <= MaxIntensity; i++) cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i]; epsilon=MagickMinimumValue; for (j=0; j <= MaxIntensity; j++) { /* Black entropy. */ black_entropy[j]=0.0; if (cumulative_histogram[j] > epsilon) { entropy=0.0; for (i=0; i <= j; i++) if (histogram[i] > epsilon) entropy-=histogram[i]/cumulative_histogram[j]* log(histogram[i]/cumulative_histogram[j]); black_entropy[j]=entropy; } /* White entropy. */ white_entropy[j]=0.0; if ((1.0-cumulative_histogram[j]) > epsilon) { entropy=0.0; for (i=j+1; i <= MaxIntensity; i++) if (histogram[i] > epsilon) entropy-=histogram[i]/(1.0-cumulative_histogram[j])* log(histogram[i]/(1.0-cumulative_histogram[j])); white_entropy[j]=entropy; } } /* Find histogram bin with maximum entropy. 
*/ maximum_entropy=black_entropy[0]+white_entropy[0]; threshold=0; for (j=1; j <= MaxIntensity; j++) if ((black_entropy[j]+white_entropy[j]) > maximum_entropy) { maximum_entropy=black_entropy[j]+white_entropy[j]; threshold=(size_t) j; } /* Free resources. */ white_entropy=(double *) RelinquishMagickMemory(white_entropy); black_entropy=(double *) RelinquishMagickMemory(black_entropy); cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); return(100.0*threshold/MaxIntensity); } static double OTSUThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { double max_sigma, *myu, *omega, *probability, *sigma, threshold; ssize_t i; /* Compute optimal threshold from maximization of inter-class variance. */ myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu)); omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega)); probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*probability)); sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma)); if ((myu == (double *) NULL) || (omega == (double *) NULL) || (probability == (double *) NULL) || (sigma == (double *) NULL)) { if (sigma != (double *) NULL) sigma=(double *) RelinquishMagickMemory(sigma); if (probability != (double *) NULL) probability=(double *) RelinquishMagickMemory(probability); if (omega != (double *) NULL) omega=(double *) RelinquishMagickMemory(omega); if (myu != (double *) NULL) myu=(double *) RelinquishMagickMemory(myu); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Calculate probability density. */ for (i=0; i <= (ssize_t) MaxIntensity; i++) probability[i]=histogram[i]; /* Generate probability of graylevels and mean value for separation. */ omega[0]=probability[0]; myu[0]=0.0; for (i=1; i <= (ssize_t) MaxIntensity; i++) { omega[i]=omega[i-1]+probability[i]; myu[i]=myu[i-1]+i*probability[i]; } /* Sigma maximization: inter-class variance and compute optimal threshold. */ threshold=0; max_sigma=0.0; for (i=0; i < (ssize_t) MaxIntensity; i++) { sigma[i]=0.0; if ((omega[i] != 0.0) && (omega[i] != 1.0)) sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0- omega[i])); if (sigma[i] > max_sigma) { max_sigma=sigma[i]; threshold=(double) i; } } /* Free resources. */ myu=(double *) RelinquishMagickMemory(myu); omega=(double *) RelinquishMagickMemory(omega); probability=(double *) RelinquishMagickMemory(probability); sigma=(double *) RelinquishMagickMemory(sigma); return(100.0*threshold/MaxIntensity); } static double TriangleThreshold(const double *histogram) { double a, b, c, count, distance, inverse_ratio, max_distance, segment, x1, x2, y1, y2; ssize_t i; ssize_t end, max, start, threshold; /* Compute optimal threshold with triangle algorithm. */ start=0; /* find start bin, first bin not zero count */ for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > 0.0) { start=i; break; } end=0; /* find end bin, last bin not zero count */ for (i=(ssize_t) MaxIntensity; i >= 0; i--) if (histogram[i] > 0.0) { end=i; break; } max=0; /* find max bin, bin with largest count */ count=0.0; for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > count) { max=i; count=histogram[i]; } /* Compute threshold at split point. 
*/ x1=(double) max; y1=histogram[max]; x2=(double) end; if ((max-start) >= (end-max)) x2=(double) start; y2=0.0; a=y1-y2; b=x2-x1; c=(-1.0)*(a*x1+b*y1); inverse_ratio=1.0/sqrt(a*a+b*b+c*c); threshold=0; max_distance=0.0; if (x2 == (double) start) for (i=start; i < max; i++) { segment=inverse_ratio*(a*i+b*histogram[i]+c); distance=sqrt(segment*segment); if ((distance > max_distance) && (segment > 0.0)) { threshold=i; max_distance=distance; } } else for (i=end; i > max; i--) { segment=inverse_ratio*(a*i+b*histogram[i]+c); distance=sqrt(segment*segment); if ((distance > max_distance) && (segment < 0.0)) { threshold=i; max_distance=distance; } } return(100.0*threshold/MaxIntensity); } MagickExport MagickBooleanType AutoThresholdImage(Image *image, const AutoThresholdMethod method,ExceptionInfo *exception) { CacheView *image_view; char property[MagickPathExtent]; double gamma, *histogram, sum, threshold; MagickBooleanType status; ssize_t i; ssize_t y; /* Form histogram. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*histogram)); if (histogram == (double *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=MagickTrue; (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { double intensity = GetPixelIntensity(image,p); histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Normalize histogram. */ sum=0.0; for (i=0; i <= (ssize_t) MaxIntensity; i++) sum+=histogram[i]; gamma=PerceptibleReciprocal(sum); for (i=0; i <= (ssize_t) MaxIntensity; i++) histogram[i]=gamma*histogram[i]; /* Discover threshold from histogram. */ switch (method) { case KapurThresholdMethod: { threshold=KapurThreshold(image,histogram,exception); break; } case OTSUThresholdMethod: default: { threshold=OTSUThreshold(image,histogram,exception); break; } case TriangleThresholdMethod: { threshold=TriangleThreshold(histogram); break; } } histogram=(double *) RelinquishMagickMemory(histogram); if (threshold < 0.0) status=MagickFalse; if (status == MagickFalse) return(MagickFalse); /* Threshold image. */ (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold); (void) SetImageProperty(image,"auto-threshold:threshold",property,exception); if (IsStringTrue(GetImageArtifact(image,"auto-threshold:verbose")) != MagickFalse) (void) FormatLocaleFile(stdout,"%.*g%%\n",GetMagickPrecision(),threshold); return(BilevelImage(image,QuantumRange*threshold/100.0,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B i l e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BilevelImage() changes the value of individual pixels based on the % intensity of each pixel channel. The result is a high-contrast image. 
% % More precisely each channel value of the image is 'thresholded' so that if % it is equal to or less than the given value it is set to zero, while any % value greater than that give is set to it maximum or QuantumRange. % % This function is what is used to implement the "-threshold" operator for % the command line API. % % If the default channel setting is given the image is thresholded using just % the gray 'intensity' of the image, rather than the individual channels. % % The format of the BilevelImage method is: % % MagickBooleanType BilevelImage(Image *image,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: define the threshold values. % % o exception: return any errors or warnings in this structure. % % Aside: You can get the same results as operator using LevelImages() % with the 'threshold' value for both the black_point and the white_point. % */ MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold, ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); /* Bilevel threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l a c k T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlackThresholdImage() is like ThresholdImage() but forces all pixels below % the threshold into black while leaving all pixels at or above the threshold % unchanged. 
% % The format of the BlackThresholdImage method is: % % MagickBooleanType BlackThresholdImage(Image *image, % const char *threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: define the threshold value. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType BlackThresholdImage(Image *image, const char *thresholds,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; PixelInfo threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (thresholds == (const char *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); GetPixelInfo(image,&threshold); flags=ParseGeometry(thresholds,&geometry_info); threshold.red=geometry_info.rho; threshold.green=geometry_info.rho; threshold.blue=geometry_info.rho; threshold.black=geometry_info.rho; threshold.alpha=100.0; if ((flags & SigmaValue) != 0) threshold.green=geometry_info.sigma; if ((flags & XiValue) != 0) threshold.blue=geometry_info.xi; if ((flags & PsiValue) != 0) threshold.alpha=geometry_info.psi; if (threshold.colorspace == CMYKColorspace) { if ((flags & PsiValue) != 0) threshold.black=geometry_info.psi; if ((flags & ChiValue) != 0) threshold.alpha=geometry_info.chi; } if ((flags & PercentValue) != 0) { threshold.red*=(MagickRealType) (QuantumRange/100.0); threshold.green*=(MagickRealType) (QuantumRange/100.0); threshold.blue*=(MagickRealType) (QuantumRange/100.0); threshold.black*=(MagickRealType) (QuantumRange/100.0); threshold.alpha*=(MagickRealType) (QuantumRange/100.0); } /* Black threshold image.
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel < GetPixelInfoChannel(&threshold,channel)) q[i]=(Quantum) 0; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l a m p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClampImage() set each pixel whose value is below zero to zero and any the % pixel whose value is above the quantum range to the quantum range (e.g. % 65535) otherwise the pixel value remains unchanged. % % The format of the ClampImage method is: % % MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception) { #define ClampImageTag "Clamp/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { ssize_t i; PixelInfo *magick_restrict q; q=image->colormap; for (i=0; i < (ssize_t) image->colors; i++) { q->red=(double) ClampPixel(q->red); q->green=(double) ClampPixel(q->green); q->blue=(double) ClampPixel(q->blue); q->alpha=(double) ClampPixel(q->alpha); q++; } return(SyncImage(image,exception)); } /* Clamp image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampPixel((MagickRealType) q[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ClampImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorThresholdImage() forces all pixels in the color range to white % otherwise black. % % The format of the ColorThresholdImage method is: % % MagickBooleanType ColorThresholdImage(Image *image, % const PixelInfo *start_color,const PixelInfo *stop_color, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o start_color, stop_color: define the start and stop color range. Any % pixel within the range returns white otherwise black. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ColorThresholdImage(Image *image, const PixelInfo *start_color,const PixelInfo *stop_color, ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; const char *artifact; IlluminantType illuminant = D65Illuminant; MagickBooleanType status; MagickOffsetType progress; PixelInfo start, stop; ssize_t y; /* Color threshold image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=AcquireImageColormap(image,2,exception); if (status == MagickFalse) return(status); artifact=GetImageArtifact(image,"color:illuminant"); if (artifact != (const char *) NULL) { illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions, MagickFalse,artifact); if ((ssize_t) illuminant < 0) illuminant=UndefinedIlluminant; } start=(*start_color); stop=(*stop_color); switch (image->colorspace) { case HCLColorspace: { ConvertRGBToHCL(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHCL(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HSBColorspace: { ConvertRGBToHSB(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHSB(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HSLColorspace: { ConvertRGBToHSL(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHSL(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HSVColorspace: { ConvertRGBToHSV(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHSV(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HWBColorspace: { ConvertRGBToHWB(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHWB(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case LabColorspace: { ConvertRGBToLab(start_color->red,start_color->green,start_color->blue, illuminant,&start.red,&start.green,&start.blue); ConvertRGBToLab(stop_color->red,stop_color->green,stop_color->blue, illuminant,&stop.red,&stop.green,&stop.blue); break; } default: { start.red*=QuantumScale; start.green*=QuantumScale; start.blue*=QuantumScale; stop.red*=QuantumScale; stop.green*=QuantumScale; stop.blue*=QuantumScale; break; } } start.red*=QuantumRange; start.green*=QuantumRange; start.blue*=QuantumRange; stop.red*=QuantumRange; stop.green*=QuantumRange; stop.blue*=QuantumRange; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickBooleanType foreground = MagickTrue; ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if ((q[i] < GetPixelInfoChannel(&start,channel)) || (q[i] > GetPixelInfoChannel(&stop,channel))) foreground=MagickFalse; } SetPixelIndex(image,(Quantum) (foreground != MagickFalse ? 
1 : 0),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); image->colorspace=sRGBColorspace; return(SyncImage(image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y T h r e s h o l d M a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyThresholdMap() de-allocate the given ThresholdMap % % The format of the ListThresholdMaps method is: % % ThresholdMap *DestroyThresholdMap(Threshold *map) % % A description of each parameter follows. % % o map: Pointer to the Threshold map to destroy % */ MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map) { assert(map != (ThresholdMap *) NULL); if (map->map_id != (char *) NULL) map->map_id=DestroyString(map->map_id); if (map->description != (char *) NULL) map->description=DestroyString(map->description); if (map->levels != (ssize_t *) NULL) map->levels=(ssize_t *) RelinquishMagickMemory(map->levels); map=(ThresholdMap *) RelinquishMagickMemory(map); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t T h r e s h o l d M a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetThresholdMap() loads and searches one or more threshold map files for the % map matching the given name or alias. % % The format of the GetThresholdMap method is: % % ThresholdMap *GetThresholdMap(const char *map_id, % ExceptionInfo *exception) % % A description of each parameter follows. % % o map_id: ID of the map to look for. % % o exception: return any errors or warnings in this structure. % */ MagickExport ThresholdMap *GetThresholdMap(const char *map_id, ExceptionInfo *exception) { ThresholdMap *map; map=GetThresholdMapFile(BuiltinMap,"built-in",map_id,exception); if (map != (ThresholdMap *) NULL) return(map); #if !MAGICKCORE_ZERO_CONFIGURATION_SUPPORT { const StringInfo *option; LinkedListInfo *options; options=GetConfigureOptions(ThresholdsFilename,exception); option=(const StringInfo *) GetNextValueInLinkedList(options); while (option != (const StringInfo *) NULL) { map=GetThresholdMapFile((const char *) GetStringInfoDatum(option), GetStringInfoPath(option),map_id,exception); if (map != (ThresholdMap *) NULL) break; option=(const StringInfo *) GetNextValueInLinkedList(options); } options=DestroyConfigureOptions(options); } #endif return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetThresholdMapFile() look for a given threshold map name or alias in the % given XML file data, and return the allocated the map when found. % % The format of the ListThresholdMaps method is: % % ThresholdMap *GetThresholdMap(const char *xml,const char *filename, % const char *map_id,ExceptionInfo *exception) % % A description of each parameter follows. % % o xml: The threshold map list in XML format. % % o filename: The threshold map XML filename. 
% % o map_id: ID of the map to look for in XML list. % % o exception: return any errors or warnings in this structure. % */ static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename, const char *map_id,ExceptionInfo *exception) { char *p; const char *attribute, *content; double value; ssize_t i; ThresholdMap *map; XMLTreeInfo *description, *levels, *threshold, *thresholds; (void) LogMagickEvent(ConfigureEvent,GetMagickModule(), "Loading threshold map file \"%s\" ...",filename); map=(ThresholdMap *) NULL; thresholds=NewXMLTree(xml,exception); if (thresholds == (XMLTreeInfo *) NULL) return(map); for (threshold=GetXMLTreeChild(thresholds,"threshold"); threshold != (XMLTreeInfo *) NULL; threshold=GetNextXMLTreeTag(threshold)) { attribute=GetXMLTreeAttribute(threshold,"map"); if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0)) break; attribute=GetXMLTreeAttribute(threshold,"alias"); if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0)) break; } if (threshold == (XMLTreeInfo *) NULL) { thresholds=DestroyXMLTree(thresholds); return(map); } description=GetXMLTreeChild(threshold,"description"); if (description == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<description>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); return(map); } levels=GetXMLTreeChild(threshold,"levels"); if (levels == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<levels>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); return(map); } map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map)); map->map_id=(char *) NULL; map->description=(char *) NULL; map->levels=(ssize_t *) NULL; attribute=GetXMLTreeAttribute(threshold,"map"); if (attribute != (char *) NULL) map->map_id=ConstantString(attribute); content=GetXMLTreeContent(description); if (content != (char *) NULL) map->description=ConstantString(content); attribute=GetXMLTreeAttribute(levels,"width"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->width=StringToUnsignedLong(attribute); if (map->width == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } attribute=GetXMLTreeAttribute(levels,"height"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->height=StringToUnsignedLong(attribute); if (map->height == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } attribute=GetXMLTreeAttribute(levels,"divisor"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->divisor=(ssize_t) StringToLong(attribute); if (map->divisor < 
2) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } content=GetXMLTreeContent(levels); if (content == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingContent", "<levels>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height* sizeof(*map->levels)); if (map->levels == (ssize_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap"); for (i=0; i < (ssize_t) (map->width*map->height); i++) { map->levels[i]=(ssize_t) strtol(content,&p,10); if (p == content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } if ((map->levels[i] < 0) || (map->levels[i] > map->divisor)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"", (double) map->levels[i],map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } content=p; } value=(double) strtol(content,&p,10); (void) value; if (p != content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } thresholds=DestroyXMLTree(thresholds); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + L i s t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ListThresholdMapFile() lists the threshold maps and their descriptions % in the given XML file data. % % The format of the ListThresholdMaps method is: % % MagickBooleanType ListThresholdMaps(FILE *file,const char*xml, % const char *filename,ExceptionInfo *exception) % % A description of each parameter follows. % % o file: An pointer to the output FILE. % % o xml: The threshold map list in XML format. % % o filename: The threshold map XML filename. % % o exception: return any errors or warnings in this structure. 
% */ MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml, const char *filename,ExceptionInfo *exception) { const char *alias, *content, *map; XMLTreeInfo *description, *threshold, *thresholds; assert( xml != (char *) NULL ); assert( file != (FILE *) NULL ); (void) LogMagickEvent(ConfigureEvent,GetMagickModule(), "Loading threshold map file \"%s\" ...",filename); thresholds=NewXMLTree(xml,exception); if ( thresholds == (XMLTreeInfo *) NULL ) return(MagickFalse); (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description"); (void) FormatLocaleFile(file, "----------------------------------------------------\n"); threshold=GetXMLTreeChild(thresholds,"threshold"); for ( ; threshold != (XMLTreeInfo *) NULL; threshold=GetNextXMLTreeTag(threshold)) { map=GetXMLTreeAttribute(threshold,"map"); if (map == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<map>"); thresholds=DestroyXMLTree(thresholds); return(MagickFalse); } alias=GetXMLTreeAttribute(threshold,"alias"); description=GetXMLTreeChild(threshold,"description"); if (description == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<description>, map \"%s\"",map); thresholds=DestroyXMLTree(thresholds); return(MagickFalse); } content=GetXMLTreeContent(description); if (content == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingContent", "<description>, map \"%s\"", map); thresholds=DestroyXMLTree(thresholds); return(MagickFalse); } (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "", content); } thresholds=DestroyXMLTree(thresholds); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i s t T h r e s h o l d M a p s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ListThresholdMaps() lists the threshold maps and their descriptions % as defined by "threshold.xml" to a file. % % The format of the ListThresholdMaps method is: % % MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception) % % A description of each parameter follows. % % o file: An pointer to the output FILE. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ListThresholdMaps(FILE *file, ExceptionInfo *exception) { const StringInfo *option; LinkedListInfo *options; MagickStatusType status; status=MagickTrue; if (file == (FILE *) NULL) file=stdout; options=GetConfigureOptions(ThresholdsFilename,exception); (void) FormatLocaleFile(file, "\n Threshold Maps for Ordered Dither Operations\n"); option=(const StringInfo *) GetNextValueInLinkedList(options); while (option != (const StringInfo *) NULL) { (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option)); status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option), GetStringInfoPath(option),exception); option=(const StringInfo *) GetNextValueInLinkedList(options); } options=DestroyConfigureOptions(options); return(status != 0 ? 
MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O r d e r e d D i t h e r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OrderedDitherImage() will perform a ordered dither based on a number % of pre-defined dithering threshold maps, but over multiple intensity % levels, which can be different for different channels, according to the % input argument. % % The format of the OrderedDitherImage method is: % % MagickBooleanType OrderedDitherImage(Image *image, % const char *threshold_map,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold_map: A string containing the name of the threshold dither % map to use, followed by zero or more numbers representing the number % of color levels to dither between. % % Any level number less than 2 will be equivalent to 2, and means only % binary dithering will be applied to each color channel. % % No numbers also means a 2 level (bitmap) dither will be applied to all % channels, while a single number is the number of levels applied to each % channel in sequence. More numbers will be applied in turn to each of % the color channels. % % For example: "o3x3,6" will generate a 6 level posterization of the % image with an ordered 3x3 diffused pixel dither being applied between % each level. While checker,8,8,4 will produce a 332 colormaped image % with only a single checkerboard hash pattern (50% grey) between each % color level, to basically double the number of color levels with % a bare minimim of dithering. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType OrderedDitherImage(Image *image, const char *threshold_map,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; char token[MagickPathExtent]; const char *p; double levels[CompositePixelChannel]; MagickBooleanType status; MagickOffsetType progress; ssize_t i, y; ThresholdMap *map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (threshold_map == (const char *) NULL) return(MagickTrue); p=(char *) threshold_map; while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) && (*p != '\0')) p++; threshold_map=p; while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) && (*p != '\0')) { if ((p-threshold_map) >= (MagickPathExtent-1)) break; token[p-threshold_map]=(*p); p++; } token[p-threshold_map]='\0'; map=GetThresholdMap(token,exception); if (map == (ThresholdMap *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","ordered-dither",threshold_map); return(MagickFalse); } for (i=0; i < MaxPixelChannels; i++) levels[i]=2.0; p=strchr((char *) threshold_map,','); if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0)) { (void) GetNextToken(p,&p,MagickPathExtent,token); for (i=0; (i < MaxPixelChannels); i++) levels[i]=StringToDouble(token,(char **) NULL); for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); levels[i]=StringToDouble(token,(char **) NULL); } } for (i=0; i < 
MaxPixelChannels; i++) if (fabs(levels[i]) >= 1) levels[i]-=1.0; if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t j, n; n=0; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { ssize_t level, threshold; PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (fabs(levels[n]) < MagickEpsilon) { n++; continue; } threshold=(ssize_t) (QuantumScale*q[j]*(levels[n]*(map->divisor-1)+1)); level=threshold/(map->divisor-1); threshold-=level*(map->divisor-1); q[j]=ClampToQuantum((double) (level+(threshold >= map->levels[(x % map->width)+map->width*(y % map->height)]))* QuantumRange/levels[n]); n++; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,DitherImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); map=DestroyThresholdMap(map); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P e r c e p t i b l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PerceptibleImage() set each pixel whose value is less than |epsilon| to % epsilon or -epsilon (whichever is closer) otherwise the pixel value remains % unchanged. % % The format of the PerceptibleImage method is: % % MagickBooleanType PerceptibleImage(Image *image,const double epsilon, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o epsilon: the epsilon threshold (e.g. 1.0e-9). % % o exception: return any errors or warnings in this structure. % */ static inline Quantum PerceptibleThreshold(const Quantum quantum, const double epsilon) { double sign; sign=(double) quantum < 0.0 ? 
-1.0 : 1.0; if ((sign*quantum) >= epsilon) return(quantum); return((Quantum) (sign*epsilon)); } MagickExport MagickBooleanType PerceptibleImage(Image *image, const double epsilon,ExceptionInfo *exception) { #define PerceptibleImageTag "Perceptible/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { ssize_t i; PixelInfo *magick_restrict q; q=image->colormap; for (i=0; i < (ssize_t) image->colors; i++) { q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red), epsilon); q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green), epsilon); q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue), epsilon); q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha), epsilon); q++; } return(SyncImage(image,exception)); } /* Perceptible image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PerceptibleThreshold(q[i],epsilon); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,PerceptibleImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a n d o m T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RandomThresholdImage() changes the value of individual pixels based on the % intensity of each pixel compared to a random threshold. The result is a % low-contrast, two color image. % % The format of the RandomThresholdImage method is: % % MagickBooleanType RandomThresholdImage(Image *image, % const char *thresholds,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o low,high: Specify the high and low thresholds. These values range from % 0 to QuantumRange. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType RandomThresholdImage(Image *image, const double min_threshold, const double max_threshold,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); /* Random threshold image. */ status=MagickTrue; progress=0; random_info=AcquireRandomInfoTLS(); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if ((double) q[i] < min_threshold) threshold=min_threshold; else if ((double) q[i] > max_threshold) threshold=max_threshold; else threshold=(double) (QuantumRange* GetPseudoRandomValue(random_info[id])); q[i]=(double) q[i] <= threshold ? 0 : QuantumRange; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoTLS(random_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a n g e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RangeThresholdImage() applies soft and hard thresholding. % % The format of the RangeThresholdImage method is: % % MagickBooleanType RangeThresholdImage(Image *image, % const double low_black,const double low_white,const double high_white, % const double high_black,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o low_black: Define the minimum black threshold value. % % o low_white: Define the minimum white threshold value. % % o high_white: Define the maximum white threshold value. % % o high_black: Define the maximum black threshold value. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType RangeThresholdImage(Image *image, const double low_black,const double low_white,const double high_white, const double high_black,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); /* Range threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel < low_black) q[i]=(Quantum) 0; else if ((pixel >= low_black) && (pixel < low_white)) q[i]=ClampToQuantum(QuantumRange* PerceptibleReciprocal(low_white-low_black)*(pixel-low_black)); else if ((pixel >= low_white) && (pixel <= high_white)) q[i]=QuantumRange; else if ((pixel > high_white) && (pixel <= high_black)) q[i]=ClampToQuantum(QuantumRange*PerceptibleReciprocal( high_black-high_white)*(high_black-pixel)); else if (pixel > high_black) q[i]=(Quantum) 0; else q[i]=(Quantum) 0; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W h i t e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WhiteThresholdImage() is like ThresholdImage() but forces all pixels above % the threshold into white while leaving all pixels at or below the threshold % unchanged. % % The format of the WhiteThresholdImage method is: % % MagickBooleanType WhiteThresholdImage(Image *image, % const char *threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: Define the threshold value. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType WhiteThresholdImage(Image *image, const char *thresholds,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; PixelInfo threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (thresholds == (const char *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); GetPixelInfo(image,&threshold); flags=ParseGeometry(thresholds,&geometry_info); threshold.red=geometry_info.rho; threshold.green=geometry_info.rho; threshold.blue=geometry_info.rho; threshold.black=geometry_info.rho; threshold.alpha=100.0; if ((flags & SigmaValue) != 0) threshold.green=geometry_info.sigma; if ((flags & XiValue) != 0) threshold.blue=geometry_info.xi; if ((flags & PsiValue) != 0) threshold.alpha=geometry_info.psi; if (threshold.colorspace == CMYKColorspace) { if ((flags & PsiValue) != 0) threshold.black=geometry_info.psi; if ((flags & ChiValue) != 0) threshold.alpha=geometry_info.chi; } if ((flags & PercentValue) != 0) { threshold.red*=(MagickRealType) (QuantumRange/100.0); threshold.green*=(MagickRealType) (QuantumRange/100.0); threshold.blue*=(MagickRealType) (QuantumRange/100.0); threshold.black*=(MagickRealType) (QuantumRange/100.0); threshold.alpha*=(MagickRealType) (QuantumRange/100.0); } /* White threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel > GetPixelInfoChannel(&threshold,channel)) q[i]=QuantumRange; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
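/*
  Illustrative usage sketch (not part of the library): the threshold entry
  points above all follow the same calling pattern -- acquire an
  ExceptionInfo, read an image, apply the operator, write the result.  This
  is a minimal example assuming the MagickCore public header and library are
  available; the file names "wizard.png" and "wizard-dithered.png" are
  placeholders.
*/
#include <stdio.h>
#include <stdlib.h>
#include <MagickCore/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *image;

  ImageInfo
    *image_info;

  (void) argc;
  MagickCoreGenesis(*argv,MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,"wizard.png",MagickPathExtent);
  image=ReadImage(image_info,exception);
  if (image == (Image *) NULL)
    {
      CatchException(exception);
      return(1);
    }
  /* 2-level ordered dither with the built-in 8x8 map, then clamp. */
  (void) OrderedDitherImage(image,"o8x8",exception);
  (void) ClampImage(image,exception);
  (void) CopyMagickString(image->filename,"wizard-dithered.png",
    MagickPathExtent);
  (void) WriteImage(image_info,image,exception);
  image=DestroyImage(image);
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}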
Async.c
/* Filename: Async.c * Author: Mohammed Sourouri <[email protected]> * * Asnchronous state-of-the-art Multi-GPU code where the number of MPI processes * spawned equals the number of GPUs. All memory transfers are asynchronous. * Non-blocking MPI calls are used. This code corresponds to "MPI" results in * Figure-9 in the paper. * * * Copyright 2014 Mohammed Sourouri * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "Sync.h" #define DEBUG #define checkCuda(error) __checkCuda(error, __FILE__, __LINE__) //////////////////////////////////////////////////////////////////////////////// // A method for checking error in CUDA calls //////////////////////////////////////////////////////////////////////////////// inline void __checkCuda(cudaError_t error, const char *file, const int line) { #if defined(DEBUG) || defined(_DEBUG) if (error != cudaSuccess) { printf("checkCuda error at %s:%i: %s\n", file, line, cudaGetErrorString(cudaGetLastError())); exit(-1); } #endif return; } //////////////////////////////////////////////////////////////////////////////// // Program Main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char *argv[]) { int Nx, Ny, Nz, max_iters; int blockX, blockY, blockZ; if (argc == 8) { Nx = atoi(argv[1]); Ny = atoi(argv[2]); Nz = atoi(argv[3]); max_iters = atoi(argv[4]); blockX = atoi(argv[5]); blockY = atoi(argv[6]); blockZ = atoi(argv[7]); } else { printf("Usage: %s nx ny nz i block_x block_y block_z number_of_threads\n", argv[0]); exit(1); } // Get the number of GPUS int number_of_devices; checkCuda(cudaGetDeviceCount(&number_of_devices)); if (number_of_devices < 2) { printf("Less than two devices were found.\n"); printf("Exiting...\n"); return -1; } // Decompose along the Z-axis int _Nz = Nz/number_of_devices; // Define constants const _DOUBLE_ L = 1.0; const _DOUBLE_ h = L/(Nx+1); const _DOUBLE_ dt = h*h/6.0; const _DOUBLE_ beta = dt/(h*h); const _DOUBLE_ c0 = beta; const _DOUBLE_ c1 = (1-6*beta); // Check if ECC is turned on ECCCheck(number_of_devices); // Set the number of OpenMP threads omp_set_num_threads(4); #pragma omp parallel { unsigned int tid = omp_get_num_threads(); #pragma omp single { printf("Number of OpenMP threads: %d\n", tid); } } // CPU memory operations int dt_size = sizeof(_DOUBLE_); _DOUBLE_ *u_new, *u_old; u_new = (_DOUBLE_ *)malloc(sizeof(_DOUBLE_)*(Nx+2)*(Ny+2)*(Nz+2)); u_old = (_DOUBLE_ *)malloc(sizeof(_DOUBLE_)*(Nx+2)*(Ny+2)*(Nz+2)); init(u_old, u_new, h, Nx, Ny, Nz); // Allocate and generate arrays on the host size_t pitch_bytes; size_t pitch_gc_bytes; _DOUBLE_ *h_Unew, *h_Uold; _DOUBLE_ *h_s_Uolds[number_of_devices], *h_s_Unews[number_of_devices]; _DOUBLE_ *left_send_buffer[number_of_devices], *left_receive_buffer[number_of_devices]; _DOUBLE_ *right_send_buffer[number_of_devices], *right_receive_buffer[number_of_devices]; h_Unew = (_DOUBLE_ *)malloc(sizeof(_DOUBLE_)*(Nx+2)*(Ny+2)*(Nz+2)); h_Uold = (_DOUBLE_ *)malloc(sizeof(_DOUBLE_)*(Nx+2)*(Ny+2)*(Nz+2)); init(h_Uold, h_Unew, h, Nx, Ny, Nz); #pragma omp 
parallel { unsigned int tid = omp_get_thread_num(); h_s_Unews[tid] = (_DOUBLE_ *)malloc(sizeof(_DOUBLE_)*(Nx+2)*(Ny+2)*(_Nz+2)); h_s_Uolds[tid] = (_DOUBLE_ *)malloc(sizeof(_DOUBLE_)*(Nx+2)*(Ny+2)*(_Nz+2)); right_send_buffer[tid] = (_DOUBLE_ *)malloc(sizeof(_DOUBLE_)*(Nx+2)*(Ny+2)*(_GC_DEPTH)); left_send_buffer[tid] = (_DOUBLE_ *)malloc(sizeof(_DOUBLE_)*(Nx+2)*(Ny+2)*(_GC_DEPTH)); right_receive_buffer[tid] = (_DOUBLE_ *)malloc(sizeof(_DOUBLE_)*(Nx+2)*(Ny+2)*(_GC_DEPTH)); left_receive_buffer[tid] = (_DOUBLE_ *)malloc(sizeof(_DOUBLE_)*(Nx+2)*(Ny+2)*(_GC_DEPTH)); checkCuda(cudaHostAlloc((void**)&h_s_Unews[tid], dt_size*(Nx+2)*(Ny+2)*(_Nz+2), cudaHostAllocPortable)); checkCuda(cudaHostAlloc((void**)&h_s_Uolds[tid], dt_size*(Nx+2)*(Ny+2)*(_Nz+2), cudaHostAllocPortable)); checkCuda(cudaHostAlloc((void**)&right_send_buffer[tid], dt_size*(Nx+2)*(Ny+2)*(_GC_DEPTH), cudaHostAllocPortable)); checkCuda(cudaHostAlloc((void**)&left_send_buffer[tid], dt_size*(Nx+2)*(Ny+2)*(_GC_DEPTH), cudaHostAllocPortable)); checkCuda(cudaHostAlloc((void**)&right_receive_buffer[tid], dt_size*(Nx+2)*(Ny+2)*(_GC_DEPTH), cudaHostAllocPortable)); checkCuda(cudaHostAlloc((void**)&left_receive_buffer[tid], dt_size*(Nx+2)*(Ny+2)*(_GC_DEPTH), cudaHostAllocPortable)); init_subdomain(h_s_Uolds[tid], h_Uold, Nx, Ny, _Nz, tid); } // GPU memory operations _DOUBLE_ *d_s_Unews[number_of_devices], *d_s_Uolds[number_of_devices]; _DOUBLE_ *d_right_send_buffer[number_of_devices], *d_left_send_buffer[number_of_devices]; _DOUBLE_ *d_right_receive_buffer[number_of_devices], *d_left_receive_buffer[number_of_devices]; #pragma omp parallel { unsigned int tid = omp_get_thread_num(); if (tid < 2) { checkCuda(cudaSetDevice(tid)); CopyToConstantMemory(c0, c1); checkCuda(cudaMallocPitch((void**)&d_s_Uolds[tid], &pitch_bytes, dt_size*(Nx+2), (Ny+2)*(_Nz+2))); checkCuda(cudaMallocPitch((void**)&d_s_Unews[tid], &pitch_bytes, dt_size*(Nx+2), (Ny+2)*(_Nz+2))); checkCuda(cudaMallocPitch((void**)&d_left_receive_buffer[tid], &pitch_gc_bytes, dt_size*(Nx+2), (Ny+2)*(_GC_DEPTH))); checkCuda(cudaMallocPitch((void**)&d_right_receive_buffer[tid], &pitch_gc_bytes, dt_size*(Nx+2), (Ny+2)*(_GC_DEPTH))); checkCuda(cudaMallocPitch((void**)&d_left_send_buffer[tid], &pitch_gc_bytes, dt_size*(Nx+2), (Ny+2)*(_GC_DEPTH))); checkCuda(cudaMallocPitch((void**)&d_right_send_buffer[tid], &pitch_gc_bytes, dt_size*(Nx+2), (Ny+2)*(_GC_DEPTH))); } } // GPU stream operations cudaStream_t compute_stream_2, compute_stream_3; cudaStream_t right_send_stream, left_send_stream; cudaStream_t right_receive_stream, left_receive_stream; #pragma omp parallel { unsigned int tid = omp_get_thread_num(); if (tid == 0) { checkCuda(cudaSetDevice(0)); checkCuda(cudaStreamCreate(&right_send_stream)); checkCuda(cudaStreamCreate(&right_receive_stream)); } if (tid == 2) { checkCuda(cudaSetDevice(0)); checkCuda(cudaStreamCreate(&compute_stream_2)); } if (tid == 1) { checkCuda(cudaSetDevice(1)); checkCuda(cudaStreamCreate(&left_send_stream)); checkCuda(cudaStreamCreate(&left_receive_stream)); } if (tid == 3) { checkCuda(cudaSetDevice(1)); checkCuda(cudaStreamCreate(&compute_stream_3)); } } // Copy data from host to the device double HtD_timer = 0.; HtD_timer -= omp_get_wtime(); #pragma omp parallel { unsigned int tid = omp_get_thread_num(); if (tid < 2) { checkCuda(cudaSetDevice(tid)); checkCuda(cudaMemcpy2D(d_s_Uolds[tid], pitch_bytes, h_s_Uolds[tid], dt_size*(Nx+2), dt_size*(Nx+2), ((Ny+2)*(_Nz+2)), cudaMemcpyHostToDevice)); checkCuda(cudaMemcpy2D(d_s_Unews[tid], pitch_bytes, h_s_Unews[tid], 
dt_size*(Nx+2), dt_size*(Nx+2), ((Ny+2)*(_Nz+2)), cudaMemcpyHostToDevice)); } } HtD_timer += omp_get_wtime(); int pitch = pitch_bytes/dt_size; int gc_pitch = pitch_gc_bytes/dt_size; // GPU kernel launch parameters dim3 threads_per_block(blockX, blockY, blockZ); unsigned int blocksInX = getBlock(Nx, blockX); unsigned int blocksInY = getBlock(Ny, blockY); unsigned int blocksInZ = getBlock(_Nz-2, k_loop); dim3 thread_blocks(blocksInX, blocksInY, blocksInZ); dim3 thread_blocks_halo(blocksInX, blocksInY); unsigned int ghost_width = 1; double compute_timer = 0.; compute_timer -= omp_get_wtime(); #pragma omp parallel { unsigned int tid = omp_get_thread_num(); for(int iterations = 0; iterations < max_iters; iterations++) { // Make sure that all threads start at the same time #pragma omp barrier // Compute right boundary data on device 0 if (tid == 0) { int kstart = (_Nz+1)-ghost_width; int kstop = _Nz+1; checkCuda(cudaSetDevice(0)); ComputeInnerPointsAsync(thread_blocks_halo, threads_per_block, right_send_stream, d_s_Unews[0], d_s_Uolds[0], pitch, Nx, Ny, _Nz, kstart, kstop); CopyBoundaryRegionToGhostCellAsync(thread_blocks_halo, threads_per_block, right_send_stream, d_s_Unews[0], d_right_send_buffer[0], Nx, Ny, _Nz, pitch, gc_pitch, 0); checkCuda(cudaMemcpy2DAsync(right_send_buffer[0], dt_size*(Nx+2), d_right_send_buffer[0], pitch_gc_bytes, dt_size*(Nx+2), (Ny+2)*(_GC_DEPTH), cudaMemcpyDeviceToHost, right_send_stream)); } // Compute left boundary data on device 1 if (tid == 1) { int kstart = 1; int kstop = 1+ghost_width; checkCuda(cudaSetDevice(1)); ComputeInnerPointsAsync(thread_blocks_halo, threads_per_block, left_send_stream, d_s_Unews[1], d_s_Uolds[1], pitch, Nx, Ny, _Nz, kstart, kstop); CopyBoundaryRegionToGhostCellAsync(thread_blocks_halo, threads_per_block, left_send_stream, d_s_Unews[1], d_left_send_buffer[1], Nx, Ny, _Nz, pitch, gc_pitch, 1); checkCuda(cudaMemcpy2DAsync(left_send_buffer[1], dt_size*(Nx+2), d_left_send_buffer[1], pitch_gc_bytes, dt_size*(Nx+2), (Ny+2)*(_GC_DEPTH), cudaMemcpyDeviceToHost, left_send_stream)); } // Compute inner nodes for device 0 if (tid == 2) { int kstart = 1; int kstop = (_Nz+1)-ghost_width; checkCuda(cudaSetDevice(0)); ComputeInnerPointsAsync(thread_blocks, threads_per_block, compute_stream_2, d_s_Unews[0], d_s_Uolds[0], pitch, Nx, Ny, _Nz, kstart, kstop); } // Compute inner nodes for device 1 if (tid == 3) { int kstart = 1+ghost_width; int kstop = _Nz+1; checkCuda(cudaSetDevice(1)); ComputeInnerPointsAsync(thread_blocks, threads_per_block, compute_stream_3, d_s_Unews[1], d_s_Uolds[1], pitch, Nx, Ny, _Nz, kstart, kstop); } #pragma omp barrier if (tid == 1) { while (true) { cudaSetDevice(0); if (cudaStreamQuery(right_send_stream) == cudaSuccess) { cudaSetDevice(1); checkCuda(cudaMemcpy2DAsync(d_left_receive_buffer[1], pitch_gc_bytes, right_send_buffer[0], dt_size*(Nx+2), dt_size*(Nx+2), ((Ny+2)*(_GC_DEPTH)), cudaMemcpyHostToDevice, left_receive_stream)); CopyGhostCellToBoundaryRegionAsync(thread_blocks_halo, threads_per_block, left_receive_stream, d_s_Unews[1], d_left_receive_buffer[1], Nx, Ny, _Nz, pitch, gc_pitch, 1); break; } } } if (tid == 0) { while (true) { cudaSetDevice(1); if (cudaStreamQuery(left_send_stream) == cudaSuccess) { cudaSetDevice(0); checkCuda(cudaMemcpy2DAsync(d_right_receive_buffer[0], pitch_gc_bytes, left_send_buffer[1], dt_size*(Nx+2), dt_size*(Nx+2), ((Ny+2)*(_GC_DEPTH)), cudaMemcpyHostToDevice, right_receive_stream)); CopyGhostCellToBoundaryRegionAsync(thread_blocks_halo, threads_per_block, right_receive_stream, d_s_Unews[0], 
d_right_receive_buffer[0], Nx, Ny, _Nz, pitch, gc_pitch, 0); break; } } } // Swap pointers on the host #pragma omp barrier if (tid < 2) { checkCuda(cudaDeviceSynchronize()); swap(_DOUBLE_*, d_s_Unews[tid], d_s_Uolds[tid]); } } } compute_timer += omp_get_wtime(); // Copy data from device to host double DtH_timer = 0; DtH_timer -= omp_get_wtime(); #pragma omp parallel { unsigned int tid = omp_get_thread_num(); if (tid < 2) { checkCuda(cudaSetDevice(tid)); checkCuda(cudaMemcpy2D(h_s_Uolds[tid], dt_size*(Nx+2), d_s_Uolds[tid], pitch_bytes, dt_size*(Nx+2), (Ny+2)*(_Nz+2), cudaMemcpyDeviceToHost)); } } DtH_timer += omp_get_wtime(); // Merge sub-domains into a one big domain #pragma omp parallel { unsigned int tid = omp_get_thread_num(); if (tid < 2) { merge_domains(h_s_Uolds[tid], h_Uold, Nx, Ny, _Nz, tid); } } // Calculate on host #if defined(DEBUG) || defined(_DEBUG) cpu_heat3D(u_new, u_old, c0, c1, max_iters, Nx, Ny, Nz); #endif float gflops = CalcGflops(compute_timer, max_iters, Nx, Ny, Nz); PrintSummary("3D Heat (7-pt)", "Plane sweeping", compute_timer, HtD_timer, DtH_timer, gflops, max_iters, Nx); _DOUBLE_ t = max_iters * dt; CalcError(h_Uold, u_old, t, h, Nx, Ny, Nz); #if defined(DEBUG) || defined(_DEBUG) //exportToVTK(h_Uold, h, "heat3D.vtk", Nx, Ny, Nz); #endif #pragma omp parallel { unsigned int tid = omp_get_thread_num(); if (tid < 2) { checkCuda(cudaSetDevice(tid)); checkCuda(cudaFree(d_s_Unews[tid])); checkCuda(cudaFree(d_s_Uolds[tid])); checkCuda(cudaFree(d_right_send_buffer[tid])); checkCuda(cudaFree(d_left_send_buffer[tid])); checkCuda(cudaFree(d_right_receive_buffer[tid])); checkCuda(cudaFree(d_left_receive_buffer[tid])); checkCuda(cudaFreeHost(h_s_Unews[tid])); checkCuda(cudaFreeHost(h_s_Uolds[tid])); checkCuda(cudaFreeHost(left_send_buffer[tid])); checkCuda(cudaFreeHost(right_send_buffer[tid])); checkCuda(cudaFreeHost(left_receive_buffer[tid])); checkCuda(cudaFreeHost(right_receive_buffer[tid])); checkCuda(cudaDeviceReset()); } if (tid == 0) { checkCuda(cudaSetDevice(0)); checkCuda(cudaStreamCreate(&right_send_stream)); checkCuda(cudaStreamCreate(&right_receive_stream)); } if (tid == 2) { checkCuda(cudaSetDevice(0)); checkCuda(cudaStreamCreate(&compute_stream_2)); } if (tid == 1) { checkCuda(cudaSetDevice(1)); checkCuda(cudaStreamCreate(&left_send_stream)); checkCuda(cudaStreamCreate(&left_receive_stream)); } if (tid == 3) { checkCuda(cudaSetDevice(1)); checkCuda(cudaStreamCreate(&compute_stream_3)); } } free(u_old); free(u_new); return 0; }
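/*
 * Sketch (not part of the original code): the halo exchange above polls
 * cudaStreamQuery() on the peer device's send stream from the host before
 * launching the host-to-device copy.  An alternative is to record a CUDA
 * event on the producing stream and make the consuming stream wait on it
 * with cudaStreamWaitEvent(), which is also valid across devices.  Minimal
 * self-contained example; it assumes at least two CUDA devices and uses its
 * own buffers, so none of the names below refer to the code above.
 */
#include <stdio.h>
#include <cuda_runtime.h>

int main(void)
{
    const size_t bytes = (size_t)1 << 20;
    float *h_staging;           // pinned host staging buffer
    float *d_src, *d_dst;       // boundary slabs on device 0 and device 1
    cudaStream_t send_stream, receive_stream;
    cudaEvent_t staged;

    cudaHostAlloc((void**)&h_staging, bytes, cudaHostAllocPortable);

    cudaSetDevice(0);
    cudaMalloc((void**)&d_src, bytes);
    cudaStreamCreate(&send_stream);
    cudaEventCreateWithFlags(&staged, cudaEventDisableTiming);

    cudaSetDevice(1);
    cudaMalloc((void**)&d_dst, bytes);
    cudaStreamCreate(&receive_stream);

    // Device 0: stage the boundary data to host memory, then mark completion.
    cudaSetDevice(0);
    cudaMemcpyAsync(h_staging, d_src, bytes, cudaMemcpyDeviceToHost, send_stream);
    cudaEventRecord(staged, send_stream);

    // Device 1: wait on the event (no host polling), then pull the data in.
    cudaSetDevice(1);
    cudaStreamWaitEvent(receive_stream, staged, 0);
    cudaMemcpyAsync(d_dst, h_staging, bytes, cudaMemcpyHostToDevice, receive_stream);
    cudaStreamSynchronize(receive_stream);

    printf("staged halo exchange complete\n");

    cudaSetDevice(0);
    cudaFree(d_src);
    cudaStreamDestroy(send_stream);
    cudaEventDestroy(staged);
    cudaSetDevice(1);
    cudaFree(d_dst);
    cudaStreamDestroy(receive_stream);
    cudaFreeHost(h_staging);
    return 0;
}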
blockchain.c
/********************************************************************* Homework 5 CS 110: Computer Architecture, Spring 2021 ShanghaiTech University * Last Modified: 03/28/2021 *********************************************************************/ #include "blockchain.h" #include <string.h> #include <stdint.h> #include <omp.h> typedef unsigned long long ll; void blockchain_node_init(blk_t *node, uint32_t index, uint32_t timestamp, unsigned char prev_hash[32], unsigned char *data, size_t data_size) { if (!node || !data || !prev_hash) return; node->header.index = index; node->header.timestamp = timestamp; node->header.nonce = -1; memset(node->header.data, 0, sizeof(unsigned char) * 256); memcpy(node->header.prev_hash, prev_hash, HASH_BLOCK_SIZE); memcpy(node->header.data, data, sizeof(unsigned char) * ((data_size < 256) ? data_size : 256)); } struct myStruct { blk_t *node; unsigned char *hash_buf; size_t diff_q; hash_func *func; unsigned char *one_diff; }; struct MySwitch { BOOL finish; // uint64_t thread_nonce; } myswithch; ll global_nouce = (-1); void blockchain_node_hash(blk_t *node, unsigned char hash_buf[HASH_BLOCK_SIZE], hash_func func) { if (node) func((unsigned char *) node, sizeof(blkh_t), (unsigned char *) hash_buf); } BOOL blockchain_node_verify(blk_t *node, blk_t *prev_node, hash_func func) { unsigned char hash_buf[HASH_BLOCK_SIZE]; if (!node || !prev_node) return False; blockchain_node_hash(node, hash_buf, func); if (memcmp(node->hash, hash_buf, sizeof(unsigned char) * HASH_BLOCK_SIZE)) return False; blockchain_node_hash(prev_node, hash_buf, func); if (memcmp(node->header.prev_hash, hash_buf, sizeof(unsigned char) * HASH_BLOCK_SIZE)) return False; return True; } void myStructInit(struct myStruct *my_struct, blk_t *node, unsigned char *hash_buf, size_t diff_q, hash_func func, unsigned char *one_diff) { my_struct->node = node; my_struct->hash_buf = hash_buf; my_struct->diff_q = diff_q; my_struct->func = func; my_struct->one_diff = one_diff; } /* The sequiental implementation of mining implemented for you. 
*/ void blockchain_node_mine(blk_t *node, unsigned char hash_buf[HASH_BLOCK_SIZE], size_t diff, hash_func func) { unsigned char one_diff[HASH_BLOCK_SIZE]; size_t diff_q, diff_m; /* struct myStruct my_struct={node,hash_buf,diff_q,func,one_diff}; */ struct myStruct my_struct; diff_q = diff / 8; diff_m = diff % 8; unsigned long long int temp = -1; unsigned long long int nonce_trick = temp / 20; memset(one_diff, 0xFF, sizeof(unsigned char) * HASH_BLOCK_SIZE); memset(one_diff, 0, sizeof(unsigned char) * diff_q); one_diff[diff_q] = ((uint8_t) 0xFF) >> diff_m; myswithch.finish = False; myStructInit(&my_struct, node, hash_buf, diff_q, func, one_diff); #pragma omp parallel num_threads(4) { unsigned char t_hash_buf[HASH_BLOCK_SIZE]; blk_t thread_node; blockchain_node_init(&thread_node, node->header.index, node->header.timestamp, node->header.prev_hash, node->header.data, sizeof(node->header.data)); #pragma omp critical { thread_node.header.nonce = global_nouce; global_nouce += nonce_trick; } blk_t *midvar1 = &thread_node; hash_func *midvar2 = func; unsigned char *midvar3 = one_diff; unsigned long midvar4 = sizeof(unsigned char) * diff_q; unsigned char *midvar5 = &t_hash_buf[diff_q]; unsigned char *midvar6 = &one_diff[diff_q]; unsigned long midvar7 = sizeof(unsigned char) * (HASH_BLOCK_SIZE - diff_q); while (myswithch.finish == False) { blockchain_node_hash(midvar1, t_hash_buf, midvar2); if ((!memcmp(t_hash_buf, midvar3, midvar4)) && memcmp(midvar5, midvar6, midvar7) <= 0) { if (myswithch.finish == False) { myswithch.finish = True; node->header.nonce = thread_node.header.nonce; memcpy(node->hash, t_hash_buf, sizeof(unsigned char) * HASH_BLOCK_SIZE); memcpy(hash_buf, t_hash_buf, sizeof(unsigned char) * HASH_BLOCK_SIZE); } break; } thread_node.header.nonce += 1; } } }
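/*
 * Sketch (not part of the assignment interface): blockchain_node_mine()
 * accepts a hash when its first diff_q bytes are exactly zero and the
 * remaining bytes compare <= a partially masked threshold, i.e. when the
 * hash has at least `diff` leading zero bits.  The hypothetical helper
 * below isolates that test so it can be checked on its own; like the
 * mining loop, it assumes diff < 8 * HASH_BLOCK_SIZE.
 */
#include <stdio.h>
#include <string.h>

#ifndef HASH_BLOCK_SIZE
#define HASH_BLOCK_SIZE 32
#endif

static int meets_difficulty(const unsigned char hash[HASH_BLOCK_SIZE], size_t diff)
{
    unsigned char one_diff[HASH_BLOCK_SIZE];
    size_t diff_q = diff / 8; /* whole bytes that must be zero */
    size_t diff_m = diff % 8; /* extra leading zero bits in the next byte */

    memset(one_diff, 0xFF, sizeof(one_diff));
    memset(one_diff, 0x00, diff_q);
    one_diff[diff_q] = (unsigned char) (0xFF >> diff_m);

    return (memcmp(hash, one_diff, diff_q) == 0) &&
           (memcmp(hash + diff_q, one_diff + diff_q, HASH_BLOCK_SIZE - diff_q) <= 0);
}

int main(void)
{
    unsigned char hash[HASH_BLOCK_SIZE] = {0};

    hash[2] = 0x1A; /* 16 leading zero bits, then 0001 1010 */
    printf("diff 16: %d\n", meets_difficulty(hash, 16)); /* 1: two zero bytes suffice */
    printf("diff 19: %d\n", meets_difficulty(hash, 19)); /* 1: 0x1A <= 0x1F */
    printf("diff 20: %d\n", meets_difficulty(hash, 20)); /* 0: 0x1A > 0x0F */
    return 0;
}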
gbdt.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_BOOSTING_GBDT_H_ #define LIGHTGBM_BOOSTING_GBDT_H_ #include <LightGBM/boosting.h> #include <LightGBM/objective_function.h> #include <LightGBM/prediction_early_stop.h> #include <string> #include <algorithm> #include <cstdio> #include <fstream> #include <map> #include <memory> #include <mutex> #include <unordered_map> #include <utility> #include <vector> #include <LightGBM/json11.hpp> #include "score_updater.hpp" using namespace json11; namespace LightGBM { /*! * \brief GBDT algorithm implementation. including Training, prediction, bagging. */ class GBDT : public GBDTBase { public: /*! * \brief Constructor */ GBDT(); /*! * \brief Destructor */ ~GBDT(); /*! * \brief Initialization logic * \param gbdt_config Config for boosting * \param train_data Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void Init(const Config* gbdt_config, const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Merge model from other boosting object. Will insert to the front of current boosting object * \param other */ void MergeFrom(const Boosting* other) override { auto other_gbdt = reinterpret_cast<const GBDT*>(other); // tmp move to other vector auto original_models = std::move(models_); models_ = std::vector<std::unique_ptr<Tree>>(); // push model from other first for (const auto& tree : other_gbdt->models_) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; // push model in current object for (const auto& tree : original_models) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; } void ShuffleModels(int start_iter, int end_iter) override { int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_; start_iter = std::max(0, start_iter); if (end_iter <= 0) { end_iter = total_iter; } end_iter = std::min(total_iter, end_iter); auto original_models = std::move(models_); std::vector<int> indices(total_iter); for (int i = 0; i < total_iter; ++i) { indices[i] = i; } Random tmp_rand(17); for (int i = start_iter; i < end_iter - 1; ++i) { int j = tmp_rand.NextShort(i + 1, end_iter); std::swap(indices[i], indices[j]); } models_ = std::vector<std::unique_ptr<Tree>>(); for (int i = 0; i < total_iter; ++i) { for (int j = 0; j < num_tree_per_iteration_; ++j) { int tree_idx = indices[i] * num_tree_per_iteration_ + j; auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get()))); models_.push_back(std::move(new_tree)); } } } /*! * \brief Reset the training data * \param train_data New Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Reset Boosting Config * \param gbdt_config Config for boosting */ void ResetConfig(const Config* gbdt_config) override; /*! 
* \brief Adding a validation dataset * \param valid_data Validation dataset * \param valid_metrics Metrics for validation dataset */ void AddValidDataset(const Dataset* valid_data, const std::vector<const Metric*>& valid_metrics) override; /*! * \brief Perform a full training procedure * \param snapshot_freq frequence of snapshot * \param model_output_path path of model file */ void Train(int snapshot_freq, const std::string& model_output_path) override; void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override; /*! * \brief Training logic * \param gradients nullptr for using default objective, otherwise use self-defined boosting * \param hessians nullptr for using default objective, otherwise use self-defined boosting * \return True if cannot train any more */ virtual bool TrainOneIter(const score_t* gradients, const score_t* hessians) override; /*! * \brief Rollback one iteration */ void RollbackOneIter() override; /*! * \brief Get current iteration */ int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; } /*! * \brief Can use early stopping for prediction or not * \return True if cannot use early stopping for prediction */ bool NeedAccuratePrediction() const override { if (objective_function_ == nullptr) { return true; } else { return objective_function_->NeedAccuratePrediction(); } } /*! * \brief Get evaluation result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return evaluation result */ std::vector<double> GetEvalAt(int data_idx) const override; /*! * \brief Get current training score * \param out_len length of returned score * \return training score */ virtual const double* GetTrainingScore(int64_t* out_len) override; /*! * \brief Get size of prediction at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return The size of prediction */ virtual int64_t GetNumPredictAt(int data_idx) const override { CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size())); data_size_t num_data = train_data_->num_data(); if (data_idx > 0) { num_data = valid_score_updater_[data_idx - 1]->num_data(); } return num_data * num_class_; } /*! * \brief Get prediction result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \param result used to store prediction result, should allocate memory before call this function * \param out_len length of returned score */ void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override; /*! 
* \brief Get number of prediction for one data * \param num_iteration number of used iterations * \param is_pred_leaf True if predicting leaf index * \param is_pred_contrib True if predicting feature contribution * \return number of prediction */ inline int NumPredictOneRow(int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override { int num_preb_in_one_row = num_class_; if (is_pred_leaf) { int max_iteration = GetCurrentIteration(); if (num_iteration > 0) { num_preb_in_one_row *= static_cast<int>(std::min(max_iteration, num_iteration)); } else { num_preb_in_one_row *= max_iteration; } } else if (is_pred_contrib) { num_preb_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline } return num_preb_in_one_row; } void PredictRaw(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictRawByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void Predict(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void PredictLeafIndex(const double* features, double* output) const override; void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override; void PredictContrib(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; /*! * \brief Dump model to json format string * \param start_iteration The model will be saved start from * \param num_iteration Number of iterations that want to dump, -1 means dump all * \return Json format string of model */ std::string DumpModel(int start_iteration, int num_iteration) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \return if-else format codes of model */ std::string ModelToIfElse(int num_iteration) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToIfElse(int num_iteration, const char* filename) const override; /*! * \brief Save model to file * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ virtual bool SaveModelToFile(int start_iteration, int num_iterations, const char* filename) const override; /*! * \brief Save model to string * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \return Non-empty string if succeeded */ virtual std::string SaveModelToString(int start_iteration, int num_iterations) const override; /*! * \brief Restore from a serialized buffer */ bool LoadModelFromString(const char* buffer, size_t len) override; /*! 
* \brief Calculate feature importances * \param num_iteration Number of model that want to use for feature importance, -1 means use all * \param importance_type: 0 for split, 1 for gain * \return vector of feature_importance */ std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override; /*! * \brief Get max feature index of this model * \return Max feature index of this model */ inline int MaxFeatureIdx() const override { return max_feature_idx_; } /*! * \brief Get feature names of this model * \return Feature names of this model */ inline std::vector<std::string> FeatureNames() const override { return feature_names_; } /*! * \brief Get index of label column * \return index of label column */ inline int LabelIdx() const override { return label_idx_; } /*! * \brief Get number of weak sub-models * \return Number of weak sub-models */ inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); } /*! * \brief Get number of tree per iteration * \return number of tree per iteration */ inline int NumModelPerIteration() const override { return num_tree_per_iteration_; } /*! * \brief Get number of classes * \return Number of classes */ inline int NumberOfClasses() const override { return num_class_; } inline void InitPredict(int num_iteration, bool is_pred_contrib) override { num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; if (num_iteration > 0) { num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_); } if (is_pred_contrib) { #pragma omp parallel for schedule(static) for (int i = 0; i < static_cast<int>(models_.size()); ++i) { models_[i]->RecomputeMaxDepth(); } } } inline double GetLeafValue(int tree_idx, int leaf_idx) const override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); return models_[tree_idx]->LeafOutput(leaf_idx); } inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); models_[tree_idx]->SetLeafOutput(leaf_idx, val); } /*! * \brief Get Type name of this boosting object */ virtual const char* SubModelName() const override { return "tree"; } protected: /*! * \brief Print eval result and check early stopping */ virtual bool EvalAndCheckEarlyStopping(); /*! * \brief reset config for bagging */ void ResetBaggingConfig(const Config* config, bool is_change_dataset); /*! * \brief Implement bagging logic * \param iter Current interation */ virtual void Bagging(int iter); /*! * \brief Helper function for bagging, used for multi-threading optimization * \param start start indice of bagging * \param cnt count * \param buffer output buffer * \return count of left size */ data_size_t BaggingHelper(Random& cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer); /*! * \brief calculate the object function */ virtual void Boosting(); /*! * \brief updating score after tree was trained * \param tree Trained tree of this iteration * \param cur_tree_id Current tree for multiclass training */ virtual void UpdateScore(const Tree* tree, const int cur_tree_id); /*! * \brief eval results for one metric */ virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const; /*! 
* \brief Print metric result of current iteration * \param iter Current interation * \return best_msg if met early_stopping */ std::string OutputMetric(int iter); double BoostFromAverage(int class_id, bool update_scorer); /*! \brief current iteration */ int iter_; /*! \brief Pointer to training data */ const Dataset* train_data_; /*! \brief Config of gbdt */ std::unique_ptr<Config> config_; /*! \brief Tree learner, will use this class to learn trees */ std::unique_ptr<TreeLearner> tree_learner_; /*! \brief Objective function */ const ObjectiveFunction* objective_function_; /*! \brief Store and update training data's score */ std::unique_ptr<ScoreUpdater> train_score_updater_; /*! \brief Metrics for training data */ std::vector<const Metric*> training_metrics_; /*! \brief Store and update validation data's scores */ std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_; /*! \brief Metric for validation data */ std::vector<std::vector<const Metric*>> valid_metrics_; /*! \brief Number of rounds for early stopping */ int early_stopping_round_; /*! \brief Best iteration(s) for early stopping */ std::vector<std::vector<int>> best_iter_; /*! \brief Best score(s) for early stopping */ std::vector<std::vector<double>> best_score_; /*! \brief output message of best iteration */ std::vector<std::vector<std::string>> best_msg_; /*! \brief Trained models(trees) */ std::vector<std::unique_ptr<Tree>> models_; /*! \brief Max feature index of training data*/ int max_feature_idx_; /*! \brief First order derivative of training data */ std::vector<score_t> gradients_; /*! \brief Secend order derivative of training data */ std::vector<score_t> hessians_; /*! \brief Store the indices of in-bag data */ std::vector<data_size_t> bag_data_indices_; /*! \brief Number of in-bag data */ data_size_t bag_data_cnt_; /*! \brief Store the indices of in-bag data */ std::vector<data_size_t> tmp_indices_; /*! \brief Number of training data */ data_size_t num_data_; /*! \brief Number of trees per iterations */ int num_tree_per_iteration_; /*! \brief Number of class */ int num_class_; /*! \brief Index of label column */ data_size_t label_idx_; /*! \brief number of used model */ int num_iteration_for_pred_; /*! \brief Shrinkage rate for one iteration */ double shrinkage_rate_; /*! \brief Number of loaded initial models */ int num_init_iteration_; /*! \brief Feature names */ std::vector<std::string> feature_names_; std::vector<std::string> feature_infos_; /*! \brief number of threads */ int num_threads_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> offsets_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> left_cnts_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> right_cnts_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> left_write_pos_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> right_write_pos_buf_; std::unique_ptr<Dataset> tmp_subset_; bool is_use_subset_; std::vector<bool> class_need_train_; bool is_constant_hessian_; std::unique_ptr<ObjectiveFunction> loaded_objective_; bool average_output_; bool need_re_bagging_; std::string loaded_parameter_; Json forced_splits_json_; }; } // namespace LightGBM #endif // LightGBM_BOOSTING_GBDT_H_
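A note on the header above: GBDT::ShuffleModels() works at the granularity of boosting iterations, not individual trees. It permutes iteration indices with a Fisher-Yates-style pass that picks a partner index via NextShort(i + 1, end_iter), then copies each iteration's num_tree_per_iteration_ trees as a unit, since the tree for class j of iteration i sits at slot i * num_tree_per_iteration_ + j. The following is a minimal standalone C++ sketch of that index logic only; std::mt19937 and plain ints stand in for LightGBM's Random and Tree types, and the fixed start_iter/end_iter values are assumptions made for the demo, not part of the library.

// Standalone sketch (illustration only) of the iteration-level shuffle used by
// GBDT::ShuffleModels(). std::mt19937 replaces LightGBM's Random, each "tree"
// is just an int, and NextShort(i + 1, end_iter) is assumed to mean a uniform
// draw from [i + 1, end_iter).
#include <iostream>
#include <random>
#include <utility>
#include <vector>

int main() {
  const int num_tree_per_iteration = 2;  // e.g. two classes -> two trees per boosting round
  const int total_iter = 5;
  std::vector<int> models(total_iter * num_tree_per_iteration);
  for (int i = 0; i < static_cast<int>(models.size()); ++i) models[i] = i;

  // Shuffle iteration indices at positions [start_iter, end_iter - 1),
  // always swapping with a strictly later index, as in the header above.
  const int start_iter = 1, end_iter = total_iter;  // assumed values for the demo
  std::vector<int> indices(total_iter);
  for (int i = 0; i < total_iter; ++i) indices[i] = i;
  std::mt19937 rng(17);
  for (int i = start_iter; i < end_iter - 1; ++i) {
    std::uniform_int_distribution<int> pick(i + 1, end_iter - 1);  // inclusive bounds
    std::swap(indices[i], indices[pick(rng)]);
  }

  // Rebuild the flat model list: all trees of one iteration move together, and
  // the tree for class j of iteration i sits at i * num_tree_per_iteration + j.
  std::vector<int> shuffled;
  for (int i = 0; i < total_iter; ++i)
    for (int j = 0; j < num_tree_per_iteration; ++j)
      shuffled.push_back(models[indices[i] * num_tree_per_iteration + j]);

  for (int m : shuffled) std::cout << m << ' ';
  std::cout << '\n';
  return 0;
}

Because a model's score is a sum over its trees, reordering whole iterations does not change full-model predictions; it only changes which trees fall inside a truncated num_iteration window.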
mandelbrot.c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <complex.h>
#include <omp.h>

#include "mlog/mlog.h"

mlog_data_t g_md;

int mandelbrot(double complex c, int depth) {
  int count = 0;
  double complex z = 0;
  for (int i = 0; i < depth; i++) {
    if (cabs(z) >= 2.0) {
      break;
    }
    z = z * z + c;
    count++;
  }
  return count;
}

void mandelbrot_parallel(int* p, int nx, int ny, int depth, double scale) {
#pragma omp parallel for
  for (int y = 0; y < ny; y++) {
    for (int x = 0; x < nx; x++) {
      int rank = omp_get_thread_num();
      void *begin_ptr = mlog_begin_tl(&g_md, rank);
      double sx = (double)(x - nx / 2) / nx * scale;
      double sy = (double)(y - ny / 2) / ny * scale;
      p[y * nx + x] = mandelbrot(sx + sy * I, depth);
      if (p[y * nx + x] == depth) {
        mlog_end_tl(&g_md, rank, begin_ptr, "converged");
      } else {
        mlog_end_tl(&g_md, rank, begin_ptr, "diverged");
      }
    }
  }
}

void output_mandelbrot(int* p, int nx, int ny) {
  FILE *fp = fopen("mandelbrot.txt", "w");
  for (int y = 0; y < ny; y++) {
    for (int x = 0; x < nx; x++) {
      fprintf(fp, "%d %d %d\n", x, y, p[y * nx + x]);
    }
  }
  fclose(fp);
}

void output_mlog() {
  FILE *fp = fopen("mlog.txt", "w");
  mlog_flush_all(&g_md, fp);
  fclose(fp);
}

int main(int argc, char* argv[]) {
  int nx = 2000;
  int ny = 2000;
  int depth = 100;
  double scale = 2.0;

  int n_threads;
#pragma omp parallel
  {
#pragma omp single
    n_threads = omp_get_num_threads();
  }
  mlog_init(&g_md, n_threads, (2 << 20));

  int opt;
  while ((opt = getopt(argc, argv, "x:y:d:s:h")) != EOF) {
    switch (opt) {
      case 'x':
        nx = atoi(optarg);
        break;
      case 'y':
        ny = atoi(optarg);
        break;
      case 'd':
        depth = atoi(optarg);
        break;
      case 's':
        scale = atof(optarg);
        break;
      case 'h':
      default:
        printf("Usage: ./mandelbrot -x <nx> -y <ny> -d <depth> -s <scale>\n");
        exit(1);
    }
  }

  int* p = (int *)malloc(sizeof(int) * nx * ny);

  mandelbrot_parallel(p, nx, ny, depth, scale);

  output_mandelbrot(p, nx, ny);
  output_mlog();
  return 0;
}
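In mandelbrot_parallel() above, each pixel (x, y) is mapped to a point in the complex plane with sx = (x - nx / 2) / nx * scale and the analogous sy, and mandelbrot() counts escape-time iterations until |z| >= 2 or depth is reached; the mlog_begin_tl()/mlog_end_tl() pair brackets each pixel so the per-thread trace records whether the pixel reached full depth ("converged") or escaped early ("diverged"). Below is a minimal, dependency-free C++ sketch of the same mapping and escape-time loop, with no OpenMP and no mlog, that prints a coarse ASCII rendering; the grid size and glyph thresholds are arbitrary choices for illustration, not taken from the program above.

// Minimal, dependency-free sketch of the same pixel-to-plane mapping and
// escape-time loop as mandelbrot.c above (no OpenMP, no mlog); prints a
// coarse ASCII rendering to stdout.
#include <complex>
#include <cstdio>

static int mandelbrot(std::complex<double> c, int depth) {
  std::complex<double> z(0.0, 0.0);
  int count = 0;
  for (int i = 0; i < depth; i++) {
    if (std::abs(z) >= 2.0) break;  // escaped: the point diverges
    z = z * z + c;
    count++;
  }
  return count;
}

int main() {
  const int nx = 72, ny = 36, depth = 100;
  const double scale = 2.0;  // same default window as the program above
  for (int y = 0; y < ny; y++) {
    for (int x = 0; x < nx; x++) {
      // Centre the grid and scale it to a window of width `scale` around the
      // origin, exactly as in mandelbrot_parallel().
      double sx = (double)(x - nx / 2) / nx * scale;
      double sy = (double)(y - ny / 2) / ny * scale;
      int n = mandelbrot(std::complex<double>(sx, sy), depth);
      putchar(n == depth ? '#' : (n > depth / 4 ? '+' : ' '));
    }
    putchar('\n');
  }
  return 0;
}

Compiled with any C++ compiler (for example, g++ -O2 sketch.cpp), it shows roughly the [-1, 1] x [-1, 1] window that the default scale of 2.0 selects in the full program.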
attribute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright @ 2002 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/identify.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/magick.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/segment.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. 
% % The format of the GetImageBoundingBox method is: % % RectangleInfo GetImageBoundingBox(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o bounds: Method GetImageBoundingBox returns the bounding box of an % image canvas. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ typedef struct _EdgeInfo { double left, right, top, bottom; } EdgeInfo; static double GetEdgeBackgroundCensus(const Image *image, const CacheView *image_view,const GravityType gravity,const size_t width, const size_t height,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double census; Image *edge_image; PixelInfo background, pixel; RectangleInfo edge_geometry; const Quantum *p; ssize_t y; /* Determine the percent of image background for this edge. */ switch (gravity) { case NorthWestGravity: case NorthGravity: default: { p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); break; } case NorthEastGravity: case EastGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); break; } case SouthEastGravity: case SouthGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1, (ssize_t) image->rows-1,1,1,exception); break; } case SouthWestGravity: case WestGravity: { p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); break; } } if (p == (const Quantum *) NULL) return(0.0); GetPixelInfoPixel(image,p,&background); artifact=GetImageArtifact(image,"background"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&background,exception); artifact=GetImageArtifact(image,"trim:background-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&background,exception); edge_geometry.width=width; edge_geometry.height=height; edge_geometry.x=x_offset; edge_geometry.y=y_offset; GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry); edge_image=CropImage(image,&edge_geometry,exception); if (edge_image == (Image *) NULL) return(0.0); census=0.0; edge_view=AcquireVirtualCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { ssize_t x; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) edge_image->columns; x++) { GetPixelInfoPixel(edge_image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse) census++; p+=GetPixelChannels(edge_image); } } census/=((double) edge_image->columns*edge_image->rows); edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); return(census); } static inline double GetMinEdgeBackgroundCensus(const EdgeInfo *edge) { double census; census=MagickMin(MagickMin(MagickMin(edge->left,edge->right),edge->top), edge->bottom); return(census); } static RectangleInfo GetEdgeBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double background_census, percent_background; EdgeInfo edge, vertex; Image *edge_image; RectangleInfo bounds; /* Get the image bounding box. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); SetGeometry(image,&bounds); edge_image=CloneImage(image,0,0,MagickTrue,exception); if (edge_image == (Image *) NULL) return(bounds); (void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page); (void) memset(&vertex,0,sizeof(vertex)); edge_view=AcquireVirtualCacheView(edge_image,exception); edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,WestGravity, 1,0,0,0,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,EastGravity, 1,0,0,0,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,NorthGravity, 0,1,0,0,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,SouthGravity, 0,1,0,0,exception); percent_background=1.0; artifact=GetImageArtifact(edge_image,"trim:percent-background"); if (artifact != (const char *) NULL) percent_background=StringToDouble(artifact,(char **) NULL)/100.0; percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon), 1.0); background_census=GetMinEdgeBackgroundCensus(&edge); for ( ; background_census < percent_background; background_census=GetMinEdgeBackgroundCensus(&edge)) { if ((bounds.width == 0) || (bounds.height == 0)) break; if (fabs(edge.left-background_census) < MagickEpsilon) { /* Trim left edge. */ vertex.left++; bounds.width--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.right-background_census) < MagickEpsilon) { /* Trim right edge. */ vertex.right++; bounds.width--; edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.top-background_census) < MagickEpsilon) { /* Trim top edge. */ vertex.top++; bounds.height--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); continue; } if (fabs(edge.bottom-background_census) < MagickEpsilon) { /* Trim bottom edge. 
*/ vertex.bottom++; bounds.height--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } } edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); bounds.x=(ssize_t) vertex.left; bounds.y=(ssize_t) vertex.top; if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return(bounds); } MagickExport RectangleInfo GetImageBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *image_view; const char *artifact; MagickBooleanType status; PixelInfo target[4], zero; RectangleInfo bounds; const Quantum *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); artifact=GetImageArtifact(image,"trim:percent-background"); if (artifact != (const char *) NULL) return(GetEdgeBoundingBox(image,exception)); artifact=GetImageArtifact(image, "trim:edges"); if (artifact == (const char *) NULL) { bounds.width=image->columns == 1 ? 1 : 0; bounds.height=image->rows == 1 ? 1 : 0; bounds.x=(ssize_t) image->columns; bounds.y=(ssize_t) image->rows; } else { char *edges, *q, *r; bounds.width=(size_t) image->columns; bounds.height=(size_t) image->rows; bounds.x=0; bounds.y=0; edges=AcquireString(artifact); r=edges; while ((q=StringToken(",",&r)) != (char *) NULL) { if (LocaleCompare(q,"north") == 0) bounds.y=(ssize_t) image->rows; if (LocaleCompare(q,"east") == 0) bounds.width=0; if (LocaleCompare(q,"south") == 0) bounds.height=0; if (LocaleCompare(q,"west") == 0) bounds.x=(ssize_t) image->columns; } edges=DestroyString(edges); } GetPixelInfo(image,&target[0]); image_view=AcquireVirtualCacheView(image,exception); p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); if (p == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); return(bounds); } GetPixelInfoPixel(image,p,&target[0]); GetPixelInfo(image,&target[1]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[1]); GetPixelInfo(image,&target[2]); p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[2]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,(ssize_t) image->rows-1,1,1,exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[3]); status=MagickTrue; GetPixelInfo(image,&zero); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; RectangleInfo bounding_box; const Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif bounding_box=bounds; q=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (q == (const Quantum *) NULL) { 
status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if ((x < bounding_box.x) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.x=x; if ((x > (ssize_t) bounding_box.width) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse)) bounding_box.width=(size_t) x; if ((y < bounding_box.y) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.y=y; if ((y > (ssize_t) bounding_box.height) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse)) bounding_box.height=(size_t) y; if ((x < (ssize_t) bounding_box.width) && (y > (ssize_t) bounding_box.height) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[3]) == MagickFalse)) { bounding_box.width=(size_t) x; bounding_box.height=(size_t) y; } q+=GetPixelChannels(image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif { if (bounding_box.x < bounds.x) bounds.x=bounding_box.x; if (bounding_box.y < bounds.y) bounds.y=bounding_box.y; if (bounding_box.width > bounds.width) bounds.width=bounding_box.width; if (bounding_box.height > bounds.height) bounds.height=bounding_box.height; } } image_view=DestroyCacheView(image_view); if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); else { bounds.width-=(bounds.x-1); bounds.height-=(bounds.y-1); } return(bounds); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C o n v e x H u l l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageConvexHull() returns the convex hull points of an image canvas. % % The format of the GetImageConvexHull method is: % % PointInfo *GetImageConvexHull(const Image *image, % size_t number_vertices,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_vertices: the number of vertices in the convex hull. % % o exception: return any errors or warnings in this structure. % */ static double LexicographicalOrder(PointInfo *a,PointInfo *b,PointInfo *c) { /* Order by x-coordinate, and in case of a tie, by y-coordinate. */ return((b->x-a->x)*(c->y-a->y)-(b->y-a->y)*(c->x-a->x)); } static PixelInfo GetEdgeBackgroundColor(const Image *image, const CacheView *image_view,ExceptionInfo *exception) { const char *artifact; double census[4], edge_census; PixelInfo background[4], edge_background; ssize_t i; /* Most dominant color of edges/corners is the background color of the image. 
*/ memset(&edge_background,0,sizeof(edge_background)); artifact=GetImageArtifact(image,"convex-hull:background-color"); if (artifact == (const char *) NULL) artifact=GetImageArtifact(image,"background"); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i < 4; i++) { CacheView *edge_view; GravityType gravity; Image *edge_image; PixelInfo pixel; RectangleInfo edge_geometry; const Quantum *p; ssize_t y; census[i]=0.0; (void) memset(&edge_geometry,0,sizeof(edge_geometry)); switch (i) { case 0: default: { p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); gravity=WestGravity; edge_geometry.width=1; edge_geometry.height=0; break; } case 1: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); gravity=EastGravity; edge_geometry.width=1; edge_geometry.height=0; break; } case 2: { p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); gravity=NorthGravity; edge_geometry.width=0; edge_geometry.height=1; break; } case 3: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1, (ssize_t) image->rows-1,1,1,exception); gravity=SouthGravity; edge_geometry.width=0; edge_geometry.height=1; break; } } GetPixelInfoPixel(image,p,background+i); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,background+i, exception); GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry); edge_image=CropImage(image,&edge_geometry,exception); if (edge_image == (Image *) NULL) continue; edge_view=AcquireVirtualCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { ssize_t x; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1, exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) edge_image->columns; x++) { GetPixelInfoPixel(edge_image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,background+i) == MagickFalse) census[i]++; p+=GetPixelChannels(edge_image); } } edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); } edge_census=(-1.0); for (i=0; i < 4; i++) if (census[i] > edge_census) { edge_background=background[i]; edge_census=census[i]; } return(edge_background); } void TraceConvexHull(PointInfo *vertices,size_t number_vertices, PointInfo ***monotone_chain,size_t *chain_length) { PointInfo **chain; ssize_t i; size_t demark, n; /* Construct the upper and lower hulls: rightmost to leftmost counterclockwise. */ chain=(*monotone_chain); n=0; for (i=0; i < (ssize_t) number_vertices; i++) { while ((n >= 2) && (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0)) n--; chain[n++]=(&vertices[i]); } demark=n+1; for (i=(ssize_t) number_vertices-2; i >= 0; i--) { while ((n >= demark) && (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0)) n--; chain[n++]=(&vertices[i]); } *chain_length=n; } MagickExport PointInfo *GetImageConvexHull(const Image *image, size_t *number_vertices,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; MemoryInfo *monotone_info, *vertices_info; PixelInfo background; PointInfo *convex_hull, **monotone_chain, *vertices; size_t n; ssize_t y; /* Identify convex hull vertices of image foreground object(s). 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *number_vertices=0; vertices_info=AcquireVirtualMemory(image->columns,image->rows* sizeof(*vertices)); monotone_info=AcquireVirtualMemory(2*image->columns,2* image->rows*sizeof(*monotone_chain)); if ((vertices_info == (MemoryInfo *) NULL) || (monotone_info == (MemoryInfo *) NULL)) { if (monotone_info != (MemoryInfo *) NULL) monotone_info=(MemoryInfo *) RelinquishVirtualMemory(monotone_info); if (vertices_info != (MemoryInfo *) NULL) vertices_info=RelinquishVirtualMemory(vertices_info); return((PointInfo *) NULL); } vertices=(PointInfo *) GetVirtualMemoryBlob(vertices_info); monotone_chain=(PointInfo **) GetVirtualMemoryBlob(monotone_info); image_view=AcquireVirtualCacheView(image,exception); background=GetEdgeBackgroundColor(image,image_view,exception); status=MagickTrue; n=0; for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { PixelInfo pixel; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse) { vertices[n].x=(double) x; vertices[n].y=(double) y; n++; } p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Return the convex hull of the image foreground object(s). */ TraceConvexHull(vertices,n,&monotone_chain,number_vertices); convex_hull=(PointInfo *) AcquireQuantumMemory(*number_vertices, sizeof(*convex_hull)); if (convex_hull != (PointInfo *) NULL) for (n=0; n < *number_vertices; n++) convex_hull[n]=(*monotone_chain[n]); monotone_info=RelinquishVirtualMemory(monotone_info); vertices_info=RelinquishVirtualMemory(vertices_info); return(convex_hull); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDepth() returns the depth of a particular image channel. % % The format of the GetImageDepth method is: % % size_t GetImageDepth(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t i; size_t *current_depth, depth, number_threads; ssize_t y; /* Compute image depth. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); number_threads=(size_t) GetMagickResourceLimit(ThreadResource); current_depth=(size_t *) AcquireQuantumMemory(number_threads, sizeof(*current_depth)); if (current_depth == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); status=MagickTrue; for (i=0; i < (ssize_t) number_threads; i++) current_depth[i]=1; if ((image->storage_class == PseudoClass) && (image->alpha_trait == UndefinedPixelTrait)) { for (i=0; i < (ssize_t) image->colors; i++) { const int id = GetOpenMPThreadId(); while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickBooleanType atDepth; QuantumAny range; atDepth=MagickTrue; range=GetQuantumRange(current_depth[id]); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && (GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && (GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse)) break; current_depth[id]++; } } depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } image_view=AcquireVirtualCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) DisableMSCWarning(4127) if ((1UL*QuantumRange) <= MaxMap) RestoreMSCWarning { size_t *depth_map; /* Scale pixels to desired (optimized with depth map). 
*/ depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) { for (depth=1; depth < (size_t) MAGICKCORE_QUANTUM_DEPTH; depth++) { Quantum pixel; QuantumAny range; range=GetQuantumRange(depth); pixel=(Quantum) i; if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range)) break; } depth_map[i]=depth; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (depth_map[ScaleQuantumToMap(p[j])] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(p[j])]; } p+=GetPixelChannels(image); } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; depth_map=(size_t *) RelinquishMagickMemory(depth_map); current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } #endif /* Compute pixel depth. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,j); traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { QuantumAny range; range=GetQuantumRange(current_depth[id]); if (p[j] == ScaleAnyToQuantum(ScaleQuantumToAny(p[j],range),range)) break; current_depth[id]++; } } p+=GetPixelChannels(image); } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e M i n i m u m B o u n d i n g B o x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageMinimumBoundingBox() returns the points that form the minimum % bounding box around the image foreground objects with the "Rotating % Calipers" algorithm. 
The method also returns these properties: % minimum-bounding-box:area, minimum-bounding-box:width, % minimum-bounding-box:height, and minimum-bounding-box:angle. % % The format of the GetImageMinimumBoundingBox method is: % % PointInfo *GetImageMinimumBoundingBox(Image *image, % size_t number_vertices,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_vertices: the number of vertices in the bounding box. % % o exception: return any errors or warnings in this structure. % */ typedef struct _CaliperInfo { double area, width, height, projection; ssize_t p, q, v; } CaliperInfo; static inline double getAngle(PointInfo *p,PointInfo *q) { /* Get the angle between line (p,q) and horizontal axis, in degrees. */ return(RadiansToDegrees(atan2(q->y-p->y,q->x-p->x))); } static inline double getDistance(PointInfo *p,PointInfo *q) { double distance; distance=hypot(p->x-q->x,p->y-q->y); return(distance*distance); } static inline double getProjection(PointInfo *p,PointInfo *q,PointInfo *v) { double distance; /* Projection of vector (x,y) - p into a line passing through p and q. */ distance=getDistance(p,q); if (distance < MagickEpsilon) return(INFINITY); return((q->x-p->x)*(v->x-p->x)+(v->y-p->y)*(q->y-p->y))/sqrt(distance); } static inline double getFeretDiameter(PointInfo *p,PointInfo *q,PointInfo *v) { double distance; /* Distance from a point (x,y) to a line passing through p and q. */ distance=getDistance(p,q); if (distance < MagickEpsilon) return(INFINITY); return((q->x-p->x)*(v->y-p->y)-(v->x-p->x)*(q->y-p->y))/sqrt(distance); } MagickExport PointInfo *GetImageMinimumBoundingBox(Image *image, size_t *number_vertices,ExceptionInfo *exception) { CaliperInfo caliper_info; const char *artifact; double angle, diameter, distance; PointInfo *bounding_box, *vertices; ssize_t i; size_t number_hull_vertices; /* Generate the minimum bounding box with the "Rotating Calipers" algorithm. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *number_vertices=0; vertices=GetImageConvexHull(image,&number_hull_vertices,exception); if (vertices == (PointInfo *) NULL) return((PointInfo *) NULL); *number_vertices=4; bounding_box=(PointInfo *) AcquireQuantumMemory(*number_vertices, sizeof(*bounding_box)); if (bounding_box == (PointInfo *) NULL) { vertices=(PointInfo *) RelinquishMagickMemory(vertices); return((PointInfo *) NULL); } caliper_info.area=2.0*image->columns*image->rows; caliper_info.width=(double) image->columns+image->rows; caliper_info.height=0.0; caliper_info.projection=0.0; caliper_info.p=(-1); caliper_info.q=(-1); caliper_info.v=(-1); for (i=0; i < (ssize_t) number_hull_vertices; i++) { double area = 0.0, max_projection = 0.0, min_diameter = -1.0, min_projection = 0.0; ssize_t j, k; ssize_t p = -1, q = -1, v = -1; for (j=0; j < (ssize_t) number_hull_vertices; j++) { diameter=fabs(getFeretDiameter(&vertices[i], &vertices[(i+1) % number_hull_vertices],&vertices[j])); if (min_diameter < diameter) { min_diameter=diameter; p=i; q=(i+1) % number_hull_vertices; v=j; } } for (k=0; k < (ssize_t) number_hull_vertices; k++) { double projection; /* Rotating calipers. 
*/ projection=getProjection(&vertices[p],&vertices[q],&vertices[k]); min_projection=MagickMin(min_projection,projection); max_projection=MagickMax(max_projection,projection); } area=min_diameter*(max_projection-min_projection); if (caliper_info.area > area) { caliper_info.area=area; caliper_info.width=min_diameter; caliper_info.height=max_projection-min_projection; caliper_info.projection=max_projection; caliper_info.p=p; caliper_info.q=q; caliper_info.v=v; } } /* Initialize minimum bounding box. */ diameter=getFeretDiameter(&vertices[caliper_info.p], &vertices[caliper_info.q],&vertices[caliper_info.v]); angle=atan2(vertices[caliper_info.q].y-vertices[caliper_info.p].y, vertices[caliper_info.q].x-vertices[caliper_info.p].x); bounding_box[0].x=vertices[caliper_info.p].x+cos(angle)* caliper_info.projection; bounding_box[0].y=vertices[caliper_info.p].y+sin(angle)* caliper_info.projection; bounding_box[1].x=floor(bounding_box[0].x+cos(angle+MagickPI/2.0)*diameter+ 0.5); bounding_box[1].y=floor(bounding_box[0].y+sin(angle+MagickPI/2.0)*diameter+ 0.5); bounding_box[2].x=floor(bounding_box[1].x+cos(angle)*(-caliper_info.height)+ 0.5); bounding_box[2].y=floor(bounding_box[1].y+sin(angle)*(-caliper_info.height)+ 0.5); bounding_box[3].x=floor(bounding_box[2].x+cos(angle+MagickPI/2.0)*(-diameter)+ 0.5); bounding_box[3].y=floor(bounding_box[2].y+sin(angle+MagickPI/2.0)*(-diameter)+ 0.5); /* Export minimum bounding box properties. */ (void) FormatImageProperty(image,"minimum-bounding-box:area","%.*g", GetMagickPrecision(),caliper_info.area); (void) FormatImageProperty(image,"minimum-bounding-box:width","%.*g", GetMagickPrecision(),caliper_info.width); (void) FormatImageProperty(image,"minimum-bounding-box:height","%.*g", GetMagickPrecision(),caliper_info.height); (void) FormatImageProperty(image,"minimum-bounding-box:_p","%.*g,%.*g", GetMagickPrecision(),vertices[caliper_info.p].x, GetMagickPrecision(),vertices[caliper_info.p].y); (void) FormatImageProperty(image,"minimum-bounding-box:_q","%.*g,%.*g", GetMagickPrecision(),vertices[caliper_info.q].x, GetMagickPrecision(),vertices[caliper_info.q].y); (void) FormatImageProperty(image,"minimum-bounding-box:_v","%.*g,%.*g", GetMagickPrecision(),vertices[caliper_info.v].x, GetMagickPrecision(),vertices[caliper_info.v].y); /* Find smallest angle to origin. */ distance=hypot(bounding_box[0].x,bounding_box[0].y); angle=getAngle(&bounding_box[0],&bounding_box[1]); for (i=1; i < 4; i++) { double d = hypot(bounding_box[i].x,bounding_box[i].y); if (d < distance) { distance=d; angle=getAngle(&bounding_box[i],&bounding_box[(i+1) % 4]); } } artifact=GetImageArtifact(image,"minimum-bounding-box:orientation"); if (artifact != (const char *) NULL) { double length, q_length, p_length; PointInfo delta, point; /* Find smallest perpendicular distance from edge to origin. 
*/ point=bounding_box[0]; for (i=1; i < 4; i++) { if (bounding_box[i].x < point.x) point.x=bounding_box[i].x; if (bounding_box[i].y < point.y) point.y=bounding_box[i].y; } for (i=0; i < 4; i++) { bounding_box[i].x-=point.x; bounding_box[i].y-=point.y; } for (i=0; i < 4; i++) { double d, intercept, slope; delta.x=bounding_box[(i+1) % 4].x-bounding_box[i].x; delta.y=bounding_box[(i+1) % 4].y-bounding_box[i].y; slope=delta.y*PerceptibleReciprocal(delta.x); intercept=bounding_box[(i+1) % 4].y-slope*bounding_box[i].x; d=fabs((slope*bounding_box[i].x-bounding_box[i].y+intercept)* PerceptibleReciprocal(sqrt(slope*slope+1.0))); if ((i == 0) || (d < distance)) { distance=d; point=delta; } } angle=RadiansToDegrees(atan(point.y*PerceptibleReciprocal(point.x))); length=hypot(point.x,point.y); p_length=fabs((double) MagickMax(caliper_info.width,caliper_info.height)- length); q_length=fabs(length-(double) MagickMin(caliper_info.width, caliper_info.height)); if (LocaleCompare(artifact,"landscape") == 0) { if (p_length > q_length) angle+=(angle < 0.0) ? 90.0 : -90.0; } else if (LocaleCompare(artifact,"portrait") == 0) { if (p_length < q_length) angle+=(angle >= 0.0) ? 90.0 : -90.0; } } (void) FormatImageProperty(image,"minimum-bounding-box:angle","%.*g", GetMagickPrecision(),angle); (void) FormatImageProperty(image,"minimum-bounding-box:unrotate","%.*g", GetMagickPrecision(),-angle); vertices=(PointInfo *) RelinquishMagickMemory(vertices); return(bounding_box); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t u m D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantumDepth() returns the depth of the image rounded to a legal % quantum depth: 8, 16, or 32. % % The format of the GetImageQuantumDepth method is: % % size_t GetImageQuantumDepth(const Image *image, % const MagickBooleanType constrain) % % A description of each parameter follows: % % o image: the image. % % o constrain: A value other than MagickFalse, constrains the depth to % a maximum of MAGICKCORE_QUANTUM_DEPTH. % */ MagickExport size_t GetImageQuantumDepth(const Image *image, const MagickBooleanType constrain) { size_t depth; depth=image->depth; if (depth <= 8) depth=8; else if (depth <= 16) depth=16; else if (depth <= 32) depth=32; else if (depth <= 64) depth=64; if (constrain != MagickFalse) depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageType() returns the type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % The format of the GetImageType method is: % % ImageType GetImageType(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport ImageType GetImageType(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->colorspace == CMYKColorspace) { if (image->alpha_trait == UndefinedPixelTrait) return(ColorSeparationType); return(ColorSeparationAlphaType); } if (IsImageMonochrome(image) != MagickFalse) return(BilevelType); if (IsImageGray(image) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(GrayscaleAlphaType); return(GrayscaleType); } if (IsPaletteImage(image) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(PaletteAlphaType); return(PaletteType); } if (image->alpha_trait != UndefinedPixelTrait) return(TrueColorAlphaType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageGray() returns grayscale if all the pixels in the image have % the same red, green, and blue intensities, and bi-level is the intensity is % either 0 or QuantumRange. Otherwise undefined is returned. % % The format of the IdentifyImageGray method is: % % ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport ImageType IdentifyImageGray(const Image *image, ExceptionInfo *exception) { CacheView *image_view; ImageType type; const Quantum *p; ssize_t x; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (IsImageGray(image) != MagickFalse) return(image->type); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(UndefinedType); type=BilevelType; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsPixelGray(image,p) == MagickFalse) { type=UndefinedType; break; } if ((type == BilevelType) && (IsPixelMonochrome(image,p) == MagickFalse)) type=GrayscaleType; p+=GetPixelChannels(image); } if (type == UndefinedType) break; } image_view=DestroyCacheView(image_view); if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait)) type=GrayscaleAlphaType; return(type); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image % have the same red, green, and blue intensities and the intensity is either % 0 or QuantumRange. % % The format of the IdentifyImageMonochrome method is: % % MagickBooleanType IdentifyImageMonochrome(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType bilevel; ssize_t x; const Quantum *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->type == BilevelType) return(MagickTrue); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(MagickFalse); bilevel=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsPixelMonochrome(image,p) == MagickFalse) { bilevel=MagickFalse; break; } p+=GetPixelChannels(image); } if (bilevel == MagickFalse) break; } image_view=DestroyCacheView(image_view); return(bilevel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageType() returns the potential type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % To ensure the image type matches its potential, use SetImageType(): % % (void) SetImageType(image,IdentifyImageType(image,exception),exception); % % The format of the IdentifyImageType method is: % % ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport ImageType IdentifyImageType(const Image *image, ExceptionInfo *exception) { ImageType type; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == CMYKColorspace) { if (image->alpha_trait == UndefinedPixelTrait) return(ColorSeparationType); return(ColorSeparationAlphaType); } type=IdentifyImageGray(image,exception); if (IsGrayImageType(type)) return(type); if (IdentifyPaletteImage(image,exception) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(PaletteAlphaType); return(PaletteType); } if (image->alpha_trait != UndefinedPixelTrait) return(TrueColorAlphaType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageGray() returns MagickTrue if the type of the image is grayscale or % bi-level. % % The format of the IsImageGray method is: % % MagickBooleanType IsImageGray(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport MagickBooleanType IsImageGray(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsGrayImageType(image->type)) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageMonochrome() returns MagickTrue if type of the image is bi-level. % % The format of the IsImageMonochrome method is: % % MagickBooleanType IsImageMonochrome(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType IsImageMonochrome(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->type == BilevelType) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e O p a q u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageOpaque() returns MagickTrue if none of the pixels in the image have % an alpha value other than OpaqueAlpha (QuantumRange). % % Will return true immediatally is alpha channel is not available. % % The format of the IsImageOpaque method is: % % MagickBooleanType IsImageOpaque(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsImageOpaque(const Image *image, ExceptionInfo *exception) { CacheView *image_view; const Quantum *p; ssize_t x; ssize_t y; /* Determine if image is opaque. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->alpha_trait == UndefinedPixelTrait) return(MagickTrue); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelAlpha(image,p) != OpaqueAlpha) break; p+=GetPixelChannels(image); } if (x < (ssize_t) image->columns) break; } image_view=DestroyCacheView(image_view); return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageDepth() sets the depth of the image. % % The format of the SetImageDepth method is: % % MagickBooleanType SetImageDepth(Image *image,const size_t depth, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o depth: the image depth. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType SetImageDepth(Image *image, const size_t depth,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; QuantumAny range; ssize_t y; assert(image != (Image *) NULL); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (depth >= MAGICKCORE_QUANTUM_DEPTH) { image->depth=depth; return(MagickTrue); } range=GetQuantumRange(depth); if (image->storage_class == PseudoClass) { ssize_t i; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->colors,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].red),range),range); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].green),range),range); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].blue),range),range); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].alpha),range),range); } } status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) DisableMSCWarning(4127) if ((1UL*QuantumRange) <= MaxMap) RestoreMSCWarning { Quantum *depth_map; ssize_t i; /* Scale pixels to desired (optimized with depth map). */ depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (Quantum *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range), range); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,j); traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=depth_map[ScaleQuantumToMap(q[j])]; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; continue; } } image_view=DestroyCacheView(image_view); depth_map=(Quantum *) RelinquishMagickMemory(depth_map); if (status != MagickFalse) image->depth=depth; return(status); } #endif /* Scale pixels to desired depth. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType) q[i]),range),range); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; continue; } } image_view=DestroyCacheView(image_view); if (status != MagickFalse) image->depth=depth; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageType() sets the type of image. Choose from these types: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % OptimizeType % % The format of the SetImageType method is: % % MagickBooleanType SetImageType(Image *image,const ImageType type, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: Image type. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type, ExceptionInfo *exception) { const char *artifact; ImageInfo *image_info; MagickBooleanType status; QuantizeInfo *quantize_info; assert(image != (Image *) NULL); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); status=MagickTrue; image_info=AcquireImageInfo(); image_info->dither=image->dither; artifact=GetImageArtifact(image,"dither"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"dither",artifact); switch (type) { case BilevelType: { status=TransformImageColorspace(image,GRAYColorspace,exception); (void) NormalizeImage(image,exception); (void) BilevelImage(image,(double) QuantumRange/2.0,exception); quantize_info=AcquireQuantizeInfo(image_info); quantize_info->number_colors=2; quantize_info->colorspace=GRAYColorspace; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); image->alpha_trait=UndefinedPixelTrait; break; } case GrayscaleType: { status=TransformImageColorspace(image,GRAYColorspace,exception); image->alpha_trait=UndefinedPixelTrait; break; } case GrayscaleAlphaType: { status=TransformImageColorspace(image,GRAYColorspace,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case PaletteType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if ((image->storage_class == DirectClass) || (image->colors > 256)) { quantize_info=AcquireQuantizeInfo(image_info); quantize_info->number_colors=256; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); } 
image->alpha_trait=UndefinedPixelTrait; break; } case PaletteBilevelAlphaType: { ChannelType channel_mask; status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); channel_mask=SetImageChannelMask(image,AlphaChannel); (void) BilevelImage(image,(double) QuantumRange/2.0,exception); (void) SetImageChannelMask(image,channel_mask); quantize_info=AcquireQuantizeInfo(image_info); status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); break; } case PaletteAlphaType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); quantize_info=AcquireQuantizeInfo(image_info); quantize_info->colorspace=TransparentColorspace; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); break; } case TrueColorType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); image->alpha_trait=UndefinedPixelTrait; break; } case TrueColorAlphaType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case ColorSeparationType: { status=TransformImageColorspace(image,CMYKColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); image->alpha_trait=UndefinedPixelTrait; break; } case ColorSeparationAlphaType: { status=TransformImageColorspace(image,CMYKColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); if (image->alpha_trait == UndefinedPixelTrait) status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case OptimizeType: case UndefinedType: break; } image_info=DestroyImageInfo(image_info); if (status == MagickFalse) return(status); image->type=type; return(MagickTrue); }
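/*
  Usage sketch (not part of the library source above): the documentation for
  IdentifyImageType() recommends pairing it with SetImageType() so that the
  declared image->type matches the image's potential.  A minimal helper doing
  exactly that is shown below; the name NormalizeImageType is hypothetical,
  and "image"/"exception" are assumed to be supplied by the caller (e.g. from
  ReadImage()).
*/
static MagickBooleanType NormalizeImageType(Image *image,
  ExceptionInfo *exception)
{
  ImageType
    type;

  /* Determine the most compact type the pixel data supports ... */
  type=IdentifyImageType(image,exception);
  /* ... then convert the image so its declared type matches that potential. */
  return(SetImageType(image,type,exception));
}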
ast-dump-openmp-begin-declare-variant_10.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s --check-prefix=C // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s --check-prefix=CXX // expected-no-diagnostics #ifdef __cplusplus #define CONST constexpr #else #define CONST __attribute__((const)) #endif int also_before1(void) { return 1; } int also_before2(void) { return 2; } int also_before3(void) { return 3; } int also_before4(void) { return 4; } #pragma omp begin declare variant match(implementation = {vendor(llvm)}) CONST int also_before1(void) { return 0; } static int also_before2(void) { return 0; } __attribute__((nothrow)) int also_before3(void) { return 0; } static CONST __attribute__((nothrow, always_inline)) __inline__ int also_before4(void) { return 0; } #pragma omp end declare variant int main(void) { // Should return 0. return also_before1() + also_before2() + also_before3() + also_before4(); } // Make sure: // - we see the specialization in the AST // - we pick the right callees // C: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:13:1> line:11:5 used also_before1 'int ({{.*}})' // C-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:24, line:13:1> // C-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:12:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 1 // C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // C-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:8:15> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before1[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:14:1, line:16:1> line:14:5 used also_before2 'int ({{.*}})' // C-NEXT: | |-CompoundStmt [[ADDR_8:0x[a-z0-9]*]] <col:24, line:16:1> // C-NEXT: | | `-ReturnStmt [[ADDR_9:0x[a-z0-9]*]] <line:15:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_10:0x[a-z0-9]*]] <col:10> 'int' 2 // C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_11:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // C-NEXT: | `-DeclRefExpr [[ADDR_12:0x[a-z0-9]*]] <line:28:1> 'int ({{.*}})' Function [[ADDR_13:0x[a-z0-9]*]] 'also_before2[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: |-FunctionDecl [[ADDR_14:0x[a-z0-9]*]] <line:17:1, line:19:1> line:17:5 used also_before3 'int ({{.*}})' // C-NEXT: | |-CompoundStmt [[ADDR_15:0x[a-z0-9]*]] <col:24, line:19:1> // C-NEXT: | | `-ReturnStmt [[ADDR_16:0x[a-z0-9]*]] <line:18:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_17:0x[a-z0-9]*]] <col:10> 'int' 3 // C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_18:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // C-NEXT: | `-DeclRefExpr [[ADDR_19:0x[a-z0-9]*]] <line:31:1> 'int ({{.*}})' Function [[ADDR_20:0x[a-z0-9]*]] 'also_before3[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: |-FunctionDecl [[ADDR_21:0x[a-z0-9]*]] <line:20:1, line:22:1> line:20:5 used also_before4 'int ({{.*}})' // C-NEXT: | |-CompoundStmt [[ADDR_22:0x[a-z0-9]*]] <col:24, line:22:1> // C-NEXT: | | `-ReturnStmt [[ADDR_23:0x[a-z0-9]*]] <line:21:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_24:0x[a-z0-9]*]] <col:10> 'int' 4 // C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_25:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // C-NEXT: | `-DeclRefExpr [[ADDR_26:0x[a-z0-9]*]] <line:34:1> 'int ({{.*}})' Function [[ADDR_27:0x[a-z0-9]*]] 'also_before4[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: |-FunctionDecl 
[[ADDR_6]] <line:8:15, line:27:1> line:8:15 also_before1[implementation={vendor(llvm)}] 'int ({{.*}})' // C-NEXT: | |-CompoundStmt [[ADDR_28:0x[a-z0-9]*]] <line:25:30, line:27:1> // C-NEXT: | | `-ReturnStmt [[ADDR_29:0x[a-z0-9]*]] <line:26:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_30:0x[a-z0-9]*]] <col:10> 'int' 0 // C-NEXT: | `-ConstAttr [[ADDR_31:0x[a-z0-9]*]] <line:8:30> // C-NEXT: |-FunctionDecl [[ADDR_13]] <line:28:1, line:30:1> line:28:1 also_before2[implementation={vendor(llvm)}] 'int ({{.*}})' static // C-NEXT: | `-CompoundStmt [[ADDR_32:0x[a-z0-9]*]] <col:31, line:30:1> // C-NEXT: | `-ReturnStmt [[ADDR_33:0x[a-z0-9]*]] <line:29:3, col:10> // C-NEXT: | `-IntegerLiteral [[ADDR_34:0x[a-z0-9]*]] <col:10> 'int' 0 // C-NEXT: |-FunctionDecl [[ADDR_20]] <line:31:1, line:33:1> line:31:1 also_before3[implementation={vendor(llvm)}] 'int ({{.*}})' // C-NEXT: | |-CompoundStmt [[ADDR_35:0x[a-z0-9]*]] <col:49, line:33:1> // C-NEXT: | | `-ReturnStmt [[ADDR_36:0x[a-z0-9]*]] <line:32:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_37:0x[a-z0-9]*]] <col:10> 'int' 0 // C-NEXT: | `-NoThrowAttr [[ADDR_38:0x[a-z0-9]*]] <line:31:16> // C-NEXT: |-FunctionDecl [[ADDR_27]] <line:34:1, line:36:1> line:34:1 also_before4[implementation={vendor(llvm)}] 'int ({{.*}})' static inline // C-NEXT: | |-CompoundStmt [[ADDR_39:0x[a-z0-9]*]] <col:88, line:36:1> // C-NEXT: | | `-ReturnStmt [[ADDR_40:0x[a-z0-9]*]] <line:35:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_41:0x[a-z0-9]*]] <col:10> 'int' 0 // C-NEXT: | |-ConstAttr [[ADDR_42:0x[a-z0-9]*]] <line:8:30> // C-NEXT: | |-NoThrowAttr [[ADDR_43:0x[a-z0-9]*]] <line:34:29> // C-NEXT: | `-AlwaysInlineAttr [[ADDR_44:0x[a-z0-9]*]] <col:38> always_inline // C-NEXT: `-FunctionDecl [[ADDR_45:0x[a-z0-9]*]] <line:40:1, line:43:1> line:40:5 main 'int ({{.*}})' // C-NEXT: `-CompoundStmt [[ADDR_46:0x[a-z0-9]*]] <col:16, line:43:1> // C-NEXT: `-ReturnStmt [[ADDR_47:0x[a-z0-9]*]] <line:42:3, col:74> // C-NEXT: `-BinaryOperator [[ADDR_48:0x[a-z0-9]*]] <col:10, col:74> 'int' '+' // C-NEXT: |-BinaryOperator [[ADDR_49:0x[a-z0-9]*]] <col:10, col:57> 'int' '+' // C-NEXT: | |-BinaryOperator [[ADDR_50:0x[a-z0-9]*]] <col:10, col:40> 'int' '+' // C-NEXT: | | |-PseudoObjectExpr [[ADDR_51:0x[a-z0-9]*]] <col:10, col:23> 'int' // C-NEXT: | | | |-CallExpr [[ADDR_52:0x[a-z0-9]*]] <col:10, col:23> 'int' // C-NEXT: | | | | `-ImplicitCastExpr [[ADDR_53:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: | | | | `-DeclRefExpr [[ADDR_54:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' Function [[ADDR_0]] 'also_before1' 'int ({{.*}})' // C-NEXT: | | | `-CallExpr [[ADDR_55:0x[a-z0-9]*]] <line:8:15, line:42:23> 'int' // C-NEXT: | | | `-ImplicitCastExpr [[ADDR_56:0x[a-z0-9]*]] <line:8:15> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: | | | `-DeclRefExpr [[ADDR_5]] <col:15> 'int ({{.*}})' Function [[ADDR_6]] 'also_before1[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: | | `-PseudoObjectExpr [[ADDR_57:0x[a-z0-9]*]] <line:42:27, col:40> 'int' // C-NEXT: | | |-CallExpr [[ADDR_58:0x[a-z0-9]*]] <col:27, col:40> 'int' // C-NEXT: | | | `-ImplicitCastExpr [[ADDR_59:0x[a-z0-9]*]] <col:27> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: | | | `-DeclRefExpr [[ADDR_60:0x[a-z0-9]*]] <col:27> 'int ({{.*}})' Function [[ADDR_7]] 'also_before2' 'int ({{.*}})' // C-NEXT: | | `-CallExpr [[ADDR_61:0x[a-z0-9]*]] <line:28:1, line:42:40> 'int' // C-NEXT: | | `-ImplicitCastExpr [[ADDR_62:0x[a-z0-9]*]] <line:28:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: | | 
`-DeclRefExpr [[ADDR_12]] <col:1> 'int ({{.*}})' Function [[ADDR_13]] 'also_before2[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: | `-PseudoObjectExpr [[ADDR_63:0x[a-z0-9]*]] <line:42:44, col:57> 'int' // C-NEXT: | |-CallExpr [[ADDR_64:0x[a-z0-9]*]] <col:44, col:57> 'int' // C-NEXT: | | `-ImplicitCastExpr [[ADDR_65:0x[a-z0-9]*]] <col:44> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: | | `-DeclRefExpr [[ADDR_66:0x[a-z0-9]*]] <col:44> 'int ({{.*}})' Function [[ADDR_14]] 'also_before3' 'int ({{.*}})' // C-NEXT: | `-CallExpr [[ADDR_67:0x[a-z0-9]*]] <line:31:1, line:42:57> 'int' // C-NEXT: | `-ImplicitCastExpr [[ADDR_68:0x[a-z0-9]*]] <line:31:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: | `-DeclRefExpr [[ADDR_19]] <col:1> 'int ({{.*}})' Function [[ADDR_20]] 'also_before3[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: `-PseudoObjectExpr [[ADDR_69:0x[a-z0-9]*]] <line:42:61, col:74> 'int' // C-NEXT: |-CallExpr [[ADDR_70:0x[a-z0-9]*]] <col:61, col:74> 'int' // C-NEXT: | `-ImplicitCastExpr [[ADDR_71:0x[a-z0-9]*]] <col:61> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: | `-DeclRefExpr [[ADDR_72:0x[a-z0-9]*]] <col:61> 'int ({{.*}})' Function [[ADDR_21]] 'also_before4' 'int ({{.*}})' // C-NEXT: `-CallExpr [[ADDR_73:0x[a-z0-9]*]] <line:34:1, line:42:74> 'int' // C-NEXT: `-ImplicitCastExpr [[ADDR_74:0x[a-z0-9]*]] <line:34:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: `-DeclRefExpr [[ADDR_26]] <col:1> 'int ({{.*}})' Function [[ADDR_27]] 'also_before4[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:13:1> line:11:5 used also_before1 'int ({{.*}})' // CXX-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:24, line:13:1> // CXX-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:12:3, col:10> // CXX-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 1 // CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // CXX-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:6:15> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before1[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:14:1, line:16:1> line:14:5 used also_before2 'int ({{.*}})' // CXX-NEXT: | |-CompoundStmt [[ADDR_8:0x[a-z0-9]*]] <col:24, line:16:1> // CXX-NEXT: | | `-ReturnStmt [[ADDR_9:0x[a-z0-9]*]] <line:15:3, col:10> // CXX-NEXT: | | `-IntegerLiteral [[ADDR_10:0x[a-z0-9]*]] <col:10> 'int' 2 // CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_11:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // CXX-NEXT: | `-DeclRefExpr [[ADDR_12:0x[a-z0-9]*]] <line:28:1> 'int ({{.*}})' Function [[ADDR_13:0x[a-z0-9]*]] 'also_before2[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX-NEXT: |-FunctionDecl [[ADDR_14:0x[a-z0-9]*]] <line:17:1, line:19:1> line:17:5 used also_before3 'int ({{.*}})' // CXX-NEXT: | |-CompoundStmt [[ADDR_15:0x[a-z0-9]*]] <col:24, line:19:1> // CXX-NEXT: | | `-ReturnStmt [[ADDR_16:0x[a-z0-9]*]] <line:18:3, col:10> // CXX-NEXT: | | `-IntegerLiteral [[ADDR_17:0x[a-z0-9]*]] <col:10> 'int' 3 // CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_18:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // CXX-NEXT: | `-DeclRefExpr [[ADDR_19:0x[a-z0-9]*]] <line:31:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_20:0x[a-z0-9]*]] 'also_before3[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))' // CXX-NEXT: |-FunctionDecl [[ADDR_21:0x[a-z0-9]*]] 
<line:20:1, line:22:1> line:20:5 used also_before4 'int ({{.*}})' // CXX-NEXT: | |-CompoundStmt [[ADDR_22:0x[a-z0-9]*]] <col:24, line:22:1> // CXX-NEXT: | | `-ReturnStmt [[ADDR_23:0x[a-z0-9]*]] <line:21:3, col:10> // CXX-NEXT: | | `-IntegerLiteral [[ADDR_24:0x[a-z0-9]*]] <col:10> 'int' 4 // CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_25:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // CXX-NEXT: | `-DeclRefExpr [[ADDR_26:0x[a-z0-9]*]] <line:34:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_27:0x[a-z0-9]*]] 'also_before4[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))' // CXX-NEXT: |-FunctionDecl [[ADDR_6]] <line:6:15, line:27:1> line:6:15 constexpr also_before1[implementation={vendor(llvm)}] 'int ({{.*}})' // CXX-NEXT: | `-CompoundStmt [[ADDR_28:0x[a-z0-9]*]] <line:25:30, line:27:1> // CXX-NEXT: | `-ReturnStmt [[ADDR_29:0x[a-z0-9]*]] <line:26:3, col:10> // CXX-NEXT: | `-IntegerLiteral [[ADDR_30:0x[a-z0-9]*]] <col:10> 'int' 0 // CXX-NEXT: |-FunctionDecl [[ADDR_13]] <line:28:1, line:30:1> line:28:1 also_before2[implementation={vendor(llvm)}] 'int ({{.*}})' static // CXX-NEXT: | `-CompoundStmt [[ADDR_31:0x[a-z0-9]*]] <col:31, line:30:1> // CXX-NEXT: | `-ReturnStmt [[ADDR_32:0x[a-z0-9]*]] <line:29:3, col:10> // CXX-NEXT: | `-IntegerLiteral [[ADDR_33:0x[a-z0-9]*]] <col:10> 'int' 0 // CXX-NEXT: |-FunctionDecl [[ADDR_20]] <line:31:1, line:33:1> line:31:1 also_before3[implementation={vendor(llvm)}] 'int ({{.*}}) __attribute__((nothrow))' // CXX-NEXT: | `-CompoundStmt [[ADDR_34:0x[a-z0-9]*]] <col:49, line:33:1> // CXX-NEXT: | `-ReturnStmt [[ADDR_35:0x[a-z0-9]*]] <line:32:3, col:10> // CXX-NEXT: | `-IntegerLiteral [[ADDR_36:0x[a-z0-9]*]] <col:10> 'int' 0 // CXX-NEXT: |-FunctionDecl [[ADDR_27]] <line:34:1, line:36:1> line:34:1 constexpr also_before4[implementation={vendor(llvm)}] 'int ({{.*}}) __attribute__((nothrow))' static inline // CXX-NEXT: | |-CompoundStmt [[ADDR_37:0x[a-z0-9]*]] <col:88, line:36:1> // CXX-NEXT: | | `-ReturnStmt [[ADDR_38:0x[a-z0-9]*]] <line:35:3, col:10> // CXX-NEXT: | | `-IntegerLiteral [[ADDR_39:0x[a-z0-9]*]] <col:10> 'int' 0 // CXX-NEXT: | `-AlwaysInlineAttr [[ADDR_40:0x[a-z0-9]*]] <line:34:38> always_inline // CXX-NEXT: `-FunctionDecl [[ADDR_41:0x[a-z0-9]*]] <line:40:1, line:43:1> line:40:5 main 'int ({{.*}})' // CXX-NEXT: `-CompoundStmt [[ADDR_42:0x[a-z0-9]*]] <col:16, line:43:1> // CXX-NEXT: `-ReturnStmt [[ADDR_43:0x[a-z0-9]*]] <line:42:3, col:74> // CXX-NEXT: `-BinaryOperator [[ADDR_44:0x[a-z0-9]*]] <col:10, col:74> 'int' '+' // CXX-NEXT: |-BinaryOperator [[ADDR_45:0x[a-z0-9]*]] <col:10, col:57> 'int' '+' // CXX-NEXT: | |-BinaryOperator [[ADDR_46:0x[a-z0-9]*]] <col:10, col:40> 'int' '+' // CXX-NEXT: | | |-PseudoObjectExpr [[ADDR_47:0x[a-z0-9]*]] <col:10, col:23> 'int' // CXX-NEXT: | | | |-CallExpr [[ADDR_48:0x[a-z0-9]*]] <col:10, col:23> 'int' // CXX-NEXT: | | | | `-ImplicitCastExpr [[ADDR_49:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: | | | | `-DeclRefExpr [[ADDR_50:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before1' 'int ({{.*}})' // CXX-NEXT: | | | `-CallExpr [[ADDR_51:0x[a-z0-9]*]] <line:6:15, line:42:23> 'int' // CXX-NEXT: | | | `-ImplicitCastExpr [[ADDR_52:0x[a-z0-9]*]] <line:6:15> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: | | | `-DeclRefExpr [[ADDR_5]] <col:15> 'int ({{.*}})' Function [[ADDR_6]] 'also_before1[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX-NEXT: | | `-PseudoObjectExpr [[ADDR_53:0x[a-z0-9]*]] <line:42:27, 
col:40> 'int' // CXX-NEXT: | | |-CallExpr [[ADDR_54:0x[a-z0-9]*]] <col:27, col:40> 'int' // CXX-NEXT: | | | `-ImplicitCastExpr [[ADDR_55:0x[a-z0-9]*]] <col:27> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: | | | `-DeclRefExpr [[ADDR_56:0x[a-z0-9]*]] <col:27> 'int ({{.*}})' {{.*}}Function [[ADDR_7]] 'also_before2' 'int ({{.*}})' // CXX-NEXT: | | `-CallExpr [[ADDR_57:0x[a-z0-9]*]] <line:28:1, line:42:40> 'int' // CXX-NEXT: | | `-ImplicitCastExpr [[ADDR_58:0x[a-z0-9]*]] <line:28:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: | | `-DeclRefExpr [[ADDR_12]] <col:1> 'int ({{.*}})' Function [[ADDR_13]] 'also_before2[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX-NEXT: | `-PseudoObjectExpr [[ADDR_59:0x[a-z0-9]*]] <line:42:44, col:57> 'int' // CXX-NEXT: | |-CallExpr [[ADDR_60:0x[a-z0-9]*]] <col:44, col:57> 'int' // CXX-NEXT: | | `-ImplicitCastExpr [[ADDR_61:0x[a-z0-9]*]] <col:44> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: | | `-DeclRefExpr [[ADDR_62:0x[a-z0-9]*]] <col:44> 'int ({{.*}})' {{.*}}Function [[ADDR_14]] 'also_before3' 'int ({{.*}})' // CXX-NEXT: | `-CallExpr [[ADDR_63:0x[a-z0-9]*]] <line:31:1, line:42:57> 'int' // CXX-NEXT: | `-ImplicitCastExpr [[ADDR_64:0x[a-z0-9]*]] <line:31:1> 'int (*)({{.*}}) __attribute__((nothrow))' <FunctionToPointerDecay> // CXX-NEXT: | `-DeclRefExpr [[ADDR_19]] <col:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_20]] 'also_before3[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))' // CXX-NEXT: `-PseudoObjectExpr [[ADDR_65:0x[a-z0-9]*]] <line:42:61, col:74> 'int' // CXX-NEXT: |-CallExpr [[ADDR_66:0x[a-z0-9]*]] <col:61, col:74> 'int' // CXX-NEXT: | `-ImplicitCastExpr [[ADDR_67:0x[a-z0-9]*]] <col:61> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: | `-DeclRefExpr [[ADDR_68:0x[a-z0-9]*]] <col:61> 'int ({{.*}})' {{.*}}Function [[ADDR_21]] 'also_before4' 'int ({{.*}})' // CXX-NEXT: `-CallExpr [[ADDR_69:0x[a-z0-9]*]] <line:34:1, line:42:74> 'int' // CXX-NEXT: `-ImplicitCastExpr [[ADDR_70:0x[a-z0-9]*]] <line:34:1> 'int (*)({{.*}}) __attribute__((nothrow))' <FunctionToPointerDecay> // CXX-NEXT: `-DeclRefExpr [[ADDR_26]] <col:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_27]] 'also_before4[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
8449.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <[email protected]> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp parallel for simd schedule(static, 1) for (i = 1; i < _PB_NI - 1; ++i) { #pragma omp target teams distribute thread_limit(128) simd for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). */ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
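/*
  Sketch only (not part of the original benchmark): a plain serial version of
  the same 3x3 stencil, useful as a reference when validating the OpenMP
  parallel/target variant in kernel_conv2d above.  It reuses the coefficients
  and loop bounds verbatim; DATA_TYPE, _PB_NI/_PB_NJ and the POLYBENCH_2D
  macro come from the headers already included by this file.
*/
static void conv2d_serial_reference(int ni, int nj,
                                    DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                                    DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
  for (i = 1; i < _PB_NI - 1; ++i)
    for (j = 1; j < _PB_NJ - 1; ++j)
      B[i][j] =  0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
              + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
              +  0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] +  0.1 * A[i+1][j+1];
}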
ft_ao.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Fourier transformed AO pair * \int e^{-i Gv \cdot r} i(r) * j(r) dr^3 * * eval_gz, b, gxyz, gs: * - when eval_gz is GTO_Gv_uniform_orth * > b (reciprocal vectors) is diagonal 3x3 matrix * > Gv k-space grids = dot(b.T,gxyz) * > gxyz[3,nGv] = (kx[:nGv], ky[:nGv], kz[:nGv]) * > gs[3]: The number of G-vectors along each direction (nGv=gs[0]*gs[1]*gs[2]). * - when eval_gz is GTO_Gv_uniform_nonorth * > b is 3x3 matrix = 2\pi * scipy.linalg.inv(cell.lattice_vectors).T * > Gv k-space grids = dot(b.T,gxyz) * > gxyz[3,nGv] = (kx[:nGv], ky[:nGv], kz[:nGv]) * > gs[3]: The number of *positive* G-vectors along each direction. * - when eval_gz is GTO_Gv_general * only Gv is needed * - when eval_gz is GTO_Gv_nonuniform_orth * > b is the basic G value for each cartesian component * Gx = b[:gs[0]] * Gy = b[gs[0]:gs[0]+gs[1]] * Gz = b[gs[0]+gs[1]:] * > gs[3]: Number of basic G values along each direction. * > gxyz[3,nGv] are used to index the basic G value * > Gv is not used */ #include <stdlib.h> #include <string.h> #include <math.h> #include <assert.h> #include <complex.h> #include "config.h" #include "cint.h" #include "gto/ft_ao.h" #define SQRTPI 1.7724538509055160272981674833411451 #define EXPCUTOFF 100 #define NCTRMAX 72 void CINTg1e_index_xyz(int *idx, const CINTEnvVars *envs); double CINTsquare_dist(const double *r1, const double *r2); double CINTcommon_fac_sp(int l); int CINTinit_int1e_EnvVars(CINTEnvVars *envs, const int *ng, const int *shls, const int *atm, const int natm, const int *bas, const int nbas, const double *env); void GTO_ft_init1e_envs(CINTEnvVars *envs, const int *ng, const int *shls, const int *atm, const int natm, const int *bas, const int nbas, const double *env) { CINTinit_int1e_EnvVars(envs, ng, shls, atm, natm, bas, nbas, env); int dli, dlj; if (envs->li_ceil < envs->lj_ceil) { dli = envs->li_ceil + 1; dlj = envs->li_ceil + envs->lj_ceil + 1; } else { dli = envs->li_ceil + envs->lj_ceil + 1; dlj = envs->lj_ceil + 1; } envs->g_stride_i = 1; envs->g_stride_j = dli; envs->g_size = dli * dlj; } static const int _LEN_CART[] = { 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 66, 78, 91, 105, 120, 136 }; static const int _CUM_LEN_CART[] = { 1, 4, 10, 20, 35, 56, 84, 120, 165, 220, 286, 364, 455, 560, 680, 816, }; /* * WHEREX_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if x > 0] * WHEREY_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if y > 0] * WHEREZ_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if z > 0] */ static const int _UPIDY[] = { 1, 3, 4, 6, 7, 8, 10, 11, 12, 13, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103, 105,106,107,108,109,110,111,112,113,114,115,116,117,118, 
120,121,122,123,124,125,126,127,128,129,130,131,132,133,134, }; static const int _UPIDZ[] = { 2, 4, 5, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19, 20, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104, 106,107,108,109,110,111,112,113,114,115,116,117,118,119, 121,122,123,124,125,126,127,128,129,130,131,132,133,134,135, }; /* * _DOWN_XYZ, _DOWN_XYZ_ORDER, _DOWN1, _DOWN2 labels the index in the 1D * recursive relation f_{i+1} = i/2a * f_{i-1} + X * f_{i} * _DOWN_XYZ_ORDER i in i/2a * _DOWN2 index of f_{i-1} * _DOWN_XYZ index of X * _DOWN1 index of f_{i} */ static const int _DOWN1[] = { -1, 0, 0, 0, 0, 1, 2, 1, 2, 2, 0, 0, 0, 3, 4, 5, 3, 3, 5, 5, 0, 0, 0, 3, 2, 5, 6, 7, 8, 9, 6, 6, 8, 9, 9, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 11, 12, 13, 14, 10, 10, 12, 13, 14, 14, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 16, 17, 18, 19, 20, 15, 15, 17, 18, 19, 20, 20, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 22, 23, 24, 25, 26, 27, 21, 21, 23, 24, 25, 26, 27, 27, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 29, 30, 31, 32, 33, 34, 35, 28, 28, 30, 31, 32, 33, 34, 35, 35, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 36, 36, 38, 39, 40, 41, 42, 43, 44, 44, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 45, 45, 47, 48, 49, 50, 51, 52, 53, 54, 54, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 55, 55, 57, 58, 59, 60, 61, 62, 63, 64, 65, 65, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 66, 66, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 77, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 78, 78, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 90, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 66, 80, 81, 82, 83, 84, 85, 86, 87, 88, 77, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 91, 91, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 104, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 
28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 66, 80, 81, 82, 83, 84, 85, 86, 87, 88, 77, 90, 91, 78, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 90, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 105, 105, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 119, }; static const int _DOWN2[] = { -1, -1, -1, -1, 0, -1, -1, 0, -1, 0, 0, -1, -1, -1, -1, -1, 1, -1, -1, 2, 0, -1, -1, 3, -1, 5, -1, -1, -1, -1, 3, -1, 5, -1, 5, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, -1, -1, -1, -1, -1, 6, -1, 8, 9, -1, 9, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, -1, -1, -1, -1, -1, -1, 10, -1, 12, 13, 14, -1, 14, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, -1, -1, -1, -1, -1, -1, -1, 15, -1, 17, 18, 19, 20, -1, 20, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, -1, -1, -1, -1, -1, -1, -1, -1, 21, -1, 23, 24, 25, 26, 27, -1, 27, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, -1, -1, -1, -1, -1, -1, -1, -1, -1, 28, -1, 30, 31, 32, 33, 34, 35, -1, 35, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 36, -1, 38, 39, 40, 41, 42, 43, 44, -1, 44, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 45, -1, 47, 48, 49, 50, 51, 52, 53, 54, -1, 54, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 55, -1, 57, 58, 59, 60, 61, 62, 63, 64, 65, -1, 65, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, -1, 77, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, -1, 90, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, -1, 90, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, -1, 90, 91, -1, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, -1, 104, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 91, -1, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, -1, 104, }; static const int _DOWN_XYZ[] = { 2, 0, 1, 2, 0, 0, 0, 1, 1, 2, 0, 1, 2, 0, 0, 0, 1, 2, 1, 2, 0, 1, 2, 0, 1, 0, 0, 0, 0, 0, 1, 2, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, }; static const int _DOWN_XYZ_ORDER[] = { 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 2, 0, 0, 0, 0, 0, 2, 0, 0, 2, 3, 0, 0, 1, 0, 1, 0, 0, 0, 0, 3, 0, 1, 0, 3, 4, 0, 0, 2, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 4, 0, 2, 1, 0, 4, 5, 0, 0, 3, 0, 3, 2, 0, 0, 2, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 5, 0, 3, 2, 1, 0, 5, 6, 0, 0, 4, 0, 4, 3, 0, 0, 3, 2, 0, 2, 0, 2, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 6, 0, 4, 3, 2, 1, 0, 6, 7, 0, 0, 5, 0, 5, 4, 0, 0, 4, 3, 0, 3, 0, 3, 2, 0, 2, 2, 0, 2, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 5, 4, 3, 2, 1, 0, 7, 8, 0, 0, 6, 0, 6, 5, 0, 0, 5, 4, 0, 4, 0, 4, 3, 0, 3, 3, 0, 3, 2, 0, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 6, 5, 4, 3, 2, 1, 0, 8, 9, 0, 0, 7, 0, 7, 6, 0, 0, 6, 5, 0, 5, 0, 5, 4, 0, 4, 4, 0, 4, 3, 0, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 7, 6, 5, 4, 3, 2, 1, 0, 9, 10, 0, 0, 8, 0, 8, 7, 0, 0, 7, 6, 0, 6, 0, 6, 5, 0, 5, 
5, 0, 5, 4, 0, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 8, 7, 6, 5, 4, 3, 2, 1, 0, 10, 11, 0, 0, 9, 0, 9, 8, 0, 0, 8, 7, 0, 7, 0, 7, 6, 0, 6, 6, 0, 6, 5, 0, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 11, 12, 0, 0, 10, 0, 10, 9, 0, 0, 9, 8, 0, 8, 0, 8, 7, 0, 7, 7, 0, 7, 6, 0, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 12, 13, 0, 0, 11, 0, 11, 10, 0, 0, 10, 9, 0, 9, 0, 9, 8, 0, 8, 8, 0, 8, 7, 0, 7, 7, 7, 0, 7, 6, 0, 6, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 13, 14, 0, 0, 12, 0, 12, 11, 0, 0, 11, 10, 0, 10, 0, 10, 9, 0, 9, 9, 0, 9, 8, 0, 8, 8, 8, 0, 8, 7, 0, 7, 7, 7, 7, 0, 7, 6, 0, 6, 6, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 0, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 14, }; #define WHEREX_IF_L_INC1(i) i #define WHEREY_IF_L_INC1(i) _UPIDY[i] #define WHEREZ_IF_L_INC1(i) _UPIDZ[i] #define STARTX_IF_L_DEC1(i) 0 #define STARTY_IF_L_DEC1(i) ((i<2)?0:_LEN_CART[i-2]) #define STARTZ_IF_L_DEC1(i) (_LEN_CART[i-1]-1) #define ADDR_IF_L_DEC1(l,m) _DOWN1[_CUM_LEN_CART[l-1]+m] #define ADDR_IF_L_DEC2(l,m) _DOWN2[_CUM_LEN_CART[l-1]+m] #define DEC1_XYZ(l,m) _DOWN_XYZ[_CUM_LEN_CART[l-1]+m] #define DEC1_XYZ_ORDER(l,m) _DOWN_XYZ_ORDER[_CUM_LEN_CART[l-1]+m] static int vrr1d_withGv(double complex *g, double *rijri, double aij, double *Gv, int topl, size_t NGv) { int cumxyz = 1; if (topl == 0) { return cumxyz; } double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; int i, n, m, l; double a2; double complex *p0, *p1, *p2, *dec1, *dec2; double *ka2 = malloc(sizeof(double) * NGv*3); double *kxa2 = ka2; double *kya2 = kxa2 + NGv; double *kza2 = kya2 + NGv; a2 = .5 / aij; for (n = 0; n < NGv; n++) { kxa2[n] = kx[n] * a2; kya2[n] = ky[n] * a2; kza2[n] = kz[n] * a2; } p0 = g + NGv; for (n = 0; n < NGv; n++) { p0[ n] = (rijri[0] - kxa2[n]*_Complex_I) * g[n]; p0[NGv +n] = (rijri[1] - kya2[n]*_Complex_I) * g[n]; p0[NGv*2+n] = (rijri[2] - kza2[n]*_Complex_I) * g[n]; } cumxyz += 3; for (l = 1; l < topl; l++) { p0 = g + cumxyz * NGv; dec1 = p0 - _LEN_CART[l ] * NGv; dec2 = dec1 - _LEN_CART[l-1] * NGv; for (i = 0; i < _LEN_CART[l+1]; i++) { m = DEC1_XYZ(l+1,i); kxa2 = ka2 + m * NGv; p1 = dec1 + ADDR_IF_L_DEC1(l+1,i) * NGv; p2 = dec2 + ADDR_IF_L_DEC2(l+1,i) * NGv; if (ADDR_IF_L_DEC2(l+1,i) < 0) { for (n = 0; n < NGv; n++) { p0[n] = (rijri[m]-kxa2[n]*_Complex_I)*p1[n]; } } else { a2 = .5/aij * DEC1_XYZ_ORDER(l+1,i); for (n = 0; n < NGv; n++) { p0[n] = a2*p2[n] + (rijri[m]-kxa2[n]*_Complex_I)*p1[n]; } } p0 += NGv; } cumxyz += _LEN_CART[l+1]; } free(ka2); return cumxyz; } /* * if li = 3, lj = 1 * (10 + X*00 -> 01): * gs + X*fs -> fp */ static void plain_vrr2d_ket_inc1(double *out, const double *g, double *rirj, int li, int lj) { if (lj == 0) { 
memcpy(out, g, sizeof(double)*_LEN_CART[li]); return; } const int row_10 = _LEN_CART[li+1]; const int row_00 = _LEN_CART[li ]; const int col_00 = _LEN_CART[lj-1]; const double *g00 = g; const double *g10 = g + row_00*col_00; int i, j; const double *p00, *p10; double *p01 = out; for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i); p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)); p01[i] = p10[0] + rirj[0] * p00[0]; } p01 += row_00; } for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i); p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)); p01[i] = p10[0] + rirj[1] * p00[0]; } p01 += row_00; } j = STARTZ_IF_L_DEC1(lj); if (j < _LEN_CART[lj-1]) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i); p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)); p01[i] = p10[0] + rirj[2] * p00[0]; } } } static void vrr2d_ket_inc1_withGv(double complex *out, const double complex *g, double *rirj, int li, int lj, size_t NGv) { if (lj == 0) { memcpy(out, g, sizeof(double complex)*_LEN_CART[li]*NGv); return; } const int row_10 = _LEN_CART[li+1]; const int row_00 = _LEN_CART[li ]; const int col_00 = _LEN_CART[lj-1]; const double complex *g00 = g; const double complex *g10 = g + row_00*col_00*NGv; int i, j, n; const double complex *p00, *p10; double complex *p01 = out; for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)) * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[0] * p00[n]; } p01 += NGv; } } for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)) * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[1] * p00[n]; } p01 += NGv; } } j = STARTZ_IF_L_DEC1(lj); if (j < _LEN_CART[lj-1]) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)) * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[2] * p00[n]; } p01 += NGv; } } } /* * transpose i, j when storing into out */ static void vrr2d_inc1_swapij(double complex *out, const double complex *g, double *rirj, int li, int lj, size_t NGv) { if (lj == 0) { memcpy(out, g, sizeof(double complex)*_LEN_CART[li]*NGv); return; } const int row_01 = _LEN_CART[lj]; const int row_10 = _LEN_CART[li+1]; const int row_00 = _LEN_CART[li ]; const int col_00 = _LEN_CART[lj-1]; const double complex *g00 = g; const double complex *g10 = g + row_00*col_00*NGv; int i, j, n; const double complex *p00, *p10; double complex *p01 = out; for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)) * NGv; p01 = out + i*row_01 * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[0] * p00[n]; } } out += NGv; } for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)) * NGv; p01 = out + i*row_01 * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[1] * p00[n]; } } out += NGv; } j = STARTZ_IF_L_DEC1(lj); if (j < _LEN_CART[lj-1]) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)) * NGv; p01 = out + i*row_01 * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[2] * p00[n]; } } } } /* (li+lj,0) => (li,lj) */ void 
GTOplain_vrr2d(double *out, double *g, double *gbuf2, CINTEnvVars *envs) { const int li = envs->li_ceil; const int lj = envs->lj_ceil; const int nmax = li + lj; const double *ri = envs->ri; const double *rj = envs->rj; double *g00, *g01, *gswap, *pg00, *pg01; int row_01, col_01, row_00, col_00; int i, j; double rirj[3]; rirj[0] = ri[0] - rj[0]; rirj[1] = ri[1] - rj[1]; rirj[2] = ri[2] - rj[2]; g00 = gbuf2; g01 = g; for (j = 1; j < lj; j++) { gswap = g00; g00 = g01; g01 = gswap; pg00 = g00; pg01 = g01; for (i = li; i <= nmax-j; i++) { plain_vrr2d_ket_inc1(pg01, pg00, rirj, i, j); row_01 = _LEN_CART[i]; col_01 = _LEN_CART[j]; row_00 = _LEN_CART[i ]; col_00 = _LEN_CART[j-1]; pg00 += row_00*col_00; pg01 += row_01*col_01; } } plain_vrr2d_ket_inc1(out, g01, rirj, li, lj); } static void vrr2d_withGv(double complex *out, double complex *g, double complex *gbuf2, CINTEnvVars *envs, size_t NGv) { const int li = envs->li_ceil; const int lj = envs->lj_ceil; const int nmax = li + lj; const double *ri = envs->ri; const double *rj = envs->rj; double complex *g00, *g01, *gswap, *pg00, *pg01; int row_01, col_01, row_00, col_00; int i, j; double rirj[3]; rirj[0] = ri[0] - rj[0]; rirj[1] = ri[1] - rj[1]; rirj[2] = ri[2] - rj[2]; g00 = gbuf2; g01 = g; for (j = 1; j < lj; j++) { gswap = g00; g00 = g01; g01 = gswap; pg00 = g00; pg01 = g01; for (i = li; i <= nmax-j; i++) { vrr2d_ket_inc1_withGv(pg01, pg00, rirj, i, j, NGv); row_01 = _LEN_CART[i]; col_01 = _LEN_CART[j]; row_00 = _LEN_CART[i ]; col_00 = _LEN_CART[j-1]; pg00 += row_00*col_00 * NGv; pg01 += row_01*col_01 * NGv; } } vrr2d_ket_inc1_withGv(out, g01, rirj, li, lj, NGv); } /* (0,li+lj) => (li,lj) */ static void hrr2d_withGv(double complex *out, double complex *g, double complex *gbuf2, CINTEnvVars *envs, size_t NGv) { const int li = envs->li_ceil; const int lj = envs->lj_ceil; const int nmax = li + lj; const double *ri = envs->ri; const double *rj = envs->rj; double complex *g00, *g01, *gswap, *pg00, *pg01; int row_01, col_01, row_00, col_00; int i, j; double rjri[3]; rjri[0] = rj[0] - ri[0]; rjri[1] = rj[1] - ri[1]; rjri[2] = rj[2] - ri[2]; g00 = gbuf2; g01 = g; for (i = 1; i < li; i++) { gswap = g00; g00 = g01; g01 = gswap; pg00 = g00; pg01 = g01; for (j = lj; j <= nmax-i; j++) { vrr2d_ket_inc1_withGv(pg01, pg00, rjri, j, i, NGv); row_01 = _LEN_CART[j]; col_01 = _LEN_CART[i]; row_00 = _LEN_CART[j ]; col_00 = _LEN_CART[i-1]; pg00 += row_00*col_00 * NGv; pg01 += row_01*col_01 * NGv; } } vrr2d_inc1_swapij(out, g01, rjri, lj, li, NGv); } /* * Recursive relation */ static void aopair_rr_igtj_early(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int topl = envs->li_ceil + envs->lj_ceil; const double aij = ai + aj; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rijri[3]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijri[0] = rij[0] - ri[0]; rijri[1] = rij[1] - ri[1]; rijri[2] = rij[2] - ri[2]; (*eval_gz)(g, aij, rij, fac, Gv, b, gxyz, gs, NGv); vrr1d_withGv(g, rijri, aij, Gv, topl, NGv); } static void aopair_rr_iltj_early(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int topl = envs->li_ceil + envs->lj_ceil; const double aij = ai + aj; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rijrj[3]; 
rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijrj[0] = rij[0] - rj[0]; rijrj[1] = rij[1] - rj[1]; rijrj[2] = rij[2] - rj[2]; (*eval_gz)(g, aij, rij, fac, Gv, b, gxyz, gs, NGv); vrr1d_withGv(g, rijrj, aij, Gv, topl, NGv); } static void aopair_rr_igtj_lazy(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int nmax = envs->li_ceil + envs->lj_ceil; const int lj = envs->lj_ceil; const int dj = envs->g_stride_j; const double aij = ai + aj; const double a2 = .5 / aij; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rirj[3], rijri[3]; double complex *gx = g; double complex *gy = gx + envs->g_size * NGv; double complex *gz = gy + envs->g_size * NGv; double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; size_t off0, off1, off2; int i, j, n, ptr; double ia2; rirj[0] = ri[0] - rj[0]; rirj[1] = ri[1] - rj[1]; rirj[2] = ri[2] - rj[2]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijri[0] = rij[0] - ri[0]; rijri[1] = rij[1] - ri[1]; rijri[2] = rij[2] - ri[2]; for (n = 0; n < NGv; n++) { gx[n] = 1; gy[n] = 1; } (*eval_gz)(gz, aij, rij, fac, Gv, b, gxyz, gs, NGv); if (nmax > 0) { for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[NGv+n] = (rijri[0] - kx[n]*a2*_Complex_I) * gx[n]; gy[NGv+n] = (rijri[1] - ky[n]*a2*_Complex_I) * gy[n]; gz[NGv+n] = (rijri[2] - kz[n]*a2*_Complex_I) * gz[n]; } } } for (i = 1; i < nmax; i++) { off0 = (i-1) * NGv; off1 = i * NGv; off2 = (i+1) * NGv; ia2 = i * a2; for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = ia2 * gx[off0+n] + (rijri[0] - kx[n]*a2*_Complex_I) * gx[off1+n]; gy[off2+n] = ia2 * gy[off0+n] + (rijri[1] - ky[n]*a2*_Complex_I) * gy[off1+n]; gz[off2+n] = ia2 * gz[off0+n] + (rijri[2] - kz[n]*a2*_Complex_I) * gz[off1+n]; } } } for (j = 1; j <= lj; j++) { ptr = dj * j; for (i = ptr; i <= ptr + nmax - j; i++) { off0 = i * NGv - dj * NGv; // [i, j-1] off1 = (i+1) * NGv - dj * NGv; // [i+1,j-1] off2 = i * NGv; // [i, j ] for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = gx[off1+n] + rirj[0] * gx[off0+n]; gy[off2+n] = gy[off1+n] + rirj[1] * gy[off0+n]; gz[off2+n] = gz[off1+n] + rirj[2] * gz[off0+n]; } } } } } static void aopair_rr_iltj_lazy(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int nmax = envs->li_ceil + envs->lj_ceil; const int li = envs->li_ceil; const int dj = envs->g_stride_j; const double aij = ai + aj; const double a2 = .5 / aij; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rirj[3], rijrj[3]; double complex *gx = g; double complex *gy = gx + envs->g_size * NGv; double complex *gz = gy + envs->g_size * NGv; double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; size_t off0, off1, off2; int i, j, n; double ia2; rirj[0] = rj[0] - ri[0]; rirj[1] = rj[1] - ri[1]; rirj[2] = rj[2] - ri[2]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijrj[0] = rij[0] - rj[0]; rijrj[1] = rij[1] - rj[1]; rijrj[2] = rij[2] - rj[2]; for (n = 0; n < NGv; n++) { gx[n] = 1; gy[n] = 1; } (*eval_gz)(gz, aij, rij, fac, Gv, b, gxyz, gs, NGv); if (nmax > 0) { off0 = dj * NGv; for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off0+n] = 
(rijrj[0] - kx[n]*a2*_Complex_I) * gx[n]; gy[off0+n] = (rijrj[1] - ky[n]*a2*_Complex_I) * gy[n]; gz[off0+n] = (rijrj[2] - kz[n]*a2*_Complex_I) * gz[n]; } } } for (i = 1; i < nmax; i++) { off0 = (i-1) * dj * NGv; off1 = i * dj * NGv; off2 = (i+1) * dj * NGv; ia2 = i * a2; for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = ia2 * gx[off0+n] + (rijrj[0] - kx[n]*a2*_Complex_I) * gx[off1+n]; gy[off2+n] = ia2 * gy[off0+n] + (rijrj[1] - ky[n]*a2*_Complex_I) * gy[off1+n]; gz[off2+n] = ia2 * gz[off0+n] + (rijrj[2] - kz[n]*a2*_Complex_I) * gz[off1+n]; } } } for (i = 1; i <= li; i++) { for (j = 0; j <= nmax - i; j++) { off0 = (i-1) * NGv + j * dj * NGv; // [i-1,j ] off1 = (i-1) * NGv + (j+1) * dj * NGv; // [i-1,j+1] off2 = i * NGv + j * dj * NGv; // [i ,j ] for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = gx[off1+n] + rirj[0] * gx[off0+n]; gy[off2+n] = gy[off1+n] + rirj[1] * gy[off0+n]; gz[off2+n] = gz[off1+n] + rirj[2] * gz[off0+n]; } } } } } static void inner_prod(double complex *g, double complex *gout, int *idx, const CINTEnvVars *envs, double *Gv, size_t NGv, int empty) { int ix, iy, iz, n, k; double complex *gz = g + envs->g_size * NGv * 2; if (empty) { for (n = 0; n < envs->nf; n++) { ix = idx[n*3+0]; iy = idx[n*3+1]; iz = idx[n*3+2]; for (k = 0; k < NGv; k++) { if (gz[k] != 0) { gout[n*NGv+k] = g[ix*NGv+k] * g[iy*NGv+k] * g[iz*NGv+k]; } else { gout[n*NGv+k] = 0; } } } } else { for (n = 0; n < envs->nf; n++) { ix = idx[n*3+0]; iy = idx[n*3+1]; iz = idx[n*3+2]; for (k = 0; k < NGv; k++) { if (gz[k] != 0) { gout[n*NGv+k] += g[ix*NGv+k] * g[iy*NGv+k] * g[iz*NGv+k]; } } } } } static void prim_to_ctr(double complex *gc, const size_t nf, double complex *gp, const int nprim, const int nctr, const double *coeff, int empty) { size_t n, i; double c; if (empty) { for (n = 0; n < nctr; n++) { c = coeff[nprim*n]; for (i = 0; i < nf; i++) { gc[i] = gp[i] * c; } gc += nf; } } else { for (n = 0; n < nctr; n++) { c = coeff[nprim*n]; if (c != 0) { for (i = 0; i < nf; i++) { gc[i] += gp[i] * c; } } gc += nf; } } } static void transpose(double complex *out, double complex *in, int nf, int comp, size_t NGv) { size_t n, k, ic; double complex *pin; for (ic = 0; ic < comp; ic++) { for (n = 0; n < nf; n++) { pin = in + (n*comp+ic) * NGv; for (k = 0; k < NGv; k++) { out[n*NGv+k] = pin[k]; } } out += nf * NGv; } } static const int _GBUFSIZE[] = { 1, 4, 10, 10, 20, 48, 20, 35, 75, 150, 35, 56, 108, 216, 384, 56, 84, 147, 294, 510, 850, 84, 120, 192, 384, 654, 1090, 1640, 120, 165, 243, 486, 816, 1360, 2040, 3030 }; #define bufsize(i,j) _GBUFSIZE[((i>=j) ? 
(i*(i+1)/2+j) : (j*(j+1)/2+i))] int GTO_aopair_early_contract(double complex *out, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int *shls = envs->shls; const int *bas = envs->bas; const double *env = envs->env; const int i_sh = shls[0]; const int j_sh = shls[1]; const int i_l = envs->i_l; const int j_l = envs->j_l; const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int i_prim = bas(NPRIM_OF, i_sh); const int j_prim = bas(NPRIM_OF, j_sh); const int nf = envs->nf; const double *ri = envs->ri; const double *rj = envs->rj; const double *ai = env + bas(PTR_EXP, i_sh); const double *aj = env + bas(PTR_EXP, j_sh); const double *ci = env + bas(PTR_COEFF, i_sh); const double *cj = env + bas(PTR_COEFF, j_sh); double fac1i, fac1j; double aij, dij, eij; int ip, jp, n; int empty[2] = {1, 1}; int *jempty = empty + 0; int *iempty = empty + 1; const size_t len1 = bufsize(i_l,j_l) * NGv; const size_t leni = len1 * i_ctr; const size_t lenj = len1 * i_ctr * j_ctr; double complex *gctrj = malloc(sizeof(double complex)*(lenj+leni+len1)); double complex *g = gctrj + lenj; double complex *gctri, *g1d; if (j_ctr == 1) { gctri = gctrj; iempty = jempty; } else { gctri = g; g += leni; } g1d = g; void (*aopair_rr)(); int offset_g1d; if (i_l >= j_l) { aopair_rr = aopair_rr_igtj_early; offset_g1d = _CUM_LEN_CART[i_l] - _LEN_CART[i_l]; } else { aopair_rr = aopair_rr_iltj_early; offset_g1d = _CUM_LEN_CART[j_l] - _LEN_CART[j_l]; } int len_g1d = _CUM_LEN_CART[i_l+j_l] - offset_g1d; double rrij = CINTsquare_dist(ri, rj); double fac1 = SQRTPI * M_PI * CINTcommon_fac_sp(i_l) * CINTcommon_fac_sp(j_l); *jempty = 1; for (jp = 0; jp < j_prim; jp++) { if (j_ctr == 1) { fac1j = fac1 * cj[jp]; } else { fac1j = fac1; *iempty = 1; } for (ip = 0; ip < i_prim; ip++) { aij = ai[ip] + aj[jp]; eij = (ai[ip] * aj[jp] / aij) * rrij; if (eij > EXPCUTOFF) { continue; } dij = exp(-eij) / (aij * sqrt(aij)); fac1i = fac1j * dij; (*aopair_rr)(g, ai[ip], aj[jp], envs, eval_gz, fac*fac1i, Gv, b, gxyz, gs, NGv); prim_to_ctr(gctri, len_g1d*NGv, g1d+offset_g1d*NGv, i_prim, i_ctr, ci+ip, *iempty); *iempty = 0; } if (!*iempty) { if (j_ctr > 1) { prim_to_ctr(gctrj, i_ctr*len_g1d*NGv, gctri, j_prim,j_ctr, cj+jp, *jempty); } *jempty = 0; } } if (!*jempty) { g1d = gctrj; for (n = 0; n < i_ctr*j_ctr; n++) { if (i_l >= j_l) { vrr2d_withGv(out+n*nf*NGv, g1d, gctrj+lenj, envs, NGv); } else { hrr2d_withGv(out+n*nf*NGv, g1d, gctrj+lenj, envs, NGv); } g1d += len_g1d * NGv; } } free(gctrj); return !*jempty; } int GTO_aopair_lazy_contract(double complex *gctr, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int *shls = envs->shls; const int *bas = envs->bas; const double *env = envs->env; const int i_sh = shls[0]; const int j_sh = shls[1]; const int i_l = envs->i_l; const int j_l = envs->j_l; const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int i_prim = bas(NPRIM_OF, i_sh); const int j_prim = bas(NPRIM_OF, j_sh); const int n_comp = envs->ncomp_e1 * envs->ncomp_tensor; const int nf = envs->nf; const double *ri = envs->ri; const double *rj = envs->rj; const double *ai = env + bas(PTR_EXP, i_sh); const double *aj = env + bas(PTR_EXP, j_sh); const double *ci = env + bas(PTR_COEFF, i_sh); const double *cj = env + bas(PTR_COEFF, j_sh); double fac1i, fac1j; double aij, dij, eij; int ip, jp; int empty[3] = {1, 1, 1}; int *jempty = empty + 0; int *iempty = empty + 1; int 
*gempty = empty + 2; const size_t len1 = envs->g_size * 3 * (1<<envs->gbits) * NGv; const size_t leng = nf * n_comp * NGv; const size_t leni = nf * i_ctr * n_comp * NGv; size_t lenj = 0; if (n_comp > 1) { lenj = nf * i_ctr * j_ctr * n_comp * NGv; } double complex *g = malloc(sizeof(double complex) * (len1+leng+leni+lenj)); double complex *g1 = g + len1; double complex *gout, *gctri, *gctrj; if (n_comp == 1) { gctrj = gctr; } else { gctrj = g1; g1 += lenj; } if (j_ctr == 1) { gctri = gctrj; iempty = jempty; } else { gctri = g1; g1 += leni; } if (i_ctr == 1) { gout = gctri; gempty = iempty; } else { gout = g1; } void (*aopair_rr)(); if (i_l >= j_l) { aopair_rr = aopair_rr_igtj_lazy; } else { aopair_rr = aopair_rr_iltj_lazy; } int *idx = malloc(sizeof(int) * nf * 3); CINTg1e_index_xyz(idx, envs); double rrij = CINTsquare_dist(ri, rj); double fac1 = SQRTPI * M_PI * CINTcommon_fac_sp(i_l) * CINTcommon_fac_sp(j_l); *jempty = 1; for (jp = 0; jp < j_prim; jp++) { envs->aj = aj[jp]; if (j_ctr == 1) { fac1j = fac1 * cj[jp]; } else { fac1j = fac1; *iempty = 1; } for (ip = 0; ip < i_prim; ip++) { envs->ai = ai[ip]; aij = ai[ip] + aj[jp]; eij = (ai[ip] * aj[jp] / aij) * rrij; if (eij > EXPCUTOFF) { continue; } dij = exp(-eij) / (aij * sqrt(aij)); if (i_ctr == 1) { fac1i = fac1j * dij * ci[ip]; } else { fac1i = fac1j * dij; } (*aopair_rr)(g, ai[ip], aj[jp], envs, eval_gz, fac*fac1i, Gv, b, gxyz, gs, NGv); (*envs->f_gout)(g, gout, idx, envs, Gv, NGv, *gempty); if (i_ctr > 1) { prim_to_ctr(gctri, nf*n_comp*NGv, gout, i_prim, i_ctr, ci+ip, *iempty); } *iempty = 0; } if (!*iempty) { if (j_ctr > 1) { prim_to_ctr(gctrj, i_ctr*nf*n_comp*NGv, gctri, j_prim, j_ctr, cj+jp, *jempty); } *jempty = 0; } } if (n_comp > 1 && !*jempty) { transpose(gctr, gctrj, nf*i_ctr*j_ctr, n_comp, NGv); } free(g); free(idx); return !*jempty; } void GTO_Gv_general(double complex *out, double aij, double *rij, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; const double cutoff = EXPCUTOFF * aij * 4; int n; double kR, kk; for (n = 0; n < NGv; n++) { kk = kx[n] * kx[n] + ky[n] * ky[n] + kz[n] * kz[n]; if (kk < cutoff) { kR = kx[n] * rij[0] + ky[n] * rij[1] + kz[n] * rij[2]; out[n] = exp(-.25*kk/aij) * fac * (cos(kR) - sin(kR)*_Complex_I); } else { out[n] = 0; } } } /* * Gv = dot(b.T,gxyz) + kpt * kk = dot(Gv, Gv) * kr = dot(rij, Gv) = dot(rij,b.T, gxyz) + dot(rij,kpt) = dot(br, gxyz) + dot(rij,kpt) * out = fac * exp(-.25 * kk / aij) * (cos(kr) - sin(kr) * _Complex_I); * * b: the first 9 elements are 2\pi*inv(a^T), then 3 elements for k_{ij}, * followed by 3*NGv floats for Gbase */ void GTO_Gv_orth(double complex *out, double aij, double *rij, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int nx = gs[0]; const int ny = gs[1]; const int nz = gs[2]; double br[3]; // dot(rij, b) br[0] = rij[0] * b[0]; br[1] = rij[1] * b[4]; br[2] = rij[2] * b[8]; double *kpt = b + 9; double kr[3]; kr[0] = rij[0] * kpt[0]; kr[1] = rij[1] * kpt[1]; kr[2] = rij[2] * kpt[2]; double *Gxbase = b + 12; double *Gybase = Gxbase + nx; double *Gzbase = Gybase + ny; double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; double complex zbuf[nx+ny+nz]; double complex *csx = zbuf; double complex *csy = csx + nx; double complex *csz = csy + ny; double kkpool[nx+ny+nz]; double *kkx = kkpool; double *kky = kkx + nx; double *kkz = kky + ny; int *gx = gxyz; int *gy = gx + NGv; int *gz = gy + NGv; const double cutoff = EXPCUTOFF * aij * 4; int 
n, ix, iy, iz; double Gr; for (n = 0; n < nx+ny+nz; n++) { kkpool[n] = -1; } for (n = 0; n < NGv; n++) { ix = gx[n]; iy = gy[n]; iz = gz[n]; if (kkx[ix] < 0) { Gr = Gxbase[ix] * br[0] + kr[0]; kkx[ix] = .25 * kx[n]*kx[n] / aij; csx[ix] = exp(-kkx[ix]) * (cos(Gr)-sin(Gr)*_Complex_I); } if (kky[iy] < 0) { Gr = Gybase[iy] * br[1] + kr[1]; kky[iy] = .25 * ky[n]*ky[n] / aij; csy[iy] = exp(-kky[iy]) * (cos(Gr)-sin(Gr)*_Complex_I); } if (kkz[iz] < 0) { Gr = Gzbase[iz] * br[2] + kr[2]; kkz[iz] = .25 * kz[n]*kz[n] / aij; csz[iz] = fac * exp(-kkz[iz]) * (cos(Gr)-sin(Gr)*_Complex_I); } if (kkx[ix] + kky[iy] + kkz[iz] < cutoff) { out[n] = csx[ix] * csy[iy] * csz[iz]; } else { out[n] = 0; } } } void GTO_Gv_nonorth(double complex *out, double aij, double *rij, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int nx = gs[0]; const int ny = gs[1]; const int nz = gs[2]; double br[3]; // dot(rij, b) br[0] = rij[0] * b[0]; br[0] += rij[1] * b[1]; br[0] += rij[2] * b[2]; br[1] = rij[0] * b[3]; br[1] += rij[1] * b[4]; br[1] += rij[2] * b[5]; br[2] = rij[0] * b[6]; br[2] += rij[1] * b[7]; br[2] += rij[2] * b[8]; double *kpt = b + 9; double kr[3]; kr[0] = rij[0] * kpt[0]; kr[1] = rij[1] * kpt[1]; kr[2] = rij[2] * kpt[2]; double *Gxbase = b + 12; double *Gybase = Gxbase + nx; double *Gzbase = Gybase + ny; double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; double complex zbuf[nx+ny+nz]; double complex *csx = zbuf; double complex *csy = csx + nx; double complex *csz = csy + ny; char empty[nx+ny+nz]; char *xempty = empty; char *yempty = xempty + nx; char *zempty = yempty + ny; memset(empty, 1, sizeof(char)*(nx+ny+nz)); int *gx = gxyz; int *gy = gx + NGv; int *gz = gy + NGv; const double cutoff = EXPCUTOFF * aij * 4; int n, ix, iy, iz; double Gr, kk; for (n = 0; n < NGv; n++) { ix = gx[n]; iy = gy[n]; iz = gz[n]; kk = kx[n] * kx[n] + ky[n] * ky[n] + kz[n] * kz[n]; if (kk < cutoff) { ix = gx[n]; iy = gy[n]; iz = gz[n]; if (xempty[ix]) { Gr = Gxbase[ix] * br[0] + kr[0]; csx[ix] = cos(Gr)-sin(Gr)*_Complex_I; xempty[ix] = 0; } if (yempty[iy]) { Gr = Gybase[iy] * br[1] + kr[1]; csy[iy] = cos(Gr)-sin(Gr)*_Complex_I; yempty[iy] = 0; } if (zempty[iz]) { Gr = Gzbase[iz] * br[2] + kr[2]; csz[iz] = fac * (cos(Gr)-sin(Gr)*_Complex_I); zempty[iz] = 0; } out[n] = exp(-.25*kk/aij) * csx[ix]*csy[iy]*csz[iz]; } else { out[n] = 0; } } } static void zcopy_ij(double complex *out, const double complex *gctr, const int mi, const int mj, const int ni, const size_t NGv) { int i, j, k; for (j = 0; j < mj; j++) { for (i = 0; i < mi; i++) { for (k = 0; k < NGv; k++) { out[i*NGv+k] = gctr[i*NGv+k]; } } out += ni * NGv; gctr += mi * NGv; } } void GTO_ft_c2s_cart(double complex *out, double complex *gctr, int *dims, CINTEnvVars *envs, size_t NGv) { const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int nfi = envs->nfi; const int nfj = envs->nfj; const int ni = nfi*i_ctr; const int nj = nfj*j_ctr; const int nf = envs->nf; int ic, jc; double complex *pout; for (jc = 0; jc < nj; jc += nfj) { for (ic = 0; ic < ni; ic += nfi) { pout = out + (dims[0] * jc + ic) * NGv; zcopy_ij(pout, gctr, nfi, nfj, dims[0], NGv); gctr += nf * NGv; } } } #define C2S(sph, nket, cart, l) \ (double complex *)CINTc2s_ket_sph((double *)(sph), nket, (double *)(cart), l) #define OF_CMPLX 2 void GTO_ft_c2s_sph(double complex *out, double complex *gctr, int *dims, CINTEnvVars *envs, size_t NGv) { const int i_l = envs->i_l; const int j_l = envs->j_l; const int i_ctr = envs->x_ctr[0]; const int j_ctr = 
envs->x_ctr[1]; const int di = i_l * 2 + 1; const int dj = j_l * 2 + 1; const int ni = di*i_ctr; const int nj = dj*j_ctr; const int nfi = envs->nfi; const int nf = envs->nf; int ic, jc, k; const int buflen = nfi*dj; double complex *buf1 = malloc(sizeof(double complex) * buflen*2 * NGv); double complex *buf2 = buf1 + buflen * NGv; double complex *pout, *pij, *buf; for (jc = 0; jc < nj; jc += dj) { for (ic = 0; ic < ni; ic += di) { buf = C2S(buf1, nfi*NGv*OF_CMPLX, gctr, j_l); pij = C2S(buf2, NGv*OF_CMPLX, buf, i_l); for (k = NGv; k < dj*NGv; k+=NGv) { pout = C2S(buf2+k*di, NGv*OF_CMPLX, buf+k*nfi, i_l); } pout = out + (dims[0] * jc + ic) * NGv; zcopy_ij(pout, pij, di, dj, dims[0], NGv); gctr += nf * NGv; } } free(buf1); } static void _ft_zset0(double complex *out, int *dims, int *counts, int comp, size_t NGv) { double complex *pout; int i, j, k, ic; for (ic = 0; ic < comp; ic++) { for (j = 0; j < counts[1]; j++) { pout = out + j * counts[0] * NGv; for (i = 0; i < counts[0]; i++) { for (k = 0; k < NGv; k++) { pout[i*NGv+k] = 0; } } } out += dims[0] * dims[1] * NGv; } } /************************************************* * * eval_aopair is one of GTO_aopair_early_contract, * GTO_aopair_lazy_contract * * eval_gz is one of GTO_Gv_general, GTO_Gv_uniform_orth, * GTO_Gv_uniform_nonorth, GTO_Gv_nonuniform_orth * *************************************************/ int GTO_ft_aopair_drv(double complex *out, int *dims, int (*eval_aopair)(), FPtr_eval_gz eval_gz, void (*f_c2s)(), double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, CINTEnvVars *envs) { const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int n_comp = envs->ncomp_e1 * envs->ncomp_tensor; const size_t nc = envs->nf * i_ctr * j_ctr * NGv; double complex *gctr = malloc(sizeof(double complex) * nc * n_comp); if (eval_gz == NULL) { eval_gz = GTO_Gv_general; } if (eval_gz != GTO_Gv_general) { assert(gxyz != NULL); } if (eval_aopair == NULL) { const int *shls = envs->shls; const int *bas = envs->bas; const int i_sh = shls[0]; const int j_sh = shls[1]; const int i_prim = bas(NPRIM_OF, i_sh); const int j_prim = bas(NPRIM_OF, j_sh); if (i_prim*j_prim < i_ctr*j_ctr*3) { eval_aopair = GTO_aopair_lazy_contract; } else { eval_aopair = GTO_aopair_early_contract; } } int has_value = (*eval_aopair)(gctr, envs, eval_gz, fac, Gv, b, gxyz, gs, NGv); int counts[4]; if (f_c2s == &GTO_ft_c2s_sph) { counts[0] = (envs->i_l*2+1) * i_ctr; counts[1] = (envs->j_l*2+1) * j_ctr; } else { // f_c2s == &GTO_ft_c2s_cart counts[0] = envs->nfi * i_ctr; counts[1] = envs->nfj * j_ctr; } if (dims == NULL) { dims = counts; } size_t nout = dims[0] * dims[1] * NGv; int n; if (has_value) { for (n = 0; n < n_comp; n++) { (*f_c2s)(out+nout*n, gctr+nc*n, dims, envs, NGv); } } else { _ft_zset0(out, dims, counts, n_comp, NGv); } free(gctr); return has_value; } int GTO_ft_ovlp_cart(double complex *out, int *shls, int *dims, int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { CINTEnvVars envs; int ng[] = {0, 0, 0, 0, 0, 1, 0, 1}; GTO_ft_init1e_envs(&envs, ng, shls, atm, natm, bas, nbas, env); envs.f_gout = &inner_prod; return GTO_ft_aopair_drv(out, dims, eval_aopair, eval_gz, &GTO_ft_c2s_cart, fac, Gv, b, gxyz, gs, nGv, &envs); } int GTO_ft_ovlp_sph(double complex *out, int *shls, int *dims, int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int 
*bas, int nbas, double *env) { CINTEnvVars envs; int ng[] = {0, 0, 0, 0, 0, 1, 0, 1}; GTO_ft_init1e_envs(&envs, ng, shls, atm, natm, bas, nbas, env); envs.f_gout = &inner_prod; return GTO_ft_aopair_drv(out, dims, eval_aopair, eval_gz, &GTO_ft_c2s_sph, fac, Gv, b, gxyz, gs, nGv, &envs); } /************************************************* * *************************************************/ static void zcopy_s2_igtj(double complex *out, double complex *in, size_t NGv, int comp, int nij, int ip, int di, int dj) { const size_t ip1 = ip + 1; int i, j, n, ic; double complex *pin, *pout; for (ic = 0; ic < comp; ic++) { pout = out + ic * nij * NGv; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pin = in + NGv * (j*di+i); for (n = 0; n < NGv; n++) { pout[j*NGv+n] = pin[n]; } } pout += (ip1 + i) * NGv; } } } static void zcopy_s2_ieqj(double complex *out, double complex *in, size_t NGv, int comp, int nij, int ip, int di, int dj) { const size_t ip1 = ip + 1; int i, j, n, ic; double complex *pin, *pout; for (ic = 0; ic < comp; ic++) { pout = out + ic * nij * NGv; for (i = 0; i < di; i++) { for (j = 0; j <= i; j++) { pin = in + NGv * (j*di+i); for (n = 0; n < NGv; n++) { pout[j*NGv+n] = pin[n]; } } pout += (ip1 + i) * NGv; } } } void GTO_ft_fill_s1(int (*intor)(), int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex *mat, int comp, int ish, int jsh, double complex *buf, int *shls_slice, int *ao_loc, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; ish += ish0; jsh += jsh0; const int nrow = ao_loc[ish1] - ao_loc[ish0]; const int ncol = ao_loc[jsh1] - ao_loc[jsh0]; const size_t off = ao_loc[ish] - ao_loc[ish0] + (ao_loc[jsh] - ao_loc[jsh0]) * nrow; int shls[2] = {ish, jsh}; int dims[2] = {nrow, ncol}; (*intor)(mat+off*nGv, shls, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); } void GTO_ft_fill_s1hermi(int (*intor)(), int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex *mat, int comp, int ish, int jsh, double complex *buf, int *shls_slice, int *ao_loc, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; if (ip < jp) { return; } const int nrow = ao_loc[ish1] - ao_loc[ish0]; const int ncol = ao_loc[jsh1] - ao_loc[jsh0]; const size_t off = ao_loc[ish] - ao_loc[ish0] + (ao_loc[jsh] - ao_loc[jsh0]) * nrow; const size_t NGv = nGv; int shls[2] = {ish, jsh}; int dims[2] = {nrow, ncol}; (*intor)(mat+off*NGv, shls, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); if (ip != jp && ish0 == jsh0 && ish1 == jsh1) { const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; double complex *in = mat + off * NGv; double complex *out = mat + (ao_loc[jsh] - ao_loc[jsh0] + (ao_loc[ish] - ao_loc[ish0]) * nrow) * NGv; int i, j, n, ic; double complex *pout, *pin; for (ic = 0; ic < comp; ic++) { for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pin = in + NGv * (j*nrow+i); pout = out + NGv * (i*nrow+j); for (n = 0; n < nGv; n++) { pout[n] = pin[n]; } } } out += nrow * ncol * NGv; } } } void 
GTO_ft_fill_s2(int (*intor)(), int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex *mat, int comp, int ish, int jsh, double complex *buf, int *shls_slice, int *ao_loc, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; if (ip < jp) { return; } const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int i0 = ao_loc[ish0]; const size_t off0 = i0 * (i0 + 1) / 2; const size_t off = ip * (ip + 1) / 2 - off0 + jp; const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0; const size_t NGv = nGv; int shls[2] = {ish, jsh}; int dims[2] = {di, dj}; (*intor)(buf, shls, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); if (ip != jp) { zcopy_s2_igtj(mat+off*NGv, buf, NGv, comp, nij, ip, di, dj); } else { zcopy_s2_ieqj(mat+off*NGv, buf, NGv, comp, nij, ip, di, dj); } } /* * Fourier transform AO pairs and add to mat (inplace) */ void GTO_ft_fill_drv(int (*intor)(), FPtr_eval_gz eval_gz, void (*fill)(), double complex *mat, int comp, int *shls_slice, int *ao_loc, double phase, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int nish = ish1 - ish0; const int njsh = jsh1 - jsh0; const double complex fac = cos(phase) + sin(phase)*_Complex_I; int (*eval_aopair)() = NULL; if (intor != &GTO_ft_ovlp_cart && intor != &GTO_ft_ovlp_sph) { eval_aopair = &GTO_aopair_lazy_contract; } #pragma omp parallel default(none) \ shared(intor, eval_gz, eval_aopair, fill, mat, comp, shls_slice, \ ao_loc, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env) { int i, j, ij; double complex *buf = malloc(sizeof(double complex) * NCTRMAX*NCTRMAX*comp*(size_t)nGv); #pragma omp for schedule(dynamic) for (ij = 0; ij < nish*njsh; ij++) { i = ij / njsh; j = ij % njsh; (*fill)(intor, eval_aopair, eval_gz, mat, comp, i, j, buf, shls_slice, ao_loc, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); } free(buf); } } /* * Given npair of shls in shls_lst, FT their AO pair value and add to * out (inplace) */ void GTO_ft_fill_shls_drv(int (*intor)(), FPtr_eval_gz eval_gz, double complex *out, int comp, int npair, int *shls_lst, int *ao_loc, double phase, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { int n, di, dj, ish, jsh; int *ijloc = malloc(sizeof(int) * npair); ijloc[0] = 0; for (n = 1; n < npair; n++) { ish = shls_lst[n*2-2]; jsh = shls_lst[n*2-1]; di = ao_loc[ish+1] - ao_loc[ish]; dj = ao_loc[jsh+1] - ao_loc[jsh]; ijloc[n] = ijloc[n-1] + di*dj; } const double complex fac = cos(phase) + sin(phase)*_Complex_I; const size_t NGv = nGv; int (*eval_aopair)() = NULL; if (intor != &GTO_ft_ovlp_cart && intor != &GTO_ft_ovlp_sph) { eval_aopair = &GTO_aopair_lazy_contract; } #pragma omp parallel default(none) \ shared(intor, eval_gz, eval_aopair, out, comp, Gv, b, gxyz, gs, \ nGv, npair, shls_lst, ao_loc, \ atm, natm, bas, nbas, env, ijloc) \ private(n) { int ish, jsh; int dims[2]; #pragma omp for schedule(dynamic) for (n = 0; n < npair; n++) { ish = shls_lst[n*2 ]; jsh = shls_lst[n*2+1]; dims[0] = ao_loc[ish+1] - ao_loc[ish]; dims[1] = ao_loc[jsh+1] - 
ao_loc[jsh]; (*intor)(out+ijloc[n]*comp*NGv, shls_lst+n*2, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); } } free(ijloc); }
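/*
 * Worked example (illustrative indices, not taken from any real basis) for the s2
 * packed lower-triangle layout used by GTO_ft_fill_s2 above: with ao_loc[ish0] == 0
 * (so off0 == 0), ip == 3 and jp == 1, the offset is
 *     off = ip*(ip+1)/2 - off0 + jp = 3*4/2 + 1 = 7,
 * i.e. the AO pair (3,1) is the 8th entry in the packed order
 * (0,0),(1,0),(1,1),(2,0),(2,1),(2,2),(3,0),(3,1), and its data starts at offset
 * off*NGv inside each of the comp component blocks (each nij*NGv values long).
 */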
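/*
 * Illustrative standalone sketch, not part of the library above: the eval_gz kernels
 * such as GTO_Gv_general weight each plane wave by fac * exp(-|G|^2/(4*aij)) * exp(-i G.rij),
 * zeroing terms with |G|^2 >= 4*aij*EXPCUTOFF. The small program below re-derives that
 * weight for a single G vector; the numeric inputs (aij, rij, G, and the cutoff value 60
 * standing in for EXPCUTOFF) are made-up demonstration values only.
 */
#include <complex.h>
#include <math.h>
#include <stdio.h>

/* same screening and weight as the loop body of GTO_Gv_general, for one G vector */
static double complex pw_weight(double aij, const double rij[3], const double G[3],
                                double complex fac, double expcutoff)
{
        double kk = G[0]*G[0] + G[1]*G[1] + G[2]*G[2];
        if (kk >= expcutoff * aij * 4) {
                return 0;       /* screened out, as in the kernels above */
        }
        double kR = G[0]*rij[0] + G[1]*rij[1] + G[2]*rij[2];
        return fac * exp(-.25 * kk / aij) * (cos(kR) - sin(kR) * _Complex_I);
}

int main(void)
{
        double rij[3] = {0.1, 0.0, -0.2};       /* hypothetical Gaussian-pair center */
        double G[3]   = {1.0, 0.5, 0.0};        /* hypothetical G vector */
        double complex w = pw_weight(2.5, rij, G, 1.0, 60.0);
        printf("plane-wave weight = %.6f %+.6fi\n", creal(w), cimag(w));
        return 0;
}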
MzXMLHandler.h
// -------------------------------------------------------------------------- // OpenMS -- Open-Source Mass Spectrometry // -------------------------------------------------------------------------- // Copyright The OpenMS Team -- Eberhard Karls University Tuebingen, // ETH Zurich, and Freie Universitaet Berlin 2002-2013. // // This software is released under a three-clause BSD license: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of any author or any participating institution // may be used to endorse or promote products derived from this software // without specific prior written permission. // For a full list of authors, refer to the file AUTHORS. // -------------------------------------------------------------------------- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING // INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // -------------------------------------------------------------------------- // $Maintainer: Andreas Bertsch $ // $Authors: Marc Sturm $ // -------------------------------------------------------------------------- #ifndef OPENMS_FORMAT_HANDLERS_MZXMLHANDLER_H #define OPENMS_FORMAT_HANDLERS_MZXMLHANDLER_H #include <OpenMS/CONCEPT/ProgressLogger.h> #include <OpenMS/FORMAT/Base64.h> #include <OpenMS/FORMAT/OPTIONS/PeakFileOptions.h> #include <OpenMS/FORMAT/HANDLERS/XMLHandler.h> #include <OpenMS/DATASTRUCTURES/String.h> #include <OpenMS/KERNEL/MSExperiment.h> #include <OpenMS/INTERFACES/IMSDataConsumer.h> #include <stack> namespace OpenMS { class MetaInfoInterface; namespace Internal { /** @brief XML handlers for MzXMLFile MapType has to be a MSExperiment or have the same interface. Do not use this class. It is only needed in MzXMLFile. 
*/ template <typename MapType> class MzXMLHandler : public XMLHandler { public: /**@name Constructors and destructor */ //@{ /// Constructor for a read-only handler MzXMLHandler(MapType& exp, const String& filename, const String& version, ProgressLogger& logger) : XMLHandler(filename, version), exp_(&exp), cexp_(0), decoder_(), nesting_level_(0), skip_spectrum_(false), spec_write_counter_(1), consumer_(NULL), scan_count_(0), logger_(logger) { init_(); } /// Constructor for a write-only handler MzXMLHandler(const MapType& exp, const String& filename, const String& version, const ProgressLogger& logger) : XMLHandler(filename, version), exp_(0), cexp_(&exp), decoder_(), nesting_level_(0), skip_spectrum_(false), spec_write_counter_(1), consumer_(NULL), scan_count_(0), logger_(logger) { init_(); } /// Destructor virtual ~MzXMLHandler() {} //@} // Docu in base class virtual void endElement(const XMLCh* const uri, const XMLCh* const local_name, const XMLCh* const qname); // Docu in base class virtual void startElement(const XMLCh* const uri, const XMLCh* const local_name, const XMLCh* const qname, const xercesc::Attributes& attributes); // Docu in base class virtual void characters(const XMLCh* const chars, const XMLSize_t length); /// Write the contents to a stream void writeTo(std::ostream& os); /// Sets the options void setOptions(const PeakFileOptions& options) { options_ = options; } ///Gets the scan count UInt getScanCount() { return scan_count_; } /// Set the IMSDataConsumer consumer which will consume the read data void setMSDataConsumer(Interfaces::IMSDataConsumer<MapType> * consumer) { consumer_ = consumer; } private: /// initialize members (call from C'tor) void init_() { cv_terms_.resize(6); //Polarity String("any;+;-").split(';', cv_terms_[0]); //Scan type // is no longer used cv_terms_[1] is empty now //Ionization method String(";ESI;EI;CI;FAB;;;;;;;;;;;;;APCI;;;;;;;;MALDI").split(';', cv_terms_[2]); cv_terms_[2].resize(IonSource::SIZE_OF_IONIZATIONMETHOD); //Mass analyzer String(";Quadrupole;Quadrupole Ion Trap;;;TOF;Magnetic Sector;FT-ICR;").split(';', cv_terms_[3]); cv_terms_[3].resize(MassAnalyzer::SIZE_OF_ANALYZERTYPE); //Detector String(";EMT;;;Faraday Cup;;;;;Channeltron;Daly;Microchannel plate").split(';', cv_terms_[4]); cv_terms_[4].resize(IonDetector::SIZE_OF_TYPE); //Resolution method String(";FWHM;TenPercentValley;Baseline").split(';', cv_terms_[5]); cv_terms_[5].resize(MassAnalyzer::SIZE_OF_RESOLUTIONMETHOD); /* // OLD: cv_terms_.resize(6); //Polarity String("any;+;-").split(';',cv_terms_[0]); //Scan type // is no longer used cv_terms_[1] is empty now //Ionization method String(";ESI;EI;CI;FAB;TSP;MALDI;FD;FI;PD;SI;TI;API;ISI;CID;CAD;HN;APCI;APPI;ICP").split(';',cv_terms_[2]); //Mass analyzer String(";Quadrupole;Quadrupole Ion Trap;;;TOF;Magnetic Sector;FT-ICR;").split(';',cv_terms_[3]); //Detector String(";EMT;Daly;;Faraday Cup;;;;Channeltron").split(';',cv_terms_[4]); //Resolution method String(";FWHM;TenPercentValley;Baseline").split(';',cv_terms_[5]); */ } protected: /// Peak type typedef typename MapType::PeakType PeakType; /// Spectrum type typedef MSSpectrum<PeakType> SpectrumType; /// map pointer for reading MapType* exp_; /// map pointer for writing const MapType* cexp_; /// Options for loading and storing PeakFileOptions options_; /**@name temporary data structures to hold parsed data */ //@{ Base64 decoder_; Int nesting_level_; /** @brief Data necessary to generate a single spectrum Small struct holds all data necessary to populate a spectrum at a later 
timepoint (since reading of the base64 data and generation of spectra can be done at distinct timepoints). */ struct SpectrumData { UInt peak_count_; String precision_; String compressionType_; String char_rest_; SpectrumType spectrum; bool skip_data; }; /// Vector of spectrum data stored for later parallel processing std::vector< SpectrumData > spectrum_data_; //@} /// Flag that indicates whether this spectrum should be skipped (due to options) bool skip_spectrum_; /// spectrum counter (spectra without peaks are not written) UInt spec_write_counter_; /// Consumer class to work on spectra Interfaces::IMSDataConsumer<MapType>* consumer_; /// Consumer class to work on spectra UInt scan_count_; /// Progress logging class const ProgressLogger& logger_; /// write metaInfo to xml (usually in nameValue-tag) inline void writeUserParam_(std::ostream& os, const MetaInfoInterface& meta, int indent = 4, String tag = "nameValue") { std::vector<String> keys; // Vector to hold keys to meta info meta.getKeys(keys); for (std::vector<String>::const_iterator it = keys.begin(); it != keys.end(); ++it) { if ((*it)[0] != '#') // internally used meta info start with '#' { os << String(indent, '\t') << "<" << tag << " name=\"" << *it << "\" value=\"" << meta.getMetaValue(*it) << "\"/>\n"; } } } /// data processing auxiliary variable std::vector<DataProcessing> data_processing_; /** @brief Fill a single spectrum with data from input @note Do not modify any internal state variables of the class since this function will be executed in parallel. */ void doPopulateSpectraWithData_(SpectrumData & spectrum_data) { typedef typename SpectrumType::PeakType PeakType; //std::cout << "reading scan" << "\n"; if (spectrum_data.char_rest_ == "") // no peaks { return; } //remove whitespaces from binary data //this should not be necessary, but linebreaks inside the base64 data are unfortunately no exception spectrum_data.char_rest_.removeWhitespaces(); if (spectrum_data.precision_ == "64") { std::vector<double> data; if (spectrum_data.compressionType_ == "zlib") { decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data, true); } else { decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data); } spectrum_data.char_rest_ = ""; PeakType peak; //push_back the peaks into the container for (Size n = 0; n < (2 * spectrum_data.peak_count_); n += 2) { // check if peak in in the specified m/z and intensity range if ((!options_.hasMZRange() || options_.getMZRange().encloses(DPosition<1>(data[n]))) && (!options_.hasIntensityRange() || options_.getIntensityRange().encloses(DPosition<1>(data[n + 1])))) { peak.setMZ(data[n]); peak.setIntensity(data[n + 1]); spectrum_data.spectrum.push_back(peak); } } } else //precision 32 { std::vector<float> data; if (spectrum_data.compressionType_ == "zlib") { decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data, true); } else { decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data); } spectrum_data.char_rest_ = ""; PeakType peak; //push_back the peaks into the container for (Size n = 0; n < (2 * spectrum_data.peak_count_); n += 2) { if ((!options_.hasMZRange() || options_.getMZRange().encloses(DPosition<1>(data[n]))) && (!options_.hasIntensityRange() || options_.getIntensityRange().encloses(DPosition<1>(data[n + 1])))) { peak.setMZ(data[n]); peak.setIntensity(data[n + 1]); spectrum_data.spectrum.push_back(peak); } } } } /** @brief Populate all spectra on the stack with data from input Will populate all spectra on the current 
work stack with data (using multiple threads if available) and append them to the result. */ void populateSpectraWithData_() { // Whether spectrum should be populated with data if (options_.getFillData()) { size_t errCount = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (SignedSize i = 0; i < (SignedSize)spectrum_data_.size(); i++) { // parallel exception catching and re-throwing business if (!errCount) // no need to parse further if already an error was encountered { try { doPopulateSpectraWithData_(spectrum_data_[i]); } catch (...) { #pragma omp critical(HandleException) ++errCount; } } } if (errCount != 0) { throw Exception::ParseError(__FILE__, __LINE__, __PRETTY_FUNCTION__, file_, "Error during parsing of binary data."); } } // Append all spectra for (Size i = 0; i < spectrum_data_.size(); i++) { if (consumer_ != NULL) { consumer_->consumeSpectrum(spectrum_data_[i].spectrum); if (options_.getAlwaysAppendData()) { exp_->addSpectrum(spectrum_data_[i].spectrum); } } else { exp_->addSpectrum(spectrum_data_[i].spectrum); } } // Delete batch spectrum_data_.clear(); } private: /// Not implemented MzXMLHandler(); static const XMLCh* s_value_; static const XMLCh* s_count_; static const XMLCh* s_type_; static const XMLCh* s_name_; static const XMLCh* s_version_; static const XMLCh* s_filename_; static const XMLCh* s_filetype_; static const XMLCh* s_filesha1_; static const XMLCh* s_completiontime_; static const XMLCh* s_precision_; static const XMLCh* s_byteorder_; static const XMLCh* s_pairorder_; static const XMLCh* s_compressionType_; static const XMLCh* s_precursorintensity_; static const XMLCh* s_precursorcharge_; static const XMLCh* s_windowwideness_; static const XMLCh* s_mslevel_; static const XMLCh* s_peakscount_; static const XMLCh* s_polarity_; static const XMLCh* s_scantype_; static const XMLCh* s_filterline_; static const XMLCh* s_retentiontime_; static const XMLCh* s_startmz_; static const XMLCh* s_endmz_; static const XMLCh* s_first_; static const XMLCh* s_last_; static const XMLCh* s_phone_; static const XMLCh* s_email_; static const XMLCh* s_uri_; static const XMLCh* s_num_; static const XMLCh* s_intensitycutoff_; static const XMLCh* s_centroided_; static const XMLCh* s_deisotoped_; static const XMLCh* s_chargedeconvoluted_; // init all the static members, which is necessary because otherwise the undefined order will cause problems void initStaticMembers_() { static bool init(false); if (!init) { s_value_ = xercesc::XMLString::transcode("value"); s_count_ = xercesc::XMLString::transcode("scanCount"); s_type_ = xercesc::XMLString::transcode("type"); s_name_ = xercesc::XMLString::transcode("name"); s_version_ = xercesc::XMLString::transcode("version"); s_filename_ = xercesc::XMLString::transcode("fileName"); s_filetype_ = xercesc::XMLString::transcode("fileType"); s_filesha1_ = xercesc::XMLString::transcode("fileSha1"); s_completiontime_ = xercesc::XMLString::transcode("completionTime"); s_precision_ = xercesc::XMLString::transcode("precision"); s_byteorder_ = xercesc::XMLString::transcode("byteOrder"); s_pairorder_ = xercesc::XMLString::transcode("pairOrder"); s_compressionType_ = xercesc::XMLString::transcode("compressionType"); s_precursorintensity_ = xercesc::XMLString::transcode("precursorIntensity"); s_precursorcharge_ = xercesc::XMLString::transcode("precursorCharge"); s_windowwideness_ = xercesc::XMLString::transcode("windowWideness"); s_mslevel_ = xercesc::XMLString::transcode("msLevel"); s_peakscount_ = xercesc::XMLString::transcode("peaksCount"); s_polarity_ = 
xercesc::XMLString::transcode("polarity"); s_scantype_ = xercesc::XMLString::transcode("scanType"); s_filterline_ = xercesc::XMLString::transcode("filterLine"); s_retentiontime_ = xercesc::XMLString::transcode("retentionTime"); s_startmz_ = xercesc::XMLString::transcode("startMz"); s_endmz_ = xercesc::XMLString::transcode("endMz"); s_first_ = xercesc::XMLString::transcode("first"); s_last_ = xercesc::XMLString::transcode("last"); s_phone_ = xercesc::XMLString::transcode("phone"); s_email_ = xercesc::XMLString::transcode("email"); s_uri_ = xercesc::XMLString::transcode("URI"); s_num_ = xercesc::XMLString::transcode("num"); s_intensitycutoff_ = xercesc::XMLString::transcode("intensityCutoff"); s_centroided_ = xercesc::XMLString::transcode("centroided"); s_deisotoped_ = xercesc::XMLString::transcode("deisotoped"); s_chargedeconvoluted_ = xercesc::XMLString::transcode("chargeDeconvoluted"); init = true; } return; } }; //-------------------------------------------------------------------------------- // this cannot be moved into a function as VS2008 does not allow more than 31 static members in a function .. don't ask... template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_value_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_count_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_type_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_name_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_version_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_filename_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_filetype_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_filesha1_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_completiontime_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_precision_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_byteorder_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_pairorder_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_compressionType_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_precursorintensity_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_precursorcharge_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_windowwideness_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_mslevel_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_peakscount_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_polarity_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_scantype_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_filterline_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_retentiontime_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_startmz_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_endmz_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_first_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_last_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_phone_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_email_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_uri_ = 0; template <typename MapType> 
const XMLCh * MzXMLHandler<MapType>::s_num_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_intensitycutoff_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_centroided_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_deisotoped_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_chargedeconvoluted_ = 0; template <typename MapType> void MzXMLHandler<MapType>::startElement(const XMLCh* const /*uri*/, const XMLCh* const /*local_name*/, const XMLCh* const qname, const xercesc::Attributes& attributes) { OPENMS_PRECONDITION(nesting_level_ >= 0, "Nesting level needs to be zero or more") static bool init_static_members(false); if (!init_static_members) { initStaticMembers_(); } String tag = sm_.convert(qname); open_tags_.push_back(tag); //std::cout << " -- Start -- "<< tag << " -- " << "\n"; //Skip all tags until the the next scan if (skip_spectrum_ && tag != "scan") return; if (tag == "msRun") { Int count = 0; optionalAttributeAsInt_(count, attributes, s_count_); exp_->reserve(count); logger_.startProgress(0, count, "loading mzXML file"); scan_count_ = 0; data_processing_.clear(); //start and end time are xs:duration. This makes no sense => ignore them } else if (tag == "parentFile") { SourceFile sf; sf.setNameOfFile(attributeAsString_(attributes, s_filename_)); sf.setFileType(attributeAsString_(attributes, s_filetype_)); sf.setChecksum(attributeAsString_(attributes, s_filesha1_), SourceFile::SHA1); exp_->getSourceFiles().push_back(sf); } else if (tag == "software") { String& parent_tag = *(open_tags_.end() - 2); if (parent_tag == "dataProcessing") { data_processing_.back().getSoftware().setVersion(attributeAsString_(attributes, s_version_)); data_processing_.back().getSoftware().setName(attributeAsString_(attributes, s_name_)); data_processing_.back().setMetaValue("#type", String(attributeAsString_(attributes, s_type_))); String time; optionalAttributeAsString_(time, attributes, s_completiontime_); data_processing_.back().setCompletionTime(asDateTime_(time)); } else if (parent_tag == "msInstrument") { exp_->getInstrument().getSoftware().setVersion(attributeAsString_(attributes, s_version_)); exp_->getInstrument().getSoftware().setName(attributeAsString_(attributes, s_name_)); } } else if (tag == "peaks") { //precision spectrum_data_.back().precision_ = "32"; optionalAttributeAsString_(spectrum_data_.back().precision_, attributes, s_precision_); if (spectrum_data_.back().precision_ != "32" && spectrum_data_.back().precision_ != "64") { error(LOAD, String("Invalid precision '") + spectrum_data_.back().precision_ + "' in element 'peaks'"); } //byte order String byte_order = "network"; optionalAttributeAsString_(byte_order, attributes, s_byteorder_); if (byte_order != "network") { error(LOAD, String("Invalid or missing byte order '") + byte_order + "' in element 'peaks'. Must be 'network'!"); } //pair order String pair_order = "m/z-int"; optionalAttributeAsString_(pair_order, attributes, s_pairorder_); if (pair_order != "m/z-int") { error(LOAD, String("Invalid or missing pair order '") + pair_order + "' in element 'peaks'. 
Must be 'm/z-int'!"); } //compressionType spectrum_data_.back().compressionType_ = "none"; optionalAttributeAsString_(spectrum_data_.back().compressionType_, attributes, s_compressionType_); if (spectrum_data_.back().compressionType_ != "none" && spectrum_data_.back().compressionType_ != "zlib") { error(LOAD, String("Invalid compression type ") + spectrum_data_.back().compressionType_ + "in elements 'peaks'. Must be 'none' or 'zlib'! "); } } else if (tag == "precursorMz") { //add new precursor spectrum_data_.back().spectrum.getPrecursors().push_back(Precursor()); //intensity try { spectrum_data_.back().spectrum.getPrecursors().back().setIntensity(attributeAsDouble_(attributes, s_precursorintensity_)); } catch (Exception::ParseError& /*e*/) { error(LOAD, "Mandatory attribute 'precursorIntensity' of tag 'precursorMz' not found! Setting precursor intensity to zero!"); } //charge Int charge = 0; if (optionalAttributeAsInt_(charge, attributes, s_precursorcharge_)) { spectrum_data_.back().spectrum.getPrecursors().back().setCharge(charge); } //window bounds (here only the width is stored in both fields - this is corrected when we parse the m/z position) double window = 0.0; if (optionalAttributeAsDouble_(window, attributes, s_windowwideness_)) { spectrum_data_.back().spectrum.getPrecursors().back().setIsolationWindowLowerOffset(window); } } else if (tag == "scan") { skip_spectrum_ = false; nesting_level_++; if (options_.getMetadataOnly()) throw EndParsingSoftly(__FILE__, __LINE__, __PRETTY_FUNCTION__); // check if the scan is in the desired MS / RT range UInt ms_level = attributeAsInt_(attributes, s_mslevel_); if (ms_level == 0) { warning(LOAD, String("Invalid 'msLevel' attribute with value '0' in 'scan' element found. Assuming ms level 1!")); ms_level = 1; } //parse retention time and convert it from xs:duration to seconds double retention_time = 0.0; String time_string = ""; if (optionalAttributeAsString_(time_string, attributes, s_retentiontime_)) { time_string = time_string.suffix('T'); //std::cout << "Initial trim: " << time_string << "\n"; if (time_string.has('H')) { retention_time += 3600 * asDouble_(time_string.prefix('H')); time_string = time_string.suffix('H'); //std::cout << "After H: " << time_string << "\n"; } if (time_string.has('M')) { retention_time += 60 * asDouble_(time_string.prefix('M')); time_string = time_string.suffix('M'); //std::cout << "After M: " << time_string << "\n"; } if (time_string.has('S')) { retention_time += asDouble_(time_string.prefix('S')); time_string = time_string.suffix('S'); //std::cout << "After S: " << time_string << "\n"; } } logger_.setProgress(scan_count_); if ((options_.hasRTRange() && !options_.getRTRange().encloses(DPosition<1>(retention_time))) || (options_.hasMSLevels() && !options_.containsMSLevel(ms_level)) || options_.getSizeOnly()) { // skip this tag skip_spectrum_ = true; ++scan_count_; return; } // Add a new spectrum, initialize and set MS level and RT spectrum_data_.resize(spectrum_data_.size() + 1); // TODO !! 
spectrum_data_.back().peak_count_ = 0; spectrum_data_.back().spectrum.setMSLevel(ms_level); spectrum_data_.back().spectrum.setRT(retention_time); spectrum_data_.back().spectrum.setNativeID(String("scan=") + attributeAsString_(attributes, s_num_)); //peak count == twice the scan size spectrum_data_.back().peak_count_ = attributeAsInt_(attributes, s_peakscount_); spectrum_data_.back().spectrum.reserve(spectrum_data_.back().peak_count_ / 2 + 1); spectrum_data_.back().spectrum.setDataProcessing(data_processing_); //centroided, chargeDeconvoluted, deisotoped, collisionEnergy are ignored //other optional attributes ScanWindow window; optionalAttributeAsDouble_(window.begin, attributes, s_startmz_); optionalAttributeAsDouble_(window.end, attributes, s_endmz_); if (window.begin != 0.0 || window.end != 0.0) { spectrum_data_.back().spectrum.getInstrumentSettings().getScanWindows().push_back(window); } String polarity = "any"; optionalAttributeAsString_(polarity, attributes, s_polarity_); spectrum_data_.back().spectrum.getInstrumentSettings().setPolarity((IonSource::Polarity) cvStringToEnum_(0, polarity, "polarity")); // Filter string (see CV term MS:1000512 in mzML) String filterLine = ""; optionalAttributeAsString_(filterLine, attributes, s_filterline_); if (!filterLine.empty()) { spectrum_data_.back().spectrum.setMetaValue("filter string", filterLine); } String type = ""; optionalAttributeAsString_(type, attributes, s_scantype_); if (type == "") { //unknown/unset => do nothing here => no warning in the end } else if (type == "zoom") { spectrum_data_.back().spectrum.getInstrumentSettings().setZoomScan(true); spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM); } else if (type == "Full") { if (ms_level > 1) spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MSNSPECTRUM); else spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM); } else if (type == "SIM") { spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::SIM); } else if (type == "SRM" || type == "MRM") { spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::SRM); } else if (type == "CRM") { spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::CRM); } else if (type == "Q1") { spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM); } else if (type == "Q3") { spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM); } else if (type == "EMS") //Non-standard type: Enhanced MS (ABI - Sashimi converter) { spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM); } else if (type == "EPI") //Non-standard type: Enhanced Product Ion (ABI - Sashimi converter) { spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM); spectrum_data_.back().spectrum.setMSLevel(2); } else if (type == "ER") // Non-standard type: Enhanced Resolution (ABI - Sashimi converter) { spectrum_data_.back().spectrum.getInstrumentSettings().setZoomScan(true); spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM); } else { spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM); warning(LOAD, String("Unknown scan mode '") + type + "'. 
Assuming full scan"); } ++scan_count_; } else if (tag == "operator") { exp_->getContacts().resize(1); exp_->getContacts().back().setFirstName(attributeAsString_(attributes, s_first_)); exp_->getContacts().back().setLastName(attributeAsString_(attributes, s_last_)); String tmp = ""; optionalAttributeAsString_(tmp, attributes, s_email_); exp_->getContacts().back().setEmail(tmp); tmp = ""; optionalAttributeAsString_(tmp, attributes, s_phone_); if (tmp != "") { exp_->getContacts().back().setMetaValue("#phone", tmp); } tmp = ""; optionalAttributeAsString_(tmp, attributes, s_uri_); exp_->getContacts().back().setURL(tmp); } else if (tag == "msManufacturer") { exp_->getInstrument().setVendor(attributeAsString_(attributes, s_value_)); } else if (tag == "msModel") { exp_->getInstrument().setModel(attributeAsString_(attributes, s_value_)); } else if (tag == "msIonisation") { exp_->getInstrument().getIonSources().resize(1); exp_->getInstrument().getIonSources()[0].setIonizationMethod((IonSource::IonizationMethod) cvStringToEnum_(2, attributeAsString_(attributes, s_value_), "msIonization")); } else if (tag == "msMassAnalyzer") { exp_->getInstrument().getMassAnalyzers().resize(1); exp_->getInstrument().getMassAnalyzers()[0].setType((MassAnalyzer::AnalyzerType) cvStringToEnum_(3, attributeAsString_(attributes, s_value_), "msMassAnalyzer")); } else if (tag == "msDetector") { exp_->getInstrument().getIonDetectors().resize(1); exp_->getInstrument().getIonDetectors()[0].setType((IonDetector::Type) cvStringToEnum_(4, attributeAsString_(attributes, s_value_), "msDetector")); } else if (tag == "msResolution") { exp_->getInstrument().getMassAnalyzers()[0].setResolutionMethod((MassAnalyzer::ResolutionMethod) cvStringToEnum_(5, attributeAsString_(attributes, s_value_), "msResolution")); } else if (tag == "dataProcessing") { data_processing_.push_back(DataProcessing()); String boolean = ""; optionalAttributeAsString_(boolean, attributes, s_deisotoped_); if (boolean == "true" || boolean == "1") { data_processing_.back().getProcessingActions().insert(DataProcessing::DEISOTOPING); } boolean = ""; optionalAttributeAsString_(boolean, attributes, s_chargedeconvoluted_); if (boolean == "true" || boolean == "1") { data_processing_.back().getProcessingActions().insert(DataProcessing::CHARGE_DECONVOLUTION); } double cutoff = 0.0; optionalAttributeAsDouble_(cutoff, attributes, s_intensitycutoff_); if (cutoff != 0.0) { data_processing_.back().setMetaValue("#intensity_cutoff", cutoff); } boolean = ""; optionalAttributeAsString_(boolean, attributes, s_centroided_); if (boolean == "true" || boolean == "1") { data_processing_.back().getProcessingActions().insert(DataProcessing::PEAK_PICKING); } } else if (tag == "nameValue") { String name = ""; optionalAttributeAsString_(name, attributes, s_name_); if (name == "") return; String value = ""; optionalAttributeAsString_(value, attributes, s_value_); String& parent_tag = *(open_tags_.end() - 2); if (parent_tag == "msInstrument") { exp_->getInstrument().setMetaValue(name, value); } else if (parent_tag == "scan") { spectrum_data_.back().spectrum.setMetaValue(name, value); } else { std::cout << " Warning: Unexpected tag 'nameValue' in tag '" << parent_tag << "'" << "\n"; } } else if (tag == "processingOperation") { String name = ""; optionalAttributeAsString_(name, attributes, s_name_); if (name == "") return; String value = ""; optionalAttributeAsString_(value, attributes, s_value_); data_processing_.back().setMetaValue(name, value); } //std::cout << " -- !Start -- " << "\n"; } template 
<typename MapType> void MzXMLHandler<MapType>::endElement(const XMLCh* const /*uri*/, const XMLCh* const /*local_name*/, const XMLCh* const qname) { OPENMS_PRECONDITION(nesting_level_ >= 0, "Nesting level needs to be zero or more") //std::cout << " -- End -- " << sm_.convert(qname) << " -- " << "\n"; static const XMLCh* s_mzxml = xercesc::XMLString::transcode("mzXML"); static const XMLCh* s_scan = xercesc::XMLString::transcode("scan"); open_tags_.pop_back(); if (equal_(qname, s_mzxml)) { // Flush the remaining data populateSpectraWithData_(); // End of mzXML logger_.endProgress(); } else if (equal_(qname, s_scan)) { // End of scan: go up one nesting level // Check whether to populate spectra when on highest nesting level nesting_level_--; OPENMS_PRECONDITION(nesting_level_ >= 0, "Nesting level needs to be zero or more") if (nesting_level_ == 0 && spectrum_data_.size() >= options_.getMaxDataPoolSize()) { populateSpectraWithData_(); } } //std::cout << " -- End -- " << "\n"; sm_.clear(); } template <typename MapType> void MzXMLHandler<MapType>::characters(const XMLCh* const chars, const XMLSize_t length) { //Abort if this spectrum should be skipped if (skip_spectrum_) return; if (open_tags_.back() == "peaks") { //chars may be split to several chunks => concatenate them if (options_.getFillData()) { // Since we convert a Base64 string here, it can only contain plain ASCII sm_.appendASCII(chars, length, spectrum_data_.back().char_rest_); } } else if (open_tags_.back() == "offset" || open_tags_.back() == "indexOffset" || open_tags_.back() == "sha1") { } else if (open_tags_.back() == "precursorMz") { char* transcoded_chars = sm_.convert(chars); double mz_pos = asDouble_(transcoded_chars); //precursor m/z spectrum_data_.back().spectrum.getPrecursors().back().setMZ(mz_pos); //update window bounds - center them around the m/z pos double window_width = spectrum_data_.back().spectrum.getPrecursors().back().getIsolationWindowLowerOffset(); if (window_width != 0.0) { spectrum_data_.back().spectrum.getPrecursors().back().setIsolationWindowLowerOffset(0.5 * window_width); spectrum_data_.back().spectrum.getPrecursors().back().setIsolationWindowUpperOffset(0.5 * window_width); } } else if (open_tags_.back() == "comment") { char* transcoded_chars = sm_.convert(chars); String parent_tag = *(open_tags_.end() - 2); //std::cout << "- Comment of parent " << parent_tag << "\n"; if (parent_tag == "msInstrument") { exp_->getInstrument().setMetaValue("#comment", String(transcoded_chars)); } else if (parent_tag == "dataProcessing") { //this is currently ignored } else if (parent_tag == "scan") { spectrum_data_.back().spectrum.setComment(transcoded_chars); } else if (String(transcoded_chars).trim() != "") { warning(LOAD, String("Unhandled comment '") + transcoded_chars + "' in element '" + open_tags_.back() + "'"); } } else { char* transcoded_chars = sm_.convert(chars); if (String(transcoded_chars).trim() != "") { warning(LOAD, String("Unhandled character content '") + transcoded_chars + "' in element '" + open_tags_.back() + "'"); } } } template <typename MapType> void MzXMLHandler<MapType>::writeTo(std::ostream& os) { //determine how many spectra there are (count only those with peaks) UInt count_tmp_ = 0; for (Size s = 0; s < cexp_->size(); s++) { const SpectrumType& spec = (*cexp_)[s]; if (spec.size() != 0) ++count_tmp_; } if (count_tmp_ == 0) ++count_tmp_; logger_.startProgress(0, cexp_->size(), "storing mzXML file"); os << "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n" << "<mzXML 
xmlns=\"http://sashimi.sourceforge.net/schema_revision/mzXML_2.1\" " << "xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" " << "xsi:schemaLocation=\"http://sashimi.sourceforge.net/schema_revision/mzXML_2.1 " << "http://sashimi.sourceforge.net/schema_revision/mzXML_2.1/mzXML_idx_2.1.xsd\">\n" << "\t<msRun scanCount=\"" << count_tmp_ << "\">\n"; //---------------------------------------------------------------------------------------- // parent files //---------------------------------------------------------------------------------------- if (cexp_->getSourceFiles().empty()) { os << "\t\t<parentFile fileName=\"\" fileType=\"processedData\" fileSha1=\"0000000000000000000000000000000000000000\"/>\n"; } else { for (Size i = 0; i < cexp_->getSourceFiles().size(); ++i) { const SourceFile& sf = cexp_->getSourceFiles()[i]; os << "\t\t<parentFile fileName=\"" << sf.getNameOfFile() << "\" fileType=\""; //file type is an enum in mzXML => search for 'raw' string String tmp_string = sf.getFileType(); tmp_string.toLower(); if (tmp_string.hasSubstring("raw")) { os << "RAWData"; } else { os << "processedData"; } //Sha1 checksum must have 40 characters => create a fake if it is unknown os << "\" fileSha1=\""; tmp_string = sf.getChecksum(); if (sf.getChecksum().size() != 40 || sf.getChecksumType() != SourceFile::SHA1) { os << "0000000000000000000000000000000000000000"; } else { os << sf.getChecksum(); } os << "\"/>\n"; } } //---------------------------------------------------------------------------------------- //instrument //---------------------------------------------------------------------------------------- if (cexp_->getInstrument() != Instrument() || cexp_->getContacts().size() != 0) { const Instrument& inst = cexp_->getInstrument(); os << "\t\t<msInstrument>\n" << "\t\t\t<msManufacturer category=\"msManufacturer\" value=\"" << inst.getVendor() << "\"/>\n" << "\t\t\t<msModel category=\"msModel\" value=\"" << inst.getModel() << "\"/>\n"; if (inst.getIonSources().empty() || !inst.getIonSources()[0].getIonizationMethod()) { os << "\t\t\t<msIonisation category=\"msIonisation\" value=\"\"/>\n"; } else { os << "\t\t\t<msIonisation category=\"msIonisation\" value=\"" << cv_terms_[2][inst.getIonSources()[0].getIonizationMethod()] << "\"/>\n"; } const std::vector<MassAnalyzer>& analyzers = inst.getMassAnalyzers(); if (analyzers.empty() || !analyzers[0].getResolutionMethod()) { os << "\t\t\t<msMassAnalyzer category=\"msMassAnalyzer\" value=\"\"/>\n"; } else { os << "\t\t\t<msMassAnalyzer category=\"msMassAnalyzer\" value=\"" << cv_terms_[3][analyzers[0].getType()] << "\"/>\n"; } if (inst.getIonDetectors().empty() || !inst.getIonDetectors()[0].getType()) { os << "\t\t\t<msDetector category=\"msDetector\" value=\"\"/>\n"; } else { os << "\t\t\t<msDetector category=\"msDetector\" value=\"" << cv_terms_[4][inst.getIonDetectors()[0].getType()] << "\"/>\n"; } os << "\t\t\t<software type=\"acquisition\" name=\"" << inst.getSoftware().getName() << "\" version=\"" << inst.getSoftware().getVersion() << "\"/>\n"; if (analyzers.empty() || !analyzers[0].getResolutionMethod()) { os << "\t\t\t<msResolution category=\"msResolution\" value=\"\"/>\n"; } else { os << "\t\t\t<msResolution category=\"msResolution\" value=\"" << cv_terms_[5][analyzers[0].getResolutionMethod()] << "\"/>\n"; } if (cexp_->getContacts().size() > 0) { const ContactPerson& cont = cexp_->getContacts()[0]; os << "\t\t\t<operator first=\"" << cont.getFirstName() << "\" last=\"" << cont.getLastName() << "\""; if (cont.getEmail() != "") { os << " 
email=\"" << cont.getEmail() << "\""; } if (cont.getURL() != "") { os << " URI=\"" << cont.getURL() << "\""; } if (cont.metaValueExists("#phone")) { os << " phone=\"" << (String)(cont.getMetaValue("#phone")) << "\""; } os << "/>\n"; } writeUserParam_(os, inst, 3); if (inst.metaValueExists("#comment")) { os << "\t\t\t<comment>" << inst.getMetaValue("#comment") << "</comment>\n"; } os << "\t\t</msInstrument>\n"; } //---------------------------------------------------------------------------------------- //data processing (the information of the first spectrum is assigned to the whole file) //---------------------------------------------------------------------------------------- if (cexp_->size() == 0 || (*cexp_)[0].getDataProcessing().empty()) { os << "\t\t<dataProcessing>\n" << "\t\t\t<software type=\"processing\" name=\"\" version=\"\"/>\n" << "\t\t</dataProcessing>\n"; } else { for (Size i = 0; i < (*cexp_)[0].getDataProcessing().size(); ++i) { const DataProcessing& data_processing = (*cexp_)[0].getDataProcessing()[i]; os << "\t\t<dataProcessing deisotoped=\"" << data_processing.getProcessingActions().count(DataProcessing::DEISOTOPING) << "\" chargeDeconvoluted=\"" << data_processing.getProcessingActions().count(DataProcessing::CHARGE_DECONVOLUTION) << "\" centroided=\"" << data_processing.getProcessingActions().count(DataProcessing::PEAK_PICKING) << "\""; if (data_processing.metaValueExists("#intensity_cutoff")) { os << " intensityCutoff=\"" << data_processing.getMetaValue("#intensity_cutoff").toString() << "\""; } os << ">\n" << "\t\t\t<software type=\""; if (data_processing.metaValueExists("#type")) { os << data_processing.getMetaValue("#type").toString(); } else { os << "processing"; } os << "\" name=\"" << data_processing.getSoftware().getName() << "\" version=\"" << data_processing.getSoftware().getVersion(); if (data_processing.getCompletionTime() != DateTime()) { os << "\" completionTime=\"" << data_processing.getCompletionTime().get().substitute(' ', 'T'); } os << "\"/>\n"; writeUserParam_(os, data_processing, 3, "processingOperation"); os << "\t\t</dataProcessing>\n"; } } //check if the nativeID of all spectra are numbers or numbers prefixed with 'scan=' //If not we need to renumber all spectra. bool all_numbers = true; bool all_empty = true; bool all_prefixed_numbers = true; for (Size s = 0; s < cexp_->size(); s++) { String native_id = (*cexp_)[s].getNativeID(); if (!native_id.hasPrefix("scan=")) { all_prefixed_numbers = false; } else { native_id = native_id.substr(5); } try { native_id.toInt(); } catch (Exception::ConversionError&) { all_numbers = false; all_prefixed_numbers = false; if (native_id != "") { all_empty = false; } } } //If we need to renumber and the nativeIDs were not empty, warn the user if (!all_numbers && !all_empty) { warning(STORE, "Not all spectrum native IDs are numbers or correctly prefixed with 'scan='. 
The spectra are renumbered and the native IDs are lost!"); } // write scans std::stack<UInt> open_scans; for (Size s = 0; s < cexp_->size(); s++) { logger_.setProgress(s); const SpectrumType& spec = (*cexp_)[s]; UInt ms_level = spec.getMSLevel(); open_scans.push(ms_level); Size spectrum_id = s + 1; if (all_prefixed_numbers) { spectrum_id = spec.getNativeID().substr(5).toInt(); } else if (all_numbers) { spectrum_id = spec.getNativeID().toInt(); } os << String(ms_level + 1, '\t') << "<scan num=\"" << spectrum_id << "\" msLevel=\"" << ms_level << "\" peaksCount=\"" << spec.size() << "\" polarity=\""; if (spec.getInstrumentSettings().getPolarity() == IonSource::POSITIVE) { os << "+"; } else if (spec.getInstrumentSettings().getPolarity() == IonSource::NEGATIVE) { os << "-"; } else { os << "any"; } //scan type switch (spec.getInstrumentSettings().getScanMode()) { case InstrumentSettings::UNKNOWN: break; case InstrumentSettings::MASSSPECTRUM: case InstrumentSettings::MS1SPECTRUM: case InstrumentSettings::MSNSPECTRUM: if (spec.getInstrumentSettings().getZoomScan()) { os << "\" scanType=\"zoom"; } else { os << "\" scanType=\"Full"; } break; case InstrumentSettings::SIM: os << "\" scanType=\"SIM"; break; case InstrumentSettings::SRM: os << "\" scanType=\"SRM"; break; case InstrumentSettings::CRM: os << "\" scanType=\"CRM"; break; default: os << "\" scanType=\"Full"; warning(STORE, String("Scan type '") + InstrumentSettings::NamesOfScanMode[spec.getInstrumentSettings().getScanMode()] + "' not supported by mzXML. Using 'Full' scan mode!"); } // filter line if (spec.metaValueExists("filter string") ) { os << "\" filterLine=\""; os << writeXMLEscape ( (String)spec.getMetaValue("filter string") ); } // base peak mz (used by some programs like MAVEN), according to xsd: // "m/z of the base peak (most intense peak)" os << "\" basePeakMz=\""; double basePeakInt = 0; double basePeakMz = 0; for (Size j = 0; j < spec.size(); j++) { if (spec[j].getIntensity() > basePeakInt) { basePeakInt = spec[j].getIntensity(); basePeakMz = spec[j].getMZ(); } } os << basePeakMz; // retention time os << "\" retentionTime=\""; if (spec.getRT() < 0) os << "-"; os << "PT" << std::fabs(spec.getRT()) << "S\""; if (!spec.getInstrumentSettings().getScanWindows().empty()) { os << " startMz=\"" << spec.getInstrumentSettings().getScanWindows()[0].begin << "\" endMz=\"" << spec.getInstrumentSettings().getScanWindows()[0].end << "\""; } if (spec.getInstrumentSettings().getScanWindows().size() > 1) { warning(STORE, "The MzXML format can store only one scan window for each scan. 
Only the first one is stored!"); } // end of "scan" attributes os << ">\n"; for (Size i = 0; i < spec.getPrecursors().size(); ++i) { const Precursor& precursor = spec.getPrecursors()[i]; //intensity os << String(ms_level + 2, '\t') << "<precursorMz precursorIntensity=\"" << precursor.getIntensity(); //charge if (precursor.getCharge() != 0) os << "\" precursorCharge=\"" << precursor.getCharge(); //window size if (precursor.getIsolationWindowLowerOffset() + precursor.getIsolationWindowUpperOffset() > 0.0) os << "\" windowWideness=\"" << (precursor.getIsolationWindowUpperOffset() + precursor.getIsolationWindowLowerOffset()); //m/z os << "\">" << precursor.getMZ() << "</precursorMz>\n"; } if (!spec.empty()) { os << String(ms_level + 2, '\t') << "<peaks precision=\"32\"" << " byteOrder=\"network\" pairOrder=\"m/z-int\">"; //std::cout << "Writing scan " << s << "\n"; std::vector<float> tmp; for (Size i = 0; i < spec.size(); i++) { tmp.push_back(spec[i].getMZ()); tmp.push_back(spec[i].getIntensity()); } String encoded; decoder_.encode(tmp, Base64::BYTEORDER_BIGENDIAN, encoded); os << encoded << "</peaks>\n"; } else { os << String(ms_level + 2, '\t') << "<peaks precision=\"32\"" << " byteOrder=\"network\" pairOrder=\"m/z-int\" xsi:nil=\"true\"/>\n"; } writeUserParam_(os, spec, ms_level + 2); if (spec.getComment() != "") { os << String(ms_level + 2, '\t') << "<comment>" << spec.getComment() << "</comment>\n"; } //check MS level of next scan and close scans (scans can be nested) UInt next_ms_level = 0; if (s < cexp_->size() - 1) { next_ms_level = ((*cexp_)[s + 1]).getMSLevel(); } //std::cout << "scan: " << s << " this: " << ms_level << " next: " << next_ms_level << "\n"; if (next_ms_level <= ms_level) { for (Size i = 0; i <= ms_level - next_ms_level && !open_scans.empty(); ++i) { os << String(ms_level - i + 1, '\t') << "</scan>\n"; open_scans.pop(); } } } os << "\t</msRun>\n" << "\t<indexOffset>0</indexOffset>\n" << "</mzXML>\n"; logger_.endProgress(); spec_write_counter_ = 1; } } // namespace Internal } // namespace OpenMS #endif
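The subtlest part of writeTo() above is the scan-closing loop: <scan> elements nest by MS level, and the number of closing tags emitted after each spectrum depends on the MS level of the following spectrum. A minimal sketch of that counting rule — the helper name and free-function form are illustrative only, not OpenMS API:

#include <algorithm>
#include <cstddef>

// How many </scan> tags to emit after a spectrum of level `ms_level`, given the
// level of the next spectrum (0 when there is none) and the number of currently
// open <scan> elements on the stack.
static std::size_t scansToClose(unsigned ms_level, unsigned next_ms_level,
                                std::size_t open_scans)
{
  if (next_ms_level > ms_level) return 0;             // next spectrum nests one level deeper
  std::size_t wanted = ms_level - next_ms_level + 1;  // close down to the next level
  return std::min(wanted, open_scans);                // never close more than are open
}

// For the MS level sequence 1,2,2,1 this yields 0, 1, 2 and finally 1 closing
// tag(s), matching what the stack-based loop in writeTo() produces.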
Pragma.h
//===--- Pragma.h - Pragma registration and handling ------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the PragmaHandler and PragmaTable interfaces. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LEX_PRAGMA_H #define LLVM_CLANG_LEX_PRAGMA_H #include "clang/Basic/LLVM.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringRef.h" #include <cassert> namespace clang { class Preprocessor; class Token; class IdentifierInfo; class PragmaNamespace; /** * \brief Describes how the pragma was introduced, e.g., with \#pragma, * _Pragma, or __pragma. */ enum PragmaIntroducerKind { /** * \brief The pragma was introduced via \#pragma. */ PIK_HashPragma, /** * \brief The pragma was introduced via the C99 _Pragma(string-literal). */ PIK__Pragma, /** * \brief The pragma was introduced via the Microsoft * __pragma(token-string). */ PIK___pragma }; /// PragmaHandler - Instances of this interface defined to handle the various /// pragmas that the language front-end uses. Each handler optionally has a /// name (e.g. "pack") and the HandlePragma method is invoked when a pragma with /// that identifier is found. If a handler does not match any of the declared /// pragmas the handler with a null identifier is invoked, if it exists. /// /// Note that the PragmaNamespace class can be used to subdivide pragmas, e.g. /// we treat "\#pragma STDC" and "\#pragma GCC" as namespaces that contain other /// pragmas. class PragmaHandler { std::string Name; public: explicit PragmaHandler(StringRef name) : Name(name) {} PragmaHandler() {} virtual ~PragmaHandler(); StringRef getName() const { return Name; } virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer, Token &FirstToken) = 0; /// getIfNamespace - If this is a namespace, return it. This is equivalent to /// using a dynamic_cast, but doesn't require RTTI. virtual PragmaNamespace *getIfNamespace() { return nullptr; } }; /// EmptyPragmaHandler - A pragma handler which takes no action, which can be /// used to ignore particular pragmas. class EmptyPragmaHandler : public PragmaHandler { public: explicit EmptyPragmaHandler(StringRef Name = StringRef()); void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer, Token &FirstToken) override; }; /// PragmaNamespace - This PragmaHandler subdivides the namespace of pragmas, /// allowing hierarchical pragmas to be defined. Common examples of namespaces /// are "\#pragma GCC", "\#pragma STDC", and "\#pragma omp", but any namespaces /// may be (potentially recursively) defined. class PragmaNamespace : public PragmaHandler { /// Handlers - This is a map of the handlers in this namespace with their name /// as key. /// llvm::StringMap<PragmaHandler*> Handlers; public: explicit PragmaNamespace(StringRef Name) : PragmaHandler(Name) {} ~PragmaNamespace() override; /// FindHandler - Check to see if there is already a handler for the /// specified name. If not, return the handler for the null name if it /// exists, otherwise return null. If IgnoreNull is true (the default) then /// the null handler isn't returned on failure to match. PragmaHandler *FindHandler(StringRef Name, bool IgnoreNull = true) const; /// AddPragma - Add a pragma to this namespace. 
/// void AddPragma(PragmaHandler *Handler); /// RemovePragmaHandler - Remove the given handler from the /// namespace. void RemovePragmaHandler(PragmaHandler *Handler); bool IsEmpty() { return Handlers.empty(); } void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer, Token &FirstToken) override; PragmaNamespace *getIfNamespace() override { return this; } }; } // end namespace clang #endif
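As a usage sketch for the interface above: a concrete handler derives from PragmaHandler, passes its pragma name to the base constructor, and overrides HandlePragma(), which the preprocessor invokes when that pragma is seen. The handler and pragma names below are made up, and registration via Preprocessor::AddPragmaHandler is an assumption of this sketch rather than something declared in this header:

#include "clang/Lex/Pragma.h"

namespace clang {

// Reacts to "#pragma my_mark ..." (illustrative only).
class MyMarkPragmaHandler : public PragmaHandler {
public:
  MyMarkPragmaHandler() : PragmaHandler("my_mark") {}

  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
                    Token &FirstToken) override {
    // Tokens following the pragma name can be lexed from PP here;
    // this sketch simply ignores them.
  }
};

} // end namespace clang

// Assumed registration on the Preprocessor:
//   PP.AddPragmaHandler(new clang::MyMarkPragmaHandler());          // #pragma my_mark
//   PP.AddPragmaHandler("clang", new clang::MyMarkPragmaHandler()); // #pragma clang my_mark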
convolution_3x3.c
/* * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the License); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an AS IS BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* CSI-NN2 version 1.12.x */ #include "csi_thead_rvv.h" /************************************************************* note: VLEN = 128/256 ... *************************************************************/ /* padding input for winograd input transform , and change memory layout to [n c/4 h w 4] input layout: [n c h w] input_padded layout: [n c/packn h w packn] constrain: input channel % packn = 0 */ static void winograd_pad_input_pack1ton_fp32(const float *input, float *input_padded, int inc, int inh, int inw, int padded_h, int padded_w, int pad_top, int pad_left) { const int packn = csrr_vlenb() / sizeof(float); const int vl = vsetvl_e32m1(packn); int padded_hw = padded_h * padded_w; const int in_size = inh * inw; // per-channel size float *pad_ptr = input_padded; float *inp_ptr = (float *)input; int pad_down = padded_h - pad_top - inh; // remain to pad on h (pad_down) int pad_right = padded_w - pad_left - inw; // remain to pad on w (pad_right) vfloat32m1_t _zero = vfmv_v_f_f32m1(0.0f, vl); int c = 0; for (; c + packn - 1 < inc; c += packn) { inp_ptr = (float *)input + c * in_size; // pad h_top for (int i = 0; i < pad_top * padded_w; i++) { vse32_v_f32m1(pad_ptr, _zero, vl); pad_ptr += packn; } // pad h_mid for (int i = 0; i < inh; i++) { // pad w_left for (int j = 0; j < pad_left; j++) { vse32_v_f32m1(pad_ptr, _zero, vl); pad_ptr += packn; } // pad w_mid for (int j = 0; j < inw; j++) { vfloat32m1_t _tmp = vlse32_v_f32m1(inp_ptr, in_size * sizeof(float), vl); inp_ptr++; vse32_v_f32m1(pad_ptr, _tmp, vl); pad_ptr += packn; } // pad w_end for (int j = 0; j < pad_right; j++) { vse32_v_f32m1(pad_ptr, _zero, vl); pad_ptr += packn; } } // pad h_bottom for (int i = 0; i < pad_down * padded_w; i++) { vse32_v_f32m1(pad_ptr, _zero, vl); pad_ptr += packn; } } } static void winograd_crop_output_packnto1_fp32(const float *output_trans, float *output, int out_c, int out_h, int out_w, int wino_h, int wino_w) { const int packn = csrr_vlenb() / sizeof(float); const int vl = vsetvl_e32m1(packn); const int out_size = out_h * out_w; // per-channel size const int crop_size = wino_h * wino_w; float *out_tm_ptr = (float *)output_trans; float *out_ptr = output; int c = 0; for (; c + packn - 1 < out_c; c += packn) { out_tm_ptr = (float *)output_trans + c * crop_size; out_ptr = output + c * out_size; for (int h = 0; h < out_h; h++) { float *crop_ptr = out_tm_ptr + h * wino_w * packn; for (int w = 0; w < out_w; w++) { vfloat32m1_t _tmp = vle32_v_f32m1(crop_ptr, vl); crop_ptr += packn; vsse32_v_f32m1(out_ptr, out_size * sizeof(float), _tmp, vl); out_ptr++; } } } } /* packn = VLEN / 32 (128/32=4 or 256/32=8) constrain: output channel % packn = 0 input channel % packn = 0 kernel before: [O I 3*3] kernel after : [O/packn 8*8 I packn] */ void csi_nn_rvv_conv3x3s1_winograd64_transform_kernel_packn_fp32(struct csi_tensor *o_kernel, struct csi_tensor *t_kernel) { int32_t outch = 
o_kernel->dim[0]; int32_t inch = o_kernel->dim[1]; float *kernel_data = (float *)o_kernel->data; // for kernel transform buf, 3x3 --> 8x8 float *kernel_tm = (float *)csi_mem_alloc(outch * inch * 8 * 8 * sizeof(float)); // kernel transform matrix: G const float ktm[8][3] = {{1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f}}; // const float ktm[8][3] = { // {1.0f, 0.0f, 0.0f}, // {-2.0f / 9, -2.0f / 9, -2.0f / 9}, // {-2.0f / 9, 2.0f / 9, -2.0f / 9}, // {1.0f / 90, 1.0f / 45, 2.0f / 45}, // {1.0f / 90, -1.0f / 45, 2.0f / 45}, // {32.0f / 45, 16.0f / 45, 8.0f / 45}, // {32.0f / 45, -16.0f / 45, 8.0f / 45}, // {0.0f, 0.0f, 1.0f} // }; csi_tensor_copy(t_kernel, o_kernel); for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float *kernel0 = kernel_data + p * inch * 9 + q * 9; float *kernel_tmp = kernel_tm + p * inch * 64 + q * 64; // transform kernel const float *k0 = kernel0; const float *k1 = kernel0 + 3; const float *k2 = kernel0 + 6; // h : first compute the transport matrix tmp = (g * GT)T float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 8; j++) { float *tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tmp[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // optimized layout for winograd64 const int packn = csrr_vlenb() / sizeof(float); float *kernel_tm_packn = (float *)csi_mem_alloc(outch * inch * 8 * 8 * sizeof(float)); t_kernel->data = kernel_tm_packn; for (int oc = 0; oc < outch / packn; oc++) { float *g0 = kernel_tm_packn + oc * 64 * inch * packn; for (int k = 0; k < 64; k++) { float *g00 = g0 + k * inch * packn; for (int ic = 0; ic < inch / packn; ic++) { for (int i = 0; i < packn; i++) { for (int j = 0; j < packn; j++) { const float *k00 = kernel_tm + (oc * packn + j) * 64 * inch + (ic * packn + i) * 64; *g00++ = k00[k]; } } } } } csi_mem_free(kernel_tm); } /* n = VLEN / 32 constrain: output channel % n = 0 input channel % n = 0 */ int csi_nn_rvv_conv3x3s1_winograd64_packn_fp32(struct csi_tensor *input, struct csi_tensor *output, struct csi_tensor *kernel, struct csi_tensor *bias, struct conv2d_params *params) { float *input_data = (float *)input->data; float *output_data = (float *)output->data; float *kernel_data = (float *)params->conv_extra.kernel_tm->data; float *bias_data = (float *)bias->data; // param int kernel_h = kernel->dim[2]; int kernel_w = kernel->dim[3]; int stride_h = params->stride_height; int stride_w = params->stride_width; int dilation_h = params->dilation_height; int dilation_w = params->dilation_width; int pad_left = params->pad_left; int pad_top = params->pad_top; int batch = input->dim[0]; int in_c = input->dim[1]; int in_h = input->dim[2]; int in_w = input->dim[3]; int input_size = in_c * in_h * in_w; int kernel_size = in_c * kernel_h * kernel_w; int out_c = kernel->dim[0]; int out_h = output->dim[2]; int out_w = output->dim[3]; int output_size = out_c * out_h * out_w; // winograd param int block_h = (out_h + 5) / 6; int block_w = (out_w + 5) / 6; // block * 4 for alignment with 4,kernel = 3 * 3 ,stride = 1,thus input_size + 2 int padded_in_h = block_h * 6 + 2; 
int padded_in_w = block_w * 6 + 2; int padded_in_hw = padded_in_h * padded_in_w; // element size after padding per channel /****************************** bias *****************************/ bool flag_bias = 1; // default: conv2d layer include bias if (bias_data == NULL) { flag_bias = 0; bias_data = (float *)csi_mem_alloc(out_c * sizeof(float)); } const int packn = csrr_vlenb() / sizeof(float); const int vl = vsetvl_e32m1(packn); for (int n = 0; n < batch; n++) { // pad buffer: [in_c/8 h w 8] float *input_padd_buf = (float *)csi_mem_alloc(in_c * padded_in_hw * sizeof(float)); // pad input winograd_pad_input_pack1ton_fp32(input_data, input_padd_buf, in_c, in_h, in_w, padded_in_h, padded_in_w, pad_top, pad_left); input_data += input_size; // input transform buffer1: [in_ch/8, 64, blocks, 8] float *input_tm1_buf = (float *)csi_mem_alloc(in_c * block_h * block_w * 8 * 8 * sizeof(float)); /****************************** transform input *****************************/ /* BT = { { 1 0 -5.25 0 5.25 0 -1 0 }; { 0 1 1 -4.25 -4.25 1 1 0 }; { 0 -1 1 4.25 -4.25 -1 1 0 }; { 0 0.5 0.25 -2.5 -1.25 2 1 0 }; { 0 -0.5 0.25 2.5 -1.25 -2 1 0 }; { 0 2 4 -2.5 -5 0.5 1 0 }; { 0 -2 4 2.5 -5 -0.5 1 0 }; { 0 -1 0 5.25 0 -5.25 0 1 } }; */ int tiles = block_h * block_w; #pragma omp parallel for num_threads(1) for (int q = 0; q < in_c / packn; q++) { float *img0 = input_padd_buf + q * padded_in_h * padded_in_w * packn; // feature map after padding - q channel float *img0_tm = input_tm1_buf + q * 64 * tiles * packn; // transform and interleave - q channel float tmp[8][8][packn]; for (int i = 0; i < block_h; i++) { for (int j = 0; j < block_w; j++) { float *r0 = img0 + (i * padded_in_w * 6 + j * 6) * packn; // feature map after padding 8*8 start addr float *r0_tm = img0_tm + (i * block_w + j) * packn; // input_tm1 8*8 block start addr for (int m = 0; m < 8; m++) { vfloat32m1_t _r00 = vle32_v_f32m1(r0, vl); vfloat32m1_t _r01 = vle32_v_f32m1(r0 + packn * 1, vl); vfloat32m1_t _r02 = vle32_v_f32m1(r0 + packn * 2, vl); vfloat32m1_t _r03 = vle32_v_f32m1(r0 + packn * 3, vl); vfloat32m1_t _r04 = vle32_v_f32m1(r0 + packn * 4, vl); vfloat32m1_t _r05 = vle32_v_f32m1(r0 + packn * 5, vl); vfloat32m1_t _r06 = vle32_v_f32m1(r0 + packn * 6, vl); vfloat32m1_t _r07 = vle32_v_f32m1(r0 + packn * 7, vl); vfloat32m1_t _tmp0m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r00, _r06, vl), 5.25f, vfsub_vv_f32m1(_r04, _r02, vl), vl); vfloat32m1_t _tmp7m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r07, _r01, vl), 5.25f, vfsub_vv_f32m1(_r03, _r05, vl), vl); vfloat32m1_t _tmp12a = vfmacc_vf_f32m1(vfadd_vv_f32m1(_r02, _r06, vl), -4.25f, _r04, vl); vfloat32m1_t _tmp12b = vfmacc_vf_f32m1(vfadd_vv_f32m1(_r01, _r05, vl), -4.25f, _r03, vl); vfloat32m1_t _tmp1m = vfadd_vv_f32m1(_tmp12a, _tmp12b, vl); vfloat32m1_t _tmp2m = vfsub_vv_f32m1(_tmp12a, _tmp12b, vl); vfloat32m1_t _tmp34a = vfmacc_vf_f32m1( vfmacc_vf_f32m1(_r06, 0.25f, _r02, vl), -1.25f, _r04, vl); vfloat32m1_t _tmp34b = vfmacc_vf_f32m1( vfmacc_vf_f32m1(vfmul_vf_f32m1(_r01, 0.5f, vl), -2.5f, _r03, vl), 2.f, _r05, vl); vfloat32m1_t _tmp3m = vfadd_vv_f32m1(_tmp34a, _tmp34b, vl); vfloat32m1_t _tmp4m = vfsub_vv_f32m1(_tmp34a, _tmp34b, vl); vfloat32m1_t _tmp56a = vfmacc_vf_f32m1(_r06, 4.f, vfmacc_vf_f32m1(_r02, -1.25f, _r04, vl), vl); vfloat32m1_t _tmp56b = vfmacc_vf_f32m1( vfmacc_vf_f32m1(vfmul_vf_f32m1(_r01, 2.f, vl), -2.5f, _r03, vl), 0.5f, _r05, vl); vfloat32m1_t _tmp5m = vfadd_vv_f32m1(_tmp56a, _tmp56b, vl); vfloat32m1_t _tmp6m = vfsub_vv_f32m1(_tmp56a, _tmp56b, vl); vse32_v_f32m1(tmp[0][m], _tmp0m, vl); 
vse32_v_f32m1(tmp[7][m], _tmp7m, vl); vse32_v_f32m1(tmp[1][m], _tmp1m, vl); vse32_v_f32m1(tmp[2][m], _tmp2m, vl); vse32_v_f32m1(tmp[3][m], _tmp3m, vl); vse32_v_f32m1(tmp[4][m], _tmp4m, vl); vse32_v_f32m1(tmp[5][m], _tmp5m, vl); vse32_v_f32m1(tmp[6][m], _tmp6m, vl); r0 += padded_in_w * packn; } for (int m = 0; m < 8; m++) { float *r0_tm0 = r0_tm; float *r0_tm1 = r0_tm0 + tiles * packn; float *r0_tm2 = r0_tm1 + tiles * packn; float *r0_tm3 = r0_tm2 + tiles * packn; float *r0_tm4 = r0_tm3 + tiles * packn; float *r0_tm5 = r0_tm4 + tiles * packn; float *r0_tm6 = r0_tm5 + tiles * packn; float *r0_tm7 = r0_tm6 + tiles * packn; vfloat32m1_t _tmp00 = vle32_v_f32m1(tmp[m][0], vl); vfloat32m1_t _tmp01 = vle32_v_f32m1(tmp[m][1], vl); vfloat32m1_t _tmp02 = vle32_v_f32m1(tmp[m][2], vl); vfloat32m1_t _tmp03 = vle32_v_f32m1(tmp[m][3], vl); vfloat32m1_t _tmp04 = vle32_v_f32m1(tmp[m][4], vl); vfloat32m1_t _tmp05 = vle32_v_f32m1(tmp[m][5], vl); vfloat32m1_t _tmp06 = vle32_v_f32m1(tmp[m][6], vl); vfloat32m1_t _tmp07 = vle32_v_f32m1(tmp[m][7], vl); vfloat32m1_t _r0tm0 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp00, _tmp06, vl), 5.25f, vfsub_vv_f32m1(_tmp04, _tmp02, vl), vl); vfloat32m1_t _r0tm7 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp07, _tmp01, vl), 5.25f, vfsub_vv_f32m1(_tmp03, _tmp05, vl), vl); vfloat32m1_t _tmp12a = vfmacc_vf_f32m1(vfadd_vv_f32m1(_tmp02, _tmp06, vl), -4.25f, _tmp04, vl); vfloat32m1_t _tmp12b = vfmacc_vf_f32m1(vfadd_vv_f32m1(_tmp01, _tmp05, vl), -4.25f, _tmp03, vl); vfloat32m1_t _r0tm1 = vfadd_vv_f32m1(_tmp12a, _tmp12b, vl); vfloat32m1_t _r0tm2 = vfsub_vv_f32m1(_tmp12a, _tmp12b, vl); vfloat32m1_t _tmp34a = vfmacc_vf_f32m1( vfmacc_vf_f32m1(_tmp06, 0.25f, _tmp02, vl), -1.25f, _tmp04, vl); vfloat32m1_t _tmp34b = vfmacc_vf_f32m1( vfmacc_vf_f32m1(vfmul_vf_f32m1(_tmp01, 0.5f, vl), -2.5f, _tmp03, vl), 2.f, _tmp05, vl); vfloat32m1_t _r0tm3 = vfadd_vv_f32m1(_tmp34a, _tmp34b, vl); vfloat32m1_t _r0tm4 = vfsub_vv_f32m1(_tmp34a, _tmp34b, vl); vfloat32m1_t _tmp56a = vfmacc_vf_f32m1( _tmp06, 4.f, vfmacc_vf_f32m1(_tmp02, -1.25f, _tmp04, vl), vl); vfloat32m1_t _tmp56b = vfmacc_vf_f32m1( vfmacc_vf_f32m1(vfmul_vf_f32m1(_tmp01, 2.f, vl), -2.5f, _tmp03, vl), 0.5f, _tmp05, vl); vfloat32m1_t _r0tm5 = vfadd_vv_f32m1(_tmp56a, _tmp56b, vl); vfloat32m1_t _r0tm6 = vfsub_vv_f32m1(_tmp56a, _tmp56b, vl); vse32_v_f32m1(r0_tm0, _r0tm0, vl); vse32_v_f32m1(r0_tm7, _r0tm7, vl); vse32_v_f32m1(r0_tm1, _r0tm1, vl); vse32_v_f32m1(r0_tm2, _r0tm2, vl); vse32_v_f32m1(r0_tm3, _r0tm3, vl); vse32_v_f32m1(r0_tm4, _r0tm4, vl); vse32_v_f32m1(r0_tm5, _r0tm5, vl); vse32_v_f32m1(r0_tm6, _r0tm6, vl); r0_tm += tiles * packn * 8; } } } } csi_mem_free(input_padd_buf); /*********************************** dot ***************************************/ // reorder input_tm1_buf int size_input_tm2 = 0; if (tiles >= 8) { size_input_tm2 = 64 * (tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2) * in_c * 8; } else if (tiles >= 4) { size_input_tm2 = 64 * (tiles / 4 + (tiles % 4) / 2 + tiles % 2) * in_c * 4; } else if (tiles >= 2) { size_input_tm2 = 64 * (tiles / 2 + tiles % 2) * in_c * 2; } else { size_input_tm2 = 64 * tiles * in_c; } float *input_tm2_buf = (float *)csi_mem_alloc(size_input_tm2 * sizeof(float)); #pragma omp parallel for num_threads(1) for (int r = 0; r < 64; r++) { float *img_tm2 = input_tm2_buf + r * size_input_tm2 / 64; // input_tm2 r channel data int t = 0; for (; t + 7 < tiles; t += 8) { float *tm2 = img_tm2 + t * in_c; // img_tm2 row data float *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * packn; for (int q = 0; q < in_c / packn; q++) 
{ vfloat32m1_t _tmp0 = vle32_v_f32m1(tm1, vl); vfloat32m1_t _tmp1 = vle32_v_f32m1(tm1 + packn * 1, vl); vfloat32m1_t _tmp2 = vle32_v_f32m1(tm1 + packn * 2, vl); vfloat32m1_t _tmp3 = vle32_v_f32m1(tm1 + packn * 3, vl); vfloat32m1_t _tmp4 = vle32_v_f32m1(tm1 + packn * 4, vl); vfloat32m1_t _tmp5 = vle32_v_f32m1(tm1 + packn * 5, vl); vfloat32m1_t _tmp6 = vle32_v_f32m1(tm1 + packn * 6, vl); vfloat32m1_t _tmp7 = vle32_v_f32m1(tm1 + packn * 7, vl); vsseg8e32_v_f32m1(tm2, _tmp0, _tmp1, _tmp2, _tmp3, _tmp4, _tmp5, _tmp6, _tmp7, vl); tm1 += 64 * tiles * packn; tm2 += 8 * packn; } } for (; t + 3 < tiles; t += 4) { float *tm2 = img_tm2 + (t / 8 + (t % 8) / 4) * in_c * 8; // img_tm2 row data float *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * packn; for (int q = 0; q < in_c / packn; q++) { vfloat32m1_t _tmp0 = vle32_v_f32m1(tm1, vl); vfloat32m1_t _tmp1 = vle32_v_f32m1(tm1 + packn * 1, vl); vfloat32m1_t _tmp2 = vle32_v_f32m1(tm1 + packn * 2, vl); vfloat32m1_t _tmp3 = vle32_v_f32m1(tm1 + packn * 3, vl); vsseg4e32_v_f32m1(tm2, _tmp0, _tmp1, _tmp2, _tmp3, vl); tm1 += 64 * tiles * packn; tm2 += 4 * packn; } } for (; t + 1 < tiles; t += 2) { float *tm2 = img_tm2 + (t / 8 + (t % 8) / 4 + (t % 4) / 2) * in_c * 8; // img_tm2 row data float *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * packn; for (int q = 0; q < in_c / packn; q++) { vfloat32m1_t _tmp0 = vle32_v_f32m1(tm1, vl); vfloat32m1_t _tmp1 = vle32_v_f32m1(tm1 + packn * 1, vl); vsseg2e32_v_f32m1(tm2, _tmp0, _tmp1, vl); tm1 += 64 * tiles * packn; tm2 += 2 * packn; } } for (; t < tiles; t++) { float *tm2 = img_tm2 + (t / 8 + (t % 8) / 4 + (t % 4) / 2 + t % 2) * in_c * 8; // img_tm2 row data float *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * packn; for (int q = 0; q < in_c / packn; q++) { vfloat32m1_t _tmp0 = vle32_v_f32m1(tm1, vl); vse32_v_f32m1(tm2, _tmp0, vl); tm1 += 64 * tiles * packn; tm2 += 1 * packn; } } } csi_mem_free(input_tm1_buf); // output_dot_buf: [out_c/packn, 64, blocks, packn] float *output_dot_buf = (float *)csi_mem_alloc(out_c * block_h * block_w * 8 * 8 * sizeof(float)); #pragma omp parallel for num_threads(1) for (int p = 0; p < out_c / packn; p++) { float *output0_tm = output_dot_buf + p * 64 * tiles * packn; // 4 channel dot output float *kernel0_tm = kernel_data + p * 64 * in_c * packn; // 4 channel kernel for (int r = 0; r < 64; r++) { float *img_tm2 = input_tm2_buf + r * size_input_tm2 / 64; // img_tm2 第r个channel int t = 0; for (; t + 7 < tiles; t += 8) { float *r0 = img_tm2 + t * in_c; float *k0 = kernel0_tm + r * in_c * packn; vfloat32m1_t _acc0 = vfmv_v_f_f32m1(0.0f, vl); vfloat32m1_t _acc1 = vfmv_v_f_f32m1(0.0f, vl); vfloat32m1_t _acc2 = vfmv_v_f_f32m1(0.0f, vl); vfloat32m1_t _acc3 = vfmv_v_f_f32m1(0.0f, vl); vfloat32m1_t _acc4 = vfmv_v_f_f32m1(0.0f, vl); vfloat32m1_t _acc5 = vfmv_v_f_f32m1(0.0f, vl); vfloat32m1_t _acc6 = vfmv_v_f_f32m1(0.0f, vl); vfloat32m1_t _acc7 = vfmv_v_f_f32m1(0.0f, vl); for (int c = 0; c < in_c; c++) { vfloat32m1_t _kernel = vle32_v_f32m1(k0, vl); k0 += packn; _acc0 = vfmacc_vf_f32m1(_acc0, r0[0], _kernel, vl); _acc1 = vfmacc_vf_f32m1(_acc1, r0[1], _kernel, vl); _acc2 = vfmacc_vf_f32m1(_acc2, r0[2], _kernel, vl); _acc3 = vfmacc_vf_f32m1(_acc3, r0[3], _kernel, vl); _acc4 = vfmacc_vf_f32m1(_acc4, r0[4], _kernel, vl); _acc5 = vfmacc_vf_f32m1(_acc5, r0[5], _kernel, vl); _acc6 = vfmacc_vf_f32m1(_acc6, r0[6], _kernel, vl); _acc7 = vfmacc_vf_f32m1(_acc7, r0[7], _kernel, vl); r0 += 8; } vse32_v_f32m1(output0_tm, _acc0, vl); vse32_v_f32m1(output0_tm + packn * 1, _acc1, vl); vse32_v_f32m1(output0_tm + packn * 2, 
_acc2, vl); vse32_v_f32m1(output0_tm + packn * 3, _acc3, vl); vse32_v_f32m1(output0_tm + packn * 4, _acc4, vl); vse32_v_f32m1(output0_tm + packn * 5, _acc5, vl); vse32_v_f32m1(output0_tm + packn * 6, _acc6, vl); vse32_v_f32m1(output0_tm + packn * 7, _acc7, vl); output0_tm += packn * 8; } for (; t + 3 < tiles; t += 4) { float *r0 = img_tm2 + (t / 8 + (t % 8) / 4) * in_c * 8; float *k0 = kernel0_tm + r * in_c * packn; vfloat32m1_t _acc0 = vfmv_v_f_f32m1(0.0f, vl); vfloat32m1_t _acc1 = vfmv_v_f_f32m1(0.0f, vl); vfloat32m1_t _acc2 = vfmv_v_f_f32m1(0.0f, vl); vfloat32m1_t _acc3 = vfmv_v_f_f32m1(0.0f, vl); for (int c = 0; c < in_c; c++) { vfloat32m1_t _kernel = vle32_v_f32m1(k0, vl); k0 += packn; _acc0 = vfmacc_vf_f32m1(_acc0, r0[0], _kernel, vl); _acc1 = vfmacc_vf_f32m1(_acc1, r0[1], _kernel, vl); _acc2 = vfmacc_vf_f32m1(_acc2, r0[2], _kernel, vl); _acc3 = vfmacc_vf_f32m1(_acc3, r0[3], _kernel, vl); r0 += 4; } vse32_v_f32m1(output0_tm, _acc0, vl); vse32_v_f32m1(output0_tm + packn * 1, _acc1, vl); vse32_v_f32m1(output0_tm + packn * 2, _acc2, vl); vse32_v_f32m1(output0_tm + packn * 3, _acc3, vl); output0_tm += packn * 4; } for (; t + 1 < tiles; t += 2) { float *r0 = img_tm2 + (t / 8 + (t % 8) / 4 + (t % 4) / 2) * in_c * 8; float *k0 = kernel0_tm + r * in_c * packn; vfloat32m1_t _acc0 = vfmv_v_f_f32m1(0.0f, vl); vfloat32m1_t _acc1 = vfmv_v_f_f32m1(0.0f, vl); for (int c = 0; c < in_c; c++) { vfloat32m1_t _kernel = vle32_v_f32m1(k0, vl); k0 += packn; _acc0 = vfmacc_vf_f32m1(_acc0, r0[0], _kernel, vl); _acc1 = vfmacc_vf_f32m1(_acc1, r0[1], _kernel, vl); r0 += 2; } vse32_v_f32m1(output0_tm, _acc0, vl); vse32_v_f32m1(output0_tm + packn * 1, _acc1, vl); output0_tm += packn * 2; } for (; t < tiles; t++) { float *r0 = img_tm2 + (t / 8 + (t % 8) / 4 + (t % 4) / 2 + t % 2) * in_c * 8; float *k0 = kernel0_tm + r * in_c * packn; vfloat32m1_t _acc0 = vfmv_v_f_f32m1(0.0f, vl); for (int c = 0; c < in_c; c++) { vfloat32m1_t _kernel = vle32_v_f32m1(k0, vl); k0 += packn; _acc0 = vfmacc_vf_f32m1(_acc0, r0[0], _kernel, vl); r0 += 1; } vse32_v_f32m1(output0_tm, _acc0, vl); output0_tm += packn * 1; } } } csi_mem_free(input_tm2_buf); /*************************** transform output ****************************/ // output_tm1_buf: [out_c/packn, out_h6, out_w6, packn] float *output_tm1_buf = (float *)csi_mem_alloc(out_c * block_h * block_w * 6 * 6 * sizeof(float)); /* AT = { { 1 1 1 1 1 1 1 0 }; { 0 1 -1 2 -2 1/2 -1/2 0 }; { 0 1 1 4 4 1/4 1/4 0 }; { 0 1 -1 8 -8 1/8 -1/8 0 }; { 0 1 1 16 16 1/16 1/16 0 }; { 0 1 -1 32 -32 1/32 -1/32 1 } }; AT = { { 1 1 1 1 1 32 32 0 }; { 0 1 -1 2 -2 16 -16 0 }; { 0 1 1 4 4 8 8 0 }; { 0 1 -1 8 -8 4 -4 0 }; { 0 1 1 16 16 2 2 0 }; { 0 1 -1 32 -32 1 -1 1 } }; */ #pragma omp parallel for num_threads(1) for (int p = 0; p < out_c / packn; p++) { float *bias_tmp = bias_data + p * packn; float *out0_tm = output_dot_buf + p * 64 * block_h * block_w * packn; // 输出转换前/dot后 第p个channel float *out0 = output_tm1_buf + p * 6 * block_h * 6 * block_w * packn; // 转换后输出 第p个channel float tmp[6][8][packn]; for (int i = 0; i < block_h; i++) { for (int j = 0; j < block_w; j++) { float *output0_tm_0 = out0_tm + (i * block_w + j) * packn; // 8*8 起始地址 float *output0_tm_1 = output0_tm_0 + tiles * packn * 1; float *output0_tm_2 = output0_tm_0 + tiles * packn * 2; float *output0_tm_3 = output0_tm_0 + tiles * packn * 3; float *output0_tm_4 = output0_tm_0 + tiles * packn * 4; float *output0_tm_5 = output0_tm_0 + tiles * packn * 5; float *output0_tm_6 = output0_tm_0 + tiles * packn * 6; float *output0_tm_7 = output0_tm_0 + 
tiles * packn * 7; float *output0 = out0 + (i * block_w * 6 * 6 + j * 6) * packn; // 输出 6*6 的起始地址 for (int m = 0; m < 8; m++) { vfloat32m1_t _r00 = vle32_v_f32m1(output0_tm_0, vl); vfloat32m1_t _r01 = vle32_v_f32m1(output0_tm_1, vl); vfloat32m1_t _r02 = vle32_v_f32m1(output0_tm_2, vl); vfloat32m1_t _r03 = vle32_v_f32m1(output0_tm_3, vl); vfloat32m1_t _r04 = vle32_v_f32m1(output0_tm_4, vl); vfloat32m1_t _r05 = vle32_v_f32m1(output0_tm_5, vl); vfloat32m1_t _r06 = vle32_v_f32m1(output0_tm_6, vl); vfloat32m1_t _r07 = vle32_v_f32m1(output0_tm_7, vl); vfloat32m1_t _tmp024a = vfadd_vv_f32m1(_r01, _r02, vl); vfloat32m1_t _tmp135a = vfsub_vv_f32m1(_r01, _r02, vl); vfloat32m1_t _tmp024b = vfadd_vv_f32m1(_r03, _r04, vl); vfloat32m1_t _tmp135b = vfsub_vv_f32m1(_r03, _r04, vl); vfloat32m1_t _tmp024c = vfadd_vv_f32m1(_r05, _r06, vl); vfloat32m1_t _tmp135c = vfsub_vv_f32m1(_r05, _r06, vl); vfloat32m1_t _tmp0m = vfadd_vv_f32m1(vfadd_vv_f32m1(_r00, _tmp024a, vl), vfmacc_vf_f32m1(_tmp024b, 32.f, _tmp024c, vl), vl); vfloat32m1_t _tmp2m = vfmacc_vf_f32m1( vfmacc_vf_f32m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl); vfloat32m1_t _tmp4m = vfmacc_vf_f32m1( vfmacc_vf_f32m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl); vfloat32m1_t _tmp1m = vfmacc_vf_f32m1( vfmacc_vf_f32m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl); vfloat32m1_t _tmp3m = vfmacc_vf_f32m1( vfmacc_vf_f32m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl); vfloat32m1_t _tmp5m = vfadd_vv_f32m1(vfadd_vv_f32m1(_r07, _tmp135a, vl), vfmacc_vf_f32m1(_tmp135c, 32.f, _tmp135b, vl), vl); vse32_v_f32m1(tmp[0][m], _tmp0m, vl); vse32_v_f32m1(tmp[2][m], _tmp2m, vl); vse32_v_f32m1(tmp[4][m], _tmp4m, vl); vse32_v_f32m1(tmp[1][m], _tmp1m, vl); vse32_v_f32m1(tmp[3][m], _tmp3m, vl); vse32_v_f32m1(tmp[5][m], _tmp5m, vl); output0_tm_0 += tiles * packn * 8; output0_tm_1 += tiles * packn * 8; output0_tm_2 += tiles * packn * 8; output0_tm_3 += tiles * packn * 8; output0_tm_4 += tiles * packn * 8; output0_tm_5 += tiles * packn * 8; output0_tm_6 += tiles * packn * 8; output0_tm_7 += tiles * packn * 8; } vfloat32m1_t _bias = vle32_v_f32m1(bias_tmp, vl); for (int m = 0; m < 6; m++) { vfloat32m1_t _tmp00 = vle32_v_f32m1(tmp[m][0], vl); vfloat32m1_t _tmp01 = vle32_v_f32m1(tmp[m][1], vl); vfloat32m1_t _tmp02 = vle32_v_f32m1(tmp[m][2], vl); vfloat32m1_t _tmp03 = vle32_v_f32m1(tmp[m][3], vl); vfloat32m1_t _tmp04 = vle32_v_f32m1(tmp[m][4], vl); vfloat32m1_t _tmp05 = vle32_v_f32m1(tmp[m][5], vl); vfloat32m1_t _tmp06 = vle32_v_f32m1(tmp[m][6], vl); vfloat32m1_t _tmp07 = vle32_v_f32m1(tmp[m][7], vl); vfloat32m1_t _tmp024a = vfadd_vv_f32m1(_tmp01, _tmp02, vl); vfloat32m1_t _tmp135a = vfsub_vv_f32m1(_tmp01, _tmp02, vl); vfloat32m1_t _tmp024b = vfadd_vv_f32m1(_tmp03, _tmp04, vl); vfloat32m1_t _tmp135b = vfsub_vv_f32m1(_tmp03, _tmp04, vl); vfloat32m1_t _tmp024c = vfadd_vv_f32m1(_tmp05, _tmp06, vl); vfloat32m1_t _tmp135c = vfsub_vv_f32m1(_tmp05, _tmp06, vl); vfloat32m1_t _output00 = vfadd_vv_f32m1( _bias, vfadd_vv_f32m1(vfadd_vv_f32m1(_tmp00, _tmp024a, vl), vfmacc_vf_f32m1(_tmp024b, 32.f, _tmp024c, vl), vl), vl); vfloat32m1_t _output02 = vfadd_vv_f32m1( _bias, vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl), vl); vfloat32m1_t _output04 = vfadd_vv_f32m1( _bias, vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl), vl); vfloat32m1_t _output01 = vfadd_vv_f32m1( _bias, vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl), vl); vfloat32m1_t _output03 = vfadd_vv_f32m1( _bias, 
vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl), vl); vfloat32m1_t _output05 = vfadd_vv_f32m1( _bias, vfadd_vv_f32m1(vfadd_vv_f32m1(_tmp07, _tmp135a, vl), vfmacc_vf_f32m1(_tmp135c, 32.f, _tmp135b, vl), vl), vl); vse32_v_f32m1(output0, _output00, vl); vse32_v_f32m1(output0 + packn * 2, _output02, vl); vse32_v_f32m1(output0 + packn * 4, _output04, vl); vse32_v_f32m1(output0 + packn * 1, _output01, vl); vse32_v_f32m1(output0 + packn * 3, _output03, vl); vse32_v_f32m1(output0 + packn * 5, _output05, vl); output0 += block_w * 6 * packn; } } } } csi_mem_free(output_dot_buf); // crop the output after transform: cut extra part (right , bottom) winograd_crop_output_packnto1_fp32(output_tm1_buf, output_data, out_c, out_h, out_w, block_h * 6, block_w * 6); output_data += output_size; csi_mem_free(output_tm1_buf); } if (!flag_bias) { csi_mem_free(bias_data); bias_data = NULL; } return CSINN_TRUE; }
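The tile bookkeeping in csi_nn_rvv_conv3x3s1_winograd64_packn_fp32() follows the usual F(6x6, 3x3) Winograd scheme: each 6x6 output tile is computed from an 8x8 input tile, and neighbouring input tiles overlap by two rows/columns, which is where block_h * 6 + 2 comes from. A small helper restating that arithmetic — the helper name is illustrative, not part of CSI-NN2:

/* Illustrative only: tile counts and padded input size for Winograd F(6x6, 3x3),
   using exactly the formulas from the function above. */
static inline void wino64_tile_sizes(int out_h, int out_w, int *block_h, int *block_w,
                                     int *padded_in_h, int *padded_in_w)
{
    *block_h = (out_h + 5) / 6;      /* ceil(out_h / 6): 6x6 output tiles per column */
    *block_w = (out_w + 5) / 6;
    *padded_in_h = *block_h * 6 + 2; /* each tile reads 8x8 inputs; tiles overlap by 2 */
    *padded_in_w = *block_w * 6 + 2;
}
/* Example: out_h = out_w = 20 gives 4 x 4 tiles and a 26 x 26 padded input,
   i.e. tiles = block_h * block_w = 16 per batch. */

The later reorder of input_tm1_buf into groups of 8/4/2/1 tiles does not change the math; it only arranges the data so the dot-product stage can keep up to eight accumulator vectors live per kernel vector load.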
GB_binop__ge_bool.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ge_bool) // A.*B function (eWiseMult): GB (_AemultB_01__ge_bool) // A.*B function (eWiseMult): GB (_AemultB_02__ge_bool) // A.*B function (eWiseMult): GB (_AemultB_03__ge_bool) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_bool) // A*D function (colscale): GB (_AxD__ge_bool) // D*A function (rowscale): GB (_DxB__ge_bool) // C+=B function (dense accum): GB (_Cdense_accumB__ge_bool) // C+=b function (dense accum): GB (_Cdense_accumb__ge_bool) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_bool) // C=scalar+B GB (_bind1st__ge_bool) // C=scalar+B' GB (_bind1st_tran__ge_bool) // C=A+scalar GB (_bind2nd__ge_bool) // C=A'+scalar GB (_bind2nd_tran__ge_bool) // C type: bool // A type: bool // B,b type: bool // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ bool #define GB_BTYPE \ bool #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ bool aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ bool bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_BOOL || GxB_NO_GE_BOOL) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__ge_bool) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ge_bool) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ge_bool) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type bool bool bwork = (*((bool *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ge_bool) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ge_bool) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ge_bool) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__ge_bool) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ge_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__ge_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ge_bool) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ge_bool) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool 
*Cx = (bool *) Cx_output ; bool x = (*((bool *) x_input)) ; bool *Bx = (bool *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; bool bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ge_bool) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; bool *Ax = (bool *) Ax_input ; bool y = (*((bool *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; bool aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__ge_bool) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ bool #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool x = (*((const bool *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ bool } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__ge_bool) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool y = (*((const bool *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
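Every template included by this generated kernel ultimately applies the single macro GB_BINOP defined above, z = (x >= y), to bool entries. As a rough illustration (not GraphBLAS API), the fully dense, non-iso eWise case reduces to:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: what GB_GETA / GB_GETB / GB_BINOP / GB_CX amount to for
   GE_BOOL when A, B and C are dense and not iso-valued. */
static void ge_bool_dense_ewise (bool *Cx, const bool *Ax, const bool *Bx, int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++)
    {
        bool aij = Ax [p] ;       /* GB_GETA  */
        bool bij = Bx [p] ;       /* GB_GETB  */
        Cx [p] = (aij >= bij) ;   /* GB_BINOP */
    }
}

The bind1st and bind2nd kernels above are this same loop with one operand replaced by a scalar, computing (x >= bij) and (aij >= y) respectively.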
strassen.ref.c
#include <sys/time.h> #include <time.h> #include <stdio.h> static unsigned long long current_time_ns() { #ifdef __MACH__ clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); unsigned long long s = 1000000000ULL * (unsigned long long)mts.tv_sec; return (unsigned long long)mts.tv_nsec + s; #else struct timespec t ={0,0}; clock_gettime(CLOCK_MONOTONIC, &t); unsigned long long s = 1000000000ULL * (unsigned long long)t.tv_sec; return (((unsigned long long)t.tv_nsec)) + s; #endif } /**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /**********************************************************************************************/ /* * Copyright (c) 1996 Massachusetts Institute of Technology * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to use, copy, modify, and distribute the Software without * restriction, provided the Software, including any modified copies made * under this license, is not distributed for a fee, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE MASSACHUSETTS INSTITUTE OF TECHNOLOGY BE LIABLE * FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * /WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * Except as contained in this notice, the name of the Massachusetts * Institute of Technology shall not be used in advertising or otherwise * to promote the sale, use or other dealings in this Software without * prior written authorization from the Massachusetts Institute of * Technology. * */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include "app-desc.h" #include "bots.h" #include "strassen.h" /*********************************************************************** * Naive sequential algorithm, for comparison purposes **********************************************************************/ void matrixmul(int n, REAL *A, int an, REAL *B, int bn, REAL *C, int cn) { int i, j, k; REAL s; for (i = 0; i < n; ++i) { for (j = 0; j < n; ++j) { s = 0.0; for (k = 0; k < n; ++k) s += ELEM(A, an, i, k) * ELEM(B, bn, k, j); ELEM(C, cn, i, j) = s; } } } /***************************************************************************** ** ** FastNaiveMatrixMultiply ** ** For small to medium sized matrices A, B, and C of size ** MatrixSize * MatrixSize this function performs the operation ** C = A x B efficiently. ** ** Note MatrixSize must be divisible by 8. ** ** INPUT: ** C = (*C WRITE) Address of top left element of matrix C. ** A = (*A IS READ ONLY) Address of top left element of matrix A. ** B = (*B IS READ ONLY) Address of top left element of matrix B. 
** MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n) ** RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1] ** RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1] ** RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1] ** ** OUTPUT: ** C = (*C WRITE) Matrix C contains A x B. (Initial value of *C undefined.) ** *****************************************************************************/ void FastNaiveMatrixMultiply(REAL *C, REAL *A, REAL *B, unsigned MatrixSize, unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB) { /* Assumes size of real is 8 bytes */ PTR RowWidthBInBytes = RowWidthB << 3; PTR RowWidthAInBytes = RowWidthA << 3; PTR MatrixWidthInBytes = MatrixSize << 3; PTR RowIncrementC = ( RowWidthC - MatrixSize) << 3; unsigned Horizontal, Vertical; REAL *ARowStart = A; for (Vertical = 0; Vertical < MatrixSize; Vertical++) { for (Horizontal = 0; Horizontal < MatrixSize; Horizontal += 8) { REAL *BColumnStart = B + Horizontal; REAL FirstARowValue = *ARowStart++; REAL Sum0 = FirstARowValue * (*BColumnStart); REAL Sum1 = FirstARowValue * (*(BColumnStart+1)); REAL Sum2 = FirstARowValue * (*(BColumnStart+2)); REAL Sum3 = FirstARowValue * (*(BColumnStart+3)); REAL Sum4 = FirstARowValue * (*(BColumnStart+4)); REAL Sum5 = FirstARowValue * (*(BColumnStart+5)); REAL Sum6 = FirstARowValue * (*(BColumnStart+6)); REAL Sum7 = FirstARowValue * (*(BColumnStart+7)); unsigned Products; for (Products = 1; Products < MatrixSize; Products++) { REAL ARowValue = *ARowStart++; BColumnStart = (REAL*) (((PTR) BColumnStart) + RowWidthBInBytes); Sum0 += ARowValue * (*BColumnStart); Sum1 += ARowValue * (*(BColumnStart+1)); Sum2 += ARowValue * (*(BColumnStart+2)); Sum3 += ARowValue * (*(BColumnStart+3)); Sum4 += ARowValue * (*(BColumnStart+4)); Sum5 += ARowValue * (*(BColumnStart+5)); Sum6 += ARowValue * (*(BColumnStart+6)); Sum7 += ARowValue * (*(BColumnStart+7)); } ARowStart = (REAL*) ( ((PTR) ARowStart) - MatrixWidthInBytes); *(C) = Sum0; *(C+1) = Sum1; *(C+2) = Sum2; *(C+3) = Sum3; *(C+4) = Sum4; *(C+5) = Sum5; *(C+6) = Sum6; *(C+7) = Sum7; C+=8; } ARowStart = (REAL*) ( ((PTR) ARowStart) + RowWidthAInBytes ); C = (REAL*) ( ((PTR) C) + RowIncrementC ); } } /***************************************************************************** ** ** FastAdditiveNaiveMatrixMultiply ** ** For small to medium sized matrices A, B, and C of size ** MatrixSize * MatrixSize this function performs the operation ** C += A x B efficiently. ** ** Note MatrixSize must be divisible by 8. ** ** INPUT: ** C = (*C READ/WRITE) Address of top left element of matrix C. ** A = (*A IS READ ONLY) Address of top left element of matrix A. ** B = (*B IS READ ONLY) Address of top left element of matrix B. ** MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n) ** RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1] ** RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1] ** RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1] ** ** OUTPUT: ** C = (*C READ/WRITE) Matrix C contains C + A x B. 
** *****************************************************************************/ void FastAdditiveNaiveMatrixMultiply(REAL *C, REAL *A, REAL *B, unsigned MatrixSize, unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB) { /* Assumes size of real is 8 bytes */ PTR RowWidthBInBytes = RowWidthB << 3; PTR RowWidthAInBytes = RowWidthA << 3; PTR MatrixWidthInBytes = MatrixSize << 3; PTR RowIncrementC = ( RowWidthC - MatrixSize) << 3; unsigned Horizontal, Vertical; REAL *ARowStart = A; for (Vertical = 0; Vertical < MatrixSize; Vertical++) { for (Horizontal = 0; Horizontal < MatrixSize; Horizontal += 8) { REAL *BColumnStart = B + Horizontal; REAL Sum0 = *C; REAL Sum1 = *(C+1); REAL Sum2 = *(C+2); REAL Sum3 = *(C+3); REAL Sum4 = *(C+4); REAL Sum5 = *(C+5); REAL Sum6 = *(C+6); REAL Sum7 = *(C+7); unsigned Products; for (Products = 0; Products < MatrixSize; Products++) { REAL ARowValue = *ARowStart++; Sum0 += ARowValue * (*BColumnStart); Sum1 += ARowValue * (*(BColumnStart+1)); Sum2 += ARowValue * (*(BColumnStart+2)); Sum3 += ARowValue * (*(BColumnStart+3)); Sum4 += ARowValue * (*(BColumnStart+4)); Sum5 += ARowValue * (*(BColumnStart+5)); Sum6 += ARowValue * (*(BColumnStart+6)); Sum7 += ARowValue * (*(BColumnStart+7)); BColumnStart = (REAL*) (((PTR) BColumnStart) + RowWidthBInBytes); } ARowStart = (REAL*) ( ((PTR) ARowStart) - MatrixWidthInBytes); *(C) = Sum0; *(C+1) = Sum1; *(C+2) = Sum2; *(C+3) = Sum3; *(C+4) = Sum4; *(C+5) = Sum5; *(C+6) = Sum6; *(C+7) = Sum7; C+=8; } ARowStart = (REAL*) ( ((PTR) ARowStart) + RowWidthAInBytes ); C = (REAL*) ( ((PTR) C) + RowIncrementC ); } } /***************************************************************************** ** ** MultiplyByDivideAndConquer ** ** For medium to medium-large (would you like fries with that) sized ** matrices A, B, and C of size MatrixSize * MatrixSize this function ** efficiently performs the operation ** C = A x B (if AdditiveMode == 0) ** C += A x B (if AdditiveMode != 0) ** ** Note MatrixSize must be divisible by 16. ** ** INPUT: ** C = (*C READ/WRITE) Address of top left element of matrix C. ** A = (*A IS READ ONLY) Address of top left element of matrix A. ** B = (*B IS READ ONLY) Address of top left element of matrix B. ** MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n) ** RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1] ** RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1] ** RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1] ** AdditiveMode = 0 if we want C = A x B, otherwise we'll do C += A x B ** ** OUTPUT: ** C (+)= A x B. 
(+ if AdditiveMode != 0) ** *****************************************************************************/ void MultiplyByDivideAndConquer(REAL *C, REAL *A, REAL *B, unsigned MatrixSize, unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB, int AdditiveMode ) { REAL *A01, *A10, *A11, *B01, *B10, *B11, *C01, *C10, *C11; unsigned QuadrantSize = MatrixSize >> 1; /* partition the matrix */ A01 = A + QuadrantSize; A10 = A + RowWidthA * QuadrantSize; A11 = A10 + QuadrantSize; B01 = B + QuadrantSize; B10 = B + RowWidthB * QuadrantSize; B11 = B10 + QuadrantSize; C01 = C + QuadrantSize; C10 = C + RowWidthC * QuadrantSize; C11 = C10 + QuadrantSize; if (QuadrantSize > SizeAtWhichNaiveAlgorithmIsMoreEfficient) { MultiplyByDivideAndConquer(C, A, B, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, AdditiveMode); MultiplyByDivideAndConquer(C01, A, B01, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, AdditiveMode); MultiplyByDivideAndConquer(C11, A10, B01, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, AdditiveMode); MultiplyByDivideAndConquer(C10, A10, B, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, AdditiveMode); MultiplyByDivideAndConquer(C, A01, B10, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, 1); MultiplyByDivideAndConquer(C01, A01, B11, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, 1); MultiplyByDivideAndConquer(C11, A11, B11, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, 1); MultiplyByDivideAndConquer(C10, A11, B10, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, 1); } else { if (AdditiveMode) { FastAdditiveNaiveMatrixMultiply(C, A, B, QuadrantSize, RowWidthC, RowWidthA, RowWidthB); FastAdditiveNaiveMatrixMultiply(C01, A, B01, QuadrantSize, RowWidthC, RowWidthA, RowWidthB); FastAdditiveNaiveMatrixMultiply(C11, A10, B01, QuadrantSize, RowWidthC, RowWidthA, RowWidthB); FastAdditiveNaiveMatrixMultiply(C10, A10, B, QuadrantSize, RowWidthC, RowWidthA, RowWidthB); } else { FastNaiveMatrixMultiply(C, A, B, QuadrantSize, RowWidthC, RowWidthA, RowWidthB); FastNaiveMatrixMultiply(C01, A, B01, QuadrantSize, RowWidthC, RowWidthA, RowWidthB); FastNaiveMatrixMultiply(C11, A10, B01, QuadrantSize, RowWidthC, RowWidthA, RowWidthB); FastNaiveMatrixMultiply(C10, A10, B, QuadrantSize, RowWidthC, RowWidthA, RowWidthB); } FastAdditiveNaiveMatrixMultiply(C, A01, B10, QuadrantSize, RowWidthC, RowWidthA, RowWidthB); FastAdditiveNaiveMatrixMultiply(C01, A01, B11, QuadrantSize, RowWidthC, RowWidthA, RowWidthB); FastAdditiveNaiveMatrixMultiply(C11, A11, B11, QuadrantSize, RowWidthC, RowWidthA, RowWidthB); FastAdditiveNaiveMatrixMultiply(C10, A11, B10, QuadrantSize, RowWidthC, RowWidthA, RowWidthB); } return; } /***************************************************************************** ** ** OptimizedStrassenMultiply ** ** For large matrices A, B, and C of size MatrixSize * MatrixSize this ** function performs the operation C = A x B efficiently. ** ** INPUT: ** C = (*C WRITE) Address of top left element of matrix C. ** A = (*A IS READ ONLY) Address of top left element of matrix A. ** B = (*B IS READ ONLY) Address of top left element of matrix B. ** MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n) ** RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1] ** RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1] ** RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1] ** ** OUTPUT: ** C = (*C WRITE) Matrix C contains A x B. (Initial value of *C undefined.) 
** *****************************************************************************/ void OptimizedStrassenMultiply_seq(REAL *C, REAL *A, REAL *B, unsigned MatrixSize, unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB, int Depth) { unsigned QuadrantSize = MatrixSize >> 1; /* MatixSize / 2 */ unsigned QuadrantSizeInBytes = sizeof(REAL) * QuadrantSize * QuadrantSize + 32; unsigned Column, Row; /************************************************************************ ** For each matrix A, B, and C, we'll want pointers to each quandrant ** in the matrix. These quandrants will be addressed as follows: ** -- -- ** | A11 A12 | ** | | ** | A21 A22 | ** -- -- ************************************************************************/ REAL /* *A11, *B11, *C11, */ *A12, *B12, *C12, *A21, *B21, *C21, *A22, *B22, *C22; REAL *S1,*S2,*S3,*S4,*S5,*S6,*S7,*S8,*M2,*M5,*T1sMULT; const int NumberOfVariables = 11; PTR TempMatrixOffset = 0; PTR MatrixOffsetA = 0; PTR MatrixOffsetB = 0; char *Heap; void *StartHeap; /* Distance between the end of a matrix row and the start of the next row */ PTR RowIncrementA = ( RowWidthA - QuadrantSize ) << 3; PTR RowIncrementB = ( RowWidthB - QuadrantSize ) << 3; PTR RowIncrementC = ( RowWidthC - QuadrantSize ) << 3; if (MatrixSize <= bots_app_cutoff_value) { MultiplyByDivideAndConquer(C, A, B, MatrixSize, RowWidthC, RowWidthA, RowWidthB, 0); return; } /* Initialize quandrant matrices */ A12 = A + QuadrantSize; B12 = B + QuadrantSize; C12 = C + QuadrantSize; A21 = A + (RowWidthA * QuadrantSize); B21 = B + (RowWidthB * QuadrantSize); C21 = C + (RowWidthC * QuadrantSize); A22 = A21 + QuadrantSize; B22 = B21 + QuadrantSize; C22 = C21 + QuadrantSize; /* Allocate Heap Space Here */ StartHeap = Heap = (char *)malloc(QuadrantSizeInBytes * NumberOfVariables); /* ensure that heap is on cache boundary */ if ( ((PTR) Heap) & 31) Heap = (char*) ( ((PTR) Heap) + 32 - ( ((PTR) Heap) & 31) ); /* Distribute the heap space over the variables */ S1 = (REAL*) Heap; Heap += QuadrantSizeInBytes; S2 = (REAL*) Heap; Heap += QuadrantSizeInBytes; S3 = (REAL*) Heap; Heap += QuadrantSizeInBytes; S4 = (REAL*) Heap; Heap += QuadrantSizeInBytes; S5 = (REAL*) Heap; Heap += QuadrantSizeInBytes; S6 = (REAL*) Heap; Heap += QuadrantSizeInBytes; S7 = (REAL*) Heap; Heap += QuadrantSizeInBytes; S8 = (REAL*) Heap; Heap += QuadrantSizeInBytes; M2 = (REAL*) Heap; Heap += QuadrantSizeInBytes; M5 = (REAL*) Heap; Heap += QuadrantSizeInBytes; T1sMULT = (REAL*) Heap; Heap += QuadrantSizeInBytes; /*************************************************************************** ** Step through all columns row by row (vertically) ** (jumps in memory by RowWidth => bad locality) ** (but we want the best locality on the innermost loop) ***************************************************************************/ for (Row = 0; Row < QuadrantSize; Row++) { /************************************************************************* ** Step through each row horizontally (addressing elements in each column) ** (jumps linearly througn memory => good locality) *************************************************************************/ for (Column = 0; Column < QuadrantSize; Column++) { /*********************************************************** ** Within this loop, the following holds for MatrixOffset: ** MatrixOffset = (Row * RowWidth) + Column ** (note: that the unit of the offset is number of reals) ***********************************************************/ /* Element of Global Matrix, such as A, B, C */ #define E(Matrix) (* 
(REAL*) ( ((PTR) Matrix) + TempMatrixOffset ) ) #define EA(Matrix) (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetA ) ) #define EB(Matrix) (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetB ) ) /* FIXME - may pay to expand these out - got higher speed-ups below */ /* S4 = A12 - ( S2 = ( S1 = A21 + A22 ) - A11 ) */ E(S4) = EA(A12) - ( E(S2) = ( E(S1) = EA(A21) + EA(A22) ) - EA(A) ); /* S8 = (S6 = B22 - ( S5 = B12 - B11 ) ) - B21 */ E(S8) = ( E(S6) = EB(B22) - ( E(S5) = EB(B12) - EB(B) ) ) - EB(B21); /* S3 = A11 - A21 */ E(S3) = EA(A) - EA(A21); /* S7 = B22 - B12 */ E(S7) = EB(B22) - EB(B12); TempMatrixOffset += sizeof(REAL); MatrixOffsetA += sizeof(REAL); MatrixOffsetB += sizeof(REAL); } /* end row loop*/ MatrixOffsetA += RowIncrementA; MatrixOffsetB += RowIncrementB; } /* end column loop */ /* M2 = A11 x B11 */ OptimizedStrassenMultiply_seq(M2, A, B, QuadrantSize, QuadrantSize, RowWidthA, RowWidthB, Depth+1); /* M5 = S1 * S5 */ OptimizedStrassenMultiply_seq(M5, S1, S5, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1); /* Step 1 of T1 = S2 x S6 + M2 */ OptimizedStrassenMultiply_seq(T1sMULT, S2, S6, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1); /* Step 1 of T2 = T1 + S3 x S7 */ OptimizedStrassenMultiply_seq(C22, S3, S7, QuadrantSize, RowWidthC /*FIXME*/, QuadrantSize, QuadrantSize, Depth+1); /* Step 1 of C11 = M2 + A12 * B21 */ OptimizedStrassenMultiply_seq(C, A12, B21, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, Depth+1); /* Step 1 of C12 = S4 x B22 + T1 + M5 */ OptimizedStrassenMultiply_seq(C12, S4, B22, QuadrantSize, RowWidthC, QuadrantSize, RowWidthB, Depth+1); /* Step 1 of C21 = T2 - A22 * S8 */ OptimizedStrassenMultiply_seq(C21, A22, S8, QuadrantSize, RowWidthC, RowWidthA, QuadrantSize, Depth+1); /*************************************************************************** ** Step through all columns row by row (vertically) ** (jumps in memory by RowWidth => bad locality) ** (but we want the best locality on the innermost loop) ***************************************************************************/ for (Row = 0; Row < QuadrantSize; Row++) { /************************************************************************* ** Step through each row horizontally (addressing elements in each column) ** (jumps linearly througn memory => good locality) *************************************************************************/ for (Column = 0; Column < QuadrantSize; Column += 4) { REAL LocalM5_0 = *(M5); REAL LocalM5_1 = *(M5+1); REAL LocalM5_2 = *(M5+2); REAL LocalM5_3 = *(M5+3); REAL LocalM2_0 = *(M2); REAL LocalM2_1 = *(M2+1); REAL LocalM2_2 = *(M2+2); REAL LocalM2_3 = *(M2+3); REAL T1_0 = *(T1sMULT) + LocalM2_0; REAL T1_1 = *(T1sMULT+1) + LocalM2_1; REAL T1_2 = *(T1sMULT+2) + LocalM2_2; REAL T1_3 = *(T1sMULT+3) + LocalM2_3; REAL T2_0 = *(C22) + T1_0; REAL T2_1 = *(C22+1) + T1_1; REAL T2_2 = *(C22+2) + T1_2; REAL T2_3 = *(C22+3) + T1_3; (*(C)) += LocalM2_0; (*(C+1)) += LocalM2_1; (*(C+2)) += LocalM2_2; (*(C+3)) += LocalM2_3; (*(C12)) += LocalM5_0 + T1_0; (*(C12+1)) += LocalM5_1 + T1_1; (*(C12+2)) += LocalM5_2 + T1_2; (*(C12+3)) += LocalM5_3 + T1_3; (*(C22)) = LocalM5_0 + T2_0; (*(C22+1)) = LocalM5_1 + T2_1; (*(C22+2)) = LocalM5_2 + T2_2; (*(C22+3)) = LocalM5_3 + T2_3; (*(C21 )) = (- *(C21 )) + T2_0; (*(C21+1)) = (- *(C21+1)) + T2_1; (*(C21+2)) = (- *(C21+2)) + T2_2; (*(C21+3)) = (- *(C21+3)) + T2_3; M5 += 4; M2 += 4; T1sMULT += 4; C += 4; C12 += 4; C21 += 4; C22 += 4; } C = (REAL*) ( ((PTR) C ) + RowIncrementC); C12 = (REAL*) ( ((PTR) C12 ) + RowIncrementC); C21 = 
(REAL*) ( ((PTR) C21 ) + RowIncrementC); C22 = (REAL*) ( ((PTR) C22 ) + RowIncrementC); } free(StartHeap); } void OptimizedStrassenMultiply_par(REAL *C, REAL *A, REAL *B, unsigned MatrixSize, unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB, int Depth) { unsigned QuadrantSize = MatrixSize >> 1; /* MatixSize / 2 */ unsigned QuadrantSizeInBytes = sizeof(REAL) * QuadrantSize * QuadrantSize + 32; unsigned Column, Row; /************************************************************************ ** For each matrix A, B, and C, we'll want pointers to each quandrant ** in the matrix. These quandrants will be addressed as follows: ** -- -- ** | A11 A12 | ** | | ** | A21 A22 | ** -- -- ************************************************************************/ REAL /* *A11, *B11, *C11, */ *A12, *B12, *C12, *A21, *B21, *C21, *A22, *B22, *C22; REAL *S1,*S2,*S3,*S4,*S5,*S6,*S7,*S8,*M2,*M5,*T1sMULT; const int NumberOfVariables = 11; PTR TempMatrixOffset = 0; PTR MatrixOffsetA = 0; PTR MatrixOffsetB = 0; char *Heap; void *StartHeap; /* Distance between the end of a matrix row and the start of the next row */ PTR RowIncrementA = ( RowWidthA - QuadrantSize ) << 3; PTR RowIncrementB = ( RowWidthB - QuadrantSize ) << 3; PTR RowIncrementC = ( RowWidthC - QuadrantSize ) << 3; if (MatrixSize <= bots_app_cutoff_value) { MultiplyByDivideAndConquer(C, A, B, MatrixSize, RowWidthC, RowWidthA, RowWidthB, 0); return; } /* Initialize quandrant matrices */ A12 = A + QuadrantSize; B12 = B + QuadrantSize; C12 = C + QuadrantSize; A21 = A + (RowWidthA * QuadrantSize); B21 = B + (RowWidthB * QuadrantSize); C21 = C + (RowWidthC * QuadrantSize); A22 = A21 + QuadrantSize; B22 = B21 + QuadrantSize; C22 = C21 + QuadrantSize; /* Allocate Heap Space Here */ StartHeap = Heap = (char *)malloc(QuadrantSizeInBytes * NumberOfVariables); /* ensure that heap is on cache boundary */ if ( ((PTR) Heap) & 31) Heap = (char*) ( ((PTR) Heap) + 32 - ( ((PTR) Heap) & 31) ); /* Distribute the heap space over the variables */ S1 = (REAL*) Heap; Heap += QuadrantSizeInBytes; S2 = (REAL*) Heap; Heap += QuadrantSizeInBytes; S3 = (REAL*) Heap; Heap += QuadrantSizeInBytes; S4 = (REAL*) Heap; Heap += QuadrantSizeInBytes; S5 = (REAL*) Heap; Heap += QuadrantSizeInBytes; S6 = (REAL*) Heap; Heap += QuadrantSizeInBytes; S7 = (REAL*) Heap; Heap += QuadrantSizeInBytes; S8 = (REAL*) Heap; Heap += QuadrantSizeInBytes; M2 = (REAL*) Heap; Heap += QuadrantSizeInBytes; M5 = (REAL*) Heap; Heap += QuadrantSizeInBytes; T1sMULT = (REAL*) Heap; Heap += QuadrantSizeInBytes; /*************************************************************************** ** Step through all columns row by row (vertically) ** (jumps in memory by RowWidth => bad locality) ** (but we want the best locality on the innermost loop) ***************************************************************************/ for (Row = 0; Row < QuadrantSize; Row++) { /************************************************************************* ** Step through each row horizontally (addressing elements in each column) ** (jumps linearly througn memory => good locality) *************************************************************************/ for (Column = 0; Column < QuadrantSize; Column++) { /*********************************************************** ** Within this loop, the following holds for MatrixOffset: ** MatrixOffset = (Row * RowWidth) + Column ** (note: that the unit of the offset is number of reals) ***********************************************************/ /* Element of Global Matrix, such as A, B, C */ 
#define E(Matrix) (* (REAL*) ( ((PTR) Matrix) + TempMatrixOffset ) ) #define EA(Matrix) (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetA ) ) #define EB(Matrix) (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetB ) ) /* FIXME - may pay to expand these out - got higher speed-ups below */ /* S4 = A12 - ( S2 = ( S1 = A21 + A22 ) - A11 ) */ E(S4) = EA(A12) - ( E(S2) = ( E(S1) = EA(A21) + EA(A22) ) - EA(A) ); /* S8 = (S6 = B22 - ( S5 = B12 - B11 ) ) - B21 */ E(S8) = ( E(S6) = EB(B22) - ( E(S5) = EB(B12) - EB(B) ) ) - EB(B21); /* S3 = A11 - A21 */ E(S3) = EA(A) - EA(A21); /* S7 = B22 - B12 */ E(S7) = EB(B22) - EB(B12); TempMatrixOffset += sizeof(REAL); MatrixOffsetA += sizeof(REAL); MatrixOffsetB += sizeof(REAL); } /* end row loop*/ MatrixOffsetA += RowIncrementA; MatrixOffsetB += RowIncrementB; } /* end column loop */ /* M2 = A11 x B11 */ #pragma omp task untied OptimizedStrassenMultiply_par(M2, A, B, QuadrantSize, QuadrantSize, RowWidthA, RowWidthB, Depth+1); /* M5 = S1 * S5 */ #pragma omp task untied OptimizedStrassenMultiply_par(M5, S1, S5, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1); /* Step 1 of T1 = S2 x S6 + M2 */ #pragma omp task untied OptimizedStrassenMultiply_par(T1sMULT, S2, S6, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1); /* Step 1 of T2 = T1 + S3 x S7 */ #pragma omp task untied OptimizedStrassenMultiply_par(C22, S3, S7, QuadrantSize, RowWidthC /*FIXME*/, QuadrantSize, QuadrantSize, Depth+1); /* Step 1 of C11 = M2 + A12 * B21 */ #pragma omp task untied OptimizedStrassenMultiply_par(C, A12, B21, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, Depth+1); /* Step 1 of C12 = S4 x B22 + T1 + M5 */ #pragma omp task untied OptimizedStrassenMultiply_par(C12, S4, B22, QuadrantSize, RowWidthC, QuadrantSize, RowWidthB, Depth+1); /* Step 1 of C21 = T2 - A22 * S8 */ #pragma omp task untied OptimizedStrassenMultiply_par(C21, A22, S8, QuadrantSize, RowWidthC, RowWidthA, QuadrantSize, Depth+1); /********************************************** ** Synchronization Point **********************************************/ #pragma omp taskwait ; /*************************************************************************** ** Step through all columns row by row (vertically) ** (jumps in memory by RowWidth => bad locality) ** (but we want the best locality on the innermost loop) ***************************************************************************/ for (Row = 0; Row < QuadrantSize; Row++) { /************************************************************************* ** Step through each row horizontally (addressing elements in each column) ** (jumps linearly througn memory => good locality) *************************************************************************/ for (Column = 0; Column < QuadrantSize; Column += 4) { REAL LocalM5_0 = *(M5); REAL LocalM5_1 = *(M5+1); REAL LocalM5_2 = *(M5+2); REAL LocalM5_3 = *(M5+3); REAL LocalM2_0 = *(M2); REAL LocalM2_1 = *(M2+1); REAL LocalM2_2 = *(M2+2); REAL LocalM2_3 = *(M2+3); REAL T1_0 = *(T1sMULT) + LocalM2_0; REAL T1_1 = *(T1sMULT+1) + LocalM2_1; REAL T1_2 = *(T1sMULT+2) + LocalM2_2; REAL T1_3 = *(T1sMULT+3) + LocalM2_3; REAL T2_0 = *(C22) + T1_0; REAL T2_1 = *(C22+1) + T1_1; REAL T2_2 = *(C22+2) + T1_2; REAL T2_3 = *(C22+3) + T1_3; (*(C)) += LocalM2_0; (*(C+1)) += LocalM2_1; (*(C+2)) += LocalM2_2; (*(C+3)) += LocalM2_3; (*(C12)) += LocalM5_0 + T1_0; (*(C12+1)) += LocalM5_1 + T1_1; (*(C12+2)) += LocalM5_2 + T1_2; (*(C12+3)) += LocalM5_3 + T1_3; (*(C22)) = LocalM5_0 + T2_0; (*(C22+1)) = LocalM5_1 + T2_1; (*(C22+2)) = LocalM5_2 + T2_2; 
(*(C22+3)) = LocalM5_3 + T2_3; (*(C21 )) = (- *(C21 )) + T2_0; (*(C21+1)) = (- *(C21+1)) + T2_1; (*(C21+2)) = (- *(C21+2)) + T2_2; (*(C21+3)) = (- *(C21+3)) + T2_3; M5 += 4; M2 += 4; T1sMULT += 4; C += 4; C12 += 4; C21 += 4; C22 += 4; } C = (REAL*) ( ((PTR) C ) + RowIncrementC); C12 = (REAL*) ( ((PTR) C12 ) + RowIncrementC); C21 = (REAL*) ( ((PTR) C21 ) + RowIncrementC); C22 = (REAL*) ( ((PTR) C22 ) + RowIncrementC); } free(StartHeap); } /* * Set an n by n matrix A to random values. The distance between * rows is an */ void init_matrix(int n, REAL *A, int an) { int i, j; for (i = 0; i < n; ++i) for (j = 0; j < n; ++j) ELEM(A, an, i, j) = ((double) rand()) / (double) RAND_MAX; } /* * Compare two matrices. Print an error message if they differ by * more than EPSILON. */ int compare_matrix(int n, REAL *A, int an, REAL *B, int bn) { int i, j; REAL c; for (i = 0; i < n; ++i) for (j = 0; j < n; ++j) { /* compute the relative error c */ c = ELEM(A, an, i, j) - ELEM(B, bn, i, j); if (c < 0.0) c = -c; c = c / ELEM(A, an, i, j); if (c > EPSILON) { bots_message("Strassen: Wrong answer!\n"); return BOTS_RESULT_UNSUCCESSFUL; } } return BOTS_RESULT_SUCCESSFUL; } /* * Allocate a matrix of side n (therefore n^2 elements) */ REAL *alloc_matrix(int n) { return (REAL *)malloc(n * n * sizeof(REAL)); } void strassen_main_par(REAL *A, REAL *B, REAL *C, int n) { bots_message("Computing parallel Strassen algorithm (n=%d) ", n); const unsigned long long full_program_start = current_time_ns(); { #pragma omp parallel { #pragma omp single { #pragma omp task untied OptimizedStrassenMultiply_par(C, A, B, n, n, n, n, 1); } } bots_message(" completed!\n"); } ; const unsigned long long full_program_end = current_time_ns(); printf("full_program %llu ns\n", full_program_end - full_program_start); } void strassen_main_seq(REAL *A, REAL *B, REAL *C, int n) { bots_message("Computing sequential Strassen algorithm (n=%d) ", n); OptimizedStrassenMultiply_seq(C, A, B, n, n, n, n, 1); bots_message(" completed!\n"); }
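/*****************************************************************************
**
** Minimal driver sketch for the routines above.  It assumes n is a power of
** two large enough to satisfy the divisibility requirements documented in
** the function headers, and that bots_app_cutoff_value has been configured
** elsewhere; strassen_example_main is a hypothetical name, not part of the
** BOTS API.
**
*****************************************************************************/
int strassen_example_main(int n)
{
  REAL *A = alloc_matrix(n);          /* operands                          */
  REAL *B = alloc_matrix(n);
  REAL *C = alloc_matrix(n);          /* result of the task-parallel run   */
  REAL *D = alloc_matrix(n);          /* result of the sequential run      */
  int ok;

  init_matrix(n, A, n);               /* random values in [0,1]            */
  init_matrix(n, B, n);

  strassen_main_seq(A, B, D, n);      /* reference answer                  */
  strassen_main_par(A, B, C, n);      /* answer computed with OpenMP tasks */

  ok = compare_matrix(n, D, n, C, n); /* element-wise relative check       */

  free(A); free(B); free(C); free(D);
  return ok;
}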
data.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include "data.h" #include "../input/dataHamming.h" #include "../input/dataEuclidean.h" #include "../input/dataDistanceMatrix.h" /*void Print(value* v) { if (v == NULL || v->name == NULL) printf("NULL"); printf("%s", v->name); } */ struct dataAct_t { value* data; int dataSize; int centroidsIndex; // array index :data[centroidsIndex] contains // the LAST centroid // The FIRST centroid is in data[dataSize-1] int* clustIndexes; // array of size K. clustIndexes[i] contains // the location of the first point in cluster i. // NOTE: the last element of the last // cluster is in data[centroidsIndex-1] int centroidNum; // Number of centroids currently in the data // array. Always <=k int k; // All data (points, cluster numbers, centroids are // First centroid lies in data[dataSize-k] }; typedef struct clustExtra_t { int flagAssign; int clust; double clustDist; int secClust; double secClustDist; int secFlagAssign; } clustExtra; Data dataAct; extern dataIF data; /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ClustExtra * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ void InitClustExtra(int point) { value* pointVal = &dataAct.data[point]; clustExtra* val; val = malloc(sizeof(clustExtra)); val->clust = -1; val->clustDist = -1; val->flagAssign = 0; val->secClust = -1; val->secClustDist = -1; val->secFlagAssign = -1; val->secFlagAssign = 0; pointVal->clustExtra = val; } void PrintClusters(FILE* output){ int i=0; int j=0; for(i=0;i<dataAct.k;i++){ int clustMin = dataAct.clustIndexes[i]; int clustMax = 0; if (i == dataAct.k - 1) clustMax = dataAct.centroidsIndex ; else clustMax = dataAct.clustIndexes[i + 1]; value clustVal; GetIthCentroid(i,&clustVal); int size=clustMax-clustMin; if(size<0) size=0; fprintf(output,"CLUSTER-%d {size: %d, medoid: %s}\n",i, size, clustVal.name); } } int GetClustSize(int i){ int clustMin = dataAct.clustIndexes[i]; int clustMax = 0; if (i == dataAct.k - 1) clustMax = dataAct.centroidsIndex ; else clustMax = dataAct.clustIndexes[i + 1]; int size=clustMax-clustMin; if(size<0) size=0; return size+1; } void PrintClustersComplete(FILE* output){ int i=0; int j=0; for(i=0;i<dataAct.k;i++){ int clustMin = dataAct.clustIndexes[i]; int clustMax = 0; if (i == dataAct.k - 1) clustMax = dataAct.centroidsIndex ; else clustMax = dataAct.clustIndexes[i + 1]; value clustVal; GetIthCentroid(i,&clustVal); int size=clustMax-clustMin; if(size<0) size=0; fprintf(output,"CLUSTER-%d {size: %d, medoid: %s}\n",i, size, clustVal.name); if(clustMin<clustMax) fprintf(output,"CLUSTER-CONTENTS: {"); else fprintf(output,"CLUSTER-CONTENTS: (empty)"); for(j=clustMin;j<clustMax;j++){ value clustDat; GetIthData(j,&clustDat); fprintf(output,"%s,",clustDat.name); } if(clustMin<clustMax) fprintf(output,"}"); fprintf(output,"\n"); } } void AssignClustExtra(int point, int clust, double clustDist, int secClust, double secClustDist) { value pointVal; GetIthData(point, &pointVal); // if(pointVal.clustExtra==NULL) // den exoun noima... 
den ginontai init // return; clustExtra* info = pointVal.clustExtra; info->clust = clust; info->clustDist = clustDist; info->secClust = secClust; info->secClustDist = secClustDist; } double GetDistFirstCentr(int point) { value pointVal; GetIthData(point, &pointVal); //if (pointVal.clustExtra == NULL) return -1; return ((clustExtra*)pointVal.clustExtra)->clustDist; } double GetDistSecCentr(int point) { value pointVal; GetIthData(point, &pointVal); //if (pointVal.clustExtra == NULL) return -1; return ((clustExtra*)pointVal.clustExtra)->secClustDist; } int GetExtrasCluster(int point){ value pointVal; GetIthData(point, &pointVal); //if (pointVal.clustExtra == NULL) return -1; return ((clustExtra*)pointVal.clustExtra)->clust; } // void UpdateClustExtra(int point,int clust,double clustDist){ // // value* pointVal=GetIthData(point); // // if(pointVal == NULL || pointVal->clustExtra==NULL) // return; // // clustExtra* info=pointVal->clustExtra; // // if(clustDist < info->secClustDist ){ // // if(clustDist < info->clustDist){ // info->secClustDist=info->clustDist; // info->secClust=info->clust; // info->clust=clust; // info->clustDist=clustDist; // } // else{ // info->secClustDist=clustDist; // info->secClust=clust; // } // } //} int IsAssigned(int point) { value pointVal; GetIthData(point, &pointVal); //if (pointVal.clustExtra == NULL) return -1; if ((((clustExtra*)(pointVal.clustExtra))->flagAssign == 1) && (((clustExtra*)(pointVal.clustExtra))->secFlagAssign == 1)) { return 1; } return 0; } /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ClustExtra * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Cluster * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ void Swap(int ind1, int ind2) { if (ind1 >= dataAct.dataSize || ind2 >= dataAct.dataSize) printf("Swap indexes over datasize\n"); if (ind1 < 0 || ind2 < 0) printf("Swap indexes under zero\n"); value v; if (ind1 == ind2) return; memcpy(&v, &dataAct.data[ind1], sizeof(value)); memcpy(&dataAct.data[ind1], &dataAct.data[ind2], sizeof(value)); memcpy(&dataAct.data[ind2], &v, sizeof(value)); } int GetMedoid(int cluster) { int clustMin = dataAct.clustIndexes[cluster]; int clustMax; int i, j, minDistIndex; double sumDist = 0, minDist; if (cluster == dataAct.k - 1) clustMax = dataAct.centroidsIndex - 1; else clustMax = dataAct.clustIndexes[cluster + 1] - 1; double* avgDist = malloc(sizeof(double) * (clustMax - clustMin + 1)); minDist = 99999999999; minDistIndex = 0; for (i = clustMin; i < clustMax + 1; ++i) { for (j = clustMin; j < clustMax + 1; ++j) { if (i == j) continue; sumDist += data.distance(&dataAct.data[i], &dataAct.data[j]); if(sumDist > minDist) continue; } if(sumDist > minDist){ sumDist = 0; continue; } value centroid; GetIthCentroid( cluster, &centroid); minDist = sumDist; minDistIndex = i; sumDist = 0; } free(avgDist); return minDistIndex; } // does not preserve the cluster functionality int AddCentroid(int centrIndex) { if (centrIndex > dataAct.dataSize) return -1; dataAct.centroidNum++; dataAct.k++; dataAct.centroidsIndex--; Swap(centrIndex, dataAct.centroidsIndex); return dataAct.centroidsIndex; } void FirstAssignment() { int offset = (int)( (dataAct.dataSize - dataAct.k) / (float)dataAct.k); int i = 0; dataAct.clustIndexes = malloc(sizeof(int) * dataAct.k); for (i = 0; i < dataAct.k; i++) { dataAct.clustIndexes[i] = i * offset; } for (i = 0; i < dataAct.dataSize; ++i) InitClustExtra(i); } int AddToClust(int index, int clustNum) { int i = 0; if (index < 0) printf("aaaaaaaaaaaaaaaaaaaaaaaaa\n"); int pointCluster = 
GetClusterOf2(index); //printf("clust %d\n",GetClusterOf(index)); if (clustNum > dataAct.k - 1) return -1; int clustMin = dataAct.clustIndexes[clustNum]; int clustMax = 0; if (clustNum == dataAct.k - 1){ clustMax = dataAct.centroidsIndex - 1; //printf("%d\n", clustMax); } else clustMax = dataAct.clustIndexes[clustNum + 1] - 1; // while( index > clustMax +1) //if(pointCluster != clustNum){ if (index > clustMax ){ //moves left for (i = pointCluster; i > clustNum; i--) { //printf("%d\n", dataAct.clustIndexes[pointCluster]); Swap(index, dataAct.clustIndexes[pointCluster]); dataAct.clustIndexes[pointCluster]++; index = dataAct.clustIndexes[pointCluster] - 1; pointCluster--; } return 0; } else if (index < clustMin ){ // moves right for (i = pointCluster; i < clustNum; i++) { //printf("%d,%d\n",index, dataAct.clustIndexes[pointCluster]); Swap(index, dataAct.clustIndexes[pointCluster + 1] - 1); dataAct.clustIndexes[pointCluster + 1]--; index = dataAct.clustIndexes[pointCluster + 1]; pointCluster++; } return 1; } else{ //printf("aa %d\n", clustNum); return 0; } //} } int GetClusterOf(int index) { value v; GetIthData(index, &v); clustExtra* extra = v.clustExtra; int ret = extra->clust; return ret; } int GetClusterOf2(int index) { int i; for (i = 0; i < GetNoOfCluster(); ++i) { if (i == GetNoOfCluster() - 1) return i; if (dataAct.clustIndexes[i] <= index && dataAct.clustIndexes[i + 1] > index) return i; } return -1; } void SwapCentroid(int medoid, int oldCentr) { Swap(medoid, oldCentr + dataAct.centroidsIndex); } double ComputeAproximateDJ(int oldCentr, int centroid, int *sample, int sSize) { // wakawaka wikiwikiwoooooosh int i = 0; double DJ = 0.0; value v1, v2; GetIthData(centroid, &v2); for (i = 0; i < sSize; i++) { GetIthData(sample[i], &v1); double iFromCentr = data.distance(&v1, &v2); if (GetClusterOf(sample[i]) == oldCentr) { if (iFromCentr >= GetDistSecCentr(i)) DJ += GetDistSecCentr(sample[i]) - GetDistFirstCentr(sample[i]); else DJ += iFromCentr - GetDistFirstCentr(sample[i]); } else if (iFromCentr < GetDistFirstCentr(sample[i])) DJ += iFromCentr - GetDistFirstCentr(sample[i]); } double bestDist = 9999999999; GetIthCentroid( oldCentr, &v2); for (i = 0; i < dataAct.k; ++i) { if ( i == oldCentr) continue; GetIthCentroid(i, &v1); double dist = data.distance(&v1, &v2); if ( dist < bestDist) { bestDist = dist; } } GetIthData( centroid, &v1); double dist = data.distance(&v1, &v2); if (dist >= bestDist) DJ += bestDist; else DJ += dist; return DJ; return -1; } double ComputeDJ(int oldCentr, int centroid) { // wakawaka wikiwikiwoooooosh int i = 0; double DJ = 0.0; value v1, v2; for (i = 0; i < GetDataSize() - dataAct.k; i++) { GetIthData(i, &v1); GetIthData(centroid, &v2); double iFromCentr = data.distance(&v1, &v2); if (GetClusterOf(i) == oldCentr) { if (iFromCentr >= GetDistSecCentr(i)) DJ += GetDistSecCentr(i) - GetDistFirstCentr(i); else DJ += iFromCentr - GetDistFirstCentr(i); } else if (iFromCentr < GetDistFirstCentr(i)) DJ += iFromCentr - GetDistFirstCentr(i); } double bestDist = 9999999999; int bestPos = -1; GetIthCentroid( oldCentr, &v2); for (i = 0; i < dataAct.k; ++i) { if ( i == oldCentr) continue; GetIthCentroid(i, &v1); double dist = data.distance(&v1, &v2); if ( dist < bestDist) { bestDist = dist; bestPos = i; } } GetIthData( centroid, &v1); double dist = data.distance(&v1, &v2); if (dist >= bestDist) DJ += bestDist; else DJ += dist; return DJ; return -1; } void InitAssigned() { int i; for (i = 0; i < GetDataSize(); ++i){ value* pointVal = &dataAct.data[i]; clustExtra* val; val = 
pointVal->clustExtra; val->clust = -1; val->clustDist = -1; val->flagAssign = 0; val->secClust = -1; val->secClustDist = -1; val->secFlagAssign = -1; val->secFlagAssign = 0; } } double MinDistCendroids() { int i,j; double dist,minDist = 99999999999; value centr1,centr2; for(i=0; i<dataAct.k; i++){ GetIthCentroid(i,&centr1); for(j=0;j<dataAct.k; j++){ if(i==j) continue; GetIthCentroid(j,&centr2); dist = data.distance(&centr1,&centr2); if(dist < minDist) minDist = dist; } } return minDist; } double MaxDistCendroids() { int i,j; double dist,maxDist = 0; value centr1,centr2; for(i=0; i<dataAct.k; i++){ GetIthCentroid(i,&centr1); for(j=0;j<dataAct.k; j++){ if(i==j) continue; GetIthCentroid(j,&centr2); dist = data.distance(&centr1,&centr2); if(dist > maxDist) maxDist = dist; } } return maxDist; } int AssignLSH(value *v, int dist, int clustNum, int loop){ clustExtra* extra = v->clustExtra; //mark 1st assigned int ret = extra->flagAssign && extra->secFlagAssign; if(extra->flagAssign == 0){ extra->clust = clustNum; extra->clustDist = dist; extra->flagAssign = 1; return 0; } //make second first and first dist/clustNum if(extra->clustDist > dist){ extra->secClust = extra->clust; extra->secClustDist = extra->clustDist; extra->clust = clustNum; extra->clustDist = dist; extra->secFlagAssign = 1; return !ret; } if(extra->secFlagAssign == 0){ if(extra->clust!=clustNum){ extra->secClust = clustNum; extra->secClustDist = dist; extra->secFlagAssign = 1; return !ret; } return 0; } if(extra->secClustDist > dist){ extra->clust = clustNum; extra->clustDist = dist; } return !ret; } void GetScoreMatrix(int item, Rating* rating){ int i; for(i=0; i<GetDataSize(); ++i){ rating[i].rate = data.getScore(i,item); rating[i].id = data.getId(i,item); } } void SetScores(int item, Rating* rating){ int i; for ( i = 0; i < GetDataSize(); ++i) { data.setScore(i,item, (int)rating[i].rate); } } void DeleteItems(int item){ int i; for ( i = 0; i < GetDataSize(); ++i) { data.deleteItem(i,item); } } double AvgDistToClust(int index, int clust){ int min = dataAct.clustIndexes[clust]; int max; if(clust == dataAct.k-1) max = dataAct.centroidsIndex; else max = dataAct.clustIndexes[clust+1]; int i; value v1, v2; GetIthData( index, &v1); double sumDist = 0.0; for (i = min; i < max; ++i ) { GetIthData( i, &v2); sumDist += data.distance( &v1, &v2); } GetIthCentroid(clust , &v2); sumDist += data.distance( &v1, &v2); //if ( max == min) //return -1; //printf("size of Cluster %d %d\n", clust, max - min); //printf("%f\n", sumDist); return sumDist/(max - min+1); } double Silhouette(FILE* output){ double* a,*b,*s, *sAvg; double sTotal = 0; a = malloc((GetDataSize()-dataAct.k)*sizeof(double)); b = malloc((GetDataSize()-dataAct.k)*sizeof(double)); s = malloc((GetDataSize()-dataAct.k)*sizeof(double)); sAvg = malloc(dataAct.k*sizeof(double)); int i; for(i=0;i<dataAct.k;i++){ sAvg[i] = 0; } #pragma omp parallel for for(i=0; i<GetDataSize()-dataAct.k; i++){ a[i] = AvgDistToClust(i, GetClusterOf(i)); double minAvgClusterDist = 9999999999; int j; for (j = 0; j < GetNoOfCluster(); ++j) { if ( j == GetClusterOf(i)) continue; double avgClusterDist = AvgDistToClust(i, j); if ( avgClusterDist < minAvgClusterDist) minAvgClusterDist = avgClusterDist; } b[i] = minAvgClusterDist; //printf("%f, %f\n", b[i], a[i]); if(a[i] > b[i]) s[i] = b[i]/a[i] - 1; else if(a[i] < b[i]) s[i] = 1- a[i]/b[i]; else s[i] = 0; //sTotal += s[i]; } for (i = 0; i < GetDataSize()-dataAct.k; ++i) { sTotal += s[i]; sAvg[GetClusterOf(i)]+=s[i]; } // fprintf(output,"Silhouette: ["); // 
for(i=0;i<dataAct.k;i++){ // if(i>0) // fprintf(output,","); // fprintf(output,"%2f",sAvg[i]/GetClustSize(i)); // } sTotal = sTotal/(GetDataSize()-dataAct.k); // fprintf(output,",%2f]\n",sTotal); free(a); free(b); free(s); free(sAvg); return sTotal; } void DestroyClusters(){ free(dataAct.clustIndexes ); dataAct.k=0; dataAct.centroidNum=0; dataAct.centroidsIndex=dataAct.dataSize; } void DestroyData(){ int i; for (i = 0; i < GetDataSize(); ++i) { value v; GetIthData(i, &v); if(v.name != NULL) free(v.name); if(v.clustExtra != NULL) free(v.clustExtra); free(v.content); } free(dataAct.data); free(dataAct.clustIndexes); } void SetDataSize(int size) { dataAct.dataSize = size; } int GetAvgClustSize(){ int i,sum = 0;; for(i=0; i< dataAct.k; ++i){ sum+= GetClustSize(i); } return sum/dataAct.k; } int GetDataSize() { return dataAct.dataSize; } void GetIthData(int i, value* v) { memcpy(v, &dataAct.data[i], sizeof(value)); } void GetIthCentroid(int i, value* v) { if (i > dataAct.k) return; memcpy(v, &dataAct.data[i + dataAct.centroidsIndex], sizeof(value)); } int GetNoOfCluster() { return dataAct.k; } void SetNoOfCluster(int k){ dataAct.k = k; } void PrintData() { int i; for (i = 0; i < GetDataSize(); i++) { data.print(&dataAct.data[i]); } } void parseData(FILE* file, int kLSH) { dataAct.data = NULL; dataAct.clustIndexes = NULL; dataAct.centroidNum = 0; dataAct.k = 0; data.readData(&dataAct.data, file, kLSH); dataAct.centroidsIndex = dataAct.dataSize; } void ParseConfig(FILE* file, int* confNums){ char* lineBuff = NULL; char secBuff1[256]; char secBuff2[256]; int temp[5]; size_t lineSize = 0; size_t lineLen = 0; int i=0,count=1; memset(secBuff1, '\0', sizeof(secBuff1)); memset(secBuff2, '\0', sizeof(secBuff2)); for(i=0; i<5; i++) confNums[i] = 0; while ((lineLen = getline(&lineBuff, &lineSize, file)) != -1) { count++; if (sscanf(lineBuff, "%s %s", secBuff1, secBuff2) < 2) { perror("Malformed file - Exiting...\n"); exit(-1); } if(!strcmp(secBuff1,"number_of_clusters:")){ confNums[0] = 1; temp[0] = atoi(secBuff2); } if(!strcmp(secBuff1,"number_of_hash_functions:")){ confNums[1] = 1; temp[1] = atoi(secBuff2); } if(!strcmp(secBuff1,"number_of_hash_tables:")){ confNums[2] = 1; temp[2] = atoi(secBuff2); } if(!strcmp(secBuff1,"clarans_set_fraction:")){ confNums[3] = 1; temp[3] = atoi(secBuff2); } if(!strcmp(secBuff1,"clarans_iterations:")){ confNums[4] = 1; temp[4] = atoi(secBuff2); } } for(i = 0; i<5; i++){ if(confNums[i] == 1) confNums[i] = temp[i]; else{ if(i == 1) confNums[i] = 4; if(i == 2) confNums[i] = 5; if(i == 4) confNums[i] = 2; } } free(lineBuff); } int GetDataOfCluster(int clust, value*** vptr){ if ( clust > dataAct.k) return -1; int min = dataAct.clustIndexes[clust]; int max; if(clust == dataAct.k-1) max = dataAct.centroidsIndex; else max = dataAct.clustIndexes[clust+1]; *vptr = malloc(sizeof(value*)*(max-min)); int i; for (i = min; i < max; ++i){ // printf("%d %d %d\n",i,min,max); (*vptr)[i-min] = &(dataAct.data[i]); } return max - min; } void specifyDataset(FILE* file) { char* lineBuff = NULL; char secBuff1[256]; char secBuff2[256]; size_t lineSize = 0; size_t lineLen = 0; memset(secBuff1, '\0', sizeof(secBuff1)); memset(secBuff2, '\0', sizeof(secBuff2)); if ((lineLen = getline(&lineBuff, &lineSize, file)) == -1) { free(lineBuff); lineBuff = NULL; exit(-1); } if (sscanf(lineBuff, "%s %s", secBuff1, secBuff2) < 2) { perror("Malformed file - Exiting...\n"); exit(-1); } if (strcmp(secBuff2, "hamming") == 0) { data.distance = Hamming; data.inRange = InRangeHamming; data.print = PrintDataHamming; 
data.readData = ReadDataHamming; data.readQueries = ReadQueriesHamming; data.G = GHamming; data.getFirstQuery = GetFirstQueryHamming; data.getNextQuery = GetNextQueryHamming; data.initStruct = InitStructHamming; // data.bruteForce = BruteForceANNHamming; // data.bruteForceRange = BruteForceInRangeHamming; data.destroyStruct = DestroyStructHamming; data.destroyInput = DestroyInputHamming; data.destroyValue = DestroyValueHamming; } else if (strcmp(secBuff2, "vector") == 0) { memset(secBuff1, '\0', sizeof(secBuff1)); memset(secBuff2, '\0', sizeof(secBuff2)); if ((lineLen = getline(&lineBuff, &lineSize, file)) == -1) { // something went wrong free(lineBuff); lineBuff = NULL; exit(-1); } if (sscanf(lineBuff, "%s %s", secBuff1, secBuff2) < 2) { perror("Malformed file - Exiting...\n"); exit(-1); } if (strcmp(secBuff2, "euclidean") == 0) { data.distance = Euclidean; data.inRange = InRangeEuclidean; data.print = PrintDataEuclidean; data.G = FEuclidean; data.initStruct = InitStructEuclidean; data.destroyStruct = DestroyStructEuclidean; data.destroyInput = DestroyInputEuclidean; data.destroyValue = DestroyValueEuclidean; } } else if (strcmp(secBuff2, "matrix") == 0) { data.distance = DistanceMatrixDistance; data.inRange = InRangeDistanceMatrix; data.print = printEverything; data.readData = ReadDataDistanceMatrix; data.readQueries = ReadQueriesDistanceMatrix; data.G = GDistanceMatrix; data.getFirst = GetFirstDistanceMatrix; data.getNext = GetNextDistanceMatrix; data.getFirstQuery = GetFirstQueryDistanceMatrix; data.getNextQuery = GetNextQueryDistanceMatrix; data.initStruct = InitStructDistanceMatrix; data.bruteForce = BruteForceANNDistanceMatrix; data.destroyStruct = DestroyStructDistanceMatrix; data.destroyInput = DestroyInputDistanceMatrix; data.destroyValue = DestroyValueDistanceMatrix; } else { printf( "\nWrong metric space\nIt is \"%s\" instead of \"hamming\", " "\"vector\" or \"matrix\"\n\n", secBuff2); free(lineBuff); exit(-1); } free(lineBuff); }
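/*
 * Worked example of the per-point silhouette score computed in Silhouette()
 * above: with a = mean distance to the point's own cluster and b = smallest
 * mean distance to any other cluster, the score is b/a - 1 when a > b,
 * 1 - a/b when a < b, and 0 otherwise, i.e. the usual (b - a) / max(a, b).
 * The helper name silhouette_of_point is illustrative only and is not used
 * elsewhere in this file.
 */
static double silhouette_of_point(double a, double b)
{
  if (a > b)
    return b / a - 1.0;   /* poorly placed: score in (-1, 0) */
  if (a < b)
    return 1.0 - a / b;   /* well placed:   score in (0, 1)  */
  return 0.0;             /* on the border between clusters  */
}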
elemwise_binary_scalar_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file elemwise_binary_scalar_op.h * \brief Function definition of elementwise binary scalar operators */ #ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_ #define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_ #include <mxnet/operator_util.h> #include <vector> #include <utility> #include "../mshadow_op.h" #include "../elemwise_op_common.h" #include "elemwise_unary_op.h" namespace mxnet { namespace op { class BinaryScalarOp : public UnaryOp { /*! \brief Tensor operation against a scalar with a dense result */ template<typename OP, typename DType, typename IType> static void ComputeExDenseResultRsp(mshadow::Stream<cpu> *stream, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray &output) { const double alpha = nnvm::get<double>(attrs.parsed); CHECK_EQ(output.shape(), input.shape()); const int64_t row_count = output.shape()[0]; const int64_t items_per_row = output.shape().Size() / row_count; const DType result_for_zero = OP::Map(DType(0), DType(alpha)); mshadow::Tensor<cpu, 1, DType> input_data = input.data().FlatTo1D<cpu, DType>(stream); mshadow::Tensor<cpu, 1, DType> output_data = output.data().FlatTo1D<cpu, DType>(stream); const int64_t sparse_row_count = input.aux_shape(rowsparse::kIdx).Size(); if (sparse_row_count != row_count) { mshadow::Tensor<cpu, 1, IType> row_indexes = input.aux_data( rowsparse::kIdx).FlatTo1D<cpu, IType>(stream); int64_t input_iter = 0; int64_t output_row = 0; IType next_input_row = 0; while (output_row < row_count) { next_input_row = input_iter < sparse_row_count ? 
int64_t(row_indexes[input_iter]) : row_count; // Split up into blocks of contiguous data and do those together // Do contiguous dense blocks const int64_t dense_block_count = next_input_row - output_row; if (dense_block_count > 0) { MXNET_ASSIGN_REQ_SWITCH(req, Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, cpu>::Launch( stream, items_per_row * dense_block_count, output_data.dptr_ + items_per_row * output_row, result_for_zero); }); output_row += dense_block_count; continue; } // Do contiguous sparse blocks int64_t next_non_contiguous_sparse = input_iter; while (next_non_contiguous_sparse < sparse_row_count - 1) { if (row_indexes[next_non_contiguous_sparse + 1] != row_indexes[next_non_contiguous_sparse] + 1) { break; } ++next_non_contiguous_sparse; } const int64_t sparse_block_count = next_non_contiguous_sparse - input_iter + 1; if (sparse_block_count > 0) { MXNET_ASSIGN_REQ_SWITCH(req, Req, { mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch( stream, items_per_row * sparse_block_count, &output_data.dptr_[items_per_row * output_row], &input_data.dptr_[items_per_row * input_iter], DType(alpha)); }); output_row += sparse_block_count; input_iter += sparse_block_count; continue; } } } else { // All rows exist (eventually we don't have to do complex // things to call GPU kernels because we don't need to access row indices) MXNET_ASSIGN_REQ_SWITCH(req, Req, { mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch( stream, items_per_row * row_count, output_data.dptr_, input_data.dptr_, DType(alpha)); }); } } /*! \brief Tensor operation against a scalar with a dense result */ template<typename OP, typename DType, typename IType> static void ComputeExDenseResultRsp(mshadow::Stream<gpu> *stream, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray &output) { LOG(FATAL) << "NOT IMPLEMENTED"; } /*! \brief Tensor operation against a scalar with a dense result */ template<typename OP, typename DType, typename IType, typename CType> static void ComputeExDenseResultCsr(mshadow::Stream<cpu> *stream, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray &output) { CHECK_EQ(output.shape(), input.shape()); const double alpha = nnvm::get<double>(attrs.parsed); const DType dense_fill_val = OP::Map(DType(0), DType(alpha)); const TBlob column_indexes = input.aux_data(csr::kIdx); const size_t item_count = column_indexes.Size(); // Pre-fill dense with 0-input/output value FillDense<DType>(stream, output.shape().Size(), dense_fill_val, req, output.data().dptr<DType>()); mshadow::Tensor<cpu, 2, DType> out = AsRowise2D<DType>(stream, output.data()); if (item_count) { const DType *in = input.data().dptr<DType>(); const IType *column_indexes_ptr = column_indexes.dptr<IType>(); const auto row_count = static_cast<size_t>(input.shape()[0]); const TBlob row_starts = input.aux_data(csr::kIndPtr); const CType *row_starts_ptr = row_starts.dptr<CType>(); #pragma omp parallel for for (int i = 0; i < static_cast<int>(row_count); ++i) { const bool last_row = i == static_cast<int>(row_count) - 1; // Split up into blocks of contiguous data and do those together const size_t row_item_start_iter = row_starts_ptr[i]; const size_t input_items_this_row = !last_row ? 
static_cast<size_t>(row_starts_ptr[i + 1]) - row_item_start_iter : item_count - row_item_start_iter; if (input_items_this_row) { const IType *this_row_column_indexes = column_indexes_ptr + row_item_start_iter; const DType *row_data_start = in + row_item_start_iter; DType *output_this_row = out[i].dptr_; // More overhead to use OMP for small loops, so don't if (input_items_this_row > 1000) { #pragma omp parallel for for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) { const IType col = this_row_column_indexes[j]; const DType val = row_data_start[j]; output_this_row[col] = OP::Map(val, DType(alpha)); } } else { for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) { const IType col = this_row_column_indexes[j]; const DType val = row_data_start[j]; output_this_row[col] = OP::Map(val, DType(alpha)); } } } } } } /*! \brief Tensor operation against a scalar with a dense result */ template<typename OP, typename DType, typename IType, typename CType> static void ComputeExDenseResultCsr(mshadow::Stream<gpu> *stream, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray &output) { LOG(FATAL) << "NOT IMPLEMENTED"; } template<typename xpu, typename OP, typename DType, typename IType> static void ComputeExDenseResult(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray output) { mshadow::Stream<xpu> *stream = ctx.get_stream<xpu>(); CHECK_EQ(output.storage_type(), kDefaultStorage); switch (input.storage_type()) { case kRowSparseStorage: { ComputeExDenseResultRsp<OP, DType, IType>(stream, attrs, ctx, input, req, output); break; } case kCSRStorage: { MSHADOW_IDX_TYPE_SWITCH(input.aux_data(csr::kIndPtr).type_flag_, CType, { ComputeExDenseResultCsr<OP, DType, IType, CType>(stream, attrs, ctx, input, req, output); }); break; } default: CHECK(false) << "Unsupported sparse storage type"; break; } } public: template<typename xpu, typename OP> static void Compute(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { DCHECK_EQ(inputs.size(), 1); DCHECK_EQ(outputs.size(), 1); using namespace mshadow; using namespace mshadow::expr; Stream<xpu> *s = ctx.get_stream<xpu>(); const double alpha = nnvm::get<double>(attrs.parsed); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch( s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), DType(alpha)); }); }); } template<typename xpu, typename OP> static void ComputeEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { DCHECK_EQ(inputs.size(), 1); DCHECK_EQ(outputs.size(), 1); const auto in_stype = inputs[0].storage_type(); const auto out_stype = outputs[0].storage_type(); if (req[0] == kNullOp) { return; } if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) || (in_stype == kCSRStorage && out_stype == kCSRStorage)) { // csr -> csr, or rsp -> rsp UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>); } else if (out_stype == kDefaultStorage && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { MSHADOW_TYPE_SWITCH(outputs[0].data().type_flag_, DType, { MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, { ComputeExDenseResult<xpu, OP, 
DType, IType>(attrs, ctx, inputs[0], req[0], outputs[0]); }); }); } else { LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs); } } template<typename xpu, typename OP> static void Backward(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; using namespace mshadow::expr; Stream<xpu> *s = ctx.get_stream<xpu>(); const double alpha = nnvm::get<double>(attrs.parsed); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { mxnet::op::mxnet_op::Kernel<mxnet::op::mxnet_op::op_with_req< mxnet::op::mxnet_op::backward_grad<OP>, Req>, xpu>:: Launch(s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>(), DType(alpha)); }); }); } }; #define MXNET_OPERATOR_REGISTER_BINARY_SCALAR(name) \ NNVM_REGISTER_OP(name) \ .set_num_inputs(1) \ .set_num_outputs(1) \ .set_attr_parser([](NodeAttrs* attrs) { \ attrs->parsed = std::stod(attrs->dict["scalar"]); \ }) \ .set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \ .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \ .set_attr<nnvm::FInplaceOption>("FInplaceOption", \ [](const NodeAttrs& attrs){ \ return std::vector<std::pair<int, int> >{{0, 0}}; \ }) \ .add_argument("data", "NDArray-or-Symbol", "source input") \ .add_argument("scalar", "float", "scalar input") } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
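// Sketch of how the pieces above are typically consumed from an operator
// .cc file: the registration macro supplies the common attributes, and
// FCompute / FComputeEx are bound to BinaryScalarOp with a concrete
// mshadow_op functor.  The operator name _plus_scalar and the exact
// attribute set are illustrative; real registrations also add storage-type
// inference and a gradient.
//
//   MXNET_OPERATOR_REGISTER_BINARY_SCALAR(_plus_scalar)
//   .set_attr<FCompute>("FCompute<cpu>",
//                       BinaryScalarOp::Compute<cpu, mshadow_op::plus>)
//   .set_attr<FComputeEx>("FComputeEx<cpu>",
//                         BinaryScalarOp::ComputeEx<cpu, mshadow_op::plus>);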
convolution_7x7_pack1to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv7x7s2_pack1to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = 49; // im2col Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator); { const int gap = w * 2 - outw * 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); signed char* ptr = bottom_im2col.channel(p); for (int u = 0; u < 7; u++) { for (int v = 0; v < 7; v++) { const signed char* sptr = img.row<const signed char>(u) + v; for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { ptr[0] = sptr[0]; ptr[1] = sptr[2]; ptr[2] = sptr[4]; ptr[3] = sptr[6]; sptr += 8; ptr += 4; } for (; j + 1 < outw; j += 2) { ptr[0] = sptr[0]; ptr[1] = sptr[2]; sptr += 4; ptr += 2; } for (; j < outw; j++) { ptr[0] = sptr[0]; sptr += 2; ptr += 1; } sptr += gap; } } } } } im2col_sgemm_pack1to4_int8_neon(bottom_im2col, top_blob, kernel, opt); }
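// Scalar reference for the im2col addressing implemented above (no
// unrolling): with a 7x7 kernel and stride 2, kernel tap (u, v) samples
// input element (i*2 + u, j*2 + v) for output pixel (i, j), and the values
// are written out contiguously per (u, v) plane.  This only illustrates the
// layout written into bottom_im2col for one input channel; the function
// name and the flat pointer arithmetic are assumptions, not ncnn API.
static void im2col_7x7s2_reference(const signed char* img, int w,
                                   signed char* col, int outw, int outh)
{
    for (int u = 0; u < 7; u++)
    {
        for (int v = 0; v < 7; v++)
        {
            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    *col++ = img[(i * 2 + u) * w + (j * 2 + v)];
                }
            }
        }
    }
}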
KdTree.c
/* Actually it's not a KD-Tree but a 3D-Tree */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #include "ompfuncs.h" #endif #include "KdTree.h" #include "libgad.h" KdNode * initKdNode (KdNode ** p, KdNode * parent) { *p=(KdNode *) malloc(sizeof(KdNode)); if (*p==NULL) return NULL; (*p)->down=NULL; (*p)->up =NULL; (*p)->parent =parent; (*p)->min=0; (*p)->max=0; (*p)->part=NULL; (*p)->id=-1; (*p)->dim =-1; return *p; } void delKdNode(KdNode **node) { KdNode * tmpNode = *node; if ( (*node) -> id == 0) { fprintf(stderr, "STOP %d! delKdNode error\n", (*node) -> id); exit(1); } if ((*node)->up !=NULL) delKdNode(&((*node)->up)); if ((*node)->down!=NULL) delKdNode(&((*node)->down)); if ((*node)->parent!=NULL) { if ( (*node)->parent->up == (*node) ) { (*node)->parent->up = NULL; } else if ( (*node)->parent->down == (*node) ) { (*node)->parent->down = NULL; } else { fprintf(stderr, "There is something wrong with your tree!\n"); // fprintf(stderr, "%d %d\n", (*node)->id, (*node)->parent->id); // fprintf(stderr, "%f %d\n", (*node)->parent->min, (*node)->parent->dim); // if ( (*node)->parent->part == NULL ) // fprintf(stderr, "kein parent part\n"); // fprintf(stderr, "%f %d\n", (*node)->part->pos[0], (*node)->part->id); // fprintf(stderr, "%d\n", checkKdTree(*node)); exit(1); } } free(tmpNode); tmpNode=NULL; } #ifndef _OPENMP void buildKdTree(KdNode * root, gadpart * partarr, unsigned int numpart, short int dim) { if (numpart>1) { extern int search_dim; search_dim=dim; qsort(partarr, numpart, sizeof(gadpart), cmp_pos); int median=numpart/2; root->part=&(partarr[median]); root->id=(partarr[median].id); root->min=partarr[0].pos[dim]; root->max=partarr[numpart-1].pos[dim]; root->dim=dim; if ((median)>0) { if (initKdNode(&(root->down),root)==NULL) {fprintf(stderr,"failed to allocate memory\n");exit(1);} buildKdTree(root->down, &(partarr[0]), (median), (dim+1)%3); } if ((numpart-(median+1))>0) { if (initKdNode(&(root->up), root)==NULL) {fprintf(stderr,"failed to allocate memory\n");exit(1);} buildKdTree(root->up , &(partarr[median+1]), (numpart-(median+1)), (dim+1)%3); } } else { root->part=&(partarr[0]); root->id=(partarr[0].id); root->min=partarr[0].pos[dim]; root->max=partarr[0].pos[dim]; root->dim=dim; } } #else void buildKdTree(KdNode * root, gadpart * partarr, unsigned int numpart, short int dim) { if (numpart>1) { void *a, *b; int median; switch (dim) { case 0: median=partarray(partarr, numpart, sizeof(gadpart), cmp_x, &a, &b); break; case 1: median=partarray(partarr, numpart, sizeof(gadpart), cmp_y, &a, &b); break; case 2: median=partarray(partarr, numpart, sizeof(gadpart), cmp_z, &a, &b); break; } gadpart *min=(gadpart *)a; gadpart *max=(gadpart *)b; root->part=&(partarr[median]); root->id=(partarr[median].id); root->min=min->pos[dim]; root->max=max->pos[dim]; if ((max->pos[dim] == partarr[median].pos[dim]) && (median>2)) { int j; for (j=median; j<numpart; j++) { if (partarr[j].pos[dim] < partarr[median].pos[dim]) { printf("lower than median\n"); exit(1); } if (partarr[j].pos[dim] > max->pos[dim]) { printf("larger than max\n"); exit(1); } } } root->dim=dim; #pragma omp parallel sections { #pragma omp section if ((median)>0) { if (initKdNode(&(root->down),root)==NULL) {fprintf(stderr,"failed to allocate memory\n");exit(1);} buildKdTree(root->down, partarr, (median), (dim+1)%3); } #pragma omp section if ((numpart-(median+1))>0) { if (initKdNode(&(root->up), root)==NULL) {fprintf(stderr,"failed to allocate memory\n");exit(1);} 
buildKdTree(root->up , &(partarr[median+1]), (numpart-(median+1)), (dim+1)%3); } } } else { root->part=&(partarr[0]); root->id=(partarr[0].id); root->min=partarr[0].pos[dim]; root->max=partarr[0].pos[dim]; root->dim=dim; } } #endif //_OPENMP //returns distance of fltarr to a Tree-node, 0 if inside double distKdNode(fltarr pos, KdNode * node) { fltarr min, max; short int dim = node->dim; min[dim]= node->min; max[dim]= node->max; if (node->parent!=NULL) { dim = node->parent->dim; min[dim]=node->parent->min; max[dim]=node->parent->max; if ( node->parent->up == (node)) { min[dim] = node->parent->part->pos[dim]; } else { max[dim] = node->parent->part->pos[dim]; } if (node->parent->parent!=NULL) { dim = node->parent->parent->dim; min[dim]=node->parent->parent->min; max[dim]=node->parent->parent->max; if ( node->parent->parent->up == (node->parent)) { min[dim] = node->parent->parent->part->pos[dim]; } else { max[dim] = node->parent->parent->part->pos[dim]; } return distbox_nopb(pos, min, max); } else { return 0; // double dist; // dist =SQR(ABS(pos[dim] - node->part->pos[dim])); // double tmp = MIN(ABS(pos[node->parent->dim] - node->parent->part->pos[node->parent->dim]), ABS(pos[node->parent->dim] - node->part->pos[node->parent->dim])); // dist+=SQR(tmp); // return sqrt(dist); } } else return 0; } unsigned int checkKdTree(KdNode * root) { float pos = root->part->pos[root->dim]; float dist = distKdNode( root->part->pos, root); int up = 0; if (root->parent != NULL) { if (root->parent->up == root) { up = 1; } int dim = root->parent->dim; pos = root->part->pos[dim]; if ( ( (up) && ( pos < root-> parent->part-> pos[dim] ) ) || ( (!up) && ( pos > root-> parent->part-> pos[dim] ) ) || (dist > 0)) { KdNode *tmp=root; int lvls=0; while (tmp->parent != NULL) { tmp = tmp->parent; lvls++; } fprintf(stderr, "particle out of bounds..%d...%f...%d!\n",root->part->id, dist, lvls); fprintf(stderr, "parent..%f...pos... %f...up....%d!\n",root->parent->part->pos[dim], pos, up); fprintf(stderr, "pos... 
%f %f %f\n",root->part->pos[0],root->part->pos[1],root->part->pos[2]); fprintf(stderr, "min %f max %f dim %d\n",root->min,root->max,root->dim); fprintf(stderr, "min %f max %f dim %d parent %f\n",root->parent->min,root->parent->max,root->parent->dim, root->parent->part->pos[root->parent->dim]); fprintf(stderr, "min %f max %f dim %d parent %f\n",root->parent->parent->min,root->parent->parent->max,root->parent->parent->dim, root->parent->parent->part->pos[root->parent->parent->dim]); if (root->up != NULL) fprintf(stderr, "up\n"); if (root->down != NULL) fprintf(stderr, "down\n"); exit(1); } } if (((root->up)!=NULL) && ((root->down)!=NULL)) return (checkKdTree(root->up)+checkKdTree(root->down)+1); if ((root->up)!=NULL) return (checkKdTree(root->up)+1); if ((root->down)!=NULL) return (checkKdTree(root->down)+1); return 1; } double distKdNodePB(fltarr pos, KdNode * node) { fltarr min, max; short int dim = node->dim; min[dim]= node->min; max[dim]= node->max; if (node->parent!=NULL) { min[node->parent->dim]=node->parent->min; max[node->parent->dim]=node->parent->max; if (node->parent->parent!=NULL) { min[node->parent->parent->dim]=node->parent->parent->min; max[node->parent->parent->dim]=node->parent->parent->max; return distbox(pos, min, max); } else { return 0; // double dist; // dist =SQR(ABS(pos[dim] - node->part->pos[dim])); // double tmp = MIN(ABS(pos[node->parent->dim] - node->parent->part->pos[node->parent->dim]), ABS(pos[node->parent->dim] - node->part->pos[node->parent->dim])); // dist+=SQR(tmp); // return sqrt(dist); } } else return 0; } //Find Nearest Neighbour of a particle KdNode * findNN(KdNode * root, gadpart * part) { double dist=-1; return KdNeighbor(root, part, &dist); } KdNode * KdNeighbor(KdNode * root, gadpart * part, double * dist) { double distr=distance_nopb(root->part->pos, part->pos); if (((distr < *dist) || (*dist<=0)) && (distr>0)) *dist=distr; short int dim = root->dim; fltarr min, max; if (distr==0) distr=root->max - root->min; double distup=0; double distdown=0; if (root->up !=NULL) distup =distKdNode(part->pos, root->up); if (root->down!=NULL) distdown=distKdNode(part->pos, root->down); if ((root->up!=NULL) && (root->down!=NULL)) { // printf("%f %f %f\n", distup, distdown, distr); if (((distup < *dist ) && (distdown < *dist )) || (*dist<=0)) { KdNode *a=KdNeighbor(root->up , part, dist); KdNode *b=KdNeighbor(root->down, part, dist); // (Vorsicht, glaub noch nicht, dass das stimmt) scheint zu funktionieren double dista=distance_nopb(a->part->pos, part->pos); double distb=distance_nopb(b->part->pos, part->pos); // printf("%f %f %f %f\n",distr, dista, distb, *dist); if (distr==0) distr=dista+distb; if (dista==0) dista=distr+distb; if (distb==0) distb=dista+distr; if ((dista<distb) && (dista<distr)) return a; if ((distb<dista) && (distb<distr)) return b; return root; } else if (distup < *dist ) { KdNode *a=KdNeighbor(root->up , part, dist); double dista=distance_nopb(a->part->pos, part->pos); if (dista < distr) return a; return root; } else if (distdown < *dist ) { KdNode *b=KdNeighbor(root->down , part, dist); double distb=distance_nopb(b->part->pos, part->pos); if (distb < distr) return b; return root; } } else if (root->up !=NULL) { KdNode *a=KdNeighbor(root->up , part, dist); double dista=distance_nopb(a->part->pos, part->pos); if (dista < distr) return a; return root; } else if (root->down!=NULL) { KdNode *b=KdNeighbor(root->down , part, dist); double distb=distance_nopb(b->part->pos, part->pos); if (distb < distr) return b; return root; } return root; } //find k 
nearest Neighbours, returns distance of farest found neighbour double findkNN(KdNode * root, gadpart * part, double dist, gadpart_dist ** result, int k) { unsigned int num=0, size=0, iter=0; if (dist==0) return 0; while (num<k) { if (num!=0) free(*result); num=0; size=0; findparts(root, part->pos, dist, result, &num, &size); dist+=dist; iter++; if (iter > MAX_ITERATIONS) { fprintf(stderr, "findkNN failed!!\n"); free(*result); return 0; } } *result=realloc(*result, num*sizeof(gadpart_dist)); qsort(*result, num, sizeof(gadpart_dist),cmp_dist); *result=realloc(*result, k*sizeof(gadpart_dist)); return (*result)[k-1].dist; } //find particles closer than dist to pos -> stored in *result, numpart void findparts(KdNode * root, fltarr pos, double dist, gadpart_dist ** result, unsigned int * numpart, unsigned int * size) { double distup=0; double distdown=0; if (root->up !=NULL) { distup =distKdNode(pos, root->up); if (distup < dist) findparts(root->up , pos, dist, result, numpart, size); } if (root->down!=NULL) { distdown=distKdNode(pos, root->down); if (distdown < dist) findparts(root->down, pos, dist, result, numpart, size); } double distance=distance_nopb(pos, root->part->pos); if ((distance < dist) && (distance>0)) { if (!(*numpart)) { *result = (gadpart_dist *) malloc (PBUFF*sizeof(gadpart_dist)); if (*result==NULL) {fprintf(stderr,"failed to allocate memory\n");exit(1);} *size=PBUFF; *numpart=1; } else { (*numpart)++; if (*numpart>=*size) { *size+=PBUFF; *result=realloc(*result, (*size)*sizeof(gadpart_dist)); } } // memcpy(&((*result)[(*numpart)-1].part), root->part, sizeof(gadpart)); cpygadpart(&((*result)[(*numpart)-1].part) , root->part); (*result)[(*numpart)-1].dist=distance_nopb(root->part->pos, pos); // (*result)[(*numpart)-1]=root->part; } return ; } void findpartsPB(KdNode * root, fltarr pos, double dist, gadpart_dist ** result, unsigned int * numpart, unsigned int * size) { double distup=0; double distdown=0; if (root->up !=NULL) { distup =distKdNodePB(pos, root->up); if (distup < dist) findpartsPB(root->up , pos, dist, result, numpart, size); } if (root->down!=NULL) { distdown=distKdNodePB(pos, root->down); if (distdown < dist) findpartsPB(root->down, pos, dist, result, numpart, size); } double Dist=distance(pos, root->part->pos); if ((Dist < dist) && (Dist>0)) { if (!(*numpart)) { *result = (gadpart_dist *) malloc (PBUFF*sizeof(gadpart_dist)); if (*result==NULL) {fprintf(stderr,"failed to allocate memory\n");exit(1);} *size=PBUFF; *numpart=1; } else { (*numpart)++; if (*numpart>=*size) { *size+=PBUFF; *result=realloc(*result, (*size)*sizeof(gadpart_dist)); } } // memcpy(&((*result)[(*numpart)-1].part), root->part, sizeof(gadpart)); cpygadpart(&((*result)[(*numpart)-1].part) , root->part); (*result)[(*numpart)-1].dist=distance(root->part->pos, pos); // (*result)[(*numpart)-1]=root->part; } return ; } void findGadpartsPB(KdNode * root, fltarr pos, double dist, gadpart *** result, int * numpart, unsigned int * size) { // printf("!"); double distup=0; double distdown=0; if (root->up !=NULL) { distup =distKdNodePB(pos, root->up); // printf(" du%f ", distup); if (distup < dist) findGadpartsPB(root->up , pos, dist, result, numpart, size); } if (root->down!=NULL) { distdown=distKdNodePB(pos, root->down); // printf(" dd%f ", distdown); if (distdown < dist) findGadpartsPB(root->down, pos, dist, result, numpart, size); } double Dist=distance(pos, root->part->pos); if ((Dist < dist) && (Dist>0)) { if (!(*numpart)) { *result = (gadpart **) malloc (PBUFF*sizeof(gadpart*)); if (*result==NULL) 
{fprintf(stderr,"failed to allocate memory\n");exit(1);} *size=PBUFF; *numpart=1; } else { (*numpart)++; if (*numpart>=*size) { *size+=PBUFF; *result=realloc(*result, (*size)*sizeof(gadpart*)); } } // memcpy(&((*result)[(*numpart)-1].part), root->part, sizeof(gadpart)); // cpygadpart(&((*result)[(*numpart)-1].part) , root->part); // (*result)[(*numpart)-1].dist=distance(root->part->pos, pos); // (*result)[(*numpart)-1]=root->part; (*result)[(*numpart)-1]=(root->part); } return ; } void findGadparts(KdNode * root, fltarr pos, double dist, gadpart *** result, int * numpart, unsigned int * size) { // printf("!"); double distup=0; double distdown=0; if (root->up !=NULL) { distup =distKdNode(pos, root->up); // printf(" du%f ", distup); if (distup < dist) findGadparts(root->up , pos, dist, result, numpart, size); } if (root->down!=NULL) { distdown=distKdNode(pos, root->down); // printf(" dd%f ", distdown); if (distdown < dist) findGadparts(root->down, pos, dist, result, numpart, size); } double Dist=distance_nopb(pos, root->part->pos); if ((Dist < dist) && (Dist>0)) { if (!(*numpart)) { *result = (gadpart **) malloc (PBUFF*sizeof(gadpart*)); if (*result==NULL) {fprintf(stderr,"failed to allocate memory\n");exit(1);} *size=PBUFF; *numpart=1; } else { (*numpart)++; if (*numpart>=*size) { *size+=PBUFF; *result=realloc(*result, (*size)*sizeof(gadpart*)); } } // memcpy(&((*result)[(*numpart)-1].part), root->part, sizeof(gadpart)); // cpygadpart(&((*result)[(*numpart)-1].part) , root->part); // (*result)[(*numpart)-1].dist=distance(root->part->pos, pos); // (*result)[(*numpart)-1]=root->part; (*result)[(*numpart)-1]=(root->part); } return ; } void findNewGadparts(KdNode * root, fltarr pos, double dist, gadpart *** result, int * numpart, unsigned int * size) { // printf("!"); double distup=0; double distdown=0; if (root->up !=NULL) { distup =distKdNode(pos, root->up); // printf(" du%f ", distup); if (distup < dist) findNewGadparts(root->up , pos, dist, result, numpart, size); } if (root->down!=NULL) { distdown=distKdNode(pos, root->down); // printf(" dd%f ", distdown); if (distdown < dist) findNewGadparts(root->down, pos, dist, result, numpart, size); } double Dist=distance_nopb(pos, root->part->pos); if ((Dist < dist) && (Dist>0)) { gadpart** fnd = NULL; fnd = bsearch(&(root -> part), (*result), *numpart, sizeof(gadpart*), cmp_pointer_id); if ( fnd != NULL ) return; if (!(*numpart)) { *result = (gadpart **) malloc (PBUFF*sizeof(gadpart*)); if (*result==NULL) {fprintf(stderr,"failed to allocate memory\n");exit(1);} *size=PBUFF; *numpart=1; } else { (*numpart)++; // printf("%d ", *numpart); if (*numpart>=*size) { *size+=PBUFF; *result=realloc(*result, (*size)*sizeof(gadpart*)); } } // memcpy(&((*result)[(*numpart)-1].part), root->part, sizeof(gadpart)); // cpygadpart(&((*result)[(*numpart)-1].part) , root->part); // (*result)[(*numpart)-1].dist=distance(root->part->pos, pos); // (*result)[(*numpart)-1]=root->part; int i = *numpart-2; while ( (i>=0) && ( root->part->id < (*result)[i]->id ) ) { (*result)[i+1] = (*result)[i]; i--; } // printf("insert: %d | %d\n", *numpart-i, root->part->id); (*result)[i+1]=(root->part); // if (i<*numpart-3) // printf("%d %d %d\n", (*result)[i]->id, (*result)[i+1]->id, (*result)[i+2]->id); // else // printf("%d %d\n", (*result)[i]->id, (*result)[i+1]->id); // (*result)[(*numpart)-1]=(root->part); } return ; } //find FOF-group around pos, dist = linking length static double maxdist = 0; static void FindFOF(KdNode * root, fltarr pos, double dist, gadpart *** result, int * 
numpart, unsigned int * size) { double distup=0; double distdown=0; int i; if (root->up !=NULL) { distup =distKdNode(pos, root->up); // printf(" du%f ", distup); if (distup < ( dist + maxdist )) FindFOF(root->up , pos, dist, result, numpart, size); } if (root->down!=NULL) { distdown=distKdNode(pos, root->down); // printf(" dd%f ", distdown); if (distdown < ( dist + maxdist )) FindFOF(root->down, pos, dist, result, numpart, size); } double Dist=distance_nopb(pos, root->part->pos); if ((Dist > maxdist + dist) || ( root -> id == -1) ) return; else { gadpart **fnd; fnd = bsearch(&(root -> part), (*result), *numpart, sizeof(gadpart*), cmp_pointer_id); if (( fnd != NULL) )// && (root -> parent !=NULL ) )//&& (root -> parent ->id)) { root -> id = -1; // printf("fnd: %d %d %d %d\n", root -> id, root -> part -> id, (*fnd) -> id, root -> parent ->id); if (root -> parent !=NULL ) if( (root -> down == NULL) && (root -> up == NULL) && (root -> parent != NULL) ) { // if (root -> parent -> down -> id == root -> id) delKdNode( &(root -> parent -> down ) ); // if (root -> parent -> up -> id == root -> id) delKdNode( &(root -> parent -> up ) ); // printf("*%d %d %d\n", root -> id, root ->parent -> id, root -> parent -> dim); fflush (stdout); if ((root -> parent -> down != NULL) && (root -> parent -> down -> id == root -> id)) { delKdNode( &( root -> parent -> down ) ); } else if ( (root -> parent -> up != NULL) && (root -> parent -> up -> id == root -> id)) { delKdNode( &(root -> parent -> up ) ); } } return; } } int cnt=0; // for ( i = 0; i < *numpart-1; i++ ) // { // if ((*result)[i]->id > (*result)[i+1]->id ) // { // printf("!#!#!\n"); // exit(1); // } // else cnt++; // } // printf("###%d %d### %d\n", cnt, *numpart, (*result)[*numpart-1] -> id); for ( i = 0; i < *numpart; i++ ) { double ddum = distance_nopb( (*result)[i]->pos, root->part->pos); if ((ddum < Dist) ) Dist = ddum; if (Dist < 1e-7) break; if (Dist < dist) { //printf("%d %d %f %f %d %d\n", *numpart, i, Dist, dist, root->id, (*result)[i]->id); break; } } if ((Dist < dist) && (Dist > 1e-7)) { double ddum = distance_nopb(pos, root->part->pos); if (( ddum > maxdist )) maxdist = ddum; if (!(*numpart)) { *result = (gadpart **) malloc (PBUFF*sizeof(gadpart*)); if (*result==NULL) {fprintf(stderr,"failed to allocate memory\n");exit(1);} *size=PBUFF; *numpart=1; } else { (*numpart)++; if (*numpart>=*size) { *size+=PBUFF; *result=realloc(*result, (*size)*sizeof(gadpart*)); } } // memcpy(&((*result)[(*numpart)-1].part), root->part, sizeof(gadpart)); // cpygadpart(&((*result)[(*numpart)-1].part) , root->part); // (*result)[(*numpart)-1].dist=distance(root->part->pos, pos); // (*result)[(*numpart)-1]=root->part; //Insert particle in list // if ((*numpart == 1) || (root -> part -> id > (*result)[*numpart - 2] -> id) ) // (*result)[(*numpart)-1]=(root->part); // else // for ( i = 0; i < (*numpart-1); i++) // { // if (root -> part -> id < (*result)[i] -> id) // { // int j; // for ( j = (*numpart-1); j > i; j--) // { // (*result)[j] = (*result)[j-1]; // } // (*result)[i] = root -> part; // break; // } // } int j = *numpart - 2; while ((j>=0) && (root -> part -> id < (*result)[j] -> id)) { (*result)[j+1] = (*result)[j]; j--; } j++; (*result)[j] = root -> part; // printf("new fof: %d %f \n", root -> id, Dist); if( (root -> down == NULL) && (root -> up == NULL) && (root -> parent != NULL) ) { // KdNode *tempNode = root -> parent ; if ((root -> parent -> down != NULL) && (root -> parent -> down -> id == root -> id)) delKdNode( &( root -> parent -> down ) ); else if 
( (root -> parent -> up != NULL) && (root -> parent -> up -> id == root -> id)) delKdNode( &(root -> parent -> up ) ); // if ((tempNode != NULL) && (tempNode -> parent != NULL)) // FindFOF(tempNode->parent, pos, dist, result, numpart, size); } } return ; } //void findFOF(KdNode * root, fltarr pos, double dist, gadpart *** result, int * numpart, unsigned int * size) //{ // int nold = -1; // if (*numpart >= 1) // { // int i; // maxdist = 0; // for ( i = 0; i < *numpart; i++) // { // double ddum = distance_nopb(pos, (*result)[i] -> pos); // if (ddum > maxdist) maxdist = ddum; // } // } else maxdist = 0; // while (nold != *numpart) // { // nold = *numpart; // //printf("nold %d\n", nold); fflush(stdout); // FindFOF(root, pos, dist, result, numpart, size); // } // return; //} void findFOF(KdNode * root, fltarr pos, double dist, gadpart *** result, int * numpart, unsigned int * size) { int minfac = 0; int *done = NULL; if ((*numpart == 0) || (*result == NULL)) { int fac = 3; findGadparts(root, pos, fac * dist, result, numpart, size); qsort(*result, *numpart, sizeof(gadpart *), cmp_pointer_id ); minfac = 0.92 * fac; } int nold = 0; int j = 0; int i = 0; done = (int *) malloc(sizeof(int) * MAXINT); int ndone = 0; while (nold < *numpart) { nold = *numpart; for ( i = 0; i < *numpart; i++ ) { int* fnd = NULL; int insertid = (*result)[i]->id; fnd = bsearch( &(insertid), done, ndone, sizeof(int), cmp_int ); if ( fnd == NULL ) { double Dist=distance_nopb(pos, (*result)[i]->pos); if (Dist >= minfac * dist) { findNewGadparts(root, (*result)[i]->pos, dist, result, numpart, size); } if (ndone < MAXINT) { j = ndone-1; while ((j>=0) && (done[j] > insertid)) { done[j+1] = done[j]; j--; } j++; done[j] = insertid; ndone++; } } } } free(done); } int checkTree(KdNode *root) { int check=0; if (root->up !=NULL) check+=checkTree(root->up); if (root->down!=NULL) check+=checkTree(root->down); if (root->part->id==root->id) return (check); else return (++check); }
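/* -------------------------------------------------------------------------
 * Illustrative usage sketch (not part of the original file): how the tree
 * API above is typically driven.  It relies only on the gadpart fields this
 * file itself touches (pos[] and id, declared in libgad.h, which is not
 * shown here); the particle array, its length and the initial search radius
 * passed to findkNN are placeholders.
 * ------------------------------------------------------------------------- */
void example_kdtree_usage(gadpart *parts, unsigned int npart)
{
  KdNode *root = NULL;
  if (initKdNode(&root, NULL) == NULL) { fprintf(stderr, "failed to allocate memory\n"); exit(1); }

  /* build a balanced 3d-tree over the particles, splitting along x first */
  buildKdTree(root, parts, npart, 0);

  /* optional sanity check; returns the number of nodes in the tree */
  fprintf(stderr, "tree holds %u nodes\n", checkKdTree(root));

  /* nearest neighbour of the first particle (zero-distance hits, i.e. the
     query particle itself, are skipped inside KdNeighbor) */
  KdNode *nn = findNN(root, &parts[0]);
  fprintf(stderr, "NN of particle %d is %d\n", parts[0].id, nn->part->id);

  /* 8 nearest neighbours; the initial radius is only a first guess,
     findkNN doubles it until at least k particles are found and returns
     the distance of the farthest of the k kept (0 on failure) */
  gadpart_dist *knn = NULL;
  double r8 = findkNN(root, &parts[0], 0.1, &knn, 8);
  if (r8 > 0) {
    fprintf(stderr, "8th neighbour at distance %f\n", r8);
    free(knn);
  }
}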
laplace_acc-omp.c
#include <stdlib.h> #include <stdio.h> #include <math.h> #include <sys/time.h> #include "globals.h" //contains array sizes and needed externs #ifndef _JUSTOMP_ #include "functions_acc.h" #endif #ifndef _JUSTACC_ #include "functions_omp.h" #endif #if defined (_UPDATE_INTERNAL_) || defined (_ALL_INTERNAL_) #if !defined (_PGI_) && !defined (_NVCPP_) #define MAX(X,Y) ((X) > (Y) ? (X) : (Y)) #endif #endif // smallest permitted change in temperature #define MAX_TEMP_ERROR 0.02 // Global arrays //double *restrict T_new; // temperature grid //double *restrict T; // temperature grid from last iteration // initialisation routine void init(double *restrict T, double *restrict T_new); int main(int argc, char *argv[]) { int i, j; // grid indexes int max_iterations; // maximal number of iterations int iteration=1; // iteration double dt=100; // largest change in temperature struct timeval start_time, stop_time, elapsed_time; // timers double *restrict T_new=(double*)malloc(sizeof(double)*(GRIDX+2)*(GRIDY+2)); // temperature grid double *restrict T=(double*)malloc(sizeof(double)*(GRIDX+2)*(GRIDY+2)); // temperature grid from last iteration if(argc!=2) { printf("Usage: %s number_of_iterations\n",argv[0]); exit(1); } else { max_iterations=atoi(argv[1]); } gettimeofday(&start_time,NULL); init(T,T_new); #ifndef _NOPRELOAD_ #if defined(_JUSTOMP_) || defined(_PRELOADOMP_) //#pragma omp target data map(tofrom:T) map(alloc:T_new) //:gcc11:fails in runtime: illegal memory access #pragma omp target data map(tofrom:T[:(GRIDX+2)*(GRIDY+2)]) map(alloc:T_new[:(GRIDX+2)*(GRIDY+2)]) //:gcc11:works #else //#pragma acc data copy(T) create(T_new) //:pgi:fails in compilation: error says "cannot determine bounds" //:gcc11:fails in runtime: illegal memory access #pragma acc data copy(T[:(GRIDX+2)*(GRIDY+2)]) create(T_new[:(GRIDX+2)*(GRIDY+2)]) //:pgi:works //:gcc11:works #endif #endif // simulation iterations while ( dt > MAX_TEMP_ERROR && iteration <= max_iterations ) { /*for ( iteration=1; iteration <=max_iterations; iteration++){ if (dt > MAX_TEMP_ERROR) {*/ // main computational kernel, average over neighbours in the grid #if defined (_AVERAGE_INTERNAL_) || defined (_ALL_INTERNAL_) #ifndef _JUSTOMP_ //#pragma acc kernels // #pragma acc loop independent //together with kernels above //:pgi:justacc:(internal):works (fast:only copies data outside the while) //#pragma acc parallel loop collapse(2) //:pgi:justacc:(internal):works (fast:only copies data outside the while) //:gcc11:justacc:(internal):works (fast:only copies data outside the while) //#pragma acc parallel loop copyin(T) copyout(T_new) collapse(2) //:pgi:justacc:(internal):works (fast:only copies data outside the while) //:gcc11:justacc:(internal):fails at runtime:illegal memory access #pragma acc parallel loop copyin(T[:(GRIDX+2)*(GRIDY+2)]) copyout(T_new[:(GRIDX+2)*(GRIDY+2)]) collapse(2) //:pgi:justacc:(internal):works (fast:only copies data outside the while) //:gcc11:justacc:(internal):works (fast:only copies data outside the while) //#pragma acc parallel loop pcopyin(T[:(GRIDX+2)*(GRIDY+2)]) pcopyout(T_new[:(GRIDX+2)*(GRIDY+2)]) collapse(2) //:pgi:justacc:(internal):works (fast:only copies data outside the while) //:gcc11:justacc:(internal):works (fast:only copies data outside the while) //#pragma acc parallel loop present(T) present(T_new) collapse(2) //:pgi:justacc:(internal):works (fast:only copies data outside the while) //:gcc11:justacc:(internal):fails at runtime:present clause error //#pragma acc parallel loop present(T[:(GRIDX+2)*(GRIDY+2)]) 
present(T_new[:(GRIDX+2)*(GRIDY+2)]) collapse(2) //:pgi:justacc:(internal):works (fast:only copies data outside the while) //:gcc11:justacc:(internal):works (fast:only copies data outside the while) #else //#pragma omp target //:gcc11:justomp:(internal):works (fast:only copies data outside the while) //#pragma omp target map(to:T) map(from:T_new) //:gcc11:justomp:(internal):fails at execution time: illegal memory access #pragma omp target map(to:T[:(GRIDX+2)*(GRIDY+2)]) map(from:T_new[:(GRIDX+2)*(GRIDY+2)]) //:gcc11:justomp:(internal):works (fast:only copies data outside the while) #pragma omp teams distribute parallel for collapse(2) private(i,j) #endif for(i = 1; i <= GRIDX; i++) #ifndef _JUSTOMP_ // #pragma acc loop independent //together with kernels above #endif for(j = 1; j <= GRIDY; j++) T_new[OFFSET(i,j)] = 0.25 * (T[OFFSET(i+1,j)] + T[OFFSET(i-1,j)] + T[OFFSET(i,j+1)] + T[OFFSET(i,j-1)]); #else #ifndef _JUSTOMP_ getAverage_acc(T,T_new); #else getAverage_omp(T,T_new); #endif #endif // reset dt dt = 0.0; // compute the largest change and copy T_new to T #if defined (_UPDATE_INTERNAL_) || (_ALL_INTERNAL_) #ifndef _JUSTACC_ //#pragma omp target map(dt) //#pragma omp target map(tofrom:T,dt) map(to:T_new) #pragma omp target map(tofrom:T[:(GRIDX+2)*(GRIDY+2)],dt) map(to:T_new[:(GRIDX+2)*(GRIDY+2)]) #pragma omp teams distribute parallel for collapse(2) reduction(max:dt) private(i,j) #else //#pragma acc kernels // #pragma acc loop independent //together with kernels above //#pragma acc parallel loop reduction(max:dt) collapse(2) //#pragma acc parallel loop copy(T) copyin(T_new) reduction(max:dt) collapse(2) #pragma acc parallel loop copy(T[:(GRIDX+2)*(GRIDY+2)]) copyin(T_new[:(GRIDX+2)*(GRIDY+2)]) reduction(max:dt) collapse(2) //#pragma acc parallel loop pcopy(T[:(GRIDX+2)*(GRIDY+2)]) pcopyin(T_new[:(GRIDX+2)*(GRIDY+2)]) reduction(max:dt) collapse(2) //#pragma acc parallel loop present(T) present(T_new) reduction(max:dt) collapse(2) //#pragma acc parallel loop present(T[:(GRIDX+2)*(GRIDY+2)]) present(T_new[:(GRIDX+2)*(GRIDY+2)]) reduction(max:dt) collapse(2) #endif for(i = 1; i <= GRIDX; i++){ #ifndef _JUSTACC_ #define papa 0 #else // #pragma acc loop independent //together with kernels above #endif for(j = 1; j <= GRIDY; j++){ #if defined (_PGI_) || defined (_NVCPP_) dt = fmax( fabs(T_new[OFFSET(i,j)]-T[OFFSET(i,j)]), dt); #else dt = MAX( fabs(T_new[OFFSET(i,j)]-T[OFFSET(i,j)]), dt); #endif T[OFFSET(i,j)] = T_new[OFFSET(i,j)]; } } #else #ifndef _JUSTACC_ dt = updateT_omp(T,T_new,dt); #else //dt = updateT_acc(GRIDX,GRIDY,T,T_new,dt); dt = updateT_acc(T,T_new,dt); #endif #endif // periodically print largest change if((iteration % 100) == 0) printf("Iteration %4.0d, dt %f\n",iteration,dt); iteration++; /*}else { break; }*/ } gettimeofday(&stop_time,NULL); timersub(&stop_time, &start_time, &elapsed_time); // measure time printf("Total time was %f seconds.\n", elapsed_time.tv_sec+elapsed_time.tv_usec/1000000.0); return 0; } // initialize grid and boundary conditions void init(double *restrict T, double *restrict T_new){ int i,j; for(i = 0; i <= GRIDX+1; i++){ for (j = 0; j <= GRIDY+1; j++){ T[OFFSET(i,j)] = 0.0; } } // these boundary conditions never change throughout run // set left side to 0 and right to a linear increase for(i = 0; i <= GRIDX+1; i++) { T[OFFSET(i,0)] = 0.0; T[OFFSET(i,GRIDY+1)] = (128.0/GRIDX)*i; } // set top to 0 and bottom to linear increase for(j = 0; j <= GRIDY+1; j++) { T[OFFSET(0,j)] = 0.0; T[OFFSET(GRIDX+1,j)] = (128.0/GRIDY)*j; } }
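/* -------------------------------------------------------------------------
 * Illustrative sketch (not the shipped globals.h): the kernels above only
 * need the grid extents and a flattened 2-D index.  A row-major layout with
 * one halo cell on each side, as guessed below, is consistent with the
 * (GRIDX+2)*(GRIDY+2) allocations and the i+-1 / j+-1 stencil accesses in
 * main(); the real header may define these differently.
 * ------------------------------------------------------------------------- */
#ifndef GRIDX
#define GRIDX 2048                               /* interior points in x (placeholder) */
#endif
#ifndef GRIDY
#define GRIDY 2048                               /* interior points in y (placeholder) */
#endif
#ifndef OFFSET
#define OFFSET(i, j) ((i) * (GRIDY + 2) + (j))   /* row-major index including halo */
#endif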
resize-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file resize-inl.h * \brief image resize operator using opencv and only support bilinear resize * \author Jake Lee */ #ifndef MXNET_OPERATOR_IMAGE_RESIZE_INL_H_ #define MXNET_OPERATOR_IMAGE_RESIZE_INL_H_ #include <mxnet/base.h> #include <vector> #include "../mxnet_op.h" #include "../operator_common.h" #include "image_utils.h" #if MXNET_USE_OPENCV #include <opencv2/opencv.hpp> #endif // MXNET_USE_OPENCV namespace mxnet { namespace op { namespace image { using namespace mshadow; #if MXNET_USE_CUDA template<typename DType, typename T, typename Acctype> void ResizeImplCUDA(Stream<gpu> *s, const T input, const T output); #endif // MXNET_USE_CUDA struct ResizeParam : public dmlc::Parameter<ResizeParam> { nnvm::Tuple<int> size; bool keep_ratio; int interp; DMLC_DECLARE_PARAMETER(ResizeParam) { DMLC_DECLARE_FIELD(size) .set_default(nnvm::Tuple<int>()) .describe("Size of new image. Could be (width, height) or (size)"); DMLC_DECLARE_FIELD(keep_ratio) .describe("Whether to resize the short edge or both edges to `size`, " "if size is give as an integer.") .set_default(false); DMLC_DECLARE_FIELD(interp) .set_default(1) .describe("Interpolation method for resizing. By default uses bilinear interpolation" "Options are INTER_NEAREST - a nearest-neighbor interpolation" "INTER_LINEAR - a bilinear interpolation" "INTER_AREA - resampling using pixel area relation" "INTER_CUBIC - a bicubic interpolation over 4x4 pixel neighborhood" "INTER_LANCZOS4 - a Lanczos interpolation over 8x8 pixel neighborhood" "Note that the GPU version only support bilinear interpolation(1)" " and the result on cpu would be slightly different from gpu." 
"It uses opencv resize function which tend to align center on cpu" "while using contrib.bilinearResize2D which aligns corner on gpu"); } }; // handle the keep ratio param inline SizeParam GetHeightAndWidth(int data_h, int data_w, const ResizeParam& param) { CHECK((param.size.ndim() == 1) || (param.size.ndim() == 2)) << "Input size dimension must be 1 or 2, but got " << param.size.ndim(); int resized_h; int resized_w; if (param.size.ndim() == 1) { CHECK_GT(param.size[0], 0) << "Input size should be greater than 0, but got " << param.size[0]; if (!param.keep_ratio) { resized_h = param.size[0]; resized_w = param.size[0]; } else { if (data_h > data_w) { resized_w = param.size[0]; resized_h = static_cast<int>(data_h * resized_w / data_w); } else { resized_h = param.size[0]; resized_w = static_cast<int>(data_w * resized_h / data_h); } } } else { CHECK_GT(param.size[0], 0) << "Input width should be greater than 0, but got " << param.size[0]; CHECK_GT(param.size[1], 0) << "Input height should be greater than 0, but got " << param.size[1]; resized_h = param.size[1]; resized_w = param.size[0]; } return SizeParam(resized_h, resized_w); } inline bool ResizeShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { // input attrs should only be (h, w, c) or (n, h, w, c) CHECK((in_attrs->at(0).ndim() == 3U) || (in_attrs->at(0).ndim() == 4U)) << "Input image dimension should be 3 or 4 but got " << in_attrs->at(0).ndim(); const auto& ishape = (*in_attrs)[0]; const ResizeParam& param = nnvm::get<ResizeParam>(attrs.parsed); SizeParam size; if (ishape.ndim() == 3) { size = GetHeightAndWidth(ishape[H], ishape[W], param); SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape({size.height, size.width, ishape[C]})); } else { size = GetHeightAndWidth(ishape[kH], ishape[kW], param); SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape({ishape[N], size.height, size.width, ishape[kC]})); } return true; } inline void ResizeImpl(const std::vector<TBlob> &inputs, const std::vector<TBlob> &outputs, const int height, const int width, const int interp, const int input_index = 0, const int output_index = 0) { #if MXNET_USE_OPENCV CHECK_NE(inputs[0].type_flag_, mshadow::kFloat16) << "opencv image mat doesn't support fp16"; CHECK((inputs[0].type_flag_ != mshadow::kInt32) || (inputs[0].type_flag_ != mshadow::kInt64)) << "opencv resize doesn't support int32, int64"; // mapping to opencv matrix element type according to channel const int DTYPE[] = {CV_32F, CV_64F, -1, CV_8U, CV_32S}; if (inputs[0].ndim() == 3) { const int cv_type = CV_MAKETYPE(DTYPE[inputs[0].type_flag_], inputs[0].shape_[C]); cv::Mat buf(inputs[0].shape_[H], inputs[0].shape_[W], cv_type, inputs[0].dptr_); cv::Mat dst(outputs[0].shape_[H], outputs[0].shape_[W], cv_type, outputs[0].dptr_); cv::resize(buf, dst, cv::Size(width, height), 0, 0, interp); CHECK(!dst.empty()); CHECK_EQ(static_cast<void*>(dst.ptr()), outputs[0].dptr_); } else { const int cv_type = CV_MAKETYPE(DTYPE[inputs[0].type_flag_], inputs[0].shape_[kC]); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { cv::Mat buf(inputs[0].shape_[kH], inputs[0].shape_[kW], cv_type, inputs[0].dptr<DType>() + input_index); cv::Mat dst(outputs[0].shape_[kH], outputs[0].shape_[kW], cv_type, outputs[0].dptr<DType>() + output_index); cv::resize(buf, dst, cv::Size(width, height), 0, 0, interp); CHECK(!dst.empty()); CHECK_EQ(static_cast<void*>(dst.ptr()), outputs[0].dptr<DType>() + output_index); }); } #else LOG(FATAL) << "Build with USE_OPENCV=1 for image resize operator."; #endif // 
MXNET_USE_OPENCV } template <typename xpu> inline void Resize(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { CHECK_EQ(outputs.size(), 1U); const ResizeParam& param = nnvm::get<ResizeParam>(attrs.parsed); SizeParam size; if (std::is_same<xpu, gpu>::value) { #if MXNET_USE_CUDA CHECK(param.interp == 1) << "interp should be 1 for using Resize on GPU."; mshadow::Stream<gpu> *s = ctx.get_stream<gpu>(); MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { if (inputs[0].ndim() == 3) { Tensor<gpu, 3, DType> input = inputs[0].get<gpu, 3, DType>(s); Tensor<gpu, 3, DType> output = outputs[0].get<gpu, 3, DType>(s); ResizeImplCUDA<DType, Tensor<gpu, 3, DType>, float>(s, input, output); } else { Tensor<gpu, 4, DType> input = inputs[0].get<gpu, 4, DType>(s); Tensor<gpu, 4, DType> output = outputs[0].get<gpu, 4, DType>(s); ResizeImplCUDA<DType, Tensor<gpu, 4, DType>, float>(s, input, output); } }); #endif // MXNET_USE_CUDA } else if (inputs[0].ndim() == 3) { size = GetHeightAndWidth(inputs[0].shape_[H], inputs[0].shape_[W], param); ResizeImpl(inputs, outputs, size.height, size.width, param.interp); } else { size = GetHeightAndWidth(inputs[0].shape_[kH], inputs[0].shape_[kW], param); const auto batch_size = inputs[0].shape_[N]; const auto input_step = inputs[0].shape_[kH] * inputs[0].shape_[kW] * inputs[0].shape_[kC]; const auto output_step = size.height * size.width * inputs[0].shape_[kC]; #pragma omp parallel for for (auto i = 0; i < batch_size; ++i) { ResizeImpl(inputs, outputs, size.height, size.width, param.interp, i * input_step, i * output_step); } } } } // namespace image } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_IMAGE_RESIZE_INL_H_
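/* -------------------------------------------------------------------------
 * Illustrative sketch (standalone helper, not part of the MXNet header):
 * the keep_ratio branch of GetHeightAndWidth() above maps the *short* edge
 * to `size` and scales the long edge with the same integer division.  For
 * example, a 400x300 (w x h) input with size=224 and keep_ratio=true comes
 * out as 298x224.  The helper name below is made up for the example.
 * ------------------------------------------------------------------------- */
static inline void resize_keep_ratio_sketch(int data_h, int data_w, int size,
                                            int *resized_h, int *resized_w) {
  if (data_h > data_w) {          /* width is the short edge */
    *resized_w = size;
    *resized_h = data_h * size / data_w;   /* integer division, as in the header */
  } else {                        /* height is the short edge */
    *resized_h = size;
    *resized_w = data_w * size / data_h;
  }
}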
GB_binop__minus_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__minus_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__minus_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__minus_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_uint8) // A*D function (colscale): GB (_AxD__minus_uint8) // D*A function (rowscale): GB (_DxB__minus_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__minus_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__minus_uint8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_uint8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_uint8) // C=scalar+B GB (_bind1st__minus_uint8) // C=scalar+B' GB (_bind1st_tran__minus_uint8) // C=A+scalar GB (_bind2nd__minus_uint8) // C=A'+scalar GB (_bind2nd_tran__minus_uint8) // C type: uint8_t // A type: uint8_t // A pattern? 0 // B type: uint8_t // B pattern? 0 // BinaryOp: cij = (aij - bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x - y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_UINT8 || GxB_NO_MINUS_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__minus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__minus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__minus_uint8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__minus_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint8_t alpha_scalar ; uint8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint8_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__minus_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__minus_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__minus_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__minus_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__minus_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = (x - bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__minus_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij - y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x - aij) ; \ } GrB_Info GB (_bind1st_tran__minus_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int 
nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - y) ; \ } GrB_Info GB (_bind2nd_tran__minus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
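//------------------------------------------------------------------------------
// Illustrative user-level sketch (not part of the generated kernel file):
// the kernels above are reached when an application applies the built-in
// MINUS operator to UINT8 matrices through the element-wise API, roughly as
// below.  Error handling is omitted and the matrices are assumed to have been
// created and filled by the caller; the helper name is made up for the example.
//------------------------------------------------------------------------------

void example_minus_uint8_user_level
(
    GrB_Matrix C,           // GrB_UINT8 output
    const GrB_Matrix A,     // GrB_UINT8 input
    const GrB_Matrix B      // GrB_UINT8 input
)
{
    // union of the patterns; MINUS is applied where both entries are present,
    // lone entries are copied through: dispatches to GB (_AaddB__minus_uint8)
    GrB_Matrix_eWiseAdd_BinaryOp (C, NULL, NULL, GrB_MINUS_UINT8, A, B, NULL) ;

    // intersection of the patterns: dispatches to one of the
    // GB (_AemultB_*__minus_uint8) kernels, chosen by the matrix formats
    GrB_Matrix_eWiseMult_BinaryOp (C, NULL, NULL, GrB_MINUS_UINT8, A, B, NULL) ;
}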
bfs_csr_mt.c
#include "graph_defs.h" #include "prefetcher.h" typedef struct bfs_metadata_st { char touched; volatile unsigned long queue_next; } bfs_metadata_t; static volatile unsigned long queue_head = ULONG_MAX; static volatile unsigned long vertex_position = 0; static bfs_metadata_t *metadata; static csr_t * volatile graph; unsigned long MAX_CACHE = ULONG_MAX; long MIN_CACHE = 0; unsigned long visited = 0; void prefetcher_random_callback(unsigned long *laf, unsigned long laf_size, unsigned long ift) { static unsigned long old_hoq = ULONG_MAX; unsigned long current_hoq = ULONG_MAX; static unsigned long ra_depth = 0; static char preload = 0; static long pf_visited = 0; unsigned long entries = 0; /* Fill in inner-loop entries from BFS queue */ /* if ((preload == 0) && (ra_depth > MAX_CACHE)) { preload = 1; current_hoq = ULONG_MAX; } */ current_hoq = old_hoq; if ((current_hoq == ULONG_MAX) || (((signed long) (pf_visited - visited)) > MIN_CACHE)/*|| (ra_depth > MIN_CACHE)*/) { current_hoq = queue_head; pf_visited = visited; // ra_depth = 0; } // if (((signed long)(pf_visited - visited)) > MIN_CACHE) return; /* if(current_hoq != ULONG_MAX) { current_hoq = metadata[current_hoq].queue_next; } */ while (entries != ift && current_hoq != ULONG_MAX) { unsigned long page = graph->index[current_hoq]; unsigned long end = graph->index[current_hoq + 1]; page = page >> (ASSUME_PAGE_SHIFT + 3); /* offset is in bits ! */ end = end >> (ASSUME_PAGE_SHIFT + 3); // if(laf[HASH_MODULO(page, laf_size)] != page) { // laf[HASH_MODULO(page, laf_size)] = page; // for (; page <= end; page++) { // if (entries==ift) break; laf[entries] = page; if (end > page) laf[entries + (2 * laf_size)] = end - page; entries++; // } // } old_hoq = current_hoq; current_hoq = metadata[current_hoq].queue_next; pf_visited++; } ra_depth += entries; } unsigned long prefetcher_sequential_callback(unsigned long* aux_offset) { unsigned long offset = graph->index[vertex_position]; return offset >> (ASSUME_PAGE_SHIFT + 3); } unsigned long alist_entries_seen = 0; // #pragma omp threadprivate(current_vertex) unsigned long total_queue_demands = 0; unsigned long queue_above_threshold = 0; unsigned long queue_length = 0; /* returns number of connected components */ static unsigned long bfs(csr_t *graph, unsigned long start_node) { unsigned long i; unsigned long components = 0; unsigned long queue_tail = ULONG_MAX; unsigned long nq_head = ULONG_MAX; unsigned long nq_tail = ULONG_MAX; char* finished_flag = NULL; unsigned long time_comp, time_giant = 0, id_giant; i = start_node; do { vertex_position = i; if (metadata[i].touched == 0) { CLOCK_START(time_comp); metadata[i].touched = 1; components++; BFS_PUSH(nq_head, nq_tail, i, metadata); queue_length = 1; } else { i++; if (i >= graph->vertex_cnt) i = 0; continue; } while (nq_head != ULONG_MAX) { queue_head = nq_head; queue_tail = nq_tail; nq_head = ULONG_MAX; nq_tail = ULONG_MAX; #pragma omp parallel default(shared) { #pragma omp task default(shared) { while (1) { unsigned long current_vertex; char finished = 0; #pragma omp critical (check_queue) { if (queue_head != ULONG_MAX) { current_vertex = BFS_POP(queue_head, queue_tail, metadata); visited++; } else { current_vertex = ULONG_MAX; } } if (current_vertex == ULONG_MAX) break; //fprintf(stderr, "V %ld %d\n", current_vertex, // omp_get_num_threads()); if (current_vertex != ULONG_MAX) { unsigned long lq_head = ULONG_MAX; unsigned long lq_tail = ULONG_MAX; csr_edge_iterator_t iter; csr_init_edge_iterator(graph, current_vertex, &iter); while (csr_iter_step(graph, 
&iter) == 0) { if (!iter.incoming) { unsigned long target = iter.neighbour; //#pragma omp critical (atomicset) { if (__sync_bool_compare_and_swap(&(metadata[target].touched),0, 1)) { //metadata[target].touched = 1; BFS_PUSH(lq_head, lq_tail, target, metadata); // fprintf(stderr, "T %ld %d\n", target, // omp_get_thread_num()); } } } } #pragma omp critical (stitch) { BFS_STITCH(nq_head, nq_tail, lq_head, lq_tail, metadata); // fprintf(stderr, "%ld %ld %ld %ld\n", nq_head, nq_tail, lq_head, lq_tail); } } } } } } CLOCK_STOP(time_comp); if (time_comp > time_giant) { time_giant = time_comp; id_giant = i; printf("Visited %ld\n", visited); } i = i + 1; if (i >= graph->vertex_cnt) { i = 0; } } while (i != start_node); // fprintf(stderr, "%ld %ld\n", visited, graph->vertex_cnt); assert(visited == graph->vertex_cnt); printf("TIME GIANT COMP %lu\n", time_giant); printf("ID GIANT COMP %lu\n", id_giant); return components; } int main(int argc, char **argv) { unsigned long time_bfs, time_total, components; CLOCK_START(time_total); if (argc < 3) { fprintf(stderr, "Usage %s graph_name root_id\n", argv[0]); exit(-1); } #ifdef PREFETCHER char *env_var; env_var = getenv("CMAX"); if(env_var != NULL) { MAX_CACHE = atol(env_var); } env_var = getenv("CMIN"); if(env_var != NULL) { MIN_CACHE = atol(env_var); } bind_master(); init_prefetcher(prefetcher_random_callback, NULL); // prefetcher_sequential_callback); #endif graph = open_csr(argv[1]); metadata = (bfs_metadata_t*) map_anon_memory(graph->vertex_cnt * sizeof(bfs_metadata_t), "vertex metadata"); //balloon_inflate(); /* Simulate semi-em conditions */ print_mlocked_memory(); unsigned long root_id = atol(argv[2]); assert(root_id < graph->vertex_cnt); /* Perhaps mmap /dev/null instead ? */ memset(metadata, 0, graph->vertex_cnt * sizeof(bfs_metadata_t)); #ifdef PREFETCHER launch_prefetch_thread(graph->fd_calist); #endif struct rusage ru_begin; getrusage(RUSAGE_SELF, &ru_begin); CLOCK_START(time_bfs); components = bfs(graph, root_id); CLOCK_STOP(time_bfs); struct rusage ru_end; getrusage(RUSAGE_SELF, &ru_end); #ifdef PREFETCHER terminate_prefetch_thread(); destroy_prefetcher(); #endif munmap(metadata, graph->vertex_cnt * sizeof(bfs_metadata_t)); close_csr(graph); CLOCK_STOP(time_total); printf("COMPONENTS %lu\n", components); printf("TIME BFS %lu\n", time_bfs); printf("TIME TOTAL %lu\n", time_total); print_rusage_stats(stdout, &ru_begin, &ru_end); printf("F_THRESHOLD %f\n", ((double) queue_above_threshold) / total_queue_demands); return 0; }
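/* -------------------------------------------------------------------------
 * Illustrative sketch (guessed, for clarity only): graph_defs.h is not shown,
 * but the way BFS_PUSH / BFS_POP / BFS_STITCH are used above is consistent
 * with an intrusive singly-linked queue threaded through
 * metadata[].queue_next, with ULONG_MAX as the empty marker.  Plausible
 * definitions are sketched below (BFS_POP as a GCC statement expression);
 * the real macros may differ.
 * ------------------------------------------------------------------------- */
#ifndef BFS_PUSH
#define BFS_PUSH(head, tail, v, md) do {                      \
    (md)[(v)].queue_next = ULONG_MAX;                         \
    if ((head) == ULONG_MAX) (head) = (v);                    \
    else (md)[(tail)].queue_next = (v);                       \
    (tail) = (v);                                             \
  } while (0)

#define BFS_POP(head, tail, md) ({                            \
    unsigned long _v = (head);                                \
    (head) = (md)[_v].queue_next;                             \
    if ((head) == ULONG_MAX) (tail) = ULONG_MAX;              \
    _v; })

#define BFS_STITCH(head, tail, lhead, ltail, md) do {         \
    if ((lhead) != ULONG_MAX) {                               \
      if ((head) == ULONG_MAX) (head) = (lhead);              \
      else (md)[(tail)].queue_next = (lhead);                 \
      (tail) = (ltail);                                       \
    }                                                         \
  } while (0)
#endif /* BFS_PUSH (sketch) */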
vector.h
#ifndef INCLUDED_VECTOR_H #define INCLUDED_VECTOR_H //!< Prevent multiple inclusion #include<vector> #include<assert.h> #include<config.h> /*!\file \brief Declaration/definition of Vector and Matrix classes. Also declarations of functions that perform operations on complex vectors. */ #include <string.h> #include <comms/sysfunc_cps.h> #include <util/data_types.h> #include <util/vector_asm.h> #include <util/verbose.h> #include <util/omp_wrapper.h> #define VEC_INLINE #if 0 #define NOINLINE_MACRO __attribute((noinline)) #else #define NOINLINE_MACRO #endif CPS_START_NAMESPACE const int OMP_CUTOFF=100; class Vector; // forward declaration class Matrix; //------------------------------------------------------------------ // Declarations of some genaral c-style functions that perform // operations on vectors. For these functions there exists // optimized assembly code. //------------------------------------------------------------------ extern "C" { //! vector copy; b = a //void moveMem(void *b, const void *a, int len); inline void moveMem(void *b, const void *a, int len) { #undef PROFILE #ifdef PROFILE double time = -dclock(); #endif memcpy(b, a, len); #ifdef PROFILE time += dclock(); print_flops("","moveMem",len,time); #endif } //void moveFloat(Float *b, const Float *a, int len); inline void moveFloat(Float *b, const Float *a, int len) { if(len> OMP_CUTOFF){ #pragma omp parallel for for(int i =0;i<len;i++) b[i] = a[i]; } else { memcpy(b, a, len*sizeof(Float)); } } //! 3x3 complex matrix multiplication; c = ab #ifndef VEC_INLINE void mDotMEqual(IFloat* c, const IFloat* a, const IFloat* b); #else inline void mDotMEqual(IFloat* c, const IFloat* a, const IFloat* b) { *c = *a * *b - *(a+1) * *(b+1) + *(a+2) * *(b+6) - *(a+3) * *(b+7) + *(a+4) * *(b+12) - *(a+5) * *(b+13); *(c+1) = *a * *(b+1) + *(a+1) * *b + *(a+2) * *(b+7) + *(a+3) * *(b+6) + *(a+4) * *(b+13) + *(a+5) * *(b+12); *(c+2) = *a * *(b+2) - *(a+1) * *(b+3) + *(a+2) * *(b+8) - *(a+3) * *(b+9) + *(a+4) * *(b+14) - *(a+5) * *(b+15); *(c+3) = *a * *(b+3) + *(a+1) * *(b+2) + *(a+2) * *(b+9) + *(a+3) * *(b+8) + *(a+4) * *(b+15) + *(a+5) * *(b+14); *(c+4) = *a * *(b+4) - *(a+1) * *(b+5) + *(a+2) * *(b+10) - *(a+3) * *(b+11) + *(a+4) * *(b+16) - *(a+5) * *(b+17); *(c+5) = *a * *(b+5) + *(a+1) * *(b+4) + *(a+2) * *(b+11) + *(a+3) * *(b+10) + *(a+4) * *(b+17) + *(a+5) * *(b+16); *(c+6) = *(a+6) * *b - *(a+7) * *(b+1) + *(a+8) * *(b+6) - *(a+9) * *(b+7) + *(a+10) * *(b+12) - *(a+11) * *(b+13); *(c+7) = *(a+6) * *(b+1) + *(a+7) * *b + *(a+8) * *(b+7) + *(a+9) * *(b+6) + *(a+10) * *(b+13) + *(a+11) * *(b+12); *(c+8) = *(a+6) * *(b+2) - *(a+7) * *(b+3) + *(a+8) * *(b+8) - *(a+9) * *(b+9) + *(a+10) * *(b+14) - *(a+11) * *(b+15); *(c+9) = *(a+6) * *(b+3) + *(a+7) * *(b+2) + *(a+8) * *(b+9) + *(a+9) * *(b+8) + *(a+10) * *(b+15) + *(a+11) * *(b+14); *(c+10) = *(a+6) * *(b+4) - *(a+7) * *(b+5) + *(a+8) * *(b+10) - *(a+9) * *(b+11) + *(a+10) * *(b+16) - *(a+11) * *(b+17); *(c+11) = *(a+6) * *(b+5) + *(a+7) * *(b+4) + *(a+8) * *(b+11) + *(a+9) * *(b+10) + *(a+10) * *(b+17) + *(a+11) * *(b+16); *(c+12) = *(a+12) * *b - *(a+13) * *(b+1) + *(a+14) * *(b+6) - *(a+15) * *(b+7) + *(a+16) * *(b+12) - *(a+17) * *(b+13); *(c+13) = *(a+12) * *(b+1) + *(a+13) * *b + *(a+14) * *(b+7) + *(a+15) * *(b+6) + *(a+16) * *(b+13) + *(a+17) * *(b+12); *(c+14) = *(a+12) * *(b+2) - *(a+13) * *(b+3) + *(a+14) * *(b+8) - *(a+15) * *(b+9) + *(a+16) * *(b+14) - *(a+17) * *(b+15); *(c+15) = *(a+12) * *(b+3) + *(a+13) * *(b+2) + *(a+14) * *(b+9) + *(a+15) * *(b+8) + *(a+16) * *(b+15) + 
*(a+17) * *(b+14); *(c+16) = *(a+12) * *(b+4) - *(a+13) * *(b+5) + *(a+14) * *(b+10) - *(a+15) * *(b+11) + *(a+16) * *(b+16) - *(a+17) * *(b+17); *(c+17) = *(a+12) * *(b+5) + *(a+13) * *(b+4) + *(a+14) * *(b+11) + *(a+15) * *(b+10) + *(a+16) * *(b+17) + *(a+17) * *(b+16); } #endif //! CK: 3x3 complex matrix multiplication with complex conjugate on first matrix; c = Conj(a)b void mStarDotMEqual(IFloat* c, const IFloat* a, const IFloat* b); //! CK: 3x3 complex matrix multiplication with complex conjugate on second matrix; c = a Conj(b) void mDotMStarEqual(IFloat* c, const IFloat* a, const IFloat* b); //! CK: 3x3 complex matrix multiplication with complex conjugate on second matrix; c = Conj(a) Conj(b) void mStarDotMStarEqual(IFloat* c, const IFloat* a, const IFloat* b); //! 3x3 complex matrix multiplication and sum; c += ab void mDotMPlus(IFloat* c, const IFloat* a, const IFloat* b); //! CK: 3x3 complex matrix multiplication and sum; c += Conj(a)b void mStarDotMPlus(IFloat* c, const IFloat* a, const IFloat* b); //! CK: 3x3 complex matrix multiplication and sum; c += a Conj(b) void mDotMStarPlus(IFloat* c, const IFloat* a, const IFloat* b); //! CK: 3x3 complex matrix multiplication and sum; c += Conj(a) Conj(b) void mStarDotMStarPlus(IFloat* c, const IFloat* a, const IFloat* b); //! 3x3 complex matrix times vector; y = Mx void uDotXEqual(IFloat* y, const IFloat* m, const IFloat* x); //! vector scalar product; a.b IFloat dotProduct(const IFloat *a, const IFloat *b, int); //! vector addition; a += b #ifndef VEC_INLINE void vecAddEquVec(IFloat *a, const IFloat *b, int); #else inline void vecAddEquVec(IFloat *a, const IFloat *b, int len) { #pragma omp parallel for for(int i = 0; i < len; ++i) { a[i] += b[i]; } } #endif //! vector subtraction; a -= b void vecMinusEquVec(IFloat *a, const IFloat *b, int); inline void vecMinusEquVecSingle(IFloat *a, const IFloat *b, int len) { for(int i = 0; i < len; ++i) { *a++ -= *b++; } } //! vector negation; a = -b void vecNegative(IFloat *a, const IFloat *b, int); //! set all elements to zero void vecZero(IFloat *a, int size); /*! \param a The vector to be multiplied \param b The real scalar \param len The length of the vectors. \post \a a is the multiplied vector. */ inline void vecTimesEquFloat(IFloat *a, IFloat b, size_t len) { #pragma omp parallel for for(size_t i = 0; i < len; ++i) { a[i] *= b; } } inline void vecAddEquFloat(IFloat *a, IFloat b, size_t len) { #pragma omp parallel for for(size_t i = 0; i < len; ++i) { a[i] += b; } } inline void vecTimesEquFloatSingle(IFloat *a, IFloat b, size_t len) { for(size_t i = 0; i < len; ++i) { *(a+i) *= b; } } void vecTimesComplex(IFloat *a, IFloat re, IFloat im, const IFloat *c, int len); //! real scalar times vector multiplication; a = c*b void vecEqualsVecTimesEquFloat(IFloat *a, IFloat *b, IFloat c, int); // //! vector linear combination; a = bc+d void fTimesV1PlusV2(IFloat *a, IFloat b, const IFloat *c, const IFloat *d, int size); inline void fTimesV1PlusV2Single(IFloat *a, IFloat b, const IFloat *c, const IFloat *d, int len) { for(int i = 0; i < len; ++i) { *a++ = b * *c++ + *d++; } } //! vector linear combination; a = bc-d void fTimesV1MinusV2(IFloat *a, IFloat b, const IFloat *c, const IFloat *d, int size); //! complex vector scalar product; a.b void compDotProduct(IFloat *c_r, IFloat *c_i, const IFloat *a, const IFloat *b, int); //! complex vector linear combination; a = bc+d void cTimesV1PlusV2(IFloat *a, IFloat b_re, IFloat b_im, const IFloat *c, const IFloat *d, int size); //! 
Not implemented on qcdsp void cTimesV1MinusV2(IFloat *a, IFloat b_re, IFloat b_im, const IFloat *c, const IFloat *d, int size); // A = b*C-D //! matrix linear combination; a = 1-bc void oneMinusfTimesMatrix(IFloat *a, IFloat b, const IFloat *c, int n); } //------------------------------------------------------------------ // Declarations of some genaral c-style functions that perform // operations on vectors. For these functions there is // no optimized assembly. //------------------------------------------------------------------ //! Multiplication of complex vector by matrix and addition; y += Mx void uDotXPlus(IFloat* y, const IFloat* u, const IFloat* x); //! Multiplication of complex vector by matrix and subtraction; y -= Mx void uDotXMinus(IFloat* y, const IFloat* u, const IFloat* x); //! Multiplication of complex vector by hermitian conjugate matrix and summation; y += M^dagger x void uDagDotXEqual(IFloat* y, const IFloat* u, const IFloat* x); //! Multiplication of complex vector by hermitian conjugate matrix; y = M^dagger x void uDagDotXPlus(IFloat* y, const IFloat* u, const IFloat* x); //------------------------------------------------------------------ // Declarations of some genaral c-style functions that compute // re/im parts of the su3 characters of various representations // of su3 matrices. //------------------------------------------------------------------ extern IFloat reChar6(IFloat *p) ; extern IFloat imChar6(IFloat *p) ; extern IFloat reChar8(IFloat *p) ; extern IFloat reChar10(IFloat *p) ; extern IFloat imChar10(IFloat *p) ; //------------------------------------------------------------------ //! The rank of the matrices represented by the Matrix class //------------------------------------------------------------------ enum{COLORS=3}; //------------------------------------------------------------------ //! A class of general 3x3 complex matrices. //------------------------------------------------------------------ class Matrix { Float u[2*COLORS*COLORS]; // The matrix friend class Vector; static IFloat inv3; public: // CREATORS //! General constructor; no initialisation. Matrix() {} //! Initialisation to real multiple of the unit matrix. //------------------------------------------------------------------ /*! The diagonal matrix elements (0,0), (1,1) and (2,2) are set to the real number \a c; All other elements are zero. \param c The diagonal matrix element */ Matrix(IFloat c) {*this = c;} //! Initialisation to complex multiple of the unit matrix. //------------------------------------------------------------------ /*! The diagonal matrix elements (0,0), (1,1) and (2,2) are set to the complex number \a c; All other elements are zero. \param c The diagonal matrix element */ Matrix(const Complex& c) {*this = c;} //! Copy constructor //------------------------------------------------------------------ /*! The matrix is initialised as a copy of the matrix \a m. \param m The initialising matrix. */ Matrix(const Matrix& m) { memcpy(u, m.u, sizeof(Float) * COLORS * COLORS * 2); } //! Assignment to real multiple of the unit matrix. //------------------------------------------------------------------ /*! The diagonal matrix elements (0,0), (1,1) and (2,2) are set to the real number \a c; All other elements are zero. \param c The diagonal matrix element */ Matrix& operator=(IFloat c) { this->ZeroMatrix(); u[0] = u[8] = u[16] = c; return *this; } //! Assignment to complex multiple of the unit matrix. //------------------------------------------------------------------ /*! 
The diagonal matrix elements (0,0), (1,1) and (2,2) are set to the complex number \a c; All other elements are zero. \param c The diagonal matrix element */ Matrix& operator=(const Complex& c) { this->ZeroMatrix(); u[0] = u[8] = u[16] = c.real(); u[1] = u[9] = u[17] = c.imag(); return *this; } //! Overloaded assignment /*! \a m should not alias this matrix */ Matrix& operator=(const Matrix& m) { if(this != &m) memcpy(u, m.u, sizeof(Float) * COLORS * COLORS * 2); return *this; } // MANIPULATORS //! Adds a matrix \a m to this matrix. /*! \param m The matrix to be added. \return The matrix sum. */ Matrix& operator+=(const Matrix& m) { for(int i = 0; i < COLORS * COLORS * 2; ++i) u[i] += m.u[i]; return *this; } Matrix& operator*=(const Matrix& m) { Matrix tmp(*this); mDotMEqual((IFloat *)u, (IFloat *) tmp.u, (IFloat *) m.u); //this->DotMEqual(tmp,m); return *this; } //! Adds a real scalar multiple of the unit matrix to this one. /*! \param c The real scalar multiple \return The matrix sum */ Matrix& operator+=(IFloat c) { u[0] += c; u[8] += c; u[16] += c; return *this; } //! Subtracts a matrix \a m to this matrix. /*! \param m The matrix to be subtracted. \return The matrix difference. */ Matrix& operator-=(const Matrix& m) { vecMinusEquVecSingle((IFloat *)u, (IFloat *)m.u, COLORS*COLORS*2); return *this; } //! Subtracts a real scalar multiple of the unit matrix from this one. /*! \param c The real scalar multiple \return The matrix difference */ Matrix& operator-=(IFloat c) { u[0] -= c; u[8] -= c; u[16] -= c; return *this; } //! Multiplies this matrix by a real scalar. /*! \param c The real scalar \return The multiplied matrix */ Matrix& operator*=(IFloat c) { for(int i = 0; i < COLORS * COLORS * 2; ++i) u[i] *= c; return *this; } //! Multiplies this matrix by a complex scalar. /*! \param c The complex scalar \return The multiplied matrix */ #if 1 Matrix& operator*=(const Complex &c) { for(int i = 0; i < COLORS * COLORS; ++i){ int reidx = 2*i; int imidx = reidx+1; Float rev = u[reidx]; u[reidx] = u[reidx]*c.real() - u[imidx]*c.imag(); u[imidx] = rev*c.imag() + u[imidx]*c.real(); } return *this; } #else // Added by Hantao Matrix &operator*=(const Complex &c) { Complex *uc = (Complex *)u; for(int i = 0; i < COLORS * COLORS; ++i) { uc[i] *= c; } return *this; } #endif Matrix operator+(const Matrix &m)const { Matrix tmp(*this); tmp += m; return tmp; } Matrix operator-(const Matrix &m)const { Matrix tmp(*this); tmp -= m; return tmp; } Matrix operator*(const Matrix &m)const { Matrix tmp; tmp.DotMEqual(*this, m); return tmp; } Matrix operator*(const Complex &c) { Matrix tmp(*this); for(int i = 0; i < COLORS * COLORS; ++i){ int reidx = 2*i; int imidx = reidx+1; Float rev = tmp.u[reidx]; tmp.u[reidx] = u[reidx]*c.real() - u[imidx]*c.imag(); tmp.u[imidx] = rev*c.imag() + u[imidx]*c.real(); } return tmp; } //! Assignment to matrix product; \a ab /*! \param a the matrix \a a \param b the matrix \a b \post This matrix is the matrix product \a ab */ void DotMEqual(const Matrix& a, const Matrix& b) { mDotMEqual((IFloat *)u, (IFloat *) a.u, (IFloat *) b.u);} //! Assignment to matrix product; \a ab /*! \param a the matrix \a a \param b the matrix \a b \post The matrix product \a ab is added to this matrix. */ void DotMPlus(const Matrix& a, const Matrix& b) { mDotMPlus((IFloat *)u, (IFloat *)a.u, (IFloat *)b.u);} //u += a.u * b.u //! Assignment to Matrix transpose. void Trans(const IFloat* m); //! Assignment to matrix transpose. /*! \param m A matrix. \post This matrix is the transpose of \a m. 
\a m must not be an alias of this matrix/ */ void Trans(const Matrix& m) { Trans((const IFloat *)(m.u)); } //! Assignment to Matrix complex conjugate. void Conj(const IFloat* m); //! Assignment to matrix complex conjugate. /*! \param m A matrix. \post This matrix is the complex conjugate of \a m. \a m must not be an alias of this matrix/ */ void Conj(const Matrix& m) { Conj((const IFloat *)(m.u)); } //! Hermitian conjugate. void Dagger(const Matrix& m) { Dagger((const IFloat *)&m); } void Dagger(){ Matrix dag; dag.Dagger(*this); *this = dag; } void Transpose(const IFloat* m); void Transpose(); //! Determine matrix trace Complex Trace() const{ return Complex(u[0]+u[8]+u[16],u[1]+u[9]+u[17]); } //! Assignment to hermitian conjugate. /*! \param m A matrix. \post This matrix is the hermitian conjugate of \a m. \a a must not be an alias of this matrix */ #ifndef VEC_INLINE void Dagger(const IFloat* m); //! Not what you might think. void TrLessAntiHermMatrix(const Matrix& this_dag); // void TrLessAntiHermMatrix(); void TrLessAntiHermMatrix(){ Matrix dag; dag.Dagger(*this); this->TrLessAntiHermMatrix(dag); } #else //#define TAH_INLINE /*! \param a A linear array representation of a 3x3 complex matrix, such that real part of the (i,j) element is at array position [6i+2j] and the imaginary part of the (i,j) element is at array position [6i+2j+1]. \post This matrix is the hermitian conjugate of \a m. \a a must not be an alias of this matrix. */ inline void Dagger(const IFloat* a) { u[0] = a[0]; u[1] = -a[1]; u[6] = a[2]; u[7] = -a[3]; u[12] = a[4]; u[13] = -a[5]; u[2] = a[6]; u[3] = -a[7]; u[8] = a[8]; u[9] = -a[9]; u[14] = a[10]; u[15] = -a[11]; u[4] = a[12]; u[5] = -a[13]; u[10] = a[14]; u[11] = -a[15]; u[16] = a[16]; u[17] = -a[17]; } /*! \param dag A matrix \a A. \post This matrix is set to\n <em>1/2(M-A) - 1/6 Trace M-A)</em> \n where \a M is the original value of this matrix. */ inline void TrLessAntiHermMatrix(const Matrix& dag) { // get 1/2(A - dag(A)) = 1/2A - dag(1/2A) *this -= dag; IFloat *p = (IFloat *)u; vecTimesEquFloatSingle(p, 0.5, 18); IFloat c = inv3 * (*(p+1) + *(p+9) + *(p+17)); *(p+1) -= c; *(p+9) -= c; *(p+17) -= c; } inline void TrLessAntiHermMatrix() { IFloat *p = (IFloat *)u; *p = *(p+8) = *(p+16)=0.; IFloat tmp = 0.5*(p[2] - p[6]); p[2]=tmp; p[6] = -tmp; tmp = 0.5*(p[3] + p[7]); p[3]=tmp; p[7] = tmp; tmp = 0.5*(p[4] - p[12]); p[4]=tmp; p[12] = -tmp; tmp = 0.5*(p[5] + p[13]); p[5]=tmp; p[13] = tmp; tmp = 0.5*(p[10] - p[14]); p[10]=tmp; p[14] = -tmp; tmp = 0.5*(p[11] + p[15]); p[11]=tmp; p[15] = tmp; IFloat c = inv3 * (*(p+1) + *(p+9) + *(p+17)); p[1] -= c; p[9] -= c; p[17] -= c; } #endif //! Assignment to tensor product of vectors. void Cross2(const Vector& v1, const Vector& v2); //! Assignment to an traceless antihermitian matrix. void AntiHermMatrix(const IFloat *a); // a points to an array of 8 real numbers // *this = i \lamda^i * a^i // \lambda^i are 8 Gellmann matrices //! Force this matrix to be an SU(3) matrix. void Unitarize(void); //! Only do the last step in Unitarize. void Construct3rdRow(void); //! Project this matrix onto SU(3) according to its polar decomposition //! Added by Thomas Dumitrescu 06/2006 int ProjSU3(void); //! Assignment to a unit matrix. //------------------------------------------------------------------ /*! \post This matrix is a 3x3 unit matrix. */ void UnitMatrix(void) { this->ZeroMatrix(); u[0] = u[8] = u[16] = 1.; } //! Assignment to a zero matrix. //------------------------------------------------------------------ /*! 
\post This matrix is a 3x3 zero matrix. */ void ZeroMatrix(void) { for(int i = 0; i < 18; ++i) { u[i] = 0.; } } //! Assignment to a negated matrix. /*! \param m A matrix. \post This matrix has the value \a -m. */ void NegMatrix(const Matrix& m) { for(int i = 0; i < COLORS * COLORS * 2; ++i) u[i] = -m.u[i]; } //! Assignment to the matrix linear combination 1-xm /*! \param x A real scalar factor \param m A matrix \post This matrix has the value 1-xm. */ void OneMinusfTimesM(IFloat x, const Matrix& m) { oneMinusfTimesMatrix((IFloat *)u, x, (IFloat *)&m, COLORS*COLORS*2); } #ifndef VEC_INLINE // ACCESSORS //! Write access. Complex& operator()(int i, int j); //! Read access. const Complex& operator()(int i, int j) const; #else Complex& operator()(int i, int j) { return ((Complex*)u)[i*COLORS+j]; } const Complex& operator()(int i, int j) const { return ((Complex*)u)[i*COLORS+j]; } #endif //! Write access. /*! \param i A number between 0 and 8 \return The ([i - i mod 3]/3, i mod 3) matrix element Should this method not be private? */ inline Complex& operator[](int i) { return ((Complex*)u)[i]; } //! Read access. /*! \param i A number between 0 and 8 \return The ([i - i mod 3]/3, i mod 3) matrix element */ inline const Complex& operator[](int i) const { return ((Complex*)u)[i]; } inline IFloat elem(int i) { return u[i]; } //! Read access. /*! \param i A number between 0 and 17 \return element of the array */ //! The determinant. void Det(IFloat* c) const; //! Returns the real part of the trace. IFloat ReTr() const { return u[0] + u[8] + u[16]; } //! Returns the trace. Complex Tr() const { return ((Complex*)u)[0] + ((Complex*)u)[4] + ((Complex*)u)[8]; } //! -1/2 times the trace of the square. IFloat NegHalfTrSquare() const; //! The deviation of this matrix from unitarity IFloat ErrorSU3() const; /*! Returns the SU(3) matrix norm, defined by ||X||^2 = -2 trace X^2 */ // !!FIXME: Why does it calculate this? IFloat norm() const { Matrix x2; x2.DotMEqual(*this, *this); return -2.0*x2.ReTr(); } IFloat norm2() const { IFloat *m = (IFloat*)&u[0]; return dotProduct(m, m, 18); } // SU(3) Characters Complex Char3() const { return Tr() ; } ; Complex Char6() const ; Complex Char8() const ; Complex Char10() const ; void FTimesV1PlusV2(Float fb, Matrix *c, Matrix *d, int len) { fTimesV1PlusV2((IFloat *)&u, IFloat(fb), (IFloat *)c, (IFloat *)d, len*18); } Float Norm() { Float sum=0.; for(int i=0; i<2*COLORS*COLORS; i++) sum +=u[i]*u[i]; return sum; } }; inline static Matrix Transpose(const Matrix &m){ Matrix out; out.Trans(m); return out; } //Added by CK inline static Complex Trace(const Matrix &a, const Matrix &b){ //Mapping is i*3 + j Complex out(0.0); //a(0,0)*b(0,0) + a(0,1)*b(1,0) + a(0,2)*b(2,0) out += a[0]*b[0] + a[1]*b[3] + a[2]*b[6]; //a(1,0)*b(0,1) + a(1,1)*b(1,1) + a(1,2)*b(2,1) out += a[3]*b[1] + a[4]*b[4] + a[5]*b[7]; //a(2,0)*b(0,2) + a(2,1)*b(1,2) + a(2,2)*b(2,2) out += a[6]*b[2] + a[7]*b[5] + a[8]*b[8]; return out; } //------------------------------------------------------------------ //! A class implementing a general 3 component complex vector. /*! This is a schizophrenic class. It is really designed to be a class of complex 3-vectors, and many methods carry out operations on just such an object; \e e.g. the overloaded binary operators, the matrix-vector multiplications and the normalisation and orthogonalisation methods. However, some methods, those with take an argument \c int \a len, are really wrappers for functions operating on 1-dimensional floating point arrays of any length. 
They are meant to be used with an array of Vectors: the first Vector in the array operates not only on its own data but on that of all the other objects in the array by assuming that it is at the beginning of a contiguous floating point array. For the sake of sanity the argument \a len should be a multiple of 6. */ //------------------------------------------------------------------ class Vector { Float v[2*COLORS]; // Vector friend class Matrix; public: // CREATORS Vector() {} //! Overloaded assignment /*! \a x should not alias this matrix */ Vector& operator=(const Vector& x) #if 1 { for(int i=0;i<COLORS*2;i++) v[i] = x.v[i]; #else { moveMem(v, x.v, COLORS*2*sizeof(Float)); #endif return *this; } // MANIPULATORS //! Multiplies this vector by a real scalar. /*! \param p The real scalar \return The multiplied vector */ Vector& operator*=(IFloat p) { vecTimesEquFloatSingle((IFloat *)v, p, COLORS*2); return *this; } //! Adds a vector \a m to this vector. /*! \param m The vector to be added. \return The vector sum. */ Vector& operator+=(const Vector& x) { vecAddEquVec((IFloat *)v, (IFloat *)x.v, COLORS*2); return *this; } //! Subtracts a vector \a m to this vector. /*! \param m The vector to be subtracted. \return The vector difference. */ Vector& operator-=(const Vector& x) { vecMinusEquVecSingle((IFloat *)v, (IFloat *)x.v, COLORS*2); return *this; } //! Assignment to matrix-vector product. /*! \param m A matrix. \param x a vector \post This vector is takes the value Mx */ void DotXEqual(const Matrix& m, const Vector& x) { uDotXEqual((IFloat *)v, (IFloat *) m.u, (IFloat *) x.v); } // v = m.u * x.v, m should be in CRAM, x MUST be in DRAM */ //! Normalisation void Normalize(void); //! Orthogonalisation void Orthogonalize(const Vector& p1); //! Zeroing a color vector /*! added by Sam 1/9/2006 to implement disconnected F.T */ void Zero() { for(int i=0; i<2*COLORS; i++) v[i]=0; } //! simple element access (as a Complex) /*! added by Sam 1/9/2006 to implement disconnected F.T */ Complex& operator[](int i) { return *((Complex*)(v+2*i)); } //-------------------------------------------------------------- // Functions that act on arrays of vectors of general length. // The array of vectors is treated as an array of IFloating // numbers pointed to by &v and having length len. // This set of functions does not really fit the way // Vector is currently defined (as an array with re/im and // color indeces only. It extends the notion of Vector to // a general array of IFloating numbers. //-------------------------------------------------------------- //! Assignment to another vector /*! \param b Another vector \param len The number of real numbers in the vectors. \post This vector = \a b \a b should not alias this vector. */ void CopyVec(const Vector *b, int len) #if 1 { moveFloat((Float *)&v, (const Float *)b, len); } #else { moveMem(&v, b, len*sizeof(Float)); } #endif //! Square norm. /*! \param len The number of real numbers in the vectors. \return The square norm of this vector. */ Float NormSqNode(size_t len) {return Float( dotProduct((IFloat *)&v, (IFloat *)&v, len) ); } //! Square norm with global sum. Float NormSqGlbSum(size_t len); Float NormSqGlbSum4D(size_t len); //! Print the vector content to the screen void Print(int len); //! The real part of the dot product /*! \param b Another vector \param len The number of real numbers in the vectors. \return The real part of the dot product (v,b). 
*/ Float ReDotProductNode(const Vector *b, int len) {return Float( dotProduct((IFloat *)&v, (IFloat *)b, len) ); } //! The real part of the dot product with global sum. Float ReDotProductGlbSum(const Vector *b, int len); Float ReDotProductGlbSum4D(const Vector *b, int len); void NormSqArraySliceSum(Float *f_out, const int size, const int dir); //!< Sum the square norms of vectors in 3-dim slices. void SliceArraySum(Float *sum, const Float *f_in, const int dir); //!< Sum an array of Floats on a 4-dim lattice in 3-dim slices. void SliceArraySumFive(Float *sum, const Float *f_in, const int dir); //!< Sum an array of Floats on a 5-dim lattice in 4-dim slices. //! Assign vector to zero. /*! \param len The number of real numbers in the vectors. \post This vector has the value 0. */ void VecZero(int len) {vecZero((IFloat*)&v, len);} //! Assignment to a negated vector. /*! \param b A vector. \param len The number of real numbers in the vectors. \post This vector has the value \a -b. */ void VecNegative(const Vector *b, int len) {vecNegative((IFloat *)&v, (IFloat *)b, len);} //! Multiplication by a real scalar /*! \param fb The real scalar \param len The number of real numbers in the vectors. \post This vector is multiplied by \a fb */ void VecTimesEquFloat(const Float &fb, size_t len) {vecTimesEquFloat((IFloat *)&v, IFloat(fb), len);} //! Multiplication by a real scalar /*! \param u The input vector \param fb The real scalar \param len The number of real numbers in the vectors. \post This vector is multiplied by \a fb */ void VecEqualsVecTimesEquFloat(const Vector *u, const Float &fb, int len) {vecEqualsVecTimesEquFloat((IFloat *)&v, (IFloat*)u, IFloat(fb), len);} //! Addition of another vector /*! \param b Another vector \param len The number of real numbers in the vectors. \post \a b is added to this vector. */ void VecAddEquVec(const Vector *b, int len) { vecAddEquVec((IFloat *)&v, (IFloat *)b, len);} //! Subtraction of another vector /*! \param b Another vector \param len The number of real numbers in the vectors. \post \a b is subtracted from this vector. */ void VecMinusEquVec(const Vector *b, int len) { vecMinusEquVec((IFloat *)&v, (IFloat *)b, len);} //! Assignment of the linear combination fb * c + d /*! \param fb A real scalar \param c A vector \param d A vector \param len The number of real numbers in the vectors. \post \a This vector takes the value fb * c + d */ // void FTimesV1PlusV2(const Float &fb, const Vector *c, // const Vector *d, int len) void FTimesV1PlusV2(Float fb, Vector *c, Vector *d, int len) #if TARGET == BGL { Float coef = fb; vaxpy3 ((Vector *)v, &coef, c, d, len/6); } #else { fTimesV1PlusV2((IFloat *)&v, IFloat(fb), (IFloat *)c, (IFloat *)d, len); } #endif void FTimesPlusVec(Float fb, Vector *c, int len){ IFloat *a_f = (IFloat *)&v; IFloat *c_f = (IFloat *)&(c->v); #pragma omp parallel for default(shared) for(int i = 0; i < len; ++i) { *(a_f+i) += fb* *(c_f+i); } } //! Assignment of the linear combination fb * c - d /*! \param fb A real scalar \param c A vector \param d A vector \param len The number of real numbers in the vectors. \post This vector takes the value fb * c - d */ void FTimesV1MinusV2(const Float &fb, const Vector *c, const Vector *d, int len) { fTimesV1MinusV2((IFloat *)&v, IFloat(fb), (IFloat *)c, (IFloat *)d, len); } //! The dot product with another vector /*! \param b Another vector \param len The number of real numbers in the vectors. \return The dot product of this vector with b. 
*/ Complex CompDotProductNode(const Vector *b, int len) { IFloat c_r, c_i; compDotProduct(&c_r, &c_i, (IFloat *)&v, (IFloat *)b, len); return Complex(c_r,c_i); } //! The dot product with another vector, with global sum Complex CompDotProductGlbSum(const Vector *b, int len); Complex CompDotProductGlbSum4D(const Vector *b, int len); //! Assignment of the linear combination fb * c + d /*! \param fb A complex scalar \param c A vector \param d A vector \param len The number of real numbers in the vectors. \post This vector takes the value fb * c + d */ void CTimesV1PlusV2(const Complex &fb, const Vector *c, const Vector *d, int len) { cTimesV1PlusV2((IFloat *)&v, real(fb), imag(fb), (IFloat *)c, (IFloat *)d, len); } //! Assignment of the linear combination fb * c - d /*! \param fb A complex scalar \param c A vector \param d A vector \param len The number of real numbers in the vectors. \post \a This vector takes the value fb * c - d */ void CTimesV1MinusV2(const Complex &fb, const Vector *c, const Vector *d, int len) { cTimesV1MinusV2((IFloat *)&v, real(fb), imag(fb), (IFloat *)c, (IFloat *)d, len); } void print(const char *name,size_t f_size){ // Float *v_p = (Float*)v; Float sum=NormSqGlbSum(f_size); VRB.Result("",name,"%0.12g %0.12g %0.12g %0.12g %0.12g %0.12g norm=%0.12g\n", v[0],v[1],v[2],v[3],v[4],v[5],sum); } }; inline void vaxpy3(Vector *res,Float *scale,Vector *mult,Vector *add, int ncvec){ fTimesV1PlusV2((IFloat *)res, (IFloat)*scale, (IFloat *)mult, (IFloat *)add, ncvec*6); } inline void vaxpy3_m(Matrix *res,Float *scale,Matrix *mult,Matrix *add, int ncvec){ fTimesV1PlusV2((IFloat *)res, (IFloat)*scale, (IFloat *)mult, (IFloat *)add, ncvec*6); } #if 0 inline void moveFloattofloat NOINLINE_MACRO (float *out, Float * in, size_t f_size) { Float sum=0.; #pragma omp parallel for reduction(+:sum) for (size_t i = 0; i < f_size; i++) { out[i]=in[i]; // flt = (float) in[i]; // out[i] = flt; sum +=out[i]*out[i]; } glb_sum(&sum); VRB.Result("","moveFloattofloat()","norm=%e\n",sum); }; inline void movefloattoFloat NOINLINE_MACRO (Float * out, float *in, size_t f_size) { // float flt; Float sum=0.; #pragma omp parallel for reduction(+:sum) for (size_t i = 0; i < f_size; i++) { out[i]=in[i]; // flt = in[i]; // out[i] = (Float) flt; sum +=out[i]*out[i]; } glb_sum(&sum); VRB.Result("","moveFloattofloat()","norm=%e\n",sum); }; #endif template < typename AFloat, typename BFloat > void compDotProduct (std::vector < Float > &result, const std::vector < AFloat * >a, const std::vector < BFloat * >b, size_t len) { const char *fname="compDotProduct()"; size_t a_size = a.size (); size_t b_size = b.size (); size_t c_size = len; int a_step = 8; int b_step = 8; int c_step = 8; if (a_size < a_step) a_step = a_size; if (b_size < b_step) b_step = b_size; if (c_size < c_step ) c_step = c_size; VRB.Debug("",fname,"sizes= %d %d %d step = %d %d %d result.size()=%d\n", a_size,b_size,c_size,a_step,b_step,c_step,result.size());fflush(stdout); int nthr=0; #pragma omp parallel { nthr = omp_get_num_threads(); } // assert ((len % c_step) == 0); // assert ((a_size % a_step) == 0); // assert ((b_size % b_step) == 0); result.resize (2 * a_size * b_size, 0.); Float *result_p = result.data(); // exit(-42); //#pragma omp parallel for reduction(+:re,im) for (size_t i = 0; i < a_size; i += a_step) for (size_t j = 0; j < b_size; j += b_step) #pragma omp parallel for for (size_t k = 0; k < c_size; k += c_step) { int ii_end = a_step; if((a_size-i)<a_step) ii_end = a_size-i; int jj_end = b_step; if((b_size-j)<b_step) jj_end = b_size-j; 
int kk_end = c_step; if((c_size-k)<c_step) kk_end = c_size-k; // printf("%s:end= %d %d %d \n", fname, ii_end,jj_end,kk_end);fflush(stdout); //OMP4( parallel for ) for (size_t kk = 0; kk < kk_end; kk++) { std::vector < Float > re (a_step * b_step*2, 0); size_t ind_k = 2 * (k + kk); for (size_t ii = 0; (ii < ii_end); ii++) { AFloat *a_p = a[i + ii]; for (size_t jj = 0; (jj < jj_end); jj++) { BFloat *b_p = b[j + jj]; // printf("%s: %d %d: (%e %e) (%e %e)\n", fname, // if(!ind_k) VRB.Result("",fname,"%d %d: (%e %e) (%e %e)\n", // i+ii,j+jj, *a_p,*(a_p+1),*b_p, *(b_p+1));fflush(stdout); re[2*(ii + a_step * jj)] += *(a_p + ind_k) * *(b_p + ind_k) + *(a_p + ind_k + 1) * *(b_p + ind_k + 1); re[1+2*(ii + a_step * jj)] += *(a_p + ind_k) * *(b_p + ind_k + 1) - *(a_p + ind_k + 1) * *(b_p + ind_k); } } #if 1 for (size_t ii = 0; ii < ii_end; ii++) for (size_t jj = 0; jj < jj_end ; jj++){ //OMP4( critical ) #pragma omp critical { size_t ind_ij = (i + ii) + a_size * (j + jj); result_p[2 * ind_ij] += re[2*(ii + a_step * jj)]; result_p[2 * ind_ij + 1] += re[1+2*(ii + a_step * jj)]; } } #endif } } } CPS_END_NAMESPACE #endif
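/* A minimal usage sketch, not part of the header above: it assumes the code is
   compiled inside the CPS namespace and only exercises the Matrix/Vector
   interface declared in this file.

     Matrix link;                       // uninitialised 3x3 complex matrix
     link.UnitMatrix();                 // link = 1

     Matrix staple(Complex(0.0, 1.0));  // i times the identity
     Matrix prod;
     prod.DotMEqual(link, staple);      // prod = link * staple
     prod.TrLessAntiHermMatrix();       // traceless anti-hermitian projection

     Vector x, y;
     x.Zero();
     y.DotXEqual(link, x);              // y = link * x

   The array-style members (CopyVec, FTimesV1PlusV2, NormSqNode, ...) act on a
   contiguous run of `len' Floats starting at the calling Vector, so `len' should
   be a multiple of 6 when they are used on arrays of colour vectors.
*/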
Tutorial.h
//================================================================================================= /*! // \file blaze/Tutorial.h // \brief Tutorial of the Blaze library // // Copyright (C) 2012-2020 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. */ //================================================================================================= #ifndef _BLAZE_TUTORIAL_H_ #define _BLAZE_TUTORIAL_H_ //================================================================================================= // // BLAZE TUTORIAL // //================================================================================================= //**Mainpage*************************************************************************************** /*!\mainpage // // \image html blaze300x150.jpg // // This is the API for the \b Blaze high performance C++ math library. It gives a complete // overview of the individual features and sublibraries of \b Blaze. To get a first impression // on \b Blaze, the short \ref getting_started tutorial is a good place to start. Afterwards, // the following long tutorial covers the most important aspects of the \b Blaze math library. 
// The tabs at the top of the page allow a direct access to the individual modules, namespaces, // classes, and files of the \b Blaze library.\n\n // // \section table_of_content Table of Contents // // <ul> // <li> \ref configuration_and_installation </li> // <li> \ref getting_started </li> // <li> \ref vectors // <ul> // <li> \ref vector_types // <ul> // <li> \ref vector_types_dense_vectors </li> // <li> \ref vector_types_sparse_vectors </li> // </ul> // </li> // <li> \ref vector_operations // <ul> // <li> \ref vector_operations_constructors </li> // <li> \ref vector_operations_assignment </li> // <li> \ref vector_operations_element_access </li> // <li> \ref vector_operations_element_insertion </li> // <li> \ref vector_operations_element_removal </li> // <li> \ref vector_operations_element_lookup </li> // <li> \ref vector_operations_non_modifying_operations </li> // <li> \ref vector_operations_modifying_operations </li> // <li> \ref vector_operations_arithmetic_operations </li> // <li> \ref vector_operations_reduction_operations </li> // <li> \ref vector_operations_norms </li> // <li> \ref vector_operations_scalar_expansion </li> // <li> \ref vector_operations_vector_expansion </li> // <li> \ref vector_operations_statistic_operations </li> // <li> \ref vector_operations_declaration_operations </li> // <li> \ref vector_operations_vector_generators </li> // </ul> // </li> // </ul> // </li> // <li> \ref matrices // <ul> // <li> \ref matrix_types // <ul> // <li> \ref matrix_types_dense_matrices </li> // <li> \ref matrix_types_sparse_matrices </li> // </ul> // </li> // <li> \ref matrix_operations // <ul> // <li> \ref matrix_operations_constructors </li> // <li> \ref matrix_operations_assignment </li> // <li> \ref matrix_operations_element_access </li> // <li> \ref matrix_operations_element_insertion </li> // <li> \ref matrix_operations_element_removal </li> // <li> \ref matrix_operations_element_lookup </li> // <li> \ref matrix_operations_non_modifying_operations </li> // <li> \ref matrix_operations_modifying_operations </li> // <li> \ref matrix_operations_arithmetic_operations </li> // <li> \ref matrix_operations_reduction_operations </li> // <li> \ref matrix_operations_norms </li> // <li> \ref matrix_operations_scalar_expansion </li> // <li> \ref matrix_operations_statistic_operations </li> // <li> \ref matrix_operations_declaration_operations </li> // <li> \ref matrix_operations_matrix_generators </li> // <li> \ref matrix_operations_matrix_inversion </li> // <li> \ref matrix_operations_matrix_exponential </li> // <li> \ref matrix_operations_decomposition </li> // <li> \ref matrix_operations_linear_systems </li> // <li> \ref matrix_operations_eigenvalues </li> // <li> \ref matrix_operations_singularvalues </li> // </ul> // </li> // </ul> // </li> // <li> \ref adaptors // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices </li> // </ul> // </li> // <li> \ref views // <ul> // <li> \ref views_subvectors </li> // <li> \ref views_element_selections </li> // <li> \ref views_submatrices </li> // <li> \ref views_rows </li> // <li> \ref views_row_selections </li> // <li> \ref views_columns </li> // <li> \ref views_column_selections </li> // <li> \ref views_bands </li> // </ul> // </li> // <li> \ref arithmetic_operations // <ul> // <li> \ref addition </li> // <li> \ref subtraction </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // <li> \ref 
componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // <li> \ref vector_kronecker_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication // <ul> // <li> \ref schur_product </li> // <li> \ref matrix_product </li> // <li> \ref matrix_kronecker_product </li> // </ul> // </li> // </ul> // </li> // <li> \ref bitwise_operations // <ul> // <li> \ref bitwise_shift </li> // <li> \ref bitwise_and </li> // <li> \ref bitwise_or </li> // <li> \ref bitwise_xor </li> // </ul> // </li> // <li> \ref logical_operations // <ul> // <li> \ref logical_not </li> // <li> \ref logical_and </li> // <li> \ref logical_or </li> // </ul> // </li> // <li> \ref shared_memory_parallelization // <ul> // <li> \ref hpx_parallelization </li> // <li> \ref cpp_threads_parallelization </li> // <li> \ref boost_threads_parallelization </li> // <li> \ref openmp_parallelization </li> // <li> \ref serial_execution </li> // </ul> // </li> // <li> \ref serialization // <ul> // <li> \ref vector_serialization </li> // <li> \ref matrix_serialization </li> // </ul> // </li> // <li> \ref customization // <ul> // <li> \ref configuration_files </li> // <li> \ref vector_and_matrix_customization // <ul> // <li> \ref custom_data_members </li> // <li> \ref custom_operations </li> // <li> \ref custom_data_types </li> // </ul> // </li> // <li> \ref error_reporting_customization </li> // </ul> // </li> // <li> \ref blas_functions </li> // <li> \ref lapack_functions </li> // <li> \ref block_vectors_and_matrices </li> // <li> \ref intra_statement_optimization </li> // <li> \ref faq </li> // <li> \ref issue_creation_guidelines </li> // <li> \ref blaze_references </li> // </ul> */ //************************************************************************************************* //**Configuration and Installation***************************************************************** /*!\page configuration_and_installation Configuration and Installation // // \tableofcontents // // // Since \b Blaze is a header-only library, setting up the \b Blaze library on a particular system // is a fairly easy two step process. In the following, this two step process is explained in // detail, preceded only by a short summary of the requirements. // // // \n \section requirements Requirements // <hr> // // For maximum performance the \b Blaze library expects you to have a BLAS library installed // (<a href="http://software.intel.com/en-us/articles/intel-mkl/">Intel MKL</a>, // <a href="http://developer.amd.com/libraries/acml/">ACML</a>, // <a href="http://math-atlas.sourceforge.net">Atlas</a>, // <a href="http://www.tacc.utexas.edu/tacc-projects/gotoblas2">Goto</a>, ...). If you don't // have a BLAS library installed on your system, \b Blaze will still work and will not be reduced // in functionality, but performance may be limited. Thus it is strongly recommended to install a // BLAS library. // // Additionally, for computing the determinant of a dense matrix, for the decomposition of dense // matrices, for the dense matrix inversion, and for the computation of eigenvalues and singular // values \b Blaze requires <a href="https://en.wikipedia.org/wiki/LAPACK">LAPACK</a>. When either // of these features is used it is necessary to link the LAPACK library to the final executable. // If no LAPACK library is available the use of these features will result in a linker error. 
// // Furthermore, it is possible to use Boost threads to run numeric operations in parallel. In this // case the Boost library is required to be installed on your system. It is recommended to use the // newest Boost library available, but \b Blaze requires at minimum the Boost version 1.54.0. If // you don't have Boost installed on your system, you can download it for free from // <a href="http://www.boost.org">www.boost.org</a>. // // // \n \section step_1_installation Step 1: Installation // <hr> // // \subsection step_1_cmake Installation via CMake // // The first step is the installation of the \b Blaze header files. The most convenient way // to do this is via <a href="https://cmake.org">CMake</a>. Linux and macOS users can use the // following two lines to copy the \b Blaze headers in the <tt>./blaze</tt> subdirectory to // the directory \c ${CMAKE_INSTALL_PREFIX}/include and the package configuration files to // \c ${CMAKE_INSTALL_PREFIX}/share/blaze/cmake. \code cmake -DCMAKE_INSTALL_PREFIX=/usr/local/ sudo make install \endcode // Windows users can do the same via the cmake-gui. Alternatively, it is possible to include // \b Blaze by adding the following lines in any \c CMakeLists.txt file: \code find_package( blaze ) if( blaze_FOUND ) add_library( blaze_target INTERFACE ) target_link_libraries( blaze_target INTERFACE blaze::blaze ) endif() \endcode // Alternatively \b Blaze provides the <tt>./cmake/Blaze_Import</tt> CMake function to import // the \b Blaze library into CMake based projects. This approach includes the configuration // step (see \ref step_2_configuration). To do so you need to import the function file like // any other module/function into your CMake project: \code list(APPEND CMAKE_MODULE_PATH ${BLAZE_LIBRARY_PATH}/cmake) include(Blaze_Import) \endcode // After importing the function script you can import and use the \b Blaze library: \code Blaze_Import(ARGUMENTS) target_link_libraries(TARGET Blaze) \endcode // In this example, \c TARGET is the executable/library using \b Blaze and \c ARGUMENTS is the // configuration you want for building \b Blaze. To configure \b Blaze using the import function // you can set the input arguments like this example: \code Blaze_Import( QUIET BLAS on LAPACK on THREADING Boost CACHE_SIZE auto VECTORIZATION on STORAGE_ORDER rowMajor THRESHOLD_DMATDVECMULT 100000UL THRESHOLD_SMP_DVECDVECADD 1000000UL ) \endcode // For more details about available configuration options please have a look at // \ref configuration_files and the <tt>Blaze_Import.cmake</tt> function script. // // \n \subsection step_1_vcpkg Installation via the VC++ Packaging Tool // // An alternate way to install \b Blaze for Windows users is Microsoft's // <a href="https://github.com/Microsoft/vcpkg">VC++ Packaging Tool (vcpkg)</a>. \b Blaze can // be installed via the command line: \code C:\src\vcpkg> .\vcpkg install blaze \endcode // The tool automatically downloads the latest \b Blaze release and copies the header files to // the common include directory. Please note that since \b Blaze is a header-only library the // attempt to install any static or dynamic library will fail! 
// // \n \subsection step_1_installation_unix Manual Installation on Linux/macOS // // Since \b Blaze only consists of header files, the <tt>./blaze</tt> subdirectory can be simply // copied to a standard include directory (note that this requires root privileges): \code cp -r ./blaze /usr/local/include \endcode // Alternatively, on Unix-based machines (which includes Linux and Mac OS X) the // \c CPLUS_INCLUDE_PATH environment variable can be set. The specified directory will be // searched after any directories specified on the command line with the option \c -I and // before the standard default directories (such as \c /usr/local/include and \c /usr/include). // Assuming a user named 'Jon', the environment variable can be set as follows: \code CPLUS_INCLUDE_PATH=/usr/home/jon/blaze export CPLUS_INCLUDE_PATH \endcode // Last but not least, the <tt>./blaze</tt> subdirectory can be explicitly specified on the // command line. The following example demonstrates this by means of the GNU C++ compiler: \code g++ -I/usr/home/jon/blaze -o BlazeTest BlazeTest.cpp \endcode // \n \subsection step_1_installation_windows Manual Installation on Windows // // Windows doesn't have a standard include directory. Therefore the \b Blaze header files can be // copied to any other directory or simply left in the default \b Blaze directory. However, the // chosen include directory has to be explicitly specified as include path. In Visual Studio, // this is done via the project property pages, configuration properties, C/C++, General settings. // Here the additional include directories can be specified. // // // \n \section step_2_configuration Step 2: Configuration // <hr> // // The second step is the configuration and customization of the \b Blaze library. Many aspects // of \b Blaze can be adapted to specific requirements, environments and architectures. The most // convenient way to configure \b Blaze is to modify the headers in the <tt>./blaze/config/</tt> // subdirectory by means of <a href="https://cmake.org">CMake</a>. Alternatively these header // files can be customized manually. In both cases, however, the files are modified. If this is // not an option it is possible to configure \b Blaze via the command line (see the tutorial // section \ref configuration_files or the documentation in the configuration files). // // Since the default settings are reasonable for most systems this step can also be skipped. // However, in order to achieve maximum performance a customization of at least the following // configuration files is required: // // - <b><tt><blaze/config/BLAS.h></tt></b>: Via this configuration file \b Blaze can be enabled // to use a third-party BLAS library for several basic linear algebra functions (such as for // instance dense matrix multiplications). In case no BLAS library is used, all linear algebra // functions use the default implementations of the \b Blaze library and therefore BLAS is not a // requirement for the compilation process. However, please note that performance may be limited. // - <b><tt><blaze/config/CacheSize.h></tt></b>: This file contains the hardware specific cache // settings. \b Blaze uses this information to optimize its cache usage. For maximum performance // it is recommended to adapt these setting to a specific target architecture. // - <b><tt><blaze/config/Thresholds.h></tt></b>: This file contains all thresholds for the // customization of the \b Blaze compute kernels. 
In order to tune the kernels for a specific // architecture and to maximize performance it can be necessary to adjust the thresholds, // especially for a parallel execution (see \ref shared_memory_parallelization). // // For an overview of other customization options and more details, please see the section // \ref configuration_files. // // // \n \section blaze_version Blaze Version // <hr> // // The current major and minor version number of the \b Blaze library can be found in the // <b><tt><blaze/system/Version.h></tt></b> header file. It is automatically included via the // <b><tt><blaze/Blaze.h></tt></b> header file. The file contains the two following macros, // which can for instance be used for conditional compilation: \code #define BLAZE_MAJOR_VERSION 3 #define BLAZE_MINOR_VERSION 7 #define BLAZE_PATCH_VERSION 0 \endcode // \n Next: \ref getting_started */ //************************************************************************************************* //**Getting Started******************************************************************************** /*!\page getting_started Getting Started // // This short tutorial serves the purpose to give a quick overview of the way mathematical // expressions have to be formulated in \b Blaze. Starting with \ref vector_types, the following // long tutorial covers the most important aspects of the \b Blaze math library. // // // \n \section getting_started_vector_example A First Example // // \b Blaze is written such that using mathematical expressions is as close to mathematical // textbooks as possible and therefore as intuitive as possible. In nearly all cases the seemingly // easiest solution is the right solution and most users experience no problems when trying to // use \b Blaze in the most natural way. The following example gives a first impression of the // formulation of a vector addition in \b Blaze: \code #include <iostream> #include <blaze/Math.h> using blaze::StaticVector; using blaze::DynamicVector; int main() { // Instantiation of a static 3D column vector. The vector is directly initialized as // ( 4 -2 5 ) StaticVector<int,3UL> a{ 4, -2, 5 }; // Instantiation of a dynamic 3D column vector. Via the subscript operator the values are set to // ( 2 5 -3 ) DynamicVector<int> b( 3UL ); b[0] = 2; b[1] = 5; b[2] = -3; // Adding the vectors a and b DynamicVector<int> c = a + b; // Printing the result of the vector addition std::cout << "c =\n" << c << "\n"; } \endcode // Note that the entire \b Blaze math library can be included via the \c blaze/Math.h header // file. Alternatively, the entire \b Blaze library, including both the math and the entire // utility module, can be included via the \c blaze/Blaze.h header file. Also note that all // classes and functions of \b Blaze are contained in the blaze namespace.\n\n // // Assuming that this program resides in a source file called \c FirstExample.cpp, it can be // compiled for instance via the GNU C++ compiler: \code g++ -std=c++14 -O3 -DNDEBUG -mavx -o FirstExample FirstExample.cpp \endcode // Note the definition of the \c NDEBUG preprocessor symbol. In order to achieve maximum // performance, it is necessary to compile the program in release mode, which deactivates // all debugging functionality inside \b Blaze. It is also strongly recommended to specify // the available architecture specific instruction set (as for instance the AVX instruction // set, which if available can be activated via the \c -mavx flag). 
This allows \b Blaze // to optimize computations via vectorization.\n\n // // When running the resulting executable \c FirstExample, the output of the last line of // this small program is \code c = ( 6 ) ( 3 ) ( 2 ) \endcode // \n \section getting_started_matrix_example An Example Involving Matrices // // Similarly easy and intuitive are expressions involving matrices: \code #include <iostream> #include <blaze/Math.h> using namespace blaze; int main() { // Instantiating a dynamic 3D column vector DynamicVector<int> x{ 4, -1, 3 }; // Instantiating a dynamic 2x3 row-major matrix, preinitialized with 0. Via the function call // operator three values of the matrix are explicitly set to get the matrix // ( 1 0 4 ) // ( 0 -2 0 ) DynamicMatrix<int> A( 2UL, 3UL, 0 ); A(0,0) = 1; A(0,2) = 4; A(1,1) = -2; // Performing a matrix/vector multiplication DynamicVector<int> y = A * x; // Printing the resulting vector std::cout << "y =\n" << y << "\n"; // Instantiating a static column-major matrix. The matrix is directly initialized as // ( 3 -1 ) // ( 0 2 ) // ( -1 0 ) StaticMatrix<int,3UL,2UL,columnMajor> B{ { 3, -1 }, { 0, 2 }, { -1, 0 } }; // Performing a matrix/matrix multiplication DynamicMatrix<int> C = A * B; // Printing the resulting matrix std::cout << "C =\n" << C << "\n"; } \endcode // The output of this program is \code y = ( 16 ) ( 2 ) C = ( -1 -1 ) ( 0 -4 ) \endcode // \n \section getting_started_complex_example A Complex Example // // The following example is much more sophisticated. It shows the implementation of the Conjugate // Gradient (CG) algorithm (http://en.wikipedia.org/wiki/Conjugate_gradient) by means of the // \b Blaze library: // // \image html cg.jpg // // In this example it is not important to understand the CG algorithm itself, but to see the // advantage of the API of the \b Blaze library. In the \b Blaze implementation we will use a // sparse matrix/dense vector multiplication for a 2D Poisson equation using \f$ N \times N \f$ // unknowns. It becomes apparent that the core of the algorithm is very close to the mathematical // formulation and therefore has huge advantages in terms of readability and maintainability, // while the performance of the code is close to the expected theoretical peak performance: \code #include <blaze/Math.h> int main() { const size_t N ( 1000UL ); const size_t iterations( 10UL ); const size_t NN( N*N ); blaze::CompressedMatrix<double,rowMajor> A( NN, NN ); blaze::DynamicVector<double,columnVector> x( NN, 1.0 ), b( NN, 0.0 ), r( NN ), p( NN ), Ap( NN ); double alpha, beta, delta; // ... Initializing the sparse matrix A // Performing the CG algorithm r = b - A * x; p = r; delta = (r,r); for( size_t iteration=0UL; iteration<iterations; ++iteration ) { Ap = A * p; alpha = delta / (p,Ap); x += alpha * p; r -= alpha * Ap; beta = (r,r); if( std::sqrt( beta ) < 1E-8 ) break; p = r + ( beta / delta ) * p; delta = beta; } } \endcode // \n Hopefully this short tutorial gives a good first impression of how mathematical expressions // are formulated with \b Blaze. The following long tutorial, starting with \ref vector_types, // will cover all aspects of the \b Blaze math library, i.e. it will introduce all vector and // matrix types, all possible operations on vectors and matrices, and of course all possible // mathematical expressions. 
// // \n Previous: \ref configuration_and_installation &nbsp; &nbsp; Next: \ref vectors */ //************************************************************************************************* //**Vectors**************************************************************************************** /*!\page vectors Vectors // // \tableofcontents // // // \n \section vectors_general General Concepts // <hr> // // The \b Blaze library currently offers five dense vector types (\ref vector_types_static_vector, // \ref vector_types_dynamic_vector, \ref vector_types_hybrid_vector, \ref vector_types_custom_vector, // and \ref vector_types_uniform_vector) and two sparse vector types (\ref vector_types_compressed_vector // and \ref vector_types_zero_vector). All vectors can be specified as either column vectors or row // vectors: \code using blaze::DynamicVector; using blaze::columnVector; using blaze::rowVector; // Setup of the 3-dimensional dense column vector // // ( 1 ) // ( 2 ) // ( 3 ) // DynamicVector<int,columnVector> a{ 1, 2, 3 }; // Setup of the 3-dimensional dense row vector // // ( 4 5 6 ) // DynamicVector<int,rowVector> b{ 4, 5, 6 }; \endcode // Per default, all vectors in \b Blaze are column vectors: \code // Instantiation of a 3-dimensional column vector blaze::DynamicVector<int> c( 3UL ); \endcode // \n \section vectors_details Vector Details // <hr> // // - \ref vector_types // - \ref vector_operations // // // \n \section vectors_examples Examples // <hr> \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowVector; using blaze::columnVector; StaticVector<int,6UL> a; // Instantiation of a 6-dimensional static column vector CompressedVector<int,rowVector> b; // Instantiation of a compressed row vector DynamicVector<int,columnVector> c; // Instantiation of a dynamic column vector // ... Resizing and initialization c = a + trans( b ); \endcode // \n Previous: \ref getting_started &nbsp; &nbsp; Next: \ref vector_types */ //************************************************************************************************* //**Vector Types*********************************************************************************** /*!\page vector_types Vector Types // // \tableofcontents // // // \n \section vector_types_dense_vectors Dense Vectors // <hr> // // \subsection vector_types_static_vector StaticVector // // The blaze::StaticVector class template is the representation of a fixed size vector with // statically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/StaticVector.h> \endcode // The type of the elements, the number of elements, the transpose flag, the alignment, and the // padding of the vector can be specified via the five template parameters: \code template< typename Type, size_t N, bool TF, AlignmentFlag AF, PaddingFlag PF > class StaticVector; \endcode // - \c Type: specifies the type of the vector elements. StaticVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the total number of vector elements. It is expected that StaticVector is // only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // - \c AF : specifies whether the first element of the vector is properly aligned with // respect to the available instruction set (SSE, AVX, ...). 
Possible values are // \c blaze::aligned and \c blaze::unaligned. The default value is \c blaze::aligned. // - \c PF : specifies whether the vector should be padded to maximize the efficiency of // vectorized operations. Possible values are \c blaze::padded and \c blaze::unpadded. // The default value is \c blaze::padded. // // The blaze::StaticVector is perfectly suited for small to medium vectors whose size is known at // compile time: \code // Definition of a 3-dimensional integral column vector blaze::StaticVector<int,3UL> a; // Definition of a 4-dimensional single precision column vector blaze::StaticVector<float,4UL,blaze::columnVector> b; // Definition of an unaligned, unpadded 6-dimensional double precision row vector blaze::StaticVector<double,6UL,blaze::rowVector,blaze::unaligned,blaze::unpadded> c; \endcode // \subsubsection vector_types_static_vector_alignment Alignment // // In case \c AF is set to \c blaze::aligned, the elements of a blaze::StaticVector are possibly // over-aligned to meet the alignment requirements of the available instruction set (SSE, AVX, // AVX-512, ...). The alignment for fundamental types (\c short, \c int, \c float, \c double, ...) // and complex types (\c complex<float>, \c complex<double>, ...) is 16 bytes for SSE, 32 bytes // for AVX, and 64 bytes for AVX-512. All other types are aligned according to their intrinsic // alignment: \code struct Int { int i; }; using VT1 = blaze::StaticVector<double,3UL>; using VT2 = blaze::StaticVector<complex<float>,2UL>; using VT3 = blaze::StaticVector<Int,5UL>; alignof( VT1 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( VT2 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( VT3 ); // Evaluates to 'alignof( Int )' \endcode // Note that an aligned blaze::StaticVector instance may be bigger than the sum of its data // elements: \code sizeof( VT1 ); // Evaluates to 32 for both SSE and AVX sizeof( VT2 ); // Evaluates to 16 for SSE and 32 for AVX sizeof( VT3 ); // Evaluates to 20; no special alignment requirements \endcode // Please note that for this reason an aligned blaze::StaticVector cannot be used in containers // using dynamic memory such as \c std::vector without additionally providing an allocator that // can provide over-aligned memory: \code using Type = blaze::StaticVector<double,3UL>; using Allocator = blaze::AlignedAllocator<Type>; std::vector<Type> v1; // Might be misaligned for AVX or AVX-512 std::vector<Type,Allocator> v2; // Properly aligned for AVX or AVX-512 \endcode // \subsubsection vector_types_static_vector_padding Padding // // Adding padding elements to the end of a blaze::StaticVector can have a significant impact on // the performance. For instance, assuming that AVX is available, then two padded 3-dimensional // vectors of double precision values can be added via a single SIMD addition operation: \code using blaze::StaticVector; using blaze::columnVector; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; StaticVector<double,3UL,columnVector,aligned,padded> a1, b1, c1; StaticVector<double,3UL,columnVector,unaligned,unpadded> a2, b2, c2; // ... Initialization c1 = a1 + b1; // AVX-based vector addition; maximum performance c2 = a2 + b2; // Scalar vector addition; limited performance sizeof( a1 ); // Evaluates to 32 for SSE and AVX, and 64 for AVX-512 sizeof( a2 ); // Evaluates to 24 for SSE, AVX, and AVX-512 (minimum size) \endcode // Due to padding, the first addition will run at maximum performance. 
On the flip side, the size // of each vector instance is increased due to the padding elements. The total size of an instance // depends on the number of elements and width of the available instruction set (16 bytes for // SSE, 32 bytes for AVX, and 64 bytes for AVX-512). // // The second addition will be limited in performance since due to the number of elements some of // the elements need to be handled in a scalar operation. However, the size of an \c unaligned, // \c unpadded blaze::StaticVector instance is guaranteed to be the sum of its elements. // // Please also note that \b Blaze will zero initialize the padding elements in order to achieve // maximum performance! // // // \n \subsection vector_types_dynamic_vector DynamicVector // // The blaze::DynamicVector class template is the representation of an arbitrary sized vector // with dynamically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/DynamicVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class DynamicVector; \endcode // - \c Type: specifies the type of the vector elements. DynamicVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::DynamicVector is the default choice for all kinds of dense vectors and the best // choice for medium to large vectors. Its size can be modified at runtime: \code // Definition of a 3-dimensional integral column vector blaze::DynamicVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector blaze::DynamicVector<float,blaze::columnVector> b( 4UL ); // Definition of a double precision row vector with size 0 blaze::DynamicVector<double,blaze::rowVector> c; \endcode // \n \subsection vector_types_hybrid_vector HybridVector // // The blaze::HybridVector class template combines the advantages of the blaze::StaticVector and // the blaze::DynamicVector class templates. It represents a fixed size vector with statically // allocated elements, but still can be dynamically resized (within the bounds of the available // memory). It can be included via the header file \code #include <blaze/math/HybridVector.h> \endcode // The type of the elements, the maximum number of elements, the transpose flag, the alignment, // and the padding of the vector can be specified via the five template parameters: \code template< typename Type, size_t N, bool TF, AlignmentFlag AF, PaddingFlag PF > class HybridVector; \endcode // - \c Type: specifies the type of the vector elements. HybridVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the maximum number of vector elements. It is expected that HybridVector // is only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // - \c AF : specifies whether the first element of the vector is properly aligned with // respect to the available instruction set (SSE, AVX, ...). Possible values are // \c blaze::aligned and \c blaze::unaligned. The default value is \c blaze::aligned. 
// - \c PF : specifies whether the vector should be padded to maximize the efficiency of // vectorized operations. Possible values are \c blaze::padded and \c blaze::unpadded. // The default value is \c blaze::padded. // // The blaze::HybridVector is a suitable choice for small to medium vectors, whose size is not // known at compile time or not fixed at runtime, but whose maximum size is known at compile // time: \code // Definition of a 3-dimensional integral column vector with a maximum size of 6 blaze::HybridVector<int,6UL> a( 3UL ); // Definition of a 4-dimensional single precision column vector with a maximum size of 16 blaze::HybridVector<float,16UL,blaze::columnVector> b( 4UL ); // Definition of a unaligned, unpadded double precision row vector with size 0 and a maximum size of 6 blaze::HybridVector<double,6UL,blaze::rowVector,blaze::unaligned,blaze::unpadded> c; \endcode // \subsubsection vector_types_hybrid_vector_alignment Alignment // // In case \c AF is set to \c blaze::aligned, the elements of a blaze::HybridVector are possibly // over-aligned to meet the alignment requirements of the available instruction set (SSE, AVX, // AVX-512, ...). The alignment for fundamental types (\c short, \c int, \c float, \c double, ...) // and complex types (\c complex<float>, \c complex<double>, ...) is 16 bytes for SSE, 32 bytes // for AVX, and 64 bytes for AVX-512. All other types are aligned according to their intrinsic // alignment: \code struct Int { int i; }; using VT1 = blaze::HybridVector<double,3UL>; using VT2 = blaze::HybridVector<complex<float>,2UL>; using VT3 = blaze::HybridVector<Int,5UL>; alignof( VT1 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( VT2 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( VT3 ); // Evaluates to 'alignof( Int )' \endcode // Note that an aligned blaze::HybridVector instance may be bigger than an according unaligned // blaze::HybridVector: \code sizeof( VT1 ); // Evaluates to 32 for both SSE and AVX sizeof( VT2 ); // Evaluates to 16 for SSE and 32 for AVX sizeof( VT3 ); // Evaluates to 20; no special alignment requirements \endcode // Please note that for this reason an aligned blaze::HybridVector cannot be used in containers // using dynamic memory such as \c std::vector without additionally providing an allocator that // can provide over-aligned memory: \code using Type = blaze::HybridVector<double,3UL>; using Allocator = blaze::AlignedAllocator<Type>; std::vector<Type> v1; // Might be misaligned for AVX or AVX-512 std::vector<Type,Allocator> v2; // Properly aligned for AVX or AVX-512 \endcode // \subsubsection vector_types_hybrid_vector_padding Padding // // Adding padding elements to the end of a blaze::HybridVector can have a significant impact on // the performance. For instance, assuming that AVX is available, then two padded 3-dimensional // vectors of double precision values can be added via a single SIMD addition operation: \code using blaze::HybridVector; using blaze::columnVector; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; HybridVector<double,3UL,columnVector,aligned,padded> a1, b1, c1; HybridVector<double,3UL,columnVector,unaligned,unpadded> a2, b2, c2; // ... 
Resizing and initialization c1 = a1 + b1; // AVX-based vector addition; maximum performance c2 = a2 + b2; // Scalar vector addition; limited performance sizeof( a1 ); // Evaluates to 48 for SSE, 64 and AVX, and 128 for AVX-512 sizeof( a2 ); // Evaluates to 32 for SSE, AVX, and AVX-512 (minimum size) \endcode // Due to padding, the first addition will run at maximum performance. On the flip side, the size // of each vector instance is increased due to the padding elements. The total size of an instance // depends on the number of elements and width of the available instruction set (16 bytes for // SSE, 32 bytes for AVX, and 64 bytes for AVX-512). // // The second addition will be limited in performance since due to the number of elements some of // the elements need to be handled in a scalar operation. However, the size of an \c unaligned, // \c unpadded blaze::HybridVector instance is guaranteed to be the sum of its elements plus the // necessary data members to store the current size. // // Please also note that \b Blaze will zero initialize the padding elements in order to achieve // maximum performance! // // // \n \subsection vector_types_custom_vector CustomVector // // The blaze::CustomVector class template provides the functionality to represent an external // array of elements of arbitrary type and a fixed size as a native \b Blaze dense vector data // structure. Thus in contrast to all other dense vector types a custom vector does not perform // any kind of memory allocation by itself, but it is provided with an existing array of element // during construction. A custom vector can therefore be considered an alias to the existing // array. It can be included via the header file \code #include <blaze/math/CustomVector.h> \endcode // The type of the elements, the properties of the given array of elements and the transpose // flag of the vector can be specified via the following four template parameters: \code template< typename Type, bool AF, bool PF, bool TF > class CustomVector; \endcode // - Type: specifies the type of the vector elements. blaze::CustomVector can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - AF : specifies whether the represented, external arrays are properly aligned with // respect to the available instruction set (SSE, AVX, ...) or not. // - PF : specified whether the represented, external arrays are properly padded with // respect to the available instruction set (SSE, AVX, ...) or not. // - TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
// // The blaze::CustomVector is the right choice if any external array needs to be represented as // a \b Blaze dense vector data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of an unmanaged custom column vector for unaligned, unpadded integer arrays using UnalignedUnpadded = CustomVector<int,unaligned,unpadded,columnVector>; std::vector<int> vec( 7UL ); UnalignedUnpadded a( &vec[0], 7UL ); // Definition of a managed custom column vector for unaligned but padded 'float' arrays using UnalignedPadded = CustomVector<float,unaligned,padded,columnVector>; std::unique_ptr<float[]> memory1( new float[16] ); UnalignedPadded b( memory1.get(), 9UL, 16UL ); // Definition of a managed custom row vector for aligned, unpadded 'double' arrays using AlignedUnpadded = CustomVector<double,aligned,unpadded,rowVector>; std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( 7UL ) ); AlignedUnpadded c( memory2.get(), 7UL ); // Definition of a managed custom row vector for aligned, padded 'complex<double>' arrays using cplx = complex<double>; using AlignedPadded = CustomVector<cplx,aligned,padded,columnVector>; std::unique_ptr<cplx[],Deallocate> memory3( allocate<cplx>( 8UL ) ); AlignedPadded d( memory3.get(), 5UL, 8UL ); \endcode // In comparison with the remaining \b Blaze dense vector types blaze::CustomVector has several // special characteristics. All of these result from the fact that a custom vector is not // performing any kind of memory allocation, but instead is given an existing array of elements. // The following sections discuss all of these characteristics: // // -# <b>\ref vector_types_custom_vector_memory_management</b> // -# <b>\ref vector_types_custom_vector_copy_operations</b> // -# <b>\ref vector_types_custom_vector_alignment</b> // -# <b>\ref vector_types_custom_vector_padding</b> // // \subsubsection vector_types_custom_vector_memory_management Memory Management // // The blaze::CustomVector class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // vector data structure. However, this flexibility comes with the price that the user of a custom // vector is responsible for the resource management. // // The following examples give an impression of several possible types of custom vectors: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of a 3-dimensional custom vector with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom vector! std::vector<int> vec( 3UL ); CustomVector<int,unaligned,unpadded> a( &vec[0], 3UL ); // Definition of a custom vector with size 3 and capacity 16 with aligned, padded and // externally managed integer array. Note that the std::unique_ptr must be guaranteed // to outlive the custom vector! 
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 16UL ) ); CustomVector<int,aligned,padded> b( memory.get(), 3UL, 16UL ); \endcode // \subsubsection vector_types_custom_vector_copy_operations Copy Operations // // As with all dense vectors it is possible to copy construct a custom vector: \code using blaze::CustomVector; using blaze::unaligned; using blaze::unpadded; using CustomType = CustomVector<int,unaligned,unpadded>; std::vector<int> vec( 5UL, 10 ); // Vector of 5 integers of the value 10 CustomType a( &vec[0], 5UL ); // Represent the std::vector as Blaze dense vector a[1] = 20; // Also modifies the std::vector CustomType b( a ); // Creating a copy of vector a b[2] = 20; // Also affects vector a and the std::vector \endcode // It is important to note that a custom vector acts as a reference to the specified array. Thus // the result of the copy constructor is a new custom vector that is referencing and representing // the same array as the original custom vector. // // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom vector, but modifies the values of the array: \code std::vector<int> vec2( 5UL, 4 ); // Vector of 5 integers of the value 4 CustomType c( &vec2[0], 5UL ); // Represent the std::vector as Blaze dense vector a = c; // Copy assignment: Set all values of vector a and b to 4. \endcode // \subsubsection vector_types_custom_vector_alignment Alignment // // In case the custom vector is specified as \c aligned the passed array must be guaranteed to // be aligned according to the requirements of the used instruction set (SSE, AVX, ...). For // instance, if AVX is active an array of integers must be 32-byte aligned: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; // Allocation of 32-byte aligned memory std::unique_ptr<int[],Deallocate> memory( allocate<int>( 5UL ) ); CustomVector<int,aligned,unpadded> a( memory.get(), 5UL ); \endcode // In case the alignment requirements are violated, a \c std::invalid_argument exception is // thrown. // // \subsubsection vector_types_custom_vector_padding Padding // // Adding padding elements to the end of an array can have a significant impact on the performance. // For instance, assuming that AVX is available, then two aligned, padded, 3-dimensional vectors // of double precision values can be added via a single SIMD addition operation: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; using CustomType = CustomVector<double,aligned,padded>; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 4UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 4UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 4UL ) ); // Creating padded custom vectors of size 3 and a capacity of 4 CustomType a( memory1.get(), 3UL, 4UL ); CustomType b( memory2.get(), 3UL, 4UL ); CustomType c( memory3.get(), 3UL, 4UL ); // ... Initialization c = a + b; // AVX-based vector addition \endcode // In this example, maximum performance is possible.
However, in case no padding elements are // inserted, a scalar addition has to be used: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; using CustomType = CustomVector<double,aligned,unpadded>; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 3UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 3UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 3UL ) ); // Creating unpadded custom vectors of size 3 CustomType a( memory1.get(), 3UL ); CustomType b( memory2.get(), 3UL ); CustomType c( memory3.get(), 3UL ); // ... Initialization c = a + b; // Scalar vector addition \endcode // Note the different number of constructor parameters for unpadded and padded custom vectors: // In contrast to unpadded vectors, where during the construction only the size of the array // has to be specified, during the construction of a padded custom vector it is additionally // necessary to explicitly specify the capacity of the array. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom vector the added padding elements must // guarantee that the capacity is greater than or equal to the size and a multiple of the SIMD vector // width. In case of unaligned padded vectors the number of padding elements can be greater than or // equal to the number of padding elements of an aligned padded custom vector. In case the padding // is insufficient with respect to the available instruction set, a \c std::invalid_argument // exception is thrown. // // Please also note that \b Blaze will zero initialize the padding elements in order to achieve // maximum performance! // // // \n \subsection vector_types_uniform_vector UniformVector // // The blaze::UniformVector class template is the representation of an arbitrary sized uniform // vector with elements of arbitrary type. It can be included via the header file \code #include <blaze/math/UniformVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class UniformVector; \endcode // - \c Type: specifies the type of the vector elements. UniformVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::UniformVector is the best choice for uniform vectors of any size. Its size can be // modified at runtime: \code // Definition of a 3-dimensional integral column vector blaze::UniformVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector blaze::UniformVector<float,blaze::columnVector> b( 4UL ); // Definition of a double precision row vector with size 0 blaze::UniformVector<double,blaze::rowVector> c; \endcode // \n \section vector_types_sparse_vectors Sparse Vectors // <hr> // // \subsection vector_types_compressed_vector CompressedVector // // The blaze::CompressedVector class is the representation of an arbitrarily sized sparse // vector, which stores only non-zero elements of arbitrary type.
It can be included via the // header file \code #include <blaze/math/CompressedVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class CompressedVector; \endcode // - \c Type: specifies the type of the vector elements. CompressedVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::CompressedVector is the right choice for all kinds of sparse vectors: \code // Definition of a 3-dimensional integral column vector blaze::CompressedVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector with capacity for 3 non-zero elements blaze::CompressedVector<float,blaze::columnVector> b( 4UL, 3UL ); // Definition of a double precision row vector with size 0 blaze::CompressedVector<double,blaze::rowVector> c; \endcode // \n \subsection vector_types_zero_vector ZeroVector // // The blaze::ZeroVector class template is the representation of an immutable, arbitrary sized // zero vector with elements of arbitrary type. It can be included via the header file \code #include <blaze/math/ZeroVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class ZeroVector; \endcode // - \c Type: specifies the type of the vector elements. ZeroVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::ZeroVector is the perfect choice to represent a zero vector: \code // Definition of a 3-dimensional integral zero column vector blaze::ZeroVector<int> a( 3UL ); // Definition of a 6-dimensional single precision zero column vector blaze::ZeroVector<float,blaze::columnVector> b( 6UL ); // Definition of a double precision row vector with size 0 blaze::ZeroVector<double,blaze::rowVector> c; \endcode // \n Previous: \ref vectors &nbsp; &nbsp; Next: \ref vector_operations */ //************************************************************************************************* //**Vector Operations****************************************************************************** /*!\page vector_operations Vector Operations // // \tableofcontents // // // \n \section vector_operations_constructors Constructors // <hr> // // Instantiating and setting up a vector is very easy and intuitive. However, there are a few // rules to take care of: // - In case the last template parameter (the transpose flag) is omitted, the vector is per // default a column vector. // - The elements of a \c StaticVector or \c HybridVector are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicVector or \c CompressedVector remain uninitialized // if they are of built-in type and are default constructed if they are of class type. 
// // \n \subsection vector_operations_default_construction Default Construction \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; // All vectors can be default constructed. Whereas the size // of StaticVectors is fixed via the second template parameter, // the initial size of a default constructed DynamicVector or // CompressedVector is 0. StaticVector<int,2UL> v1; // Instantiation of a 2D integer column vector. // All elements are initialized to 0. StaticVector<long,3UL,columnVector> v2; // Instantiation of a 3D long integer column vector. // Again, all elements are initialized to 0L. DynamicVector<float> v3; // Instantiation of a dynamic single precision column // vector of size 0. DynamicVector<double,rowVector> v4; // Instantiation of a dynamic double precision row // vector of size 0. CompressedVector<int> v5; // Instantiation of a compressed integer column // vector of size 0. CompressedVector<double,rowVector> v6; // Instantiation of a compressed double precision row // vector of size 0. \endcode // \n \subsection vector_operations_size_construction Construction with Specific Size // // The \c DynamicVector, \c HybridVector and \c CompressedVector classes offer a constructor that // allows to immediately give the vector the required size. Whereas both dense vectors (i.e. // \c DynamicVector and \c HybridVector) use this information to allocate memory for all vector // elements, \c CompressedVector merely acquires the size but remains empty. \code DynamicVector<int,columnVector> v7( 9UL ); // Instantiation of an integer dynamic column vector // of size 9. The elements are NOT initialized! HybridVector< complex<float>, 5UL > v8( 2UL ); // Instantiation of a column vector with two single // precision complex values. The elements are // default constructed. CompressedVector<int,rowVector> v9( 10UL ); // Instantiation of a compressed row vector with // size 10. Initially, the vector provides no // capacity for non-zero elements. \endcode // \n \subsection vector_operations_initialization_constructors Initialization Constructors // // All dense vector classes offer a constructor that allows for a direct, homogeneous initialization // of all vector elements. In contrast, for sparse vectors the predicted number of non-zero elements // can be specified \code StaticVector<int,3UL,rowVector> v10( 2 ); // Instantiation of a 3D integer row vector. // All elements are initialized to 2. DynamicVector<float> v11( 3UL, 7.0F ); // Instantiation of a dynamic single precision // column vector of size 3. All elements are // set to 7.0F. CompressedVector<float,rowVector> v12( 15UL, 3UL ); // Instantiation of a single precision column // vector of size 15, which provides enough // space for at least 3 non-zero elements. \endcode // \n \subsection vector_operations_array_construction Array Construction // // Alternatively, all dense vector classes offer a constructor for an initialization with a dynamic // or static array, or with a \c std::array. If the vector is initialized from a dynamic array, the // constructor expects the actual size of the array as first argument, the array as second argument. // In case of a static array or \c std::array, the fixed size of the array is used: \code const unique_ptr<double[]> array1( new double[2] ); // ... 
Initialization of the dynamic array blaze::StaticVector<double,2UL> v13( 2UL, array1.get() ); const int array2[4] = { 4, -5, -6, 7 }; blaze::StaticVector<int,4UL> v14( array2 ); const std::array<float,3UL> array3{ 1.1F, 2.2F, 3.3F }; blaze::StaticVector<float,3UL> v15( array3 ); \endcode // \n \subsection vector_operations_initializer_list_construction Initializer List Construction // // In addition, all dense and sparse vector classes can be directly initialized by means of an // initializer list: \code blaze::DynamicVector<float> v16{ 1.0F, 2.0F, 3.0F, 4.0F }; blaze::CompressedVector<int> v17{ 0, 2, 0, 0, 5, 0, 7, 0 }; \endcode // Dynamically sized vectors (such as e.g. \ref vector_types_hybrid_vector, // \ref vector_types_dynamic_vector or \ref vector_types_compressed_vector) are sized according // to the size of the initializer list and all their elements are (copy) assigned the values of // the list. For fixed size vectors (such as e.g. \ref vector_types_static_vector) missing values // are initialized as default and in case the size of the initializer list exceeds the size // of the vector a \c std::invalid_argument exception is thrown. In case of sparse vectors, only // the non-zero elements are used to initialize the vector. // // \n \subsection vector_operations_copy_construction Copy Construction // // All dense and sparse vectors can be created as the copy of any other dense or sparse vector // with the same transpose flag (i.e. blaze::rowVector or blaze::columnVector). \code StaticVector<int,9UL,columnVector> v18( v7 ); // Instantiation of the dense column vector v18 // as copy of the dense column vector v7. DynamicVector<int,rowVector> v19( v9 ); // Instantiation of the dense row vector v19 as // copy of the sparse row vector v9. CompressedVector<int,columnVector> v20( v1 ); // Instantiation of the sparse column vector v20 // as copy of the dense column vector v1. CompressedVector<float,rowVector> v21( v12 ); // Instantiation of the sparse row vector v21 as // copy of the row vector v12. \endcode // Note that it is not possible to create a \c StaticVector as a copy of a vector with a different // size: \code StaticVector<int,5UL,columnVector> v22( v7 ); // Runtime error: Size does not match! StaticVector<int,4UL,rowVector> v23( v10 ); // Compile time error: Size does not match! \endcode // \n \section vector_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse vectors: // \ref vector_operations_homogeneous_assignment, \ref vector_operations_array_assignment, // \ref vector_operations_copy_assignment, and \ref vector_operations_compound_assignment. // // \n \subsection vector_operations_homogeneous_assignment Homogeneous Assignment // // Sometimes it may be necessary to assign the same value to all elements of a dense vector.
// For this purpose, the assignment operator can be used: \code blaze::StaticVector<int,3UL> v1; blaze::DynamicVector<double> v2; // Setting all integer elements of the StaticVector to 2 v1 = 2; // Setting all double precision elements of the DynamicVector to 5.0 v2 = 5.0; \endcode // \n \subsection vector_operations_array_assignment Array Assignment // // Dense vectors can also be assigned a static array or \c std::array: \code blaze::StaticVector<float,2UL> v1; blaze::DynamicVector<double,rowVector> v2; const float array1[2] = { 1.0F, 2.0F }; const std::array<double,5UL> array2{ 2.1, 4.0, -1.7, 8.6, -7.2 }; v1 = array1; v2 = array2; \endcode // \n \subsection vector_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense or sparse // vector: \code blaze::DynamicVector<float> v1; blaze::CompressedVector<double,rowVector> v2; v1 = { 1.0F, 2.0F }; v2 = { 2.1, 0.0, -1.7, 0.0, -7.2 }; \endcode // Dynamically sized vectors (such as e.g. \ref vector_types_hybrid_vector, // \ref vector_types_dynamic_vector or \ref vector_types_compressed_vector) are resized according // to the size of the initializer list and all their elements are (copy) assigned the values of // the list. For fixed size vectors (such as e.g. \ref vector_types_static_vector) missing values // are reset to their default value and in case the size of the initializer list exceeds the size // of the vector a \c std::invalid_argument exception is thrown. In case of sparse vectors, only // the non-zero elements are considered. // // \n \subsection vector_operations_copy_assignment Copy Assignment // // For all vector types it is generally possible to assign another vector with the same transpose // flag (i.e. blaze::columnVector or blaze::rowVector). Note that in case of \c StaticVectors, the // assigned vector is required to have the same size as the \c StaticVector since the size of a // \c StaticVector cannot be adapted! \code blaze::StaticVector<int,3UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 3UL ); blaze::DynamicVector<float,columnVector> v3( 5UL ); blaze::CompressedVector<int,columnVector> v4( 3UL ); blaze::CompressedVector<float,rowVector> v5( 3UL ); // ... Initialization of the vectors v1 = v2; // OK: Assignment of a 3D dense column vector to another 3D dense column vector v1 = v4; // OK: Assignment of a 3D sparse column vector to a 3D dense column vector v1 = v3; // Runtime error: Cannot assign a 5D vector to a 3D static vector v1 = v5; // Compilation error: Cannot assign a row vector to a column vector \endcode // \n \subsection vector_operations_compound_assignment Compound Assignment // // Next to plain assignment, it is also possible to use addition assignment, subtraction // assignment, and multiplication assignment. Note, however, that in contrast to plain assignment // the size and the transpose flag of the vectors have to be equal in order to be able to perform a // compound assignment. \code blaze::StaticVector<int,5UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 5UL ); blaze::CompressedVector<float,columnVector> v3( 7UL ); blaze::DynamicVector<float,rowVector> v4( 7UL ); blaze::CompressedVector<float,rowVector> v5( 7UL ); // ...
Initialization of the vectors v1 += v2; // OK: Addition assignment between two column vectors of the same size v1 += v3; // Runtime error: No compound assignment between vectors of different size v1 -= v4; // Compilation error: No compound assignment between vectors of different transpose flag v4 *= v5; // OK: Multiplication assignment between two row vectors of the same size \endcode // \n \section vector_operations_element_access Element Access // <hr> // // \subsection vector_operations_subscript_operator_1 Subscript Operator // // The easiest and most intuitive way to access a dense or sparse vector is via the subscript // operator. The indices to access a vector are zero-based: \code blaze::DynamicVector<int> v1( 5UL ); v1[0] = 1; v1[1] = 3; // ... blaze::CompressedVector<float> v2( 5UL ); v2[2] = 7.3F; v2[4] = -1.4F; \endcode // Whereas using the subscript operator on a dense vector only accesses the already existing // element, accessing an element of a sparse vector via the subscript operator potentially // inserts the element into the vector and may therefore be more expensive. Consider the // following example: \code blaze::CompressedVector<int> v1( 10UL ); for( size_t i=0UL; i<v1.size(); ++i ) { ... = v1[i]; } \endcode // Although the compressed vector is only used for read access within the for loop, using the // subscript operator temporarily inserts 10 non-zero elements into the vector. Therefore the // preferred way to traverse the non-zero elements of a sparse vector is to use iterators. // // \n \subsection vector_operations_iterators Iterators // // An alternate way to traverse the elements contained in a dense or sparse vector is by means // of iterators. For that purpose, all vectors provide the \c begin(), \c cbegin(), \c end(), // and \c cend() members functions. In case of non-const vectors, \c begin() and \c end() return // an \c Iterator, which allows a manipulation of the (non-zero) value. In case of a constant // vector or in case \c cbegin() or \c cend() are used a \c ConstIterator is returned. Iterators // on dense vectors traverse all elements of the vector, including the zero elements. Iterators // on sparse vectors only traverse the non-zero elements. // // The following two examples demonstrate how to traverse the elements of a dense and sparse // vector, respectively: \code using blaze::DynamicVector; DynamicVector<int> v1( 10UL ); // Traversing all elements contained in the vector by Iterator for( DynamicVector<int>::Iterator it=v1.begin(); it!=v1.end(); ++it ) { *it = ...; // OK: Write access to the value of the element. ... = *it; // OK: Read access to the value of the element. } // Traversing all elements contained in the vector by ConstIterator for( DynamicVector<int>::ConstIterator it=v1.cbegin(); it!=v1.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the value of the element. } // Traversing the vector elements by means of a range-based for loop for( int& i : v1 ) { i = ...; // OK: Write access to the value of the element. ... = i; // OK: Read access to the value of the element. } \endcode \code using blaze::CompressedVector; CompressedVector<int> v2( 10UL ); // ... Initialization of the vector // Traversing the non-zero elements contained in the vector by Iterator for( CompressedVector<int>::Iterator it=v2.begin(); it!=v2.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... 
= it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } // Traversing the non-zero elements contained in the vector by ConstIterator for( CompressedVector<int>::ConstIterator it=v2.cbegin(); it!=v2.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( CompressedVector<int>::Iterator it=begin( v2 ); it!=end( v2 ); ++it ) { // ... } for( CompressedVector<int>::ConstIterator it=cbegin( v2 ); it!=cend( v2 ); ++it ) { // ... } \endcode // \n \subsection vector_operations_data .data() / data() // // Sometimes it is necessary to acquire a pointer to the first element of the underlying array // of a dense vector. For that purpose the \c data() member function or the free \c data() function // can be used: \code // Instantiating a dynamic vector with 10 elements blaze::DynamicVector<int> v( 10UL ); v.data(); // Returns a pointer to the first element of the dynamic vector data( v ); // Same effect as the member function \endcode // \n \section vector_operations_element_insertion Element Insertion // <hr> // // In contrast to dense vectors, that store all elements independent of their value and that // offer direct access to all elements, sparse vectors only store the non-zero elements contained // in the vector. Therefore it is necessary to explicitly add elements to the vector. // // \n \subsection vector_operations_subscript_operator_2 Subscript Operator // // The first option to add elements to a sparse vector is the subscript operator: \code using blaze::CompressedVector; CompressedVector<int> v1( 3UL ); v1[1] = 2; \endcode // In case the element at the given index is not yet contained in the vector, it is automatically // inserted. Otherwise the old value is replaced by the new value 2. The operator returns a // reference to the sparse vector element. // // \n \subsection vector_operations_set .set() // // An alternative to the subscript operator is the \c set() function: In case the element is not // yet contained in the vector the element is inserted, else the element's value is modified: \code // Insert or modify the value at index 3 v1.set( 3, 1 ); \endcode // \n \subsection vector_operations_insert .insert() // // The insertion of elements can be better controlled via the \c insert() function. In contrast to // the subscript operator and the \c set() function it emits an exception in case the element is // already contained in the vector. In order to check for this case, the \c find() function can be // used: \code // In case the element at index 4 is not yet contained in the matrix it is inserted // with a value of 6. if( v1.find( 4 ) == v1.end() ) v1.insert( 4, 6 ); \endcode // \n \subsection vector_operations_append .append() // // Although the \c insert() function is very flexible, due to performance reasons it is not suited // for the setup of large sparse vectors. A very efficient, yet also very low-level way to fill // a sparse vector is the \c append() function. 
It requires the sparse vector to provide enough // capacity to insert a new element. Additionally, the index of the new element must be larger // than the index of the previous element. Violating these conditions results in undefined // behavior! \code v1.reserve( 10 ); // Reserving space for 10 non-zero elements v1.append( 5, -2 ); // Appending the element -2 at index 5 v1.append( 6, 4 ); // Appending the element 4 at index 6 // ... \endcode // \n \section vector_operations_element_removal Element Removal // <hr> // // \subsection vector_operations_erase .erase() // // The \c erase() member functions can be used to remove elements from a sparse vector. The // following example gives an impression of the five different flavors of \c erase(): \code using blaze::CompressedVector; CompressedVector<int> v( 42 ); // ... Initialization of the vector // Erasing the element at index 21 v.erase( 21 ); // Erasing a single element via iterator v.erase( v.find( 4 ) ); // Erasing all non-zero elements in the range [7..24] v.erase( v.lowerBound( 7 ), v.upperBound( 24 ) ); // Erasing all non-zero elements with a value larger than 9 by passing a unary predicate v.erase( []( int i ){ return i > 9; } ); // Erasing all non-zero elements in the range [30..40] with a value larger than 5 v.erase( v.lowerBound( 30 ), v.upperBound( 40 ), []( int i ){ return i > 5; } ); \endcode // \n \section vector_operations_element_lookup Element Lookup // <hr> // // A sparse vector only stores the non-zero elements contained in the vector. Therefore, whenever // accessing a vector element at a specific index a lookup operation is required. Whereas the // subscript operator is performing this lookup automatically, it is also possible to use the // \c find(), \c lowerBound(), and \c upperBound() member functions for a manual lookup. // // \n \subsection vector_operations_find .find() // // The \c find() function can be used to check whether a specific element is contained in a sparse // vector. It specifically searches for the element at the given index. In case the element is // found, the function returns an iterator to the element. Otherwise an iterator just past the // last non-zero element of the compressed vector (the \c end() iterator) is returned. Note that // the returned iterator is subject to invalidation due to inserting operations via the subscript // operator, the \c set() function or the \c insert() function! \code using blaze::CompressedVector; CompressedVector<int> a( 42 ); // ... Initialization of the vector // Searching the element at index 7. In case the element is not // contained in the vector, the end() iterator is returned. CompressedVector<int>::Iterator pos( a.find( 7 ) ); if( pos != a.end() ) { // ... } \endcode // \n \subsection vector_operations_lowerbound .lowerBound() // // The \c lowerBound() function returns an iterator to the first element with an index not less // than the given index. In combination with the \c upperBound() function this function can be // used to create a pair of iterators specifying a range of indices. Note that the returned // iterator is subject to invalidation due to inserting operations via the subscript operator, // the \c set() function or the \c insert() function! \code using blaze::CompressedVector; CompressedVector<int> a( 42 ); // ... Initialization of the vector // Searching the lower bound of index 17.
CompressedVector<int>::Iterator pos1( a.lowerBound( 17 ) ); // Searching the upper bound of index 28 CompressedVector<int>::Iterator pos2( a.upperBound( 28 ) ); // Erasing all elements in the specified range a.erase( pos1, pos2 ); \endcode // \n \subsection vector_operations_upperbound .upperBound() // // The \c upperBound() function returns an iterator to the first element with an index greater than // the given index. In combination with the \c lowerBound() function this function can be used to // create a pair of iterators specifying a range of indices. Note that the returned iterator is // subject to invalidation due to inserting operations via the subscript operator, the \c set() // function or the \c insert() function! \code using blaze::CompressedVector; CompressedVector<int> a( 42 ); // ... Initialization of the vector // Searching the lower bound of index 17. CompressedVector<int>::Iterator pos1( a.lowerBound( 17 ) ); // Searching the upper bound of index 28 CompressedVector<int>::Iterator pos2( a.upperBound( 28 ) ); // Erasing all elements in the specified range a.erase( pos1, pos2 ); \endcode // \n \section vector_operations_non_modifying_operations Non-Modifying Operations // <hr> // // \subsection vector_operations_size .size() / size() // // Via the \c size() member function, the current size of a dense or sparse vector can be queried: \code // Instantiating a dynamic vector with size 10 blaze::DynamicVector<int> v1( 10UL ); v1.size(); // Returns 10 // Instantiating a compressed vector with size 12 and capacity for 3 non-zero elements blaze::CompressedVector<double> v2( 12UL, 3UL ); v2.size(); // Returns 12 \endcode // Alternatively, the free function \c size() can be used to query the current size of a vector. // In contrast to the member function, the free function can also be used to query the size of // vector expressions: \code size( v1 ); // Returns 10, i.e. has the same effect as the member function size( v2 ); // Returns 12, i.e. has the same effect as the member function blaze::DynamicMatrix<int> A( 15UL, 12UL ); size( A * v2 ); // Returns 15, i.e. the size of the resulting vector \endcode // \n \subsection vector_operations_capacity .capacity() / capacity() // // Via the \c capacity() (member) function the internal capacity of a dense or sparse vector // can be queried. Note that the capacity of a vector doesn't have to be equal to the size // of a vector. In case of a dense vector the capacity will always be greater than or equal to // the size of the vector, in case of a sparse vector the capacity may even be less than // the size. \code v1.capacity(); // Returns at least 10 \endcode // For symmetry reasons, there is also a free function \c capacity() available that can be used // to query the capacity: \code capacity( v1 ); // Returns at least 10, i.e. has the same effect as the member function \endcode // Note, however, that it is not possible to query the capacity of a vector expression: \code capacity( A * v1 ); // Compilation error! \endcode // \n \subsection vector_operations_nonzeros .nonZeros() / nonZeros() // // For both dense and sparse vectors the number of non-zero elements can be determined via the // \c nonZeros() member function. Sparse vectors directly return their number of non-zero // elements, dense vectors traverse their elements and count the number of non-zero elements.
\code v1.nonZeros(); // Returns the number of non-zero elements in the dense vector v2.nonZeros(); // Returns the number of non-zero elements in the sparse vector \endcode // There is also a free function \c nonZeros() available to query the current number of non-zero // elements: \code nonZeros( v1 ); // Returns the number of non-zero elements in the dense vector nonZeros( v2 ); // Returns the number of non-zero elements in the sparse vector \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in // a vector expression. However, the result is not the exact number of non-zero elements, but // may be a rough estimation: \code nonZeros( A * v1 ); // Estimates the number of non-zero elements in the vector expression \endcode // \n \subsection vector_operations_isempty isEmpty() // // The \c isEmpty() function returns whether the total number of elements of the vector is zero: \code blaze::DynamicVector<int> a; // Create an empty vector isEmpty( a ); // Returns true a.resize( 10 ); // Resize to 10 elements isEmpty( a ); // Returns false \endcode // \n \subsection vector_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse vector for non-a-number // elements: \code blaze::DynamicVector<double> a; // ... Resizing and initialization if( isnan( a ) ) { ... } \endcode \code blaze::CompressedVector<double> a; // ... Resizing and initialization if( isnan( a ) ) { ... } \endcode // If at least one element of the vector is not-a-number, the function returns \c true, otherwise // it returns \c false. Please note that this function only works for vectors with floating point // elements. The attempt to use it for a vector with a non-floating point element type results in // a compile time error. // // // \n \subsection vector_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse vector is in default state: \code blaze::HybridVector<int,20UL> a; // ... Resizing and initialization if( isDefault( a ) ) { ... } \endcode // A vector is in default state if it appears to just have been default constructed. All resizable // vectors (\c HybridVector, \c DynamicVector, or \c CompressedVector) and \c CustomVector are // in default state if its size is equal to zero. A non-resizable vector (\c StaticVector, all // subvectors, element selections, rows, and columns) is in default state if all its elements are // in default state. For instance, in case the vector is instantiated for a built-in integral or // floating point data type, the function returns \c true in case all vector elements are 0 and // \c false in case any vector element is not 0. // // // \n \subsection vector_operations_isUniform isUniform() // // In order to check if all vector elements are identical, the \c isUniform() function can be used: \code blaze::DynamicVector<int> a; // ... Resizing and initialization if( isUniform( a ) ) { ... } \endcode // Note that in case of sparse vectors the zero elements are also taken into account! // // // \n \subsection vector_operations_isZero isZero() // // In order to check if all vector elements are zero, the \c isZero() function can be used: \code blaze::DynamicVector<int> a; // ... Resizing and initialization if( isZero( a ) ) { ... 
} \endcode // \n \subsection vector_operations_length length() / sqrLength() // // In order to calculate the length (magnitude) of a dense or sparse vector, both the \c length() // and \c sqrLength() function can be used: \code blaze::StaticVector<float,3UL,rowVector> v{ -1.2F, 2.7F, -2.3F }; const float len = length ( v ); // Computes the current length of the vector const float sqrlen = sqrLength( v ); // Computes the square length of the vector \endcode // Note that both functions can only be used for vectors with built-in or complex element type! // // // \n \subsection vector_operations_vector_trans trans() // // As already mentioned, vectors can either be column vectors (blaze::columnVector) or row vectors // (blaze::rowVector). A column vector cannot be assigned to a row vector and vice versa. However, // vectors can be transposed via the \c trans() function: \code blaze::DynamicVector<int,columnVector> v1( 4UL ); blaze::CompressedVector<int,rowVector> v2( 4UL ); v1 = v2; // Compilation error: Cannot assign a row vector to a column vector v1 = trans( v2 ); // OK: Transposing the row vector to a column vector and assigning it // to the column vector v1 v2 = trans( v1 ); // OK: Transposing the column vector v1 and assigning it to the row vector v2 v1 += trans( v2 ); // OK: Addition assignment of two column vectors \endcode // \n \subsection vector_operations_ctrans ctrans() // // It is also possible to compute the conjugate transpose of a vector. This operation is available // via the \c ctrans() function: \code blaze::CompressedVector< complex<float>, rowVector > v1( 4UL ); blaze::DynamicVector< complex<float>, columnVector > v2( 4UL ); v1 = ctrans( v2 ); // Compute the conjugate transpose vector \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() function in any order: \code v1 = trans( conj( v2 ) ); // Computing the conjugate transpose vector v1 = conj( trans( v2 ) ); // Computing the conjugate transpose vector \endcode // \n \subsection vector_operations_reverse reverse() // // Via the \c reverse() function is is possible to reverse the elements of a dense or sparse // vector. The following examples demonstrates this by means of a dense vector: \code blaze::DynamicVector<int> a{ 1, 2, 3, 4, 5 }; blaze::DynamicVector<int> b; b = reverse( a ); // Results in ( 5 4 3 2 1 ) \endcode // \n \subsection vector_operations_evaluate eval() / evaluate() // // The \c evaluate() function forces an evaluation of the given vector expression and enables // an automatic deduction of the correct result type of an operation. The following code example // demonstrates its intended use for the multiplication of a dense and a sparse vector: \code using blaze::DynamicVector; using blaze::CompressedVector; blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... Resizing and initialization auto c = evaluate( a * b ); \endcode // In this scenario, the \c evaluate() function assists in deducing the exact result type of // the operation via the \c auto keyword. Please note that if \c evaluate() is used in this // way, no temporary vector is created and no copy operation is performed. Instead, the result // is directly written to the target vector due to the return value optimization (RVO). 
However, // if \c evaluate() is used in combination with an explicit target type, a temporary will be // created and a copy operation will be performed if the used type differs from the type // returned from the function: \code CompressedVector<double> d( a * b ); // No temporary & no copy operation DynamicVector<double> e( a * b ); // Temporary & copy operation d = evaluate( a * b ); // Temporary & copy operation \endcode // Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger // expression. However, please note that \c evaluate() is not intended to be used for this // purpose. This task is more elegantly and efficiently handled by the \c eval() function: \code blaze::DynamicVector<double> a, b, c, d; d = a + evaluate( b * c ); // Unnecessary creation of a temporary vector d = a + eval( b * c ); // No creation of a temporary vector \endcode // In contrast to the \c evaluate() function, \c eval() can take the complete expression // into account and therefore can guarantee the most efficient way to evaluate it (see also // \ref intra_statement_optimization). // // \n \subsection vector_operations_noalias noalias() // // The \b Blaze library is able to reliably detect aliasing during the assignment of vectors. // In case the aliasing would lead to an incorrect result, \b Blaze introduces an intermediate // temporary of the appropriate type to break the aliasing. For instance, in the following // example \b Blaze performs an alias detection in both assignments, but only, in the second // assignment it detects a problematic aliasing and uses an intermediate temporary in order // to be able to compute the correct result: \code blaze::DynamicVector<double> x, y; blaze::DynamicMatrix<double> A; x = x + y; // No problematic aliasing of x, no intermediate temporary is required. x = A * x; // Problematic aliasing of x; intermediate temporary required! \endcode // The detection of aliasing effects, however, takes a small runtime effort. In order to disable // the aliasing detection, the \c noalias() function can be used: \code blaze::DynamicVector<double> x, y; blaze::DynamicMatrix<double> A; x = noalias( x + y ); // No alias detection performed, no intermediate temporary. x = noalias( A * x ); // No alias detection performed, no intermediate temporary. // Note that the final result will be incorrect! \endcode // \warning The \c noalias() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Using \c noalias() in a situation // where an aliasing effect occurs leads to undefined behavior (which can be violated invariants // or wrong computation results)! // // \n \subsection vector_operations_nosimd nosimd() // // By default, \b Blaze attempts to vectorize all operations by means of SSE, AVX, etc. in order // to achieve maximum performance. However, via the \c nosimd() operation it is possible to disable // the SIMD evaluation of any operation: \code blaze::DynamicVector<double> x, y; blaze::DynamicMatrix<double> A; x = nosimd( x + y ); // Disables SIMD for the vector/vector addition x = nosimd( A * x ); // Disables SIMD for the matrix/vector multiplication \endcode // Please note that the main purpose of the \c nosimd() operation is to enable an easy performance // comparison between the vectorized and non-vectorized evaluation. Using the \c nosimd() operation // will likely result in significantly reduced performance! 
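// As a minimal sketch of such a comparison (the vector size, the timing code, and the variable
// names are illustrative assumptions, not part of the \b Blaze API), both variants of the same
// expression could be measured back to back:

   \code
   #include <blaze/Math.h>
   #include <chrono>
   #include <iostream>

   int main()
   {
      blaze::DynamicVector<double> a( 1000000UL, 1.0 ), b( 1000000UL, 2.0 ), c;

      const auto t0 = std::chrono::steady_clock::now();
      c = a + b;            // Vectorized (SIMD) evaluation
      const auto t1 = std::chrono::steady_clock::now();
      c = nosimd( a + b );  // Non-vectorized evaluation for comparison
      const auto t2 = std::chrono::steady_clock::now();

      std::cout << "SIMD   : " << std::chrono::duration<double>( t1 - t0 ).count() << " s\n"
                << "no SIMD: " << std::chrono::duration<double>( t2 - t1 ).count() << " s\n";
   }
   \endcode

// Note that timing a single evaluation in this way is only a rough indicator; repeated runs and
// warm-up iterations would give more reliable numbers.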
// // // \n \section vector_operations_modifying_operations Modifying Operations // <hr> // // \subsection vector_operations_resize_reserve .resize() / .reserve() // // The size of a \c StaticVector is fixed by the second template parameter and a \c CustomVector // cannot be resized. In contrast, the size of \c DynamicVectors, \c HybridVectors as well as // \c CompressedVectors can be changed via the \c resize() function: \code using blaze::DynamicVector; using blaze::CompressedVector; DynamicVector<int,columnVector> v1; CompressedVector<int,rowVector> v2( 4 ); v2[1] = -2; v2[3] = 11; // Adapting the size of the dynamic and compressed vectors. The (optional) second parameter // specifies whether the existing elements should be preserved. Per default, the existing // elements are preserved. v1.resize( 5UL ); // Resizing vector v1 to 5 elements. Elements of built-in type remain // uninitialized, elements of class type are default constructed. v1.resize( 3UL, false ); // Resizing vector v1 to 3 elements. The old elements are lost, the // new elements are NOT initialized! v2.resize( 8UL, true ); // Resizing vector v2 to 8 elements. The old elements are preserved. v2.resize( 5UL, false ); // Resizing vector v2 to 5 elements. The old elements are lost. \endcode // Note that resizing a vector invalidates all existing views (see e.g. \ref views_subvectors) // on the vector: \code blaze::DynamicVector<int,rowVector> v1( 10UL ); // Creating a dynamic vector of size 10 auto sv = subvector( v1, 2UL, 5UL ); // Creating a view on the range [2..6] v1.resize( 6UL ); // Resizing the vector invalidates the view \endcode // When the internal capacity of a vector is no longer sufficient, the allocation of a larger // junk of memory is triggered. In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicVector<int> v1; v1.reserve( 100 ); v1.size(); // Returns 0 v1.capacity(); // Returns at least 100 \endcode // Note that the size of the vector remains unchanged, but only the internal capacity is set // according to the specified value! // // \n \subsection vector_operations_shrinkToFit .shrinkToFit() // // The internal capacity of vectors with dynamic memory is preserved in order to minimize the // number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead // to memory overhead. The \c shrinkToFit() member function can be used to minimize the internal // capacity: \code blaze::DynamicVector<int> v1( 1000UL ); // Create a vector of 1000 integers v1.resize( 10UL ); // Resize to 10, but the capacity is preserved v1.shrinkToFit(); // Remove the unused capacity \endcode // Please note that due to padding the capacity might not be reduced exactly to \c size(). Please // also note that in case a reallocation occurs, all iterators (including \c end() iterators), all // pointers and references to elements of the vector are invalidated. // // \subsection vector_operations_reset_clear reset() / clear() // // In order to reset all elements of a vector, the \c reset() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with 2.0F. blaze::DynamicVector<float> v1( 3UL, 2.0F ); // Resetting all elements to 0.0F. Only the elements are reset, the size of the vector is unchanged. reset( v1 ); // Resetting all elements v1.size(); // Returns 3: size and capacity remain unchanged \endcode // In order to return a vector to its default state (i.e. 
the state of a default constructed // vector), the \c clear() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with -1.0F. blaze::DynamicVector<float> v1( 5, -1.0F ); // Resetting the entire vector. clear( v1 ); // Resetting the entire vector v1.size(); // Returns 0: size is reset, but capacity remains unchanged \endcode // Note that resetting or clearing both dense and sparse vectors does not change the capacity // of the vectors. // // // \n \subsection vector_operations_swap swap() // // Via the \c swap() function it is possible to completely swap the contents of two vectors of // the same type: \code blaze::DynamicVector<int,columnVector> v1( 10UL ); blaze::DynamicVector<int,columnVector> v2( 20UL ); swap( v1, v2 ); // Swapping the contents of v1 and v2 \endcode // \n \section vector_operations_arithmetic_operations Arithmetic Operations // <hr> // // \subsection vector_operations_normalize normalize() // // The \c normalize() function can be used to scale any non-zero vector to a length of 1. In // case the vector does not contain a single non-zero element (i.e. is a zero vector), the // \c normalize() function returns a zero vector. \code blaze::DynamicVector<float,columnVector> v1( 10UL ); blaze::CompressedVector<double,columnVector> v2( 12UL ); v1 = normalize( v1 ); // Normalizing the dense vector v1 length( v1 ); // Returns 1 (or 0 in case of a zero vector) v1 = normalize( v2 ); // Assigning v1 the normalized vector v2 length( v1 ); // Returns 1 (or 0 in case of a zero vector) \endcode // Note that the \c normalize() function only works for floating point vectors. The attempt to // use it for an integral vector results in a compile time error. // // // \n \subsection vector_operations_min_max min() / max() // // The \c min() and \c max() functions can be used for a single vector, multiple vectors, and // a vector and a scalar. // // <b>Single Vector</b> // // If passed a single vector, the functions return the smallest and largest element of the given // dense vector or the smallest and largest non-zero element of the given sparse vector, // respectively: \code blaze::StaticVector<int,4UL,rowVector> a{ -5, 2, 7, -4 }; min( a ); // Returns -5 max( a ); // Returns 7 \endcode \code blaze::CompressedVector<int> b{ 1, 0, 3, 0 }; min( b ); // Returns 1 max( b ); // Returns 3 \endcode // For more information on the unary \c min() and \c max() reduction operations see the // \ref vector_operations_reduction_operations section. // // <b>Multiple Vectors</b> // // If passed two or more dense vectors, the \c min() and \c max() functions compute the // componentwise minimum or maximum of the given vectors, respectively: \code blaze::StaticVector<int,4UL,rowVector> c{ -5, 1, -7, 4 }; blaze::StaticVector<int,4UL,rowVector> d{ -5, 3, 0, 2 }; min( a, c ); // Results in the vector ( -5, 1, -7, -4 ) max( a, c, d ); // Results in the vector ( -5, 3, 7, 4 ) \endcode // Please note that sparse vectors can only be used in the unary \c min() and \c max() functions. // Also note that all forms of the \c min() and \c max() functions can be used to compute the // smallest and largest element of a vector expression: \code min( a + b + c ); // Returns -9, i.e. the smallest value of the resulting vector max( a - b - c ); // Returns 11, i.e. 
the largest value of the resulting vector min( a + c, c - d ); // Results in ( -10 -2 -7 0 ) max( a - c, c + d ); // Results in ( 0 4 14 6 ) \endcode // <b>Vector and Scalar</b> // // If passed a dense vector and a scalar, the \c min() and \c max() functions compute the // componentwise minimum or maximum between the given vector and a uniform vector represented by // the scalar value: \code min( a, 0 ); // Results in ( -5, 0, 0, -4 ) min( 0, a ); // Results in ( -5, 0, 0, -4 ) max( a, 0 ); // Results in ( 0, 2, 7, 0 ) max( 0, a ); // Results in ( 0, 2, 7, 0 ) \endcode // \n \subsection vector_operators_softmax softmax() // // The <a href="https://en.wikipedia.org/wiki/Softmax_function">softmax function</a>, also called // the normalized exponential function, of a given dense vector can be computed via \c softmax(). // The resulting dense vector consists of real values in the range (0..1], which add up to 1. \code blaze::StaticVector<double,7UL,rowVector> x{ 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0 }; blaze::StaticVector<double,7UL,rowVector> y; // Evaluating the softmax function y = softmax( x ); // Results in ( 0.024 0.064 0.175 0.475 0.024 0.064 0.175 ) double s = sum( y ); // Results in 1 \endcode // \n \subsection vector_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a vector. // For instance, the following computation \code blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, -3 }; blaze::StaticVector<int,3UL,rowVector> b( abs( a ) ); \endcode // results in the vector \f$ b = \left(\begin{array}{*{1}{c}} 1 \\ 2 \\ 3 \\ \end{array}\right)\f$ // \n \subsection vector_operators_sign sign() // // The \c sign() function can be used to evaluate the sign of each element of a vector \a a. For // each element \c i the corresponding result is 1 if \a a[i] is greater than zero, 0 if \a a[i] // is zero, and -1 if \a a[i] is less than zero. For instance, the following use of the \c sign() // function \code blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, 0 }; blaze::StaticVector<int,3UL,rowVector> b( sign( a ) ); \endcode // results in the vector \f$ b = \left(\begin{array}{*{1}{c}} -1 \\ 1 \\ 0 \\ \end{array}\right)\f$ // \n \subsection vector_operations_rounding_functions floor() / ceil() / trunc() / round() // // The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up // each element of a vector, respectively: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = floor( a ); // Rounding down each element of the vector b = ceil ( a ); // Rounding up each element of the vector b = trunc( a ); // Truncating each element of the vector b = round( a ); // Rounding each element of the vector \endcode // \n \subsection vector_operators_conj conj() // // The \c conj() function can be applied on a dense or sparse vector to compute the complex // conjugate of each element of the vector: \code using blaze::StaticVector; using cplx = std::complex<double>; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Computing the vector of complex conjugates // ( (-2, 1) ) // ( ( 1,-1) ) StaticVector<cplx,2UL> b; b = conj( a ); \endcode // Additionally, vectors can be conjugated in-place via the \c conjugate() function: \code blaze::DynamicVector<cplx> c( 5UL ); conjugate( c ); // In-place conjugate operation. 
c = conj( c ); // Same as above \endcode // \n \subsection vector_operators_real real() // // The \c real() function can be used on a dense or sparse vector to extract the real part of // each element of the vector: \code using blaze::StaticVector; using cplx = std::complex<double>; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the real part of each vector element // ( -2 ) // ( 1 ) StaticVector<double,2UL> b; b = real( a ); \endcode // \n \subsection vector_operators_imag imag() // // The \c imag() function can be used on a dense or sparse vector to extract the imaginary part // of each element of the vector: \code using blaze::StaticVector; using cplx = std::complex<double>; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the imaginary part of each vector element // ( -1 ) // ( 1 ) StaticVector<double,2UL> b; b = imag( a ); \endcode // \n \subsection vector_operators_arg arg() // // The \c arg() function can be used on a dense or sparse vector to compute the phase angle for // each element of the vector: \code using blaze::StaticVector; using cplx = std::complex<double>; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Compute the phase angle of each vector element // ( -2.67795 ) // ( 0.785398 ) StaticVector<double,2UL> b; b = arg( a ); \endcode // \n \subsection vector_operations_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // vector can be computed: \code blaze::DynamicVector<double> a, b, c; b = sqrt( a ); // Computes the square root of each element c = invsqrt( a ); // Computes the inverse square root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root // of each element of a vector: \code blaze::HybridVector<double,3UL> a, b, c; b = cbrt( a ); // Computes the cubic root of each element c = invcbrt( a ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_hypot hypot() // // The \c hypot() function can be used to compute the componentwise hypotenuse for a pair of // dense vectors: \code blaze::StaticVector<double,3UL> a, b, c; c = hypot( a, b ); // Computes the componentwise hypotenuse \endcode // \n \subsection vector_operations_clamp clamp() // // The \c clamp() function can be used to restrict all elements of a vector to a specific range: \code blaze::DynamicVector<double> a, b; b = clamp( a, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_pow pow() // // The \c pow() function can be used to compute the exponential value of each element of a vector. // If passed a vector and a numeric exponent, the function computes the exponential value of each // element of the vector using the same exponent.
If passed a second vector, the function computes // the componentwise exponential value: \code blaze::StaticVector<double,3UL> a, b, c; c = pow( a, 1.2 ); // Computes the exponential value of each element c = pow( a, b ); // Computes the componentwise exponential value \endcode // \n \subsection vector_operations_exp exp() / exp2() / exp10() // // \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a // vector, respectively: \code blaze::DynamicVector<double> a, b; b = exp( a ); // Computes the base e exponential of each element b = exp2( a ); // Computes the base 2 exponential of each element b = exp10( a ); // Computes the base 10 exponential of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_log log() / log2() / log10() // // The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary // and common logarithm of each element of a vector: \code blaze::StaticVector<double,3UL> a, b; b = log( a ); // Computes the natural logarithm of each element b = log2( a ); // Computes the binary logarithm of each element b = log10( a ); // Computes the common logarithm of each element \endcode // \n \subsection vector_operations_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sin( a ); // Computes the sine of each element of the vector b = cos( a ); // Computes the cosine of each element of the vector b = tan( a ); // Computes the tangent of each element of the vector b = asin( a ); // Computes the inverse sine of each element of the vector b = acos( a ); // Computes the inverse cosine of each element of the vector b = atan( a ); // Computes the inverse tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sinh( a ); // Computes the hyperbolic sine of each element of the vector b = cosh( a ); // Computes the hyperbolic cosine of each element of the vector b = tanh( a ); // Computes the hyperbolic tangent of each element of the vector b = asinh( a ); // Computes the inverse hyperbolic sine of each element of the vector b = acosh( a ); // Computes the inverse hyperbolic cosine of each element of the vector b = atanh( a ); // Computes the inverse hyperbolic tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
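// As a brief illustrative sketch of this note (the setup below is chosen purely for illustration),
// applying one of these functions to a sparse vector evaluates only the explicitly stored elements,
// so implicit zeros remain zero even if the function would map 0 to a non-zero value (e.g.
// \f$ \cosh(0) = 1 \f$):

   \code
   blaze::CompressedVector<double> a( 4UL );  // ( 0 0 0 0 ), no explicitly stored elements
   a[1] = 1.0;                                // ( 0 1 0 0 ), a single stored element

   blaze::CompressedVector<double> b( cosh( a ) );  // Only the stored element is evaluated:
                                                    // b[1] == cosh(1.0); the implicit zeros
                                                    // remain zero although cosh(0) == 1
   \endcode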
// // // \n \subsection vector_operations_atan2 atan2() // // The multi-valued inverse tangent is available for a pair of dense vectors: \code blaze::DynamicVector<double> a, b, c; c = atan2( a, b ); // Computes the componentwise multi-valued inverse tangent \endcode // \n \subsection vector_operations_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a vector: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = erf( a ); // Computes the error function of each element b = erfc( a ); // Computes the complementary error function of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_map map() / forEach() // // Via the \c map() functions it is possible to execute componentwise custom operations on vectors. // The unary \c map() function can be used to apply a custom operation on each element of a dense // or sparse vector. For instance, the following example demonstrates a custom square root // computation via a lambda: \code blaze::DynamicVector<double> a, b; b = map( a, []( double d ) { return std::sqrt( d ); } ); \endcode // The N-ary \c map() functions can be used to apply an operation componentwise to the elements // of N dense vectors (where \f$ N <= 6 \f$). The following example demonstrates the merging of // two column vectors of double precision values into a vector of double precision complex numbers: \code blaze::DynamicVector<double> real{ 2.1, -4.2, 1.0, 0.6 }; blaze::DynamicVector<double> imag{ 0.3, 1.4, 2.9, -3.4 }; blaze::DynamicVector< complex<double> > cplx; // Creating the vector // ( ( 2.1, 0.3) ) // ( (-4.2, 1.4) ) // ( ( 1.0, 2.9) ) // ( ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } ); \endcode // Applying the map() function to a column vector and a row vector results in the outer map of // the two vectors. The following example demonstrates the outer sum of a column vector and a // row vector: \code blaze::DynamicVector<int,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 }; // Results in the matrix // // ( 1 5 0 6 ) // A = ( 4 8 3 9 ) // ( -2 2 -3 3 ) // blaze::StaticMatrix<int,3UL,4UL> M1 = map( v1, v2, []( int a, int b ){ return a + b; } ); \endcode // Although the computation in the two previous examples can be parallelized it is not vectorized // and thus cannot perform at peak performance. However, it is also possible to create vectorized // custom operations. See \ref custom_operations for a detailed overview of the possibilities of // custom operations. // // Please note that unary custom operations on vectors have been introduced in \b Blaze 3.0 in // form of the \c forEach() function. With the introduction of binary custom functions, the // \c forEach() function has been renamed to \c map(). The \c forEach() function can still be // used, but the function might be deprecated in future releases of \b Blaze. // // // \n \subsection vector_operations_select select() // // The \c select() function performs a componentwise, conditional selection of elements. Given // the three dense vectors \c cond, \c a, and \c b, in case an element in the \c cond vector // evaluates to \a true, the according element of \a a is selected, in case the \a cond element // evaluates to \a false, the according element of \a b is selected. 
The following example // demonstrates the use of the \c select() function: \code blaze::DynamicVector<bool> cond{ true, false, true, false }; blaze::DynamicVector<int> a{ 1, -1, 1, -1 }; blaze::DynamicVector<int> b{ -2, 2, -2, 2 }; blaze::DynamicVector<int> c; // ... Resizing and initialization c = select( cond, a, b ); // Results in ( 1, 2, 1, 2 ) \endcode // \n \section vector_operations_reduction_operations Reduction Operations // <hr> // // \subsection vector_operations_reduction_operations_reduce reduce() // // The \c reduce() function performs a total reduction of the elements of the given dense vector // or the non-zero elements of the given sparse vector. The following examples demonstrate the // total reduction of a dense and sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double totalsum1 = reduce( a, blaze::Add() ); const double totalsum2 = reduce( a, []( double a, double b ){ return a + b; } ); \endcode \code blaze::CompressedVector<double> a; // ... Resizing and initialization const double totalmin1 = reduce( a, blaze::Min() ); const double totalmin2 = reduce( a, []( double a, double b ){ return blaze::min( a, b ); } ); \endcode // As demonstrated in the examples it is possible to pass any binary callable as custom reduction // operation. However, for instance in the case of lambdas the vectorization of the reduction // operation is compiler dependent and might not perform at peak performance. However, it is also // possible to create vectorized custom operations. See \ref custom_operations for a detailed // overview of the possibilities of custom operations. // // Please note that the evaluation order of the \c reduce() function is unspecified. Thus the // behavior is non-deterministic if the given reduction operation is not associative or not // commutative. Also, the operation is undefined if the given reduction operation modifies the // values. // // \n \subsection vector_operations_reduction_operations_sum sum() // // The \c sum() function reduces the elements of the given dense vector or the non-zero elements // of the given sparse vector by means of addition: \code blaze::DynamicVector<int> a{ 1, 2, 3, 4 }; const int totalsum = sum( a ); // Results in 10 \endcode \code blaze::CompressedVector<int> a{ 1, 2, 3, 4 }; const int totalsum = sum( a ); // Results in 10 \endcode // Please note that the evaluation order of the \c sum() function is unspecified. // // \n \subsection vector_operations_reduction_operations_prod prod() // // The \c prod() function reduces the elements of the given dense vector or the non-zero elements // of the given sparse vector by means of multiplication: \code blaze::DynamicVector<int> a{ 1, 2, 3, 4 }; const int totalprod = prod( a ); // Results in 24 \endcode \code blaze::CompressedVector<int> a{ 1, 2, 3, 4 }; const int totalprod = prod( a ); // Results in 24 \endcode // \n \subsection vector_operations_reduction_operations_min min() // // The unary \c min() function returns the smallest element of the given dense vector or the // smallest non-zero element of the given sparse vector. It can only be used for element types // that support the smaller-than relationship. In case the given vector currently has a size // of 0, the returned value is the default value (e.g. 0 in case of fundamental data types).
\code blaze::DynamicVector<int> a{ 1, -2, 3, 0 }; const int totalmin = min( a ); // Results in -2 \endcode \code blaze::CompressedVector<int> a{ 1, 0, 3, 0 }; const int totalmin = min( a ); // Results in 1 \endcode // \note In case the sparse vector is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed vector has only 2 non-zero elements. // However, the minimum of the vector is 1. // // \n \subsection vector_operations_reduction_operations_max max() // // The unary \c max() function returns the largest element of the given dense vector or the // largest non-zero element of the given sparse vector. It can only be used for element types // that support the smaller-than relationship. In case the given vector currently has a size // of 0, the returned value is the default value (e.g. 0 in case of fundamental data types). \code blaze::DynamicVector<int> a{ 1, -2, 3, 0 }; const int totalmax = max( a ); // Results in 3 \endcode \code blaze::CompressedVector<int> a{ -1, 0, -3, 0 }; const int totalmax = max( a ); // Results in -1 \endcode // \note In case the sparse vector is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed vector has only 2 non-zero elements. // However, the maximum of the vector is -1. // // \n \subsection vector_operations_reduction_operations_argmin argmin() // // The \c argmin() function returns the index of the first smallest element of the given dense // vector. This function can only be used for element types that support the smaller-than // relationship. In case the given vector currently has a size of 0, the returned index is 0. \code blaze::DynamicVector<int> a{ 1, -2, 3, 0 }; const size_t minindex = argmin( a ); // Results in 1 \endcode // \n \subsection vector_operations_reduction_operations_argmax argmax() // // The \c argmax() function returns the index of the first largest element of the given dense // vector. This function can only be used for element types that support the smaller-than // relationship. In case the given vector currently has a size of 0, the returned index is 0. \code blaze::DynamicVector<int> a{ 1, -2, 3, 0 }; const size_t maxindex = argmax( a ); // Results in 2 \endcode // \n \section vector_operations_norms Norms // <hr> // // \subsection vector_operations_norms_norm norm() // // The \c norm() function computes the L2 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... Resizing and initialization const double norm1 = norm( a ); const double norm2 = norm( b ); \endcode // \n \subsection vector_operations_norms_sqrnorm sqrNorm() // // The \c sqrNorm() function computes the squared L2 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... Resizing and initialization const double norm1 = sqrNorm( a ); const double norm2 = sqrNorm( b ); \endcode // \n \subsection vector_operations_norms_l1norm l1Norm() // // The \c l1Norm() function computes the L1 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ...
Resizing and initialization const double norm1 = l1Norm( a ); const double norm2 = l1Norm( b ); \endcode // \n \subsection vector_operations_norms_l2norm l2Norm() // // The \c l2Norm() function computes the L2 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... Resizing and initialization const double norm1 = l2Norm( a ); const double norm2 = l2Norm( b ); \endcode // \n \subsection vector_operations_norms_l3norm l3Norm() // // The \c l3Norm() function computes the L3 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... Resizing and initialization const double norm1 = l3Norm( a ); const double norm2 = l3Norm( b ); \endcode // \n \subsection vector_operations_norms_l4norm l4Norm() // // The \c l4Norm() function computes the L4 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... Resizing and initialization const double norm1 = l4Norm( a ); const double norm2 = l4Norm( b ); \endcode // \n \subsection vector_operations_norms_lpnorm lpNorm() // // The \c lpNorm() function computes the general Lp norm of the given dense or sparse vector, // where the norm is specified by either a compile time or a runtime argument: \code blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... Resizing and initialization const double norm1 = lpNorm<2>( a ); // Compile time argument const double norm2 = lpNorm( b, 2.3 ); // Runtime argument \endcode // \n \subsection vector_operations_norms_maxnorm linfNorm() / maxNorm() // // The \c linfNorm() and \c maxNorm() functions compute the infinity/maximum norm of the given // dense or sparse vector: \code blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... Resizing and initialization const double norm1 = linfNorm( a ); const double norm2 = maxNorm( b ); \endcode // \n \section vector_operations_scalar_expansion Scalar Expansion // <hr> // // By means of the \c uniform() function it is possible to expand a scalar value into a dense, // uniform vector. By default, the resulting uniform vector is a column vector, but it is possible // to specify the transpose flag explicitly: \code using blaze::columnVector; int scalar = 5; blaze::DynamicVector<int,columnVector> v; // ... Resizing and initialization // Expansion of 'scalar' to a 3-dimensional uniform column vector // // ( 5 ) // ( 5 ) // ( 5 ) // v = uniform( 3UL, scalar ); v = uniform<columnVector>( 3UL, scalar ); \endcode // \n \section vector_operations_vector_expansion Vector Expansion // <hr> // // Via the \c expand() function it is possible to convert a dense or sparse vector into a matrix. // A column vector is expanded into a column-major matrix, a row vector is expanded into a // row-major matrix.
As demonstrated by the following examples, \c expand() can be used with both // runtime and compile time parameters: \code blaze::DynamicVector<int,columnVector> a{ 1, 2, 3 }; blaze::CompressedVector<int,rowVector> b{ 1, 0, 3, 0, 5 }; // Expand the dense column vector ( 1 2 3 ) into a dense 3x5 column-major matrix // // ( 1 1 1 1 1 ) // ( 2 2 2 2 2 ) // ( 3 3 3 3 3 ) // expand( a, 5 ); // Runtime parameter expand<5>( a ); // Compile time parameter // Expand the sparse row vector ( 1 0 3 0 5 ) into a sparse 3x5 row-major matrix // // ( 1 0 3 0 5 ) // ( 1 0 3 0 5 ) // ( 1 0 3 0 5 ) // expand( b, 3 ); // Runtime parameter expand<3>( b ); // Compile time parameter \endcode // \n \section vector_operations_statistic_operations Statistic Operations // <hr> // // \subsection vector_operations_mean mean() // // The <a href="https://en.wikipedia.org/wiki/Arithmetic_mean">(arithmetic) mean</a> of a dense or // sparse vector can be computed via the \c mean() function. In case of a sparse vector, both the // non-zero and zero elements are taken into account. The following example demonstrates the // computation of the mean of a dense vector: \code blaze::DynamicVector<int> v{ 1, 4, 3, 6, 7 }; const double m = mean( v ); // Results in 4.2 (i.e. 21/5) \endcode // In case the size of the given vector is 0, a \a std::invalid_argument is thrown. // // \n \subsection vector_operations_var var() // // The <a href="https://en.wikipedia.org/wiki/Variance">variance</a> of a dense or sparse vector // can be computed via the \c var() function. In case of a sparse vector, both the non-zero and // zero elements are taken into account. The following example demonstrates the computation of // the variance of a dense vector: \code blaze::DynamicVector<int> v{ 1, 4, 3, 6, 7 }; const double variance = var( v ); // Results in 5.7 \endcode // In case the size of the given vector is smaller than 2, a \a std::invalid_argument is thrown. // // \n \subsection vector_operations_stddev stddev() // // The <a href="https://en.wikipedia.org/wiki/Standard_deviation">standard deviation</a> of a // dense or sparse vector can be computed via the \c stddev() function. In case of a sparse // vector, both the non-zero and zero elements are taken into account. The following example // demonstrates the computation of the standard deviation of a dense vector: \code blaze::DynamicVector<int> v{ 1, 4, 3, 6, 7 }; const double s = stddev( v ); // Results in 2.38747 \endcode // In case the size of the given vector is smaller than 2, a \a std::invalid_argument is thrown. // // // \n \section vector_operations_declaration_operations Declaration Operations // <hr> // // \subsection vector_operations_declzero declzero() // // The \c declzero() operation can be used to explicitly declare any vector or vector expression // as zero vector: \code blaze::DynamicVector<double> a, b; // ... Resizing and initialization b = declzero( a ); \endcode // Any vector or vector expression that has been declared as zero vector via \c declzero() will // gain all the benefits of a zero vector, which range from reduced runtime checking to a // considerable speed-up in computations: \code using blaze::DynamicVector; DynamicVector<double> a, b, c; // ... Resizing and initialization isZero( declzero( a ) ); // Will always return true without runtime effort c = declzero( a ) + b; // Declare the left operand of the vector addition as a // zero vector, i.e.
no addition needs to be performed \endcode // \warning The \c declzero() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-zero vector or // vector expression as zero vector via the \c declzero() operation leads to undefined behavior // (which can be violated invariants or wrong computation results)! // // // \n \section vector_operations_vector_generators Vector Generators // <hr> // // \subsection vector_operations_generate generate() // // The \c generate() function returns a dense vector filled elementwise via the given custom // operation. By default, the returned vector is a column vector, but this setting can be changed // via the \c BLAZE_DEFAULT_TRANSPOSE_FLAG switch (see \ref transpose_flag). Alternatively it is // possible to specify the transpose flag explicitly.\n // The following example demonstrates the use of the \c generate() function: \code using blaze::generate; using blaze::columnVector; using blaze::rowVector; // Generates the homogeneous integer vector ( 2, 2, 2, 2, 2 ) blaze::DynamicVector<int,columnVector> a; a = generate( 5UL, []( size_t index ){ return 2; } ); // Generates the linearly spaced float vector ( 2.1, 3.2, 4.3, 5.4 ) blaze::DynamicVector<float,columnVector> b; b = generate( 4UL, []( size_t index ){ return 2.1F + 1.1F*index; } ); // Generates the logarithmically spaced double vector ( 1.0, 10.0, 100.0, 1000.0 ) blaze::DynamicVector<double,columnVector> c; c = generate<columnVector>( 4UL, []( size_t index ){ return blaze::exp10( 1.0 + 1.0*index ); } ); // Generates the vector of integer vectors ( ( 1, 2 ), ( 2, 3 ), ( 3, 4 ), ( 4, 5 ) ) using VT = blaze::StaticVector<int,2UL>; blaze::StaticVector<VT,4UL,rowVector> d; d = generate<rowVector>( []( size_t index ) { return evaluate( VT{ 1, 2 } + index ); } ); \endcode // \n \subsection vector_operations_linspace linspace() // // The \c linspace() function returns a dense vector filled with linearly spaced elements. By // default, the returned vector is a column vector, but this setting can be changed via the // \c BLAZE_DEFAULT_TRANSPOSE_FLAG switch (see \ref transpose_flag). Alternatively it is possible // to specify the transpose flag explicitly.\n // The following example demonstrates the use of the \c linspace() function: \code using blaze::linspace; using blaze::columnVector; using blaze::rowVector; // Generates the linearly spaced integer vector ( 2, 3, 4, 5, 6 ) blaze::DynamicVector<int,columnVector> a; a = linspace( 5UL, 2, 6 ); // Generates the linearly spaced integer vector ( 6, 5, 4, 3, 2 ) blaze::DynamicVector<int,columnVector> b; b = linspace<columnVector>( 5UL, 6, 2 ); // Generates the linearly spaced float vector ( 2.1, 3.2, 4.3, 5.4 ) blaze::DynamicVector<float,rowVector> c; c = linspace<rowVector>( 4UL, 2.1F, 5.4F ); \endcode // \n \subsection vector_operations_logspace logspace() // // The \c logspace() function returns a dense vector filled with logarithmically spaced elements. // By default, the returned vector is a column vector, but this setting can be changed via the // \c BLAZE_DEFAULT_TRANSPOSE_FLAG switch (see \ref transpose_flag). 
Alternatively it is possible // to specify the transpose flag explicitly.\n // The following example demonstrates the use of the \c logspace() function: \code using blaze::logspace; using blaze::columnVector; using blaze::rowVector; // Generates the logarithmically spaced double vector ( 1, 10, 100, 1000 ) blaze::DynamicVector<int,columnVector> a; a = logspace( 4UL, 0, 3 ); // Generates the logarithmically spaced double vector ( 1000.0, 100.0, 10.0, 1.0 ) blaze::DynamicVector<double,rowVector> b; b = logspace<rowVector>( 4UL, 3.0, 0.0 ); \endcode // \n \subsection vector_operations_uniform uniform() // // The \c uniform() function creates a uniform vector of the given size. By default, the // resulting uniform vector is a column vector, but this setting can be changed via the // \c BLAZE_DEFAULT_TRANSPOSE_FLAG switch (see \ref transpose_flag). Alternatively it is // possible to specify the transpose flag explicitly.\n // The following example demonstrates the use of the \c uniform() function: \code using blaze::uniform; using blaze::columnVector; using blaze::rowVector; // Creates the uniform column vector ( 1, 1, 1, 1, 1 ) auto u1 = uniform( 5UL, 1 ); // Creates the uniform column vector ( 1.2, 1.2, 1.2 ) auto u2 = uniform<columnVector>( 3UL, 1.2 ); // Creates the uniform row vector ( 5U, 5U, 5U, 5U ) auto u3 = uniform<rowVector>( 4UL, 5U ); \endcode // \n \subsection vector_operations_zero zero() // // The \c zero() function creates a zero vector of the given element type and size. By default, // the resulting zero vector is a column vector, but this setting can be changed via the // \c BLAZE_DEFAULT_TRANSPOSE_FLAG switch (see \ref transpose_flag). Alternatively it is // possible to specify the transpose flag explicitly.\n // The following example demonstrates the use of the \c zero() function: \code using blaze::zero; using blaze::columnVector; using blaze::rowVector; // Creates the zero column vector ( 0, 0, 0, 0, 0 ) auto z1 = zero<int>( 5UL ); // Creates the zero column vector ( 0.0, 0.0, 0.0 ) auto z2 = zero<double,columnVector>( 3UL ); // Creates the zero row vector ( 0U, 0U, 0U, 0U ) auto z3 = zero<unsigned int,rowVector>( 4UL ); \endcode // \n Previous: \ref vector_types &nbsp; &nbsp; Next: \ref matrices */ //************************************************************************************************* //**Matrices*************************************************************************************** /*!\page matrices Matrices // // \tableofcontents // // // \n \section matrices_general General Concepts // <hr> // // The \b Blaze library currently offers five dense matrix types (\ref matrix_types_static_matrix, // \ref matrix_types_dynamic_matrix, \ref matrix_types_hybrid_matrix, \ref matrix_types_custom_matrix, // and \ref matrix_types_uniform_matrix) and three sparse matrix types (\ref matrix_types_compressed_matrix, // \ref matrix_types_identity_matrix, and \ref matrix_types_zero_matrix). 
All matrices can either // be stored as row-major matrices or column-major matrices: \code using blaze::DynamicMatrix; using blaze::rowMajor; using blaze::columnMajor; // Setup of the 2x3 row-major dense matrix // // ( 1 2 3 ) // ( 4 5 6 ) // DynamicMatrix<int,rowMajor> A{ { 1, 2, 3 }, { 4, 5, 6 } }; // Setup of the 3x2 column-major dense matrix // // ( 1 4 ) // ( 2 5 ) // ( 3 6 ) // DynamicMatrix<int,columnMajor> B{ { 1, 4 }, { 2, 5 }, { 3, 6 } }; \endcode // Per default, all matrices in \b Blaze are row-major matrices: \code // Instantiation of a 3x3 row-major matrix blaze::DynamicMatrix<int> C( 3UL, 3UL ); \endcode // \n \section matrices_details Matrix Details // <hr> // // - \ref matrix_types // - \ref matrix_operations // // // \n \section matrices_examples Examples // <hr> \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; StaticMatrix<double,6UL,20UL> A; // Instantiation of a 6x20 row-major static matrix CompressedMatrix<double,rowMajor> B; // Instantiation of a row-major compressed matrix DynamicMatrix<double,columnMajor> C; // Instantiation of a column-major dynamic matrix // ... Resizing and initialization C = A * B; \endcode // \n Previous: \ref vector_operations &nbsp; &nbsp; Next: \ref matrix_types */ //************************************************************************************************* //**Matrix Types*********************************************************************************** /*!\page matrix_types Matrix Types // // \tableofcontents // // // \n \section matrix_types_dense_matrices Dense Matrices // <hr> // // \subsection matrix_types_static_matrix StaticMatrix // // The blaze::StaticMatrix class template is the representation of a fixed size matrix with // statically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/StaticMatrix.h> \endcode // The type of the elements, the number of rows and columns, the storage order of the matrix, // the alignment and the padding of the matrix can be specified via the six template parameters: \code template< typename Type, size_t M, size_t N, bool SO, AlignmentFlag AF, PaddingFlag PF > class StaticMatrix; \endcode // - \c Type: specifies the type of the matrix elements. StaticMatrix can be used with any // non-cv-qualified, non-reference element type. // - \c M : specifies the total number of rows of the matrix. // - \c N : specifies the total number of columns of the matrix. Note that it is expected // that StaticMatrix is only used for tiny and small matrices. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // - \c AF : specifies whether the first element of every row/column is properly aligned with // respect to the available instruction set (SSE, AVX, ...). Possible values are // \c blaze::aligned and \c blaze::unaligned. The default value is \c blaze::aligned. // - \c PF : specifies whether every row/column of the matrix should be padded to maximize the // efficiency of vectorized operations. Possible values are \c blaze::padded and // \c blaze::unpadded. The default value is \c blaze::padded. 
// // The blaze::StaticMatrix is perfectly suited for small to medium matrices whose dimensions are // known at compile time: \code // Definition of a 3x4 integral row-major matrix blaze::StaticMatrix<int,3UL,4UL> A; // Definition of a 4x6 single precision row-major matrix blaze::StaticMatrix<float,4UL,6UL,blaze::rowMajor> B; // Definition of an unaligned, unpadded 6x4 double precision column-major matrix blaze::StaticMatrix<double,6UL,4UL,blaze::columnMajor,blaze::unaligned,blaze::unpadded> C; \endcode // \subsubsection matrix_types_static_matrix_alignment Alignment // // In case \c AF is set to \c blaze::aligned, the elements of a blaze::StaticMatrix are possibly // over-aligned to meet the alignment requirements of the available instruction set (SSE, AVX, // AVX-512, ...). The alignment for fundamental types (\c short, \c int, \c float, \c double, ...) // and complex types (\c complex<float>, \c complex<double>, ...) is 16 bytes for SSE, 32 bytes // for AVX, and 64 bytes for AVX-512. All other types are aligned according to their intrinsic // alignment: \code struct Int { int i; }; using MT1 = blaze::StaticMatrix<double,3UL,5UL>; using MT2 = blaze::StaticMatrix<complex<float>,2UL,3UL>; using MT3 = blaze::StaticMatrix<Int,5UL,4UL>; alignof( MT1 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( MT2 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( MT3 ); // Evaluates to 'alignof( Int )' \endcode // Note that an aligned blaze::StaticMatrix instance may be bigger than the sum of its data // elements: \code sizeof( MT1 ); // Evaluates to 160 for SSE, and 192 for AVX and AVX-512 sizeof( MT2 ); // Evaluates to 64 for SSE and AVX and 128 for AVX-512 sizeof( MT3 ); // Evaluates to 80; no special alignment requirements \endcode // Please note that for this reason a blaze::StaticMatrix cannot be used in containers using // dynamic memory such as \c std::vector without additionally providing an allocator that can // provide over-aligned memory: \code using Type = blaze::StaticMatrix<double,3UL,5UL>; using Allocator = blaze::AlignedAllocator<Type>; std::vector<Type> v1; // Might be misaligned for AVX or AVX-512 std::vector<Type,Allocator> v2; // Properly aligned for AVX or AVX-512 \endcode // \subsubsection matrix_types_static_matrix_padding Padding // // Adding padding elements to the end of every row or column of a blaze::StaticMatrix can have a // significant impact on the performance. For instance, assuming that AVX is available, then two // padded 3x3 matrices of double precision values can be added with three SIMD addition operations: \code using blaze::StaticMatrix; using blaze::rowMajor; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; StaticMatrix<double,3UL,3UL,rowMajor,aligned,padded> A1, B1, C1; StaticMatrix<double,3UL,3UL,rowMajor,unaligned,unpadded> A2, B2, C2; // ... Initialization C1 = A1 + B1; // AVX-based matrix addition; maximum performance C2 = A2 + B2; // Scalar matrix addition; limited performance sizeof( A1 ); // Evaluates to 96 for SSE and AVX, and 192 for AVX-512 sizeof( A2 ); // Evaluates to 72 for SSE, AVX, and AVX-512 (minimum size) \endcode // Due to padding, the first addition will run at maximum performance. On the flip side, the size // of each matrix instance is increased due to the padding elements. The total size of an instance // depends on the number of elements and width of the available instruction set (16 bytes for // SSE, 32 bytes for AVX, and 64 bytes for AVX-512). 
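// As an illustrative calculation based on the numbers above: with SSE and AVX, each row of the
// padded 3x3 double precision matrix \c A1 is padded from 3 to 4 elements, which results in
// \f$ 3 \cdot 4 \cdot 8 = 96 \f$ bytes, whereas with AVX-512 each row is padded to 8 elements,
// resulting in \f$ 3 \cdot 8 \cdot 8 = 192 \f$ bytes.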
// // The second addition will be limited in performance since, due to the number of elements, some of // the elements need to be handled in a scalar operation. However, the size of an \c unaligned, // \c unpadded blaze::StaticMatrix instance is guaranteed to be the sum of its elements. // // Please also note that \b Blaze will zero initialize the padding elements in order to achieve // maximum performance! // // // \n \subsection matrix_types_dynamic_matrix DynamicMatrix // // The blaze::DynamicMatrix class template is the representation of an arbitrary sized matrix // with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be included // via the header file \code #include <blaze/math/DynamicMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class DynamicMatrix; \endcode // - \c Type: specifies the type of the matrix elements. DynamicMatrix can be used with any // non-cv-qualified, non-reference element type. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::DynamicMatrix is the default choice for all kinds of dense matrices and the best // choice for medium to large matrices. The number of rows and columns can be modified at runtime: \code // Definition of a 3x4 integral row-major matrix blaze::DynamicMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::DynamicMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::DynamicMatrix<double,blaze::columnMajor> C; \endcode // \n \subsection matrix_types_hybrid_matrix HybridMatrix // // The HybridMatrix class template combines the flexibility of a dynamically sized matrix with // the efficiency and performance of a fixed size matrix. It is implemented as a crossing between // the blaze::StaticMatrix and the blaze::DynamicMatrix class templates: Similar to the static // matrix it uses static stack memory instead of dynamically allocated memory and similar to the // dynamic matrix it can be resized (within the extent of the static memory). It can be included // via the header file \code #include <blaze/math/HybridMatrix.h> \endcode // The type of the elements, the maximum number of rows and columns, the storage order of the // matrix, the alignment and the padding of the matrix can be specified via the six template // parameters: \code template< typename Type, size_t M, size_t N, bool SO, AlignmentFlag AF, PaddingFlag PF > class HybridMatrix; \endcode // - \c Type: specifies the type of the matrix elements. HybridMatrix can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c M : specifies the maximum number of rows of the matrix. // - \c N : specifies the maximum number of columns of the matrix. Note that it is expected // that HybridMatrix is only used for tiny and small matrices. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // - \c AF : specifies whether the first element of every row/column is properly aligned with // respect to the available instruction set (SSE, AVX, ...). Possible values are // \c blaze::aligned and \c blaze::unaligned. The default value is \c blaze::aligned.
// - \c PF : specifies whether every row/column of the matrix should be padded to maximize the // efficiency of vectorized operations. Possible values are \c blaze::padded and // \c blaze::unpadded. The default value is \c blaze::padded. // // The blaze::HybridMatrix is a suitable choice for small to medium matrices, whose dimensions // are not known at compile time or not fixed at runtime, but whose maximum dimensions are known // at compile time: \code // Definition of a 3x4 integral row-major matrix with maximum dimensions of 6x8 blaze::HybridMatrix<int,6UL,8UL> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix with maximum dimensions of 12x16 blaze::HybridMatrix<float,12UL,16UL,blaze::rowMajor> B( 4UL, 6UL ); // Definition of an unaligned, unpadded 0x0 double precision column-major matrix and maximum dimensions of 6x6 blaze::HybridMatrix<double,6UL,6UL,blaze::columnMajor,blaze::unaligned,blaze::unpadded> C; \endcode // \subsubsection matrix_types_hybrid_matrix_alignment Alignment // // In case \c AF is set to \c blaze::aligned, the elements of a blaze::HybridMatrix are possibly // over-aligned to meet the alignment requirements of the available instruction set (SSE, AVX, // AVX-512, ...). The alignment for fundamental types (\c short, \c int, \c float, \c double, ...) // and complex types (\c complex<float>, \c complex<double>, ...) is 16 bytes for SSE, 32 bytes // for AVX, and 64 bytes for AVX-512. All other types are aligned according to their intrinsic // alignment: \code struct Int { int i; }; using MT1 = blaze::HybridMatrix<double,3UL,5UL>; using MT2 = blaze::HybridMatrix<complex<float>,2UL,3UL>; using MT3 = blaze::HybridMatrix<Int,5UL,4UL>; alignof( MT1 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( MT2 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( MT3 ); // Evaluates to 'alignof( Int )' \endcode // Note that an aligned blaze::HybridMatrix instance may be bigger than an according unaligned // blaze::HybridMatrix: \code sizeof( MT1 ); // Evaluates to 160 for SSE, 224 for AVX, and 256 for AVX-512 sizeof( MT2 ); // Evaluates to 80 for SSE, 96 for AVX, and 192 for AVX-512 sizeof( MT3 ); // Evaluates to 96; no special alignment requirements \endcode // Please note that for this reason a blaze::HybridMatrix cannot be used in containers using // dynamic memory such as \c std::vector without additionally providing an allocator that can // provide over-aligned memory: \code using Type = blaze::HybridMatrix<double,3UL,5UL>; using Allocator = blaze::AlignedAllocator<Type>; std::vector<Type> v1; // Might be misaligned for AVX or AVX-512 std::vector<Type,Allocator> v2; // Properly aligned for AVX or AVX-512 \endcode // \subsubsection matrix_types_hybrid_matrix_padding Padding // // Adding padding elements to the end of every row or column of a blaze::HybridMatrix can have a // significant impact on the performance. For instance, assuming that AVX is available, then two // padded 3x3 matrices of double precision values can be added with three SIMD addition operations: \code using blaze::HybridMatrix; using blaze::rowMajor; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; HybridMatrix<double,3UL,3UL,rowMajor,aligned,padded> A1, B1, C1; HybridMatrix<double,3UL,3UL,rowMajor,unaligned,unpadded> A2, B2, C2; // ... 
Initialization C1 = A1 + B1; // AVX-based matrix addition; maximum performance C2 = A2 + B2; // Scalar matrix addition; limited performance sizeof( A1 ); // Evaluates to 112 for SSE, 128 for AVX, and 256 for AVX-512 sizeof( A2 ); // Evaluates to 88 for SSE, AVX, and AVX-512 (minimum size) \endcode // Due to padding, the first addition will run at maximum performance. On the flip side, the size // of each matrix instance is increased due to the padding elements. The total size of an instance // depends on the number of elements and width of the available instruction set (16 bytes for // SSE, 32 bytes for AVX, and 64 bytes for AVX-512). // // The second addition will be limited in performance since, due to the number of elements, some of // the elements need to be handled in a scalar operation. However, the size of an \c unaligned, // \c unpadded blaze::HybridMatrix instance is guaranteed to be the sum of its elements plus the // necessary data members to store the current number of rows and columns. // // Please also note that \b Blaze will zero initialize the padding elements in order to achieve // maximum performance! // // // \n \subsection matrix_types_custom_matrix CustomMatrix // // The blaze::CustomMatrix class template provides the functionality to represent an external // array of elements of arbitrary type and a fixed size as a native \b Blaze dense matrix data // structure. Thus in contrast to all other dense matrix types a custom matrix does not perform // any kind of memory allocation by itself, but it is provided with an existing array of elements // during construction. A custom matrix can therefore be considered an alias to the existing // array. It can be included via the header file \code #include <blaze/math/CustomMatrix.h> \endcode // The type of the elements, the properties of the given array of elements and the storage order // of the matrix can be specified via the following four template parameters: \code template< typename Type, bool AF, bool PF, bool SO > class CustomMatrix; \endcode // - Type: specifies the type of the matrix elements. blaze::CustomMatrix can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - AF : specifies whether the represented, external arrays are properly aligned with // respect to the available instruction set (SSE, AVX, ...) or not. // - PF : specifies whether the represented, external arrays are properly padded with // respect to the available instruction set (SSE, AVX, ...) or not. // - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor.
// // The blaze::CustomMatrix is the right choice if any external array needs to be represented as // a \b Blaze dense matrix data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of an unmanaged 3x4 custom matrix for unaligned, unpadded integer arrays using UnalignedUnpadded = CustomMatrix<int,unaligned,unpadded,rowMajor>; std::vector<int> vec( 12UL ); UnalignedUnpadded A( &vec[0], 3UL, 4UL ); // Definition of a managed 5x6 custom matrix for unaligned but padded 'float' arrays using UnalignedPadded = CustomMatrix<float,unaligned,padded,columnMajor>; std::unique_ptr<float[]> memory1( new float[40] ); UnalignedPadded B( memory1.get(), 5UL, 6UL, 8UL ); // Definition of a managed 12x13 custom matrix for aligned, unpadded 'double' arrays using AlignedUnpadded = CustomMatrix<double,aligned,unpadded,rowMajor>; std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( 192UL ) ); AlignedUnpadded C( memory2.get(), 12UL, 13UL, 16UL ); // Definition of a 7x14 custom matrix for aligned, padded 'complex<double>' arrays using cplx = std::complex<double>; using AlignedPadded = CustomMatrix<cplx,aligned,padded,columnMajor>; std::unique_ptr<cplx[],Deallocate> memory3( blaze::allocate<cplx>( 112UL ) ); AlignedPadded D( memory3.get(), 7UL, 14UL, 16UL ); \endcode // In comparison with the remaining \b Blaze dense matrix types blaze::CustomMatrix has several // special characteristics. All of these result from the fact that a custom matrix is not // performing any kind of memory allocation, but instead is given an existing array of elements. // The following sections discuss all of these characteristics: // // -# <b>\ref matrix_types_custom_matrix_memory_management</b> // -# <b>\ref matrix_types_custom_matrix_copy_operations</b> // -# <b>\ref matrix_types_custom_matrix_alignment</b> // -# <b>\ref matrix_types_custom_matrix_padding</b> // // \subsubsection matrix_types_custom_matrix_memory_management Memory Management // // The blaze::CustomMatrix class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // matrix data structure. However, this flexibility comes at the price that the user of a custom // matrix is responsible for the resource management. // // The following examples give an impression of several possible types of custom matrices: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of a 3x4 custom row-major matrix with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom matrix! std::vector<int> vec( 12UL ); CustomMatrix<int,unaligned,unpadded> A( &vec[0], 3UL, 4UL ); // Definition of a custom 8x12 matrix for an aligned and padded integer array of // capacity 128 (including 8 padding elements per row). Note that the std::unique_ptr // must be guaranteed to outlive the custom matrix!
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 128UL ) ); CustomMatrix<int,aligned,padded> B( memory.get(), 8UL, 12UL, 16UL ); \endcode // \subsubsection matrix_types_custom_matrix_copy_operations Copy Operations // // As with all dense matrices it is possible to copy construct a custom matrix: \code using blaze::CustomMatrix; using blaze::unaligned; using blaze::unpadded; using CustomType = CustomMatrix<int,unaligned,unpadded>; std::vector<int> vec( 6UL, 10 ); // Vector of 6 integers of the value 10 CustomType A( &vec[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix A(0,1) = 20; // Also modifies the std::vector CustomType B( A ); // Creating a copy of matrix A B(0,2) = 20; // Also affects matrix A and the std::vector \endcode // It is important to note that a custom matrix acts as a reference to the specified array. Thus // the result of the copy constructor is a new custom matrix that is referencing and representing // the same array as the original custom matrix. // // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom matrices, but modifies the values of the array: \code std::vector<int> vec2( 6UL, 4 ); // Vector of 6 integers of the value 4 CustomType C( &vec2[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix A = C; // Copy assignment: Set all values of matrix A and B to 4. \endcode // \subsubsection matrix_types_custom_matrix_alignment Alignment // // In case the custom matrix is specified as \c aligned the passed array must adhere to some // alignment restrictions based on the alignment requirements of the used data type and the // used instruction set (SSE, AVX, ...). The restriction applies to the first element of each // row/column: In case of a row-major matrix the first element of each row must be properly // aligned, in case of a column-major matrix the first element of each column must be properly // aligned. For instance, if a row-major matrix is used and AVX is active the first element of // each row must be 32-byte aligned: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; using blaze::rowMajor; // Allocation of 32-byte aligned memory std::unique_ptr<int[],Deallocate> memory( allocate<int>( 40UL ) ); CustomMatrix<int,aligned,padded,rowMajor> A( memory.get(), 5UL, 6UL, 8UL ); \endcode // In the example, the row-major matrix has six columns. However, since with AVX eight integer // values are loaded together the matrix is padded with two additional elements. This guarantees // that the first element of each row is 32-byte aligned. In case the alignment requirements are // violated, a \c std::invalid_argument exception is thrown. // // \subsubsection matrix_types_custom_matrix_padding Padding // // Adding padding elements to the end of each row/column can have a significant impact on the // performance.
For instance, assuming that AVX is available, then two aligned, padded, 3x3 double // precision matrices can be added via three SIMD addition operations: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; using CustomType = CustomMatrix<double,aligned,padded>; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 12UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 12UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 12UL ) ); // Creating padded custom 3x3 matrix with an additional padding element in each row CustomType A( memory1.get(), 3UL, 3UL, 4UL ); CustomType B( memory2.get(), 3UL, 3UL, 4UL ); CustomType C( memory3.get(), 3UL, 3UL, 4UL ); // ... Initialization C = A + B; // AVX-based matrix addition \endcode // In this example, maximum performance is possible. However, in case no padding elements are // inserted a scalar addition has to be used: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; using CustomType = CustomMatrix<double,aligned,unpadded>; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 9UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 9UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 9UL ) ); // Creating unpadded custom 3x3 matrix CustomType A( memory1.get(), 3UL, 3UL ); CustomType B( memory2.get(), 3UL, 3UL ); CustomType C( memory3.get(), 3UL, 3UL ); // ... Initialization C = A + B; // Scalar matrix addition \endcode // Note that the construction of padded and unpadded aligned matrices looks identical. However, // in case of padded matrices, \b Blaze will zero initialize the padding elements and use them // in all computations in order to achieve maximum performance. In case of an unpadded matrix // \b Blaze will ignore the elements with the downside that it is not possible to load a complete // row to an AVX register, which makes it necessary to fall back to a scalar addition. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom matrix the added padding elements must // guarantee that the total number of elements in each row/column is a multiple of the SIMD // vector width. In case of an unaligned padded matrix the number of padding elements can be // greater than or equal to the number of padding elements of an aligned padded custom matrix. In case // the padding is insufficient with respect to the available instruction set, a // \c std::invalid_argument exception is thrown. // // // \n \subsection matrix_types_uniform_matrix UniformMatrix // // The blaze::UniformMatrix class template is the representation of an arbitrary sized uniform // matrix with elements of arbitrary type. It can be included via the header file \code #include <blaze/math/UniformMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class UniformMatrix; \endcode // - \c Type: specifies the type of the matrix elements. UniformMatrix can be used with any // non-cv-qualified, non-reference element type. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::UniformMatrix is the best choice for uniform matrices of any size.
The number of // rows and columns can be modified at runtime: \code // Definition of a 3x4 integral row-major matrix blaze::UniformMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::UniformMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::UniformMatrix<double,blaze::columnMajor> C; \endcode // \n \section matrix_types_sparse_matrices Sparse Matrices // <hr> // // \subsection matrix_types_compressed_matrix CompressedMatrix // // The blaze::CompressedMatrix class template is the representation of an arbitrary sized sparse // matrix with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be // included via the header file \code #include <blaze/math/CompressedMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class CompressedMatrix; \endcode // - \c Type: specifies the type of the matrix elements. CompressedMatrix can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::CompressedMatrix is the right choice for all kinds of sparse matrices: \code // Definition of a 3x4 integral row-major matrix blaze::CompressedMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::CompressedMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::CompressedMatrix<double,blaze::columnMajor> C; \endcode // \n \subsection matrix_types_identity_matrix IdentityMatrix // // The blaze::IdentityMatrix class template is the representation of an immutable, arbitrary // sized identity matrix with \f$ N \cdot N \f$ elements of arbitrary type. It can be included // via the header file \code #include <blaze/math/IdentityMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class IdentityMatrix; \endcode // - Type: specifies the type of the matrix elements. IdentityMatrix can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::IdentityMatrix is the perfect choice to represent an identity matrix: \code // Definition of a 3x3 integral row-major identity matrix blaze::IdentityMatrix<int> A( 3UL ); // Definition of a 6x6 single precision row-major identity matrix blaze::IdentityMatrix<float,blaze::rowMajor> B( 6UL ); // Definition of a double precision column-major identity matrix with 0 rows and columns blaze::IdentityMatrix<double,blaze::columnMajor> C; \endcode // \n \subsection matrix_types_zero_matrix ZeroMatrix // // The blaze::ZeroMatrix class template is the representation of an immutable, arbitrary sized // zero matrix with \f$ M \cdot N \f$ elements of arbitrary type. 
It can be included via the // header file \code #include <blaze/math/ZeroMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class ZeroMatrix; \endcode // - Type: specifies the type of the matrix elements. ZeroMatrix can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::ZeroMatrix is the perfect choice to represent a zero matrix: \code // Definition of a 3x5 integral row-major zero matrix blaze::ZeroMatrix<int> A( 3UL, 5UL ); // Definition of a 6x4 single precision row-major zero matrix blaze::ZeroMatrix<float,blaze::rowMajor> B( 6UL, 4UL ); // Definition of a double precision column-major zero matrix with 0 rows and columns blaze::ZeroMatrix<double,blaze::columnMajor> C; \endcode // \n Previous: \ref matrices &nbsp; &nbsp; Next: \ref matrix_operations */ //************************************************************************************************* //**Matrix Operations****************************************************************************** /*!\page matrix_operations Matrix Operations // // \tableofcontents // // // \n \section matrix_operations_constructors Constructors // <hr> // // Matrices are just as easy and intuitive to create as vectors. Still, there are a few rules // to be aware of: // - In case the last template parameter (the storage order) is omitted, the matrix is per // default stored in row-major order. // - The elements of a \c StaticMatrix or \c HybridMatrix are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicMatrix or \c CompressedMatrix remain uninitialized // if they are of built-in type and are default constructed if they are of class type. // // \n \subsection matrix_operations_default_construction Default Construction \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; // All matrices can be default constructed. Whereas the size of // a StaticMatrix is fixed via the second and third template // parameter, the initial size of a constructed DynamicMatrix // or CompressedMatrix is 0. StaticMatrix<int,2UL,2UL> M1; // Instantiation of a 2x2 integer row-major // matrix. All elements are initialized to 0. DynamicMatrix<float> M2; // Instantiation of a single precision dynamic // row-major matrix with 0 rows and 0 columns. DynamicMatrix<double,columnMajor> M3; // Instantiation of a double precision dynamic // column-major matrix with 0 rows and 0 columns. CompressedMatrix<int> M4; // Instantiation of a compressed integer // row-major matrix of size 0x0. CompressedMatrix<double,columnMajor> M5; // Instantiation of a compressed double precision // column-major matrix of size 0x0. \endcode // \n \subsection matrix_operations_size_construction Construction with Specific Size // // The \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix classes offer a constructor // that allows to immediately give the matrices a specific number of rows and columns: \code DynamicMatrix<int> M6( 5UL, 4UL ); // Instantiation of a 5x4 dynamic row-major // matrix. The elements are not initialized. HybridMatrix<double,5UL,9UL> M7( 3UL, 7UL ); // Instantiation of a 3x7 hybrid row-major // matrix. 
The elements are not initialized. CompressedMatrix<float,columnMajor> M8( 8UL, 6UL ); // Instantiation of an empty 8x6 compressed // column-major matrix. \endcode // Note that dense matrices (in this case \c DynamicMatrix and \c HybridMatrix) immediately // allocate enough capacity for all matrix elements. Sparse matrices on the other hand (in this // example \c CompressedMatrix) merely acquire the size, but don't necessarily allocate memory. // // // \n \subsection matrix_operations_initialization_constructors Initialization Constructors // // All dense matrix classes offer a constructor for a direct, homogeneous initialization of all // matrix elements. In contrast, for sparse matrices the predicted number of non-zero elements // can be specified. \code StaticMatrix<int,4UL,3UL,columnMajor> M9( 7 ); // Instantiation of a 4x3 integer column-major // matrix. All elements are initialized to 7. DynamicMatrix<float> M10( 2UL, 5UL, 2.0F ); // Instantiation of a 2x5 single precision row-major // matrix. All elements are initialized to 2.0F. CompressedMatrix<int> M11( 3UL, 4UL, 4 ); // Instantiation of a 3x4 integer row-major // matrix with capacity for 4 non-zero elements. \endcode // \n \subsection matrix_operations_array_construction Array Construction // // Alternatively, all dense matrix classes offer a constructor for an initialization with a dynamic // or static array, or with a \c std::array. If the matrix is initialized from a dynamic array, the // constructor expects the dimensions of values provided by the array as first and second argument, // the array as third argument. In case of a static array or \c std::array, the fixed size of the // array is used: \code const std::unique_ptr<double[]> array1( new double[6] ); // ... Initialization of the dynamic array blaze::StaticMatrix<double,2UL,3UL> M12( 2UL, 3UL, array1.get() ); int array2[2][2] = { { 4, -5 }, { -6, 7 } }; blaze::StaticMatrix<int,2UL,2UL,rowMajor> M13( array2 ); const std::array<std::array<float,3UL>,2UL> array3{ { { 1, 2, 3 }, { 4, 5, 6 } } }; blaze::StaticMatrix<int,2UL,3UL> M14( array3 ); \endcode // \n \subsection matrix_operations_initializer_list_construction // // In addition, all dense and sparse matrix classes can be directly initialized by means of an // initializer list: \code blaze::DynamicMatrix<float,columnMajor> M15{ { 3.1F, 6.4F }, { -0.9F, -1.2F }, { 4.8F, 0.6F } }; blaze::CompressedMatrix<int,rowMajor> M16{ { 3 }, { 1 }, { 0, 2 } }; \endcode // Dynamically sized matrices (such as e.g. \ref matrix_types_hybrid_matrix, // \ref matrix_types_dynamic_matrix or \ref matrix_types_compressed_matrix) are sized according // to the size of the initializer list and all their elements are (copy) assigned the values of // the list. For fixed size matrices (such as e.g. \ref matrix_types_static_matrix) missing values // are initialized as default and in case the size of the top-level initializer list does not // match the number of rows of the matrix or the size of any nested list exceeds the number of // columns, a \a std::invalid_argument exception is thrown. In case of sparse matrices, only // the non-zero elements are used to initialize the matrix. // // \n \subsection matrix_operations_copy_construction Copy Construction // // All dense and sparse matrices can be created as a copy of another dense or sparse matrix. \code StaticMatrix<int,5UL,4UL,rowMajor> M17( M6 ); // Instantiation of the dense row-major matrix M16 // as copy of the dense row-major matrix M6. 
DynamicMatrix<float,columnMajor> M18( M8 ); // Instantiation of the dense column-major matrix M17 // as copy of the sparse column-major matrix M8. CompressedMatrix<double,columnMajor> M19( M7 ); // Instantiation of the compressed column-major matrix // M18 as copy of the dense row-major matrix M7. CompressedMatrix<float,rowMajor> M20( M8 ); // Instantiation of the compressed row-major matrix // M19 as copy of the compressed column-major matrix M8. \endcode // Note that it is not possible to create a \c StaticMatrix as a copy of a matrix with a different // number of rows and/or columns: \code StaticMatrix<int,4UL,5UL,rowMajor> M21( M6 ); // Runtime error: Number of rows and columns // does not match! StaticMatrix<int,4UL,4UL,columnMajor> M22( M9 ); // Compile time error: Number of columns does // not match! \endcode // \n \section matrix_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse matrices: // \ref matrix_operations_homogeneous_assignment, \ref matrix_operations_array_assignment, // \ref matrix_operations_copy_assignment, and \ref matrix_operations_compound_assignment. // // // \n \subsection matrix_operations_homogeneous_assignment Homogeneous Assignment // // It is possible to assign the same value to all elements of a dense matrix. All dense matrix // classes provide an according assignment operator: \code blaze::StaticMatrix<int,3UL,2UL> M1; blaze::DynamicMatrix<double> M2; // Setting all integer elements of the StaticMatrix to 4 M1 = 4; // Setting all double precision elements of the DynamicMatrix to 3.5 M2 = 3.5 \endcode // \n \subsection matrix_operations_array_assignment Array Assignment // // Dense matrices can also be assigned a static array: \code blaze::StaticMatrix<int,2UL,2UL,rowMajor> M1; blaze::StaticMatrix<int,2UL,2UL,columnMajor> M2; blaze::DynamicMatrix<double> M3; int array1[2][2] = { { 1, 2 }, { 3, 4 } }; double array2[3][2] = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; M1 = array1; M2 = array1; M3 = array2; \endcode // Note that the dimensions of the static array have to match the size of a \c StaticMatrix, // whereas a \c DynamicMatrix is resized according to the array dimensions: \f$ M3 = \left(\begin{array}{*{2}{c}} 3.1 & 6.4 \\ -0.9 & -1.2 \\ 4.8 & 0.6 \\ \end{array}\right)\f$ // \n \subsection matrix_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense or sparse // matrix: \code blaze::DynamicMatrix<double> M1; blaze::CompressedMatrix<int> M2; M1 = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; M2 = { { 1, 0 }, {}, { 0, 1 }, { 2 } }; \endcode // Dynamically sized matrices (such as e.g. \ref matrix_types_hybrid_matrix, // \ref matrix_types_dynamic_matrix or \ref matrix_types_compressed_matrix) are resized according // to the size of the initializer list and all their elements are (copy) assigned the values of // the list. For fixed size matrices (such as e.g. \ref matrix_types_static_matrix) missing values // are reset to their default value and in case the size of the top-level initializer list does // not match the number of rows of the matrix or the size of any nested list exceeds the number // of columns, a \a std::invalid_argument exception is thrown. In case of sparse matrices, only // the non-zero elements are considered. // // \n \subsection matrix_operations_copy_assignment Copy Assignment // // All kinds of matrices can be assigned to each other. 
The only restriction is that since a // \c StaticMatrix cannot change its size, the assigned matrix must match both in the number of // rows and in the number of columns. \code blaze::StaticMatrix<int,3UL,2UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 3UL, 2UL ); blaze::DynamicMatrix<float,rowMajor> M3( 5UL, 2UL ); blaze::CompressedMatrix<int,rowMajor> M4( 3UL, 2UL ); blaze::CompressedMatrix<float,columnMajor> M5( 3UL, 2UL ); // ... Initialization of the matrices M1 = M2; // OK: Assignment of a 3x2 dense row-major matrix to another 3x2 dense row-major matrix M1 = M4; // OK: Assignment of a 3x2 sparse row-major matrix to a 3x2 dense row-major matrix M1 = M3; // Runtime error: Cannot assign a 5x2 matrix to a 3x2 static matrix M1 = M5; // OK: Assignment of a 3x2 sparse column-major matrix to a 3x2 dense row-major matrix \endcode // \n \subsection matrix_operations_compound_assignment Compound Assignment // // Compound assignment is also available for matrices: addition assignment, subtraction assignment, // and multiplication assignment. In contrast to plain assignment, however, the number of rows // and columns of the two operands have to match according to the arithmetic operation. \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 2UL, 3UL ); blaze::CompressedMatrix<float,columnMajor> M3( 2UL, 3UL ); blaze::CompressedMatrix<float,rowMajor> M4( 2UL, 4UL ); blaze::StaticMatrix<float,2UL,4UL,rowMajor> M5; blaze::CompressedMatrix<float,rowMajor> M6( 3UL, 2UL ); // ... Initialization of the matrices M1 += M2; // OK: Addition assignment between two row-major matrices of the same dimensions M1 -= M3; // OK: Subtraction assignment between between a row-major and a column-major matrix M1 += M4; // Runtime error: No compound assignment between matrices of different size M1 -= M5; // Compilation error: No compound assignment between matrices of different size M2 *= M6; // OK: Multiplication assignment between two row-major matrices \endcode // Note that the multiplication assignment potentially changes the number of columns of the // target matrix: \f$\left(\begin{array}{*{3}{c}} 2 & 0 & 1 \\ 0 & 3 & 2 \\ \end{array}\right) \times \left(\begin{array}{*{2}{c}} 4 & 0 \\ 1 & 0 \\ 0 & 3 \\ \end{array}\right) = \left(\begin{array}{*{2}{c}} 8 & 3 \\ 3 & 6 \\ \end{array}\right)\f$ // Since a \c StaticMatrix cannot change its size, only a square StaticMatrix can be used in a // multiplication assignment with other square matrices of the same dimensions. // // // \n \section matrix_operations_element_access Element Access // <hr> // // \subsection matrix_operations_function_call_operator_1 Function Call Operator // // The easiest way to access a specific dense or sparse matrix element is via the function call // operator. The indices to access a matrix are zero-based: \code blaze::DynamicMatrix<int> M1( 4UL, 6UL ); M1(0,0) = 1; M1(0,1) = 3; // ... blaze::CompressedMatrix<double> M2( 5UL, 3UL ); M2(0,2) = 4.1; M2(1,1) = -6.3; \endcode // Since dense matrices allocate enough memory for all contained elements, using the function // call operator on a dense matrix directly returns a reference to the accessed value. In case // of a sparse matrix, if the accessed value is currently not contained in the matrix, the // value is inserted into the matrix prior to returning a reference to the value, which can // be much more expensive than the direct access to a dense matrix. 
Consider the following // example: \code blaze::CompressedMatrix<int> M1( 4UL, 4UL ); for( size_t i=0UL; i<M1.rows(); ++i ) { for( size_t j=0UL; j<M1.columns(); ++j ) { ... = M1(i,j); } } \endcode // Although the compressed matrix is only used for read access within the for loop, using the // function call operator temporarily inserts 16 non-zero elements into the matrix. Therefore // the preferred way to traverse the non-zero elements of a sparse matrix is to use iterators. // // \n \subsection matrix_operations_iterators Iterators // // An alternate way to traverse the elements contained in a dense or sparse matrix is by means // of iterators. For that purpose, all matrices provide the \c begin(), \c cbegin(), \c end(), // and \c cend() members functions. Note that it is not possible to traverse all elements of the // matrix, but that it is only possible to traverse elements in a row-wise fashion (in case of // a row-major matrix) or in a column-wise fashion (in case of a column-major matrix). In case of // non-const matrices, \c begin() and \c end() return an \c Iterator, which allows a manipulation // of the (non-zero) value. In case of a constant matrix or in case \c cbegin() or \c cend() are // used a \c ConstIterator is returned. Iterators on dense matrices traverse all elements of the // matrix, including the zero elements. Iterators on sparse matrices only traverse the non-zero // elements. // // The following two examples demonstrate how to traverse the elements of a dense and sparse // matrix, respectively: \code using blaze::DynamicMatrix; using blaze::rowMajor; using blaze::columnMajor; DynamicMatrix<int,rowMajor> M1( 4UL, 6UL ); DynamicMatrix<int,columnMajor> M2( 4UL, 6UL ); // Traversing all elements contained in the row-major matrix by Iterator for( size_t i=0UL; i<M1.rows(); ++i ) { for( DynamicMatrix<int,rowMajor>::Iterator it=M1.begin(i); it!=M1.end(i); ++it ) { *it = ...; // OK: Write access to the value of the element. ... = *it; // OK: Read access to the value of the element. } } // Traversing all elements contained in the column-major matrix by ConstIterator for( size_t j=0UL; j<M2.columns(); ++j ) { for( DynamicMatrix<int,columnMajor>::ConstIterator it=M2.cbegin(j); it!=M2.cend(j); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the value of the element. } } \endcode \code using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; CompressedMatrix<int,rowMajor> M3( 4UL, 6UL ); CompressedMatrix<int,columnMajor> M4( 4UL, 6UL ); // Traversing the non-zero elements contained in the row-major matrix by Iterator for( size_t i=0UL; i<M3.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=M3.begin(i); it!=M3.end(i); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } } // Traversing the non-zero elements contained in the column-major matrix by ConstIterator for( size_t j=0UL; j<M4.columns(); ++j ) { for( CompressedMatrix<int,columnMajor>::ConstIterator it=M4.cbegin(j); it!=M4.cend(j); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. 
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( size_t i=0UL; i<M3.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=begin( M3, i ); it!=end( M3, i ); ++it ) { // ... } } for( size_t j=0UL; j<M4.columns(); ++j ) { for( CompressedMatrix<int,columnMajor>::ConstIterator it=cbegin( M4, j ); it!=cend( M4, j ); ++it ) { // ... } } \endcode // \n \subsection matrix_operations_data .data() / data() // // Sometimes it is necessary to acquire a pointer to the first element of the underlying array // of a dense matrix. For that purpose the \c data() member function or the free \c data() function // can be used: \code // Instantiating a 5x7 dynamic matrix blaze::DynamicMatrix<int> A( 5UL, 7UL ); A.data(); // Returns a pointer to the first element of the dynamic matrix data( A ); // Same effect as the member function \endcode // Note that you can NOT assume that all matrix elements lie adjacent to each other! The dense // matrix may use techniques such as padding to improve the alignment of the data. Whereas the // number of elements within a row/column is given by the \ref matrix_operations_rows "rows()" and // \ref matrix_operations_columns "columns()" functions, respectively, the total number of elements including // padding is given by the \ref matrix_operations_spacing "spacing()" function. // // // \n \section matrix_operations_element_insertion Element Insertion // <hr> // // Whereas a dense matrix always provides enough capacity to store all matrix elements, a sparse // matrix only stores the non-zero elements. Therefore it is necessary to explicitly add elements // to the matrix. // // \n \subsection matrix_operations_function_call_operator_2 Function Call Operator // // The first possibility to add elements to a sparse matrix is the function call operator: \code using blaze::CompressedMatrix; CompressedMatrix<int> M1( 3UL, 4UL ); M1(1,2) = 9; \endcode // In case the element at the given position is not yet contained in the sparse matrix, it is // automatically inserted. Otherwise the old value is replaced by the new value 9. The operator // returns a reference to the sparse matrix element. // // \n \subsection matrix_operations_set .set() // // An alternative to the function call operator is the \c set() function: In case the element is // not yet contained in the matrix the element is inserted, else the element's value is modified: \code // Insert or modify the value at position (2,0) M1.set( 2, 0, 1 ); \endcode // \n \subsection matrix_operations_insert .insert() // The insertion of elements can be better controlled via the \c insert() function. In contrast // to the function call operator and the \c set() function it emits an exception in case the // element is already contained in the matrix. In order to check for this case, the \c find() // function can be used: \code // In case the element at position (2,3) is not yet contained in the matrix it is inserted // with a value of 4. if( M1.find( 2, 3 ) == M1.end( 2 ) ) M1.insert( 2, 3, 4 ); \endcode // \n \subsection matrix_operations_append .append() // // Although the \c insert() function is very flexible, due to performance reasons it is not // suited for the setup of large sparse matrices.
A very efficient, yet also very low-level // way to fill a sparse matrix is the \c append() function. It requires the sparse matrix to // provide enough capacity to insert a new element in the specified row/column. Additionally, // the index of the new element must be larger than the index of the previous element in the // same row/column. Violating these conditions results in undefined behavior! \code M1.reserve( 0, 3 ); // Reserving space for three non-zero elements in row 0 M1.append( 0, 1, 2 ); // Appending the element 2 in row 0 at column index 1 M1.append( 0, 2, -4 ); // Appending the element -4 in row 0 at column index 2 // ... \endcode // The most efficient way to fill a sparse matrix with elements, however, is a combination of // \c reserve(), \c append(), and the \c finalize() function: \code // Setup of the compressed row-major matrix // // ( 0 1 0 2 0 ) // A = ( 0 0 0 0 0 ) // ( 3 0 0 0 0 ) // blaze::CompressedMatrix<int> M1( 3UL, 5UL ); M1.reserve( 3 ); // Reserving enough space for 3 non-zero elements M1.append( 0, 1, 1 ); // Appending the value 1 in row 0 with column index 1 M1.append( 0, 3, 2 ); // Appending the value 2 in row 0 with column index 3 M1.finalize( 0 ); // Finalizing row 0 M1.finalize( 1 ); // Finalizing the empty row 1 to prepare row 2 M1.append( 2, 0, 3 ); // Appending the value 3 in row 2 with column index 0 M1.finalize( 2 ); // Finalizing row 2 \endcode // \note The \c finalize() function has to be explicitly called for each row or column, even // for empty ones! // \note Although \c append() does not allocate new memory, it still invalidates all iterators // returned by the \c end() functions! // // // \n \section matrix_operations_element_removal Element Removal // <hr> // // \subsection matrix_operations_erase .erase() // // The \c erase() member functions can be used to remove elements from a sparse matrix. The // following example gives an impression of the five different flavors of \c erase(): \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... Initialization of the matrix // Erasing the element at position (21,23) A.erase( 21, 23 ); // Erasing a single element in row 17 via iterator A.erase( 17, A.find( 4 ) ); // Erasing all non-zero elements in the range [7..24] of row 33 A.erase( 33, A.lowerBound( 33, 7 ), A.upperBound( 33, 24 ) ); // Erasing all non-zero elements with a value larger than 9 by passing a unary predicate A.erase( []( int i ){ return i > 9; } ); // Erasing all non-zero elements in the range [30..40] of row 37 with a value larger than 5 CompressedMatrix<int,rowMajor>::Iterator pos1( A.lowerBound( 37, 30 ) ); CompressedMatrix<int,rowMajor>::Iterator pos2( A.upperBound( 37, 40 ) ); A.erase( 37, pos1, pos2, []( int i ){ return i > 5; } ); \endcode // \n \section matrix_operations_element_lookup Element Lookup // <hr> // // A sparse matrix only stores the non-zero elements contained in the matrix. Therefore, whenever // accessing a matrix element at a specific position a lookup operation is required. Whereas the // function call operator is performing this lookup automatically, it is also possible to use the // \c find(), \c lowerBound(), and \c upperBound() member functions for a manual lookup. // // \n \subsection matrix_operations_find .find() // // The \c find() function can be used to check whether a specific element is contained in the // sparse matrix. It specifically searches for the element at the specified position. 
In case // the element is found, the function returns an iterator to the element. Otherwise an iterator // just past the last non-zero element of the according row or column (the \c end() iterator) // is returned. Note that the returned iterator is subject to invalidation due to inserting // operations via the function call operator, the \c set() function or the \c insert() function! \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... Initialization of the matrix // Searching the element at position (7,17). In case the element is not // contained in the vector, the end() iterator of row 7 is returned. CompressedMatrix<int,rowMajor>::Iterator pos( A.find( 7, 17 ) ); if( pos != A.end( 7 ) ) { // ... } \endcode // \n \subsection matrix_operations_lowerbound .lowerBound() // // In case of a row-major matrix, this function returns a row iterator to the first element with // an index not less then the given column index. In case of a column-major matrix, the function // returns a column iterator to the first element with an index not less then the given row // index. In combination with the \c upperBound() function this function can be used to create a // pair of iterators specifying a range of indices. Note that the returned iterator is subject // to invalidation due to inserting operations via the function call operator, the \c set() // function or the \c insert() function! \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... Initialization of the matrix // Searching the lower bound of column index 17 in row 7. CompressedMatrix<int,rowMajor>::Iterator pos1( A.lowerBound( 7, 17 ) ); // Searching the upper bound of column index 28 in row 7 CompressedMatrix<int,rowMajor>::Iterator pos2( A.upperBound( 7, 28 ) ); // Erasing all elements in the specified range A.erase( 7, pos1, pos2 ); \endcode // \n \subsection matrix_operations_upperbound .upperBound() // // In case of a row-major matrix, this function returns a row iterator to the first element with // an index greater then the given column index. In case of a column-major matrix, the function // returns a column iterator to the first element with an index greater then the given row // index. In combination with the \c lowerBound() function this function can be used to create a // pair of iterators specifying a range of indices. Note that the returned iterator is subject // to invalidation due to inserting operations via the function call operator, the \c set() // function or the \c insert() function! \code using blaze::CompressedMatrix; CompressedMatrix<int,columnMajor> A( 42, 53 ); // ... Initialization of the matrix // Searching the lower bound of row index 17 in column 9. 
CompressedMatrix<int,columnMajor>::Iterator pos1( A.lowerBound( 17, 9 ) ); // Searching the upper bound of row index 28 in column 9 CompressedMatrix<int,columnMajor>::Iterator pos2( A.upperBound( 28, 9 ) ); // Erasing all elements in the specified range A.erase( 9, pos1, pos2 ); \endcode // \n \section matrix_operations_non_modifying_operations Non-Modifying Operations // <hr> // // \subsection matrix_operations_rows .rows() / rows() // // The current number of rows of a matrix can be acquired via the \c rows() member function: \code // Instantiating a dynamic matrix with 10 rows and 8 columns blaze::DynamicMatrix<int> M1( 10UL, 8UL ); M1.rows(); // Returns 10 // Instantiating a compressed matrix with 8 rows and 12 columns blaze::CompressedMatrix<double> M2( 8UL, 12UL ); M2.rows(); // Returns 8 \endcode // Alternatively, the free functions \c rows() can be used to query the current number of rows of // a matrix. In contrast to the member function, the free function can also be used to query the // number of rows of a matrix expression: \code rows( M1 ); // Returns 10, i.e. has the same effect as the member function rows( M2 ); // Returns 8, i.e. has the same effect as the member function rows( M1 * M2 ); // Returns 10, i.e. the number of rows of the resulting matrix \endcode // \n \subsection matrix_operations_columns .columns() / columns() // // The current number of columns of a matrix can be acquired via the \c columns() member function: \code // Instantiating a dynamic matrix with 6 rows and 8 columns blaze::DynamicMatrix<int> M1( 6UL, 8UL ); M1.columns(); // Returns 8 // Instantiating a compressed matrix with 8 rows and 7 columns blaze::CompressedMatrix<double> M2( 8UL, 7UL ); M2.columns(); // Returns 7 \endcode // There is also a free function \c columns() available, which can also be used to query the number // of columns of a matrix expression: \code columns( M1 ); // Returns 8, i.e. has the same effect as the member function columns( M2 ); // Returns 7, i.e. has the same effect as the member function columns( M1 * M2 ); // Returns 7, i.e. the number of columns of the resulting matrix \endcode // \subsection matrix_operations_size size() // // The \c size() function returns the total number of elements of a matrix: \code // Instantiating a dynamic matrix with 6 rows and 8 columns blaze::DynamicMatrix<int> M1( 6UL, 8UL ); size( M1 ); // Returns 48 // Instantiating a compressed matrix with 8 rows and 7 columns blaze::CompressedMatrix<double> M2( 8UL, 7UL ); size( M2 ); // Returns 56 \endcode // \subsection matrix_operations_spacing .spacing() / spacing() // // The total number of elements of a row or column of a dense matrix, including potential padding // elements, can be acquired via the \c spacing member function. In case of a row-major matrix // (i.e. in case the storage order is set to blaze::rowMajor) the function returns the spacing // between two rows, in case of a column-major matrix (i.e. 
in case the storage order is set to // blaze::columnMajor) the function returns the spacing between two columns: \code // Instantiating a row-major dynamic matrix with 7 rows and 8 columns blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 8UL ); M1.spacing(); // Returns the total number of elements in a row // Instantiating a column-major dynamic matrix with 8 rows and 12 columns blaze::DynamicMatrix<double,blaze::columnMajor> M2( 8UL, 12UL ); M2.spacing(); // Returns the total number of elements in a column \endcode // Alternatively, the free function \c spacing() can be used to query the current number of // elements in a row/column. \code spacing( M1 ); // Returns the total number of elements in a row spacing( M2 ); // Returns the total number of elements in a column \endcode // \n \subsection matrix_operations_capacity .capacity() / capacity() // // The \c capacity() member function returns the internal capacity of a dense or sparse matrix. // Note that the capacity of a matrix doesn't have to be equal to the size of a matrix. In case of // a dense matrix the capacity will always be greater than or equal to the total number of elements // of the matrix. In case of a sparse matrix, the capacity will usually be much less than the // total number of elements. \code blaze::DynamicMatrix<float> M1( 5UL, 7UL ); blaze::StaticMatrix<float,7UL,4UL> M2; M1.capacity(); // Returns at least 35 M2.capacity(); // Returns at least 28 \endcode // There is also a free function \c capacity() available to query the capacity. However, please // note that this function cannot be used to query the capacity of a matrix expression: \code capacity( M1 ); // Returns at least 35, i.e. has the same effect as the member function capacity( M2 ); // Returns at least 28, i.e. has the same effect as the member function capacity( M1 * M2 ); // Compilation error! \endcode // \n \subsection matrix_operations_nonzeros .nonZeros() / nonZeros() // // For both dense and sparse matrices the current number of non-zero elements can be queried // via the \c nonZeros() member function. In case of matrices there are two flavors of the // \c nonZeros() function: One returns the total number of non-zero elements in the matrix, // the second returns the number of non-zero elements in a specific row (in case of a row-major // matrix) or column (in case of a column-major matrix). Sparse matrices directly return their // number of non-zero elements, dense matrices traverse their elements and count the number of // non-zero elements. \code blaze::DynamicMatrix<int,rowMajor> M1( 3UL, 5UL ); // ... Initializing the dense matrix M1.nonZeros(); // Returns the total number of non-zero elements in the dense matrix M1.nonZeros( 2 ); // Returns the number of non-zero elements in row 2 \endcode \code blaze::CompressedMatrix<double,columnMajor> M2( 4UL, 7UL ); // ... Initializing the sparse matrix M2.nonZeros(); // Returns the total number of non-zero elements in the sparse matrix M2.nonZeros( 3 ); // Returns the number of non-zero elements in column 3 \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in a // matrix expression.
However, the result is not the exact number of non-zero elements, but may be // a rough estimation: \code nonZeros( M1 ); // Has the same effect as the member function nonZeros( M1, 2 ); // Has the same effect as the member function nonZeros( M2 ); // Has the same effect as the member function nonZeros( M2, 3 ); // Has the same effect as the member function nonZeros( M1 * M2 ); // Estimates the number of non-zero elements in the matrix expression \endcode // \n \subsection matrix_operations_isempty isEmpty() // // The \c isEmpty() function returns whether the total number of elements of the matrix is zero: \code blaze::DynamicMatrix<int> A; // Create an empty matrix isEmpty( A ); // Returns true A.resize( 5, 0 ); // Resize to a 5x0 matrix isEmpty( A ); // Returns true A.resize( 5, 3 ); // Resize to a 5x3 matrix isEmpty( A ); // Returns false \endcode // \n \subsection matrix_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse matrix for non-a-number // elements: \code blaze::DynamicMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode \code blaze::CompressedMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode // If at least one element of the matrix is not-a-number, the function returns \c true, otherwise // it returns \c false. Please note that this function only works for matrices with floating point // elements. The attempt to use it for a matrix with a non-floating point element type results in // a compile time error. // // // \n \subsection matrix_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse matrix is in default state: \code blaze::HybridMatrix<int,5UL,4UL> A; // ... Resizing and initialization if( isDefault( A ) ) { ... } \endcode // A matrix is in default state if it appears to just have been default constructed. All resizable // matrices (\c HybridMatrix, \c DynamicMatrix, or \c CompressedMatrix) and \c CustomMatrix are in // default state if its size is equal to zero. A non-resizable matrix (\c StaticMatrix and all // submatrices) is in default state if all its elements are in default state. For instance, in case // the matrix is instantiated for a built-in integral or floating point data type, the function // returns \c true in case all matrix elements are 0 and \c false in case any matrix element is // not 0. // // // \n \subsection matrix_operations_isSquare isSquare() // // Whether a dense or sparse matrix is a square matrix (i.e. if the number of rows is equal to the // number of columns) can be checked via the \c isSquare() function: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization if( isSquare( A ) ) { ... } \endcode // \n \subsection matrix_operations_issymmetric isSymmetric() // // Via the \c isSymmetric() function it is possible to check whether a dense or sparse matrix // is symmetric: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isSymmetric( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be symmetric! // // // \n \subsection matrix_operations_isUniform isUniform() // // In order to check if all matrix elements are identical, the \c isUniform() function can be used: \code blaze::DynamicMatrix<int> A; // ... Resizing and initialization if( isUniform( A ) ) { ... } \endcode // Note that in case of a sparse matrix also the zero elements are also taken into account! 
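// For instance, a small illustrative sketch (the values are chosen arbitrarily) of how the zero
// elements of a sparse matrix enter the check:

\code
   blaze::CompressedMatrix<int> S( 2UL, 3UL );
   S(0,0) = 7;
   S(0,1) = 7;      // Only two of the six elements are stored; the remaining four are zero

   isUniform( S );  // Returns false, since the (implicit) zero elements differ from 7

   reset( S );      // Resets the stored elements to 0
   isUniform( S );  // Returns true, since now all six elements compare equal to 0
\endcode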
// // // \n \subsection matrix_operations_isZero isZero() // // In order to check if all matrix elements are zero, the \c isZero() function can be used: \code blaze::DynamicMatrix<int> A; // ... Resizing and initialization if( isZero( A ) ) { ... } \endcode // \n \subsection matrix_operations_islower isLower() // // Via the \c isLower() function it is possible to check whether a dense or sparse matrix is // lower triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower triangular! // // // \n \subsection matrix_operations_isunilower isUniLower() // // Via the \c isUniLower() function it is possible to check whether a dense or sparse matrix is // lower unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower unitriangular! // // // \n \subsection matrix_operations_isstrictlylower isStrictlyLower() // // Via the \c isStrictlyLower() function it is possible to check whether a dense or sparse matrix // is strictly lower triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isStrictlyLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be strictly lower triangular! // // // \n \subsection matrix_operations_isUpper isUpper() // // Via the \c isUpper() function it is possible to check whether a dense or sparse matrix is // upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper triangular! // // // \n \subsection matrix_operations_isuniupper isUniUpper() // // Via the \c isUniUpper() function it is possible to check whether a dense or sparse matrix is // upper unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper unitriangular! // // // \n \subsection matrix_operations_isstrictlyupper isStrictlyUpper() // // Via the \c isStrictlyUpper() function it is possible to check whether a dense or sparse matrix // is strictly upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isStrictlyUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be strictly upper triangular! // // // \n \subsection matrix_operations_isdiagonal isDiagonal() // // The \c isDiagonal() function checks if the given dense or sparse matrix is a diagonal matrix, // i.e. if it has only elements on its diagonal and if the non-diagonal elements are default // elements: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isDiagonal( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be diagonal! // // // \n \subsection matrix_operations_isidentity isIdentity() // // The \c isIdentity() function checks if the given dense or sparse matrix is an identity matrix, // i.e. if all diagonal elements are 1 and all non-diagonal elements are 0: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isIdentity( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be identity matrices! 
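// To make the distinction between the preceding predicates concrete, a small illustrative sketch
// (the matrix values are chosen arbitrarily):

\code
   blaze::StaticMatrix<int,3UL,3UL> L{ { 1, 0, 0 }
                                     , { 2, 3, 0 }
                                     , { 4, 5, 6 } };

   isLower( L );          // Returns true: all elements above the diagonal are 0
   isUniLower( L );       // Returns false: the diagonal elements are not all 1
   isStrictlyLower( L );  // Returns false: the diagonal elements are not all 0
   isUpper( L );          // Returns false: there are non-zero elements below the diagonal
   isDiagonal( L );       // Returns false: there are non-zero elements off the diagonal
   isIdentity( L );       // Returns false
\endcode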
// // // \n \subsection matrix_operations_ispositivedefinite isPositiveDefinite() // // The \c isPositiveDefinite() function checks if the given dense matrix is positive definite. \code blaze::DynamicMatrix<double> A; // ... Initialization if( isPositiveDefinite( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be positive definite! // // \note The \c isPositiveDefinite() function can only be used for dense matrices with \c float, // \c double, \c complex<float> or \c complex<double> element type. The attempt to call the // function with matrices of any other element type or with a sparse matrix results in a compile // time error! // // \note The function is depending on LAPACK kernels. Thus the function can only be used if a // fitting LAPACK library is available and linked to the executable. Otherwise a linker error // will be created. // // // \n \subsection matrix_operations_matrix_trans trans() // // Matrices can be transposed via the \c trans() function. Row-major matrices are transposed into // a column-major matrix and vice versa: \code blaze::DynamicMatrix<int,rowMajor> M1( 5UL, 2UL ); blaze::CompressedMatrix<int,columnMajor> M2( 3UL, 7UL ); M1 = M2; // Assigning a column-major matrix to a row-major matrix M1 = trans( M2 ); // Assigning the transpose of M2 (i.e. a row-major matrix) to M1 M1 += trans( M2 ); // Addition assignment of two row-major matrices \endcode // \n \subsection matrix_operations_ctrans ctrans() // // The conjugate transpose of a dense or sparse matrix (also called adjoint matrix, Hermitian // conjugate, or transjugate) can be computed via the \c ctrans() function: \code blaze::DynamicMatrix< complex<float>, rowMajor > M1( 5UL, 2UL ); blaze::CompressedMatrix< complex<float>, columnMajor > M2( 2UL, 5UL ); M1 = ctrans( M2 ); // Compute the conjugate transpose matrix \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() function in any order: \code M1 = trans( conj( M2 ) ); // Computing the conjugate transpose matrix M1 = conj( trans( M2 ) ); // Computing the conjugate transpose matrix \endcode // \n \subsection matrix_operations_reverse reverse() // // Via the \c reverse() function is is possible to reverse the rows or columns of a dense or sparse // matrix. The following examples gives an impression of both alternatives: \code blaze::DynamicMatrix<int,rowMajor> A{ { 1, 0, 2, 3 }, { 2, 4, 0, 1 }, { 0, 3, 1, 0 } }; blaze::DynamicMatrix<int> B; // Reversing the rows result in the matrix // // ( 0 3 1 0 ) // ( 2 4 0 1 ) // ( 1 0 2 3 ) // B = reverse<rowwise>( A ); // Reversing the columns result in the matrix // // ( 3 2 0 1 ) // ( 1 0 4 2 ) // ( 0 1 3 0 ) // B = reverse<columnwise>( A ); \endcode // \n \subsection matrix_operations_evaluate eval() / evaluate() // // The \c evaluate() function forces an evaluation of the given matrix expression and enables // an automatic deduction of the correct result type of an operation. The following code example // demonstrates its intended use for the multiplication of a lower and a strictly lower dense // matrix: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::StrictlyLowerMatrix; LowerMatrix< DynamicMatrix<double> > A; StrictlyLowerMatrix< DynamicMatrix<double> > B; // ... Resizing and initialization auto C = evaluate( A * B ); \endcode // In this scenario, the \c evaluate() function assists in deducing the exact result type of // the operation via the \c auto keyword. 
Please note that if \c evaluate() is used in this // way, no temporary matrix is created and no copy operation is performed. Instead, the result // is directly written to the target matrix due to the return value optimization (RVO). However, // if \c evaluate() is used in combination with an explicit target type, a temporary will be // created and a copy operation will be performed if the used type differs from the type // returned from the function: \code StrictlyLowerMatrix< DynamicMatrix<double> > D( A * B ); // No temporary & no copy operation LowerMatrix< DynamicMatrix<double> > E( A * B ); // Temporary & copy operation DynamicMatrix<double> F( A * B ); // Temporary & copy operation D = evaluate( A * B ); // Temporary & copy operation \endcode // Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger // expression. However, please note that \c evaluate() is not intended to be used for this // purpose. This task is more elegantly and efficiently handled by the \c eval() function: \code blaze::DynamicMatrix<double> A, B, C, D; D = A + evaluate( B * C ); // Unnecessary creation of a temporary matrix D = A + eval( B * C ); // No creation of a temporary matrix \endcode // In contrast to the \c evaluate() function, \c eval() can take the complete expression // into account and therefore can guarantee the most efficient way to evaluate it (see also // \ref intra_statement_optimization). // // \n \subsection matrix_operations_noalias noalias() // // The \b Blaze library is able to reliably detect aliasing during the assignment of matrices. // In case the aliasing would lead to an incorrect result, \b Blaze introduces an intermediate // temporary of the appropriate type to break the aliasing. For instance, in the following // example \b Blaze performs an alias detection in both assignments, but only, in the second // assignment it detects a problematic aliasing and uses an intermediate temporary in order // to be able to compute the correct result: \code blaze::DynamicMatrix<double> A, B; A = A + B; // No problematic aliasing of A, no intermediate temporary is required. A = A * B; // Problematic aliasing of A; intermediate temporary required! \endcode // The detection of aliasing effects, however, takes a small runtime effort. In order to disable // the aliasing detection, the \c noalias() function can be used: \code blaze::DynamicMatrix<double> A, B; A = noalias( A + B ); // No alias detection performed, no intermediate temporary. A = noalias( A * B ); // No alias detection performed, no intermediate temporary. // Note that the final result will be incorrect! \endcode // \warning The \c noalias() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Using \c noalias() in a situation // where an aliasing effect occurs leads to undefined behavior (which can be violated invariants // or wrong computation results)! // // \n \subsection matrix_operations_nosimd nosimd() // // By default, \b Blaze attempts to vectorize all operations by means of SSE, AVX, etc. in order // to achieve maximum performance. 
However, via the \c nosimd() operation it is possible to disable // the SIMD evaluation of any operation: \code blaze::DynamicMatrix<double> A, B; A = nosimd( A + B ); // Disables SIMD for the matrix/matrix addition A = nosimd( A * B ); // Disables SIMD for the matrix/matrix multiplication \endcode // Please note that the main purpose of the \c nosimd() operation is to enable an easy performance // comparison between the vectorized and non-vectorized evaluation. Using the \c nosimd() operation // will likely result in significantly reduced performance! // // // \n \section matrix_operations_modifying_operations Modifying Operations // <hr> // // \subsection matrix_operations_resize_reserve .resize() / .reserve() // // The dimensions of a \c StaticMatrix are fixed at compile time by the second and third template // parameter and a \c CustomMatrix cannot be resized. In contrast, the number or rows and columns // of \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix can be changed at runtime: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<int,rowMajor> M1; CompressedMatrix<int,columnMajor> M2( 3UL, 2UL ); // Adapting the number of rows and columns via the resize() function. The (optional) // third parameter specifies whether the existing elements should be preserved. Per // default, the existing elements are preserved. M1.resize( 2UL, 2UL ); // Resizing matrix M1 to 2x2 elements. Elements of built-in type // remain uninitialized, elements of class type are default // constructed. M1.resize( 3UL, 1UL, false ); // Resizing M1 to 3x1 elements. The old elements are lost, the // new elements are NOT initialized! M2.resize( 5UL, 7UL, true ); // Resizing M2 to 5x7 elements. The old elements are preserved. M2.resize( 3UL, 2UL, false ); // Resizing M2 to 3x2 elements. The old elements are lost. \endcode // Note that resizing a matrix invalidates all existing views (see e.g. \ref views_submatrices) // on the matrix: \code blaze::DynamicMatrix<int,rowMajor> M1( 10UL, 20UL ); // Creating a 10x20 matrix auto row8 = row( M1, 8UL ); // Creating a view on the 8th row of the matrix M1.resize( 6UL, 20UL ); // Resizing the matrix invalidates the view \endcode // When the internal capacity of a matrix is no longer sufficient, the allocation of a larger // junk of memory is triggered. In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicMatrix<int> M1; M1.reserve( 100 ); M1.rows(); // Returns 0 M1.capacity(); // Returns at least 100 \endcode // Additionally it is possible to reserve memory in a specific row (for a row-major matrix) or // column (for a column-major matrix): \code blaze::CompressedMatrix<int> M1( 4UL, 6UL ); M1.reserve( 1, 4 ); // Reserving enough space for four non-zero elements in row 1 \endcode // \n \subsection matrix_operations_shrinkToFit .shrinkToFit() // // The internal capacity of matrices with dynamic memory is preserved in order to minimize the // number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead // to memory overhead. 
The \c shrinkToFit() member function can be used to minimize the internal // capacity: \code blaze::DynamicMatrix<int> M1( 100UL, 100UL ); // Create a 100x100 integer matrix M1.resize( 10UL, 10UL ); // Resize to 10x10, but the capacity is preserved M1.shrinkToFit(); // Remove the unused capacity \endcode // Please note that due to padding the capacity might not be reduced exactly to \c rows() times // \c columns(). Please also note that in case a reallocation occurs, all iterators (including // \c end() iterators), all pointers and references to elements of this matrix are invalidated. // // // \subsection matrix_operations_reset_clear reset() / clear() // // In order to reset all elements of a dense or sparse matrix, the \c reset() function can be // used. The number of rows and columns of the matrix are preserved: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Resetting all elements to 0.0F. reset( M1 ); // Resetting all elements M1.rows(); // Returns 4: size and capacity remain unchanged \endcode // Alternatively, only a single row or column of the matrix can be reset: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 6UL, 5 ); // Setup of a row-major matrix blaze::DynamicMatrix<int,blaze::columnMajor> M2( 4UL, 5UL, 4 ); // Setup of a column-major matrix reset( M1, 2UL ); // Resetting the 2nd row of the row-major matrix reset( M2, 3UL ); // Resetting the 3rd column of the column-major matrix \endcode // In order to reset a row of a column-major matrix or a column of a row-major matrix, use a // row or column view (see \ref views_rows and \ref views_columns). // // In order to return a matrix to its default state (i.e. the state of a default constructed // matrix), the \c clear() function can be used: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Clearing the matrix, i.e. returning it to its default state. clear( M1 ); // Clearing the entire matrix M1.rows(); // Returns 0: size is reset, but capacity remains unchanged \endcode // \n \subsection matrix_operations_matrix_transpose transpose() // // In addition to the non-modifying \c trans() function, matrices can be transposed in-place via // the \c transpose() function: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); transpose( M ); // In-place transpose operation. M = trans( M ); // Same as above \endcode // Note however that the transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. // // // \n \subsection matrix_operations_ctranspose ctranspose() // // The \c ctranspose() function can be used to perform an in-place conjugate transpose operation: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); ctranspose( M ); // In-place conjugate transpose operation. M = ctrans( M ); // Same as above \endcode // Note however that the conjugate transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix.
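// As a brief illustration of both in-place operations (the sizes and the element type below are
// chosen arbitrarily for this sketch):

\code
   blaze::DynamicMatrix< std::complex<double> > A( 5UL, 2UL );
   // ... Initialization
   ctranspose( A );  // OK: A is resizable; afterwards A is a 2x5 matrix holding the conjugated elements

   blaze::StaticMatrix<double,5UL,2UL> B;
   // transpose( B );  // Fails: B has a fixed size and is non-square (see the conditions above)
\endcode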
// // // \n \subsection matrix_operations_swap swap() // // Via the \c swap() function it is possible to completely swap the contents of two matrices // of the same type: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 10UL, 15UL ); blaze::DynamicMatrix<int,blaze::rowMajor> M2( 20UL, 10UL ); swap( M1, M2 ); // Swapping the contents of M1 and M2 \endcode // \n \section matrix_operations_arithmetic_operations Arithmetic Operations // <hr> // // \subsection matrix_operations_min_max min() / max() // // The \c min() and \c max() functions can be used for a single matrix, multiple matrices, and // a matrix and a scalar. // // <b>Single Matrix</b> // // If passed a single matrix, the functions return the smallest and largest element of the given // dense matrix or the smallest and largest non-zero element of the given sparse matrix, // respectively: \code blaze::StaticMatrix<int,2UL,3UL> A{ { -5, 2, 7 }, { -4, 0, 1 } }; min( A ); // Returns -5 max( A ); // Returns 7 \endcode \code blaze::CompressedMatrix<int> B{ { 1, 0, 3 }, { 0, 0, 0 } }; min( B ); // Returns 1 max( B ); // Returns 3 \endcode // For more information on the unary \c min() and \c max() reduction operations see the // \ref matrix_operations_reduction_operations section. // // <b>Multiple Matrices</b> // // If passed two or more dense matrices, the \c min() and \c max() functions compute the // componentwise minimum or maximum of the given matrices, respectively: \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> C{ { -5, 1, -7 }, { 4, 1, 0 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> D{ { -5, 3, 0 }, { 2, 2, -2 } }; min( A, C ); // Results in the matrix ( -5, 1, -7 ) ( -4, 0, 0 ) max( A, C, D ); // Results in the matrix ( -5, 3, 7 ) ( 4, 2, 1 ) \endcode // Please note that sparse matrices can only be used in the unary \c min() and \c max() functions. // Also note that all forms of the \c min() and \c max() functions can be used to compute the // smallest and largest element of a matrix expression: \code min( A + B + C ); // Returns -9, i.e. the smallest value of the resulting matrix max( A - B - C ); // Returns 11, i.e. the largest value of the resulting matrix \endcode // <b>Matrix and Scalar</b> // // If passed a dense matrix and a scalar, the \c min() and \c max() functions compute the // componentwise minimum or maximum between the given matrix and a uniform matrix represented by // the scalar value: \code min( A, 0 ); // Results in the matrix ( -5, 0, 0 ) ( -4, 0, 0 ) min( 0, A ); // Results in the matrix ( -5, 0, 0 ) ( -4, 0, 0 ) max( A, 0 ); // Results in the matrix ( 0, 2, 7 ) ( 0, 0, 1 ) max( 0, A ); // Results in the matrix ( 0, 2, 7 ) ( 0, 0, 1 ) \endcode // \n \subsection matrix_operators_softmax softmax() // // The <a href="https://en.wikipedia.org/wiki/Softmax_function">softmax function</a>, also called // the normalized exponential function, of a given dense matrix can be computed via \c softmax(). // The resulting dense matrix consists of real values in the range (0..1], which add up to 1. \code blaze::StaticMatrix<double,3UL,3UL> A{ { 1.0, 2.0, 3.0 } , { 4.0, 1.0, 2.0 } , { 3.0, 4.0, 1.0 } }; blaze::StaticMatrix<double,3UL,3UL> B; // Evaluating the softmax function B = softmax( A ); // Results in ( 0.0157764 0.0428847 0.116573 ) // ( 0.316878 0.0157764 0.0428847 ) // ( 0.116573 0.316878 0.0157764 ) double b = sum( B ); // Results in 1 \endcode // Alternatively it is possible to compute a row- or columnwise \c softmax() function.
The // resulting dense matrix consists of real values in the range (0..1], which add up to the number // of rows or columns, respectively. \code using blaze::rowwise; using blaze::columnwise; blaze::StaticMatrix<double,3UL,3UL> C, D; // Evaluating the rowwise softmax function C = softmax<rowwise>( A ); // Results in ( 0.0900306 0.244728 0.665241 ) // ( 0.843795 0.0420101 0.114195 ) // ( 0.259496 0.705385 0.035119 ) double c = sum( C ); // Results in 3 (the number of rows of A) // Evaluating the columnwise softmax function D = softmax<columnwise>( A ); // Results in ( 0.035119 0.114195 0.665241 ) // ( 0.705385 0.0420101 0.244728 ) // ( 0.259496 0.843795 0.0900306 ) double d = sum( D ); // Results in 3 (the number of columns of A) \endcode // \n \subsection matrix_operators_trace trace() // // The \c trace() function sums the diagonal elements of a square dense or sparse matrix: \code blaze::StaticMatrix<int,3UL,3UL> A{ { -1, 2, -3 } , { -4, -5, 6 } , { 7, -8, -9 } }; trace( A ); // Returns the sum of the diagonal elements, i.e. -15 \endcode // In case the given matrix is not a square matrix, a \c std::invalid_argument exception is // thrown. // // // \n \subsection matrix_operations_matrix_determinant det() // // The determinant of a square dense matrix can be computed by means of the \c det() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization double d = det( A ); // Compute the determinant of A \endcode // In case the given dense matrix is not a square matrix, a \c std::invalid_argument exception is // thrown. // // \note The \c det() function can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The function is depending on LAPACK kernels. Thus the function can only be used if a // fitting LAPACK library is available and linked to the executable. Otherwise a linker error // will be created. // // // \n \subsection matrix_operators_rank rank() // // The \c rank() function computes the rank of a given dense matrix: \code blaze::DynamicMatrix<double> A( 5UL, 8UL ); // ... Initialization rank( A ); \endcode // The rank is determined as the number of singular values greater than a given tolerance. This // tolerance is computed as \code tolerance = max(m,n) * max(s) * epsilon, \endcode // where \c m is the number of rows of the dense matrix, \c n is the number of columns of the // dense matrix, \c max(s) is the maximum singular value of the dense matrix and \c epsilon is // the difference between 1 and the least value greater than 1 that is representable by the // floating point type of the singular values. // // \note The \c rank() function can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The function is depending on LAPACK kernels. Thus the function can only be used if a // fitting LAPACK library is available and linked to the executable. Otherwise a linker error // will be created. // // // \n \subsection matrix_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a matrix. 
// For instance, the following computation \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, -3 }, { 4, -5, 6 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( abs( A ) ); \endcode // results in the matrix \f$ B = \left(\begin{array}{*{3}{c}} 1 & 2 & 3 \\ 4 & 5 & 6 \\ \end{array}\right)\f$ // \n \subsection matrix_operators_sign sign() // // The \c sign() function can be used to evaluate the sign of each element of a matrix \a A. For // each element \c (i,j) the corresponding result is 1 if \a A(i,j) is greater than zero, 0 if // \a A(i,j) is zero, and -1 if \a A(i,j) is less than zero. For instance, the following use of // the \c sign() function \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, 0 }, { 4, 0, -6 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( sign( A ) ); \endcode // results in the matrix \f$ B = \left(\begin{array}{*{3}{c}} -1 & 1 & 0 \\ 1 & 0 & -1 \\ \end{array}\right)\f$ // \n \subsection matrix_operators_rounding_functions floor() / ceil() / trunc() / round() // // The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up // each element of a matrix, respectively: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = floor( A ); // Rounding down each element of the matrix B = ceil ( A ); // Rounding up each element of the matrix B = trunc( A ); // Truncating each element of the matrix B = round( A ); // Rounding each element of the matrix \endcode // \n \subsection matrix_operators_conj conj() // // The \c conj() function can be applied on a dense or sparse matrix to compute the complex // conjugate of each element of the matrix: \code using blaze::StaticMatrix; using cplx = std::complex<double>; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Computing the matrix of conjugate values // ( (1, 0) (-2, 1) ) // ( (1,-1) ( 0,-1) ) StaticMatrix<cplx,2UL,2UL> B; B = conj( A ); \endcode // Additionally, matrices can be conjugated in-place via the \c conjugate() function: \code blaze::DynamicMatrix<cplx> C( 5UL, 2UL ); conjugate( C ); // In-place conjugate operation. 
C = conj( C ); // Same as above \endcode // \n \subsection matrix_operators_real real() // // The \c real() function can be used on a dense or sparse matrix to extract the real part of // each element of the matrix: \code using blaze::StaticMatrix; using cplx = std::complex<double>; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the real part of each matrix element // ( 1 -2 ) // ( 1 0 ) StaticMatrix<double,2UL,2UL> B; B = real( A ); \endcode // \n \subsection matrix_operators_imag imag() // // The \c imag() function can be used on a dense or sparse matrix to extract the imaginary part // of each element of the matrix: \code using blaze::StaticMatrix; using cplx = std::complex<double>; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the imaginary part of each matrix element // ( 0 -1 ) // ( 1 1 ) StaticMatrix<double,2UL,2UL> B; B = imag( A ); \endcode // \n \subsection matrix_operators_arg arg() // // The \c arg() function can be used on a dense or sparse matrix to compute the phase angle for // each element of the matrix: \code using blaze::StaticMatrix; using cplx = std::complex<double>; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Computing the phase angle of each matrix element // ( 0.0 -2.67795 ) // ( 0.785398 1.5708 ) StaticMatrix<double,2UL,2UL> B; B = arg( A ); \endcode // \n \subsection matrix_operators_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // matrix can be computed: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; B = sqrt( A ); // Computes the square root of each element C = invsqrt( A ); // Computes the inverse square root of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root // of each element of a matrix: \code blaze::DynamicMatrix<double> A, B, C; B = cbrt( A ); // Computes the cubic root of each element C = invcbrt( A ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operations_hypot hypot() // // The \c hypot() function can be used to compute the componentwise hypotenuse for a pair of // dense matrices: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; C = hypot( A, B ); // Computes the componentwise hypotenuse \endcode // \n \subsection matrix_operators_clamp clamp() // // The \c clamp() function can be used to restrict all elements of a matrix to a specific range: \code blaze::DynamicMatrix<double> A, B; B = clamp( A, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_pow pow() // // The \c pow() function can be used to compute the exponential value of each element of a matrix.
// If passed a matrix and a numeric exponent, the function computes the exponential value of each // element of the matrix using the same exponent. If passed a second matrix, the function computes // the componentwise exponential value: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; C = pow( A, 1.2 ); // Computes the exponential value of each element C = pow( A, B ); // Computes the componentwise exponential value \endcode // \n \subsection matrix_operators_exp exp() / exp2() / exp10() // // \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a // matrix, respectively: \code blaze::HybridMatrix<double,3UL,3UL> A, B; B = exp( A ); // Computes the base e exponential of each element B = exp2( A ); // Computes the base 2 exponential of each element B = exp10( A ); // Computes the base 10 exponential of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_log log() / log2() / log10() // // The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary // and common logarithm of each element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = log( A ); // Computes the natural logarithm of each element B = log2( A ); // Computes the binary logarithm of each element B = log10( A ); // Computes the common logarithm of each element \endcode // \n \subsection matrix_operators_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sin( A ); // Computes the sine of each element of the matrix B = cos( A ); // Computes the cosine of each element of the matrix B = tan( A ); // Computes the tangent of each element of the matrix B = asin( A ); // Computes the inverse sine of each element of the matrix B = acos( A ); // Computes the inverse cosine of each element of the matrix B = atan( A ); // Computes the inverse tangent of each element of the matrix \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! 
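//
// As an illustrative sketch (the matrix contents are chosen arbitrarily), these elementwise
// functions can be freely combined within a single expression. For instance, since
// \f$ \sin^2(x) + \cos^2(x) = 1 \f$ holds for every element, the following assignment yields a
// uniform matrix of ones:

   \code
   blaze::DynamicMatrix<double> A( 3UL, 3UL, 0.5 );  // 3x3 matrix, all elements initialized to 0.5
   blaze::DynamicMatrix<double> B;

   B = sqrt( pow( sin( A ), 2.0 ) + pow( cos( A ), 2.0 ) );  // Results in a 3x3 matrix of ones
   \endcode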
// // // \n \subsection matrix_operators_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sinh( A ); // Computes the hyperbolic sine of each element of the matrix B = cosh( A ); // Computes the hyperbolic cosine of each element of the matrix B = tanh( A ); // Computes the hyperbolic tangent of each element of the matrix B = asinh( A ); // Computes the inverse hyperbolic sine of each element of the matrix B = acosh( A ); // Computes the inverse hyperbolic cosine of each element of the matrix B = atanh( A ); // Computes the inverse hyperbolic tangent of each element of the matrix \endcode // \n \subsection matrix_operations_atan2 atan2() // // The multi-valued inverse tangent is available for a pair of dense matrices: \code blaze::DynamicMatrix<double> A, B, C; C = atan2( A, B ); // Computes the componentwise multi-valued inverse tangent \endcode // \n \subsection matrix_operators_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = erf( A ); // Computes the error function of each element B = erfc( A ); // Computes the complementary error function of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operations_map map() / forEach() // // Via the \c map() functions it is possible to execute componentwise custom operations on matrices. // The unary \c map() function can be used to apply a custom operation on each element of a // dense or sparse matrix. For instance, the following example demonstrates a custom square root // computation via a lambda: \code blaze::DynamicMatrix<double> A, B; B = map( A, []( double d ) { return std::sqrt( d ); } ); \endcode // The N-ary \c map() functions can be used to apply an operation componentwise to the elements // of N dense matrices (where \f$ N <= 6 \f$). The following example demonstrates the merging of // two matrices of double precision values into a matrix of double precision complex numbers: \code blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } }; blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } }; blaze::DynamicMatrix< complex<double> > cplx; // Creating the matrix // ( ( 2.1, 0.3) (-4.2, 1.4) ) // ( ( 1.0, 2.9) ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } ); \endcode // Although the computation can be parallelized it is not vectorized and thus cannot perform at // peak performance. However, it is also possible to create vectorized custom operations. See // \ref custom_operations for a detailed overview of the possibilities of custom operations. // // Please note that unary custom operations on vectors have been introduced in \b Blaze 3.0 in // form of the \c forEach() function. With the introduction of binary custom functions, the // \c forEach() function has been renamed to \c map(). The \c forEach() function can still be // used, but the function might be deprecated in future releases of \b Blaze. // // // \n \subsection matrix_operations_select select() // // The \c select() function performs a componentwise, conditional selection of elements. 
Given // the three dense matrices \c cond, \c A, and \c B, in case an element in the \c cond matrix // evaluates to \a true, the corresponding element of \a A is selected, in case the \a cond element // evaluates to \a false, the corresponding element of \a B is selected. The following example // demonstrates the use of the \c select() function: \code blaze::DynamicMatrix<bool> cond{ { true, false }, { true, false } }; blaze::DynamicMatrix<int> A{ { 1, -1 }, { 1, -1 } }; blaze::DynamicMatrix<int> B{ { -2, 2 }, { -2, 2 } }; blaze::DynamicMatrix<int> C; // ... Resizing and initialization C = select( cond, A, B ); // Results in ( 1, 2 ) ( 1, 2 ) \endcode // \n \section matrix_operations_reduction_operations Reduction Operations // <hr> // // \subsection matrix_operations_reduction_operations_reduce reduce() // // The \c reduce() function performs either a total reduction, a rowwise reduction or a columnwise // reduction of the elements of the given dense matrix or the non-zero elements of the given sparse // matrix. The following examples demonstrate the total reduction of a dense and sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double totalsum1 = reduce( A, blaze::Add() ); const double totalsum2 = reduce( A, []( double a, double b ){ return a + b; } ); \endcode \code blaze::CompressedMatrix<double> A; // ... Resizing and initialization const double totalsum1 = reduce( A, blaze::Add() ); const double totalsum2 = reduce( A, []( double a, double b ){ return a + b; } ); \endcode // By specifying \c blaze::columnwise or \c blaze::rowwise the \c reduce() function performs a // column-wise or row-wise reduction, respectively. In case \c blaze::columnwise is specified, the // (non-zero) elements of the matrix are reduced column-wise and the result is a row vector. In // case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are reduced row-wise // and the result is a column vector: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; blaze::DynamicVector<double,rowVector> colsum1, colsum2; // ... Resizing and initialization colsum1 = reduce<columnwise>( A, blaze::Add() ); colsum2 = reduce<columnwise>( B, []( double a, double b ){ return a + b; } ); \endcode \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; blaze::DynamicVector<double,columnVector> rowsum1, rowsum2; // ... Resizing and initialization rowsum1 = reduce<rowwise>( A, blaze::Add() ); rowsum2 = reduce<rowwise>( B, []( double a, double b ){ return a + b; } ); \endcode // As demonstrated in the examples it is possible to pass any binary callable as custom reduction // operation. However, for instance in the case of lambdas the vectorization of the reduction // operation is compiler dependent and might not perform at peak performance. Note that it is also // possible to create vectorized custom operations. See \ref custom_operations for a detailed // overview of the possibilities of custom operations. // // Please note that the evaluation order of the \c reduce() function is unspecified. Thus the // behavior is non-deterministic if the given reduction operation is not associative or not // commutative. Also, the operation is undefined if the given reduction operation modifies the // values.
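//
// The following sketch (with arbitrarily chosen values) demonstrates a custom total reduction that
// is both associative and commutative and is therefore safe with respect to the unspecified
// evaluation order: computing the largest absolute value of a dense matrix.

   \code
   blaze::DynamicMatrix<double> A{ { -3.0,  2.0 },
                                   {  1.5, -7.0 } };

   // Total reduction with a custom binary operation
   const double maxabs = reduce( A, []( double a, double b ){
      return std::max( std::abs( a ), std::abs( b ) );
   } );  // Results in 7.0
   \endcode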
// // \n \subsection matrix_operations_reduction_operations_sum sum() // // The \c sum() function reduces the elements of the given dense matrix or the non-zero elements // of the given sparse matrix by means of addition: \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalsum = sum( A ); // Results in 10 \endcode \code blaze::CompressedMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalsum = sum( A ); // Results in 10 \endcode // By specifying \c blaze::columnwise or \c blaze::rowwise the \c sum() function performs a // column-wise or row-wise summation, respectively. In case \c blaze::columnwise is specified, // the (non-zero) elements of the matrix are summed up column-wise and the result is a row vector. // In case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are summed up // row-wise and the result is a column vector: \code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,rowVector> colsum1, colsum2; colsum1 = sum<columnwise>( A ); // Results in ( 2, 3, 6 ) colsum2 = sum<columnwise>( B ); // Same result \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,columnVector> rowsum1, rowsum2; rowsum1 = sum<rowwise>( A ); // Results in ( 3, 8 ) rowsum2 = sum<rowwise>( B ); // Same result \endcode // Please note that the evaluation order of the \c sum() function is unspecified. // // \n \subsection matrix_operations_reduction_operations_prod prod() // // The \c prod() function reduces the elements of the given dense matrix or the non-zero elements // of the given sparse matrix by means of multiplication: \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalprod = prod( A ); // Results in 24 \endcode \code blaze::CompressedMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalprod = prod( A ); // Results in 24 \endcode // By specifying \c blaze::columnwise or \c blaze::rowwise the \c prod() function performs a // column-wise or row-wise multiplication, respectively. In case \c blaze::columnwise is specified, // the (non-zero) elements of the matrix are multiplied column-wise and the result is a row vector. // In case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are multiplied // row-wise and the result is a column vector: \code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,rowVector> colprod1, colprod2; colprod1 = prod<columnwise>( A ); // Results in ( 1, 0, 8 ) colprod2 = prod<columnwise>( B ); // Results in ( 1, 3, 8 ) \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,columnVector> rowprod1, rowprod2; rowprod1 = prod<rowwise>( A ); // Results in ( 0, 12 ) rowprod2 = prod<rowwise>( B ); // Results in ( 2, 12 ) \endcode // Please note that the evaluation order of the \c prod() function is unspecified. // // \n \subsection matrix_operations_reduction_operations_min min() // // The unary \c min() function returns the smallest element of the given dense matrix or the // smallest non-zero element of the given sparse matrix. This function can only be used for // element types that support the smaller-than relationship.
In case the given matrix currently // has either 0 rows or 0 columns, the returned value is the default value (e.g. 0 in case of // fundamental data types). \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalmin = min( A ); // Results in 1 \endcode \code blaze::CompressedMatrix<int> A{ { 1, 0 }, { 3, 0 } }; const int totalmin = min( A ); // Results in 1 \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed matrix has only 2 non-zero elements. // However, the minimum of this matrix is 1. // // By specifying \c blaze::columnwise or \c blaze::rowwise the \c min() function determines the // smallest (non-zero) element in each row or column, respectively. In case \c blaze::columnwise // is specified, the smallest (non-zero) element of each column is determined and the result is // a row vector. In case \c blaze::rowwise is specified, the smallest (non-zero) element of each // row is determined and the result is a column vector. \code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,rowVector> colmin1, colmin2; colmin1 = min<columnwise>( A ); // Results in ( 1, 0, 2 ) colmin2 = min<columnwise>( B ); // Results in ( 1, 3, 2 ) \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,columnVector> rowmin1, rowmin2; rowmin1 = min<rowwise>( A ); // Results in ( 0, 1 ) rowmin2 = min<rowwise>( B ); // Results in ( 1, 1 ) \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. // // \n \subsection matrix_operations_reduction_operations_max max() // // The unary \c max() function returns the largest element of the given dense matrix or the // largest non-zero element of the given sparse matrix. This function can only be used for // element types that support the smaller-than relationship. In case the given matrix currently // has either 0 rows or 0 columns, the returned value is the default value (e.g. 0 in case of // fundamental data types). \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalmax = max( A ); // Results in 4 \endcode \code blaze::CompressedMatrix<int> A{ { -1, 0 }, { -3, 0 } }; const int totalmax = max( A ); // Results in -1 \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed matrix has only 2 non-zero elements. // However, the maximum of this matrix is -1. // // By specifying \c blaze::columnwise or \c blaze::rowwise the \c max() function determines the // largest (non-zero) element in each row or column, respectively. In case \c blaze::columnwise // is specified, the largest (non-zero) element of each column is determined and the result is // a row vector. In case \c blaze::rowwise is specified, the largest (non-zero) element of each // row is determined and the result is a column vector. 
\code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { -1, 0, -2 }, { -1, -3, -4 } }; blaze::DynamicVector<int,rowVector> colmax1, colmax2; colmax1 = max<columnwise>( A ); // Results in ( 1, 3, 4 ) colmax2 = max<columnwise>( B ); // Results in ( -1, -3, -2 ) \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { -1, 0, -2 }, { -1, -3, -4 } }; blaze::DynamicVector<int,columnVector> rowmax1, rowmax2; rowmax1 = max<rowwise>( A ); // Results in ( 2, 4 ) rowmax2 = max<rowwise>( B ); // Results in ( -1, -1 ) \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. // // // \n \section matrix_operations_norms Norms // <hr> // // \subsection matrix_operations_norms_norm norm() // // The \c norm() function computes the L2 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; // ... Resizing and initialization const double norm1 = norm( A ); const double norm2 = norm( B ); \endcode // \n \subsection matrix_operations_norms_sqrnorm sqrNorm() // // The \c sqrNorm() function computes the squared L2 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; // ... Resizing and initialization const double norm1 = sqrNorm( A ); const double norm2 = sqrNorm( B ); \endcode // \n \subsection matrix_operations_norms_l1norm l1Norm() // // The \c l1Norm() function computes the L1 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; // ... Resizing and initialization const double norm1 = l1Norm( A ); const double norm2 = l1Norm( B ); \endcode // \n \subsection matrix_operations_norms_l2norm l2Norm() // // The \c l2Norm() function computes the L2 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; // ... Resizing and initialization const double norm1 = l2Norm( A ); const double norm2 = l2Norm( B ); \endcode // \n \subsection matrix_operations_norms_l3norm l3Norm() // // The \c l3Norm() function computes the L3 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; // ... Resizing and initialization const double norm1 = l3Norm( A ); const double norm2 = l3Norm( B ); \endcode // \n \subsection matrix_operations_norms_l4norm l4Norm() // // The \c l4Norm() function computes the L4 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; // ... Resizing and initialization const double norm1 = l4Norm( A ); const double norm2 = l4Norm( B ); \endcode // \n \subsection matrix_operations_norms_lpnorm lpNorm() // // The \c lpNorm() function computes the general Lp norm of the given dense or sparse matrix, // where the norm is specified by either a compile time or a runtime argument: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; // ...
Resizing and initialization const double norm1 = lpNorm<2>( A ); // Compile time argument const double norm2 = lpNorm( B, 2.3 ); // Runtime argument \endcode // \n \subsection matrix_operations_norms_maxnorm linfNorm() / maxNorm() // // The \c linfNorm() and \c maxNorm() functions compute the infinity/maximum norm of the given // dense or sparse matrix: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; // ... Resizing and initialization const double norm1 = linfNorm( A ); const double norm2 = maxNorm( B ); \endcode // \n \section matrix_operations_scalar_expansion Scalar Expansion // <hr> // // By means of the \c uniform() function it is possible to expand a scalar value into a dense, // uniform matrix. By default, the resulting uniform matrix is a row-major matrix, but it is // possible to specify the storage order explicitly: \code using blaze::rowMajor; int scalar = 5; blaze::DynamicMatrix<int,rowMajor> A; // ... Resizing and initialization // Expansion of 'scalar' to a 3x5 row-major matrix // // ( 5 5 5 5 5 ) // ( 5 5 5 5 5 ) // ( 5 5 5 5 5 ) // A = uniform( 3UL, 5UL, scalar ); A = uniform<columnMajor>( 3UL, 5UL, scalar ); \endcode // \n \section matrix_operations_statistic_operations Statistic Operations // <hr> // // \subsection matrix_operations_mean mean() // // The <a href="https://en.wikipedia.org/wiki/Arithmetic_mean">(arithmetic) mean</a> of a dense or // sparse matrix can be computed via the \c mean() function. In case of a sparse matrix, both the // non-zero and zero elements are taken into account. The following example demonstrates the // computation of the mean of a dense matrix: \code blaze::DynamicMatrix<int> A{ { 1, 4, 3, 6, 7 } , { 2, 6, 3, 1, 0 } }; const double m = mean( A ); // Results in 3.3 (i.e. 33/10) \endcode // In case the number of rows or columns of the given matrix is 0, a \a std::invalid_argument is // thrown. // // Alternatively it is possible to compute the row- or columnwise mean: \code using blaze::columnVector; using blaze::rowVector; blaze::DynamicMatrix<int> A{ { 1, 4, 3, 6, 7 } , { 2, 6, 3, 1, 0 } }; blaze::DynamicVector<double,columnVector> rm; blaze::DynamicVector<double,rowVector> cm; rm = mean<rowwise>( A ); // Results in ( 4.2 2.4 ) cm = mean<columnwise>( A ); // Results in ( 1.5 5.0 3.0 3.5 3.5 ) \endcode // In case the rowwise mean is computed and the number of columns of the given matrix is 0 or // in case the columnwise mean is computed and the number of rows of the given matrix is 0, a // \a std::invalid_argument is thrown. // // \n \subsection matrix_operations_var var() // // The <a href="https://en.wikipedia.org/wiki/Variance">variance</a> of a dense or sparse matrix // can be computed via the \c var() function. In case of a sparse vector, both the non-zero and // zero elements are taken into account. The following example demonstrates the computation of // the variance of a dense matrix: \code blaze::DynamicMatrix<int> A{ { 1, 3, 2 } , { 2, 6, 4 } , { 9, 6, 3 } }; const double v = var( A ); // Results in 6.5 \endcode // In case the size of the given matrix is smaller than 2, a \a std::invalid_argument is thrown. 
// // Alternatively it is possible to compute the row- or columnwise variance: \code using blaze::columnVector; using blaze::rowVector; blaze::DynamicMatrix<int> A{ { 1, 3, 2 } , { 2, 6, 4 } , { 9, 6, 3 } }; blaze::DynamicVector<double,columnVector> rv; blaze::DynamicVector<double,rowVector> cv; rv = var<rowwise>( A ); // Results in ( 1 4 9 ) cv = var<columnwise>( A ); // Results in ( 19 3 1 ) \endcode // In case the rowwise variance is computed and the number of columns of the given matrix is // smaller than 2 or in case the columnwise variance is computed and the number of rows of the given // matrix is smaller than 2, a \a std::invalid_argument is thrown. // // \n \subsection matrix_operations_stddev stddev() // // The <a href="https://en.wikipedia.org/wiki/Standard_deviation">standard deviation</a> of a // dense or sparse matrix can be computed via the \c stddev() function. In case of a sparse // matrix, both the non-zero and zero elements are taken into account. The following example // demonstrates the computation of the standard deviation of a dense matrix: \code blaze::DynamicMatrix<int> A{ { 1, 3, 2 } , { 2, 6, 4 } , { 9, 6, 3 } }; const double s = stddev( A ); // Results in sqrt(6.5) \endcode // In case the size of the given matrix is smaller than 2, a \a std::invalid_argument is thrown. // // Alternatively it is possible to compute the row- or columnwise standard deviation: \code using blaze::columnVector; using blaze::rowVector; blaze::DynamicMatrix<int> A{ { 1, 3, 2 } , { 2, 6, 4 } , { 9, 6, 3 } }; blaze::DynamicVector<double,columnVector> rs; blaze::DynamicVector<double,rowVector> cs; rs = stddev<rowwise>( A ); // Results in ( 1 2 3 ) cs = stddev<columnwise>( A ); // Results in ( sqrt(19) sqrt(3) 1 ) \endcode // In case the rowwise standard deviation is computed and the number of columns of the given // matrix is smaller than 2 or in case the columnwise standard deviation is computed and the number of rows of // the given matrix is smaller than 2, a \a std::invalid_argument is thrown. // // // \n \section matrix_operations_declaration_operations Declaration Operations // <hr> // // \subsection matrix_operations_declsym declsym() // // The \c declsym() operation can be used to explicitly declare any matrix or matrix expression // as symmetric: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declsym( A ); \endcode // Any matrix or matrix expression that has been declared as symmetric via \c declsym() will // gain all the benefits of a symmetric matrix, which range from reduced runtime checking to // a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; DynamicMatrix<double> A, B, C; SymmetricMatrix< DynamicMatrix<double> > S; // ... Resizing and initialization isSymmetric( declsym( A ) ); // Will always return true without runtime effort S = declsym( A ); // Omit any runtime check for symmetry C = declsym( A * B ); // Declare the result of the matrix multiplication as symmetric, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c declsym() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-symmetric matrix or // matrix expression as symmetric via the \c declsym() operation leads to undefined behavior // (which can be violated invariants or wrong computation results)!
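//
// Since \c declsym() is not verified at runtime, a common defensive pattern is to combine it with
// an explicit check. The following sketch is merely one possible convention (the \c assert() usage
// is not required by \b Blaze):

   \code
   blaze::DynamicMatrix<double> A, B;
   // ... Resizing and initialization

   assert( isSymmetric( A ) );     // Optional sanity check, e.g. in debug builds
   B = declsym( A );               // Safe only if A is indeed symmetric

   B = declsym( A * trans( A ) );  // A matrix that is symmetric by construction can be declared as well
   \endcode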
// // // \n \subsection matrix_operations_declherm declherm() // // The \c declherm() operation can be used to explicitly declare any matrix or matrix expression // as Hermitian: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declherm( A ); \endcode // Any matrix or matrix expression that has been declared as Hermitian via \c declherm() will // gain all the benefits of an Hermitian matrix, which range from reduced runtime checking to // a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; DynamicMatrix<double> A, B, C; HermitianMatrix< DynamicMatrix<double> > S; // ... Resizing and initialization isHermitian( declherm( A ) ); // Will always return true without runtime effort S = declherm( A ); // Omit any runtime check for Hermitian symmetry C = declherm( A * B ); // Declare the result of the matrix multiplication as Hermitian, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c declherm() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-Hermitian matrix or // matrix expression as Hermitian via the \c declherm() operation leads to undefined behavior // (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_decllow decllow() // // The \c decllow() operation can be used to explicitly declare any matrix or matrix expression // as lower triangular: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = decllow( A ); \endcode // Any matrix or matrix expression that has been declared as lower triangular via \c decllow() // will gain all the benefits of a lower triangular matrix, which range from reduced runtime // checking to a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; DynamicMatrix<double> A, B, C; LowerMatrix< DynamicMatrix<double> > L; // ... Resizing and initialization isLower( decllow( A ) ); // Will always return true without runtime effort L = decllow( A ); // Omit any runtime check for A being a lower matrix C = decllow( A * B ); // Declare the result of the matrix multiplication as lower triangular, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c decllow() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-lower matrix or // matrix expression as lower triangular via the \c decllow() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declunilow declunilow() // // The \c declunilow() operation can be used to explicitly declare any matrix or matrix expression // as lower unitriangular: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declunilow( A ); \endcode // Any matrix or matrix expression that has been declared as lower unitriangular via \c declunilow() // will gain all the benefits of a lower unitriangular matrix, which range from reduced runtime // checking to a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::UniLowerMatrix; DynamicMatrix<double> A, B, C; UniLowerMatrix< DynamicMatrix<double> > L; // ... 
Resizing and initialization isUniLower( declunilow( A ) ); // Will always return true without runtime effort L = declunilow( A ); // Omit any runtime check for A being an unilower matrix C = declunilow( A * B ); // Declare the result of the matrix multiplication as lower // unitriangular, i.e. perform an optimized matrix multiplication \endcode // \warning The \c declunilow() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-unilower matrix or // matrix expression as lower unitriangular via the \c declunilow() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declstrlow declstrlow() // // The \c declstrlow() operation can be used to explicitly declare any matrix or matrix expression // as strictly lower triangular: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declstrlow( A ); \endcode // Any matrix or matrix expression that has been declared as strictly lower triangular via // \c declstrlow() will gain all the benefits of a strictly lower triangular matrix, which range // from reduced runtime checking to a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::StrictlyLowerMatrix; DynamicMatrix<double> A, B, C; StrictlyLowerMatrix< DynamicMatrix<double> > L; // ... Resizing and initialization isStrictlyLower( declstrlow( A ) ); // Will always return true without runtime effort L = declstrlow( A ); // Omit any runtime check for A being a strictly lower matrix C = declstrlow( A * B ); // Declare the result of the matrix multiplication as strictly lower // triangular, i.e. perform an optimized matrix multiplication \endcode // \warning The \c declstrlow() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-strictly-lower matrix // or matrix expression as strictly lower triangular via the \c declstrlow() operation leads to // undefined behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declupp declupp() // // The \c declupp() operation can be used to explicitly declare any matrix or matrix expression // as upper triangular: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declupp( A ); \endcode // Any matrix or matrix expression that has been declared as upper triangular via \c declupp() // will gain all the benefits of an upper triangular matrix, which range from reduced runtime // checking to a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::UpperMatrix; DynamicMatrix<double> A, B, C; UpperMatrix< DynamicMatrix<double> > U; // ... Resizing and initialization isUpper( declupp( A ) ); // Will always return true without runtime effort U = declupp( A ); // Omit any runtime check for A being an upper matrix C = declupp( A * B ); // Declare the result of the matrix multiplication as upper triangular, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c declupp() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-upper matrix or // matrix expression as upper triangular via the \c declupp() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! 
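//
// As a hypothetical sketch, the declaration operations can also be combined within one expression.
// For instance, when multiplying a matrix that is known to be lower triangular with one that is
// known to be upper triangular, both operands can be declared at once:

   \code
   using blaze::DynamicMatrix;

   DynamicMatrix<double> L, U, A;
   // ... Resizing and initialization (L lower triangular, U upper triangular)

   A = decllow( L ) * declupp( U );  // Both operands are declared, enabling the optimized kernels
   \endcode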
// // // \n \subsection matrix_operations_decluniupp decluniupp() // // The \c decluniupp() operation can be used to explicitly declare any matrix or matrix expression // as upper unitriangular: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = decluniupp( A ); \endcode // Any matrix or matrix expression that has been declared as upper unitriangular via \c decluniupp() // will gain all the benefits of a upper unitriangular matrix, which range from reduced runtime // checking to a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::UniUpperMatrix; DynamicMatrix<double> A, B, C; UniUpperMatrix< DynamicMatrix<double> > L; // ... Resizing and initialization isUniUpper( decluniupp( A ) ); // Will always return true without runtime effort L = decluniupp( A ); // Omit any runtime check for A being an uniupper matrix C = decluniupp( A * B ); // Declare the result of the matrix multiplication as upper // unitriangular, i.e. perform an optimized matrix multiplication \endcode // \warning The \c decluniupp() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-uniupper matrix or // matrix expression as upper unitriangular via the \c decluniupp() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declstrupp declstrupp() // // The \c declstrupp() operation can be used to explicitly declare any matrix or matrix expression // as strictly upper triangular: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declstrupp( A ); \endcode // Any matrix or matrix expression that has been declared as strictly upper triangular via // \c declstrupp() will gain all the benefits of a strictly upper triangular matrix, which range // from reduced runtime checking to a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::StrictlyUpperMatrix; DynamicMatrix<double> A, B, C; StrictlyUpperMatrix< DynamicMatrix<double> > L; // ... Resizing and initialization isStrictlyUpper( declstrupp( A ) ); // Will always return true without runtime effort L = declstrupp( A ); // Omit any runtime check for A being a strictly upper matrix C = declstrupp( A * B ); // Declare the result of the matrix multiplication as strictly upper // triangular, i.e. perform an optimized matrix multiplication \endcode // \warning The \c declstrupp() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-strictly-upper matrix // or matrix expression as strictly upper triangular via the \c declstrupp() operation leads to // undefined behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_decldiag decldiag() // // The \c decldiag() operation can be used to explicitly declare any matrix or matrix expression // as diagonal: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = decldiag( A ); \endcode // Any matrix or matrix expression that has been declared as diagonal via \c decldiag() will // gain all the benefits of a diagonal matrix, which range from reduced runtime checking to // a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::DiagonalMatrix; DynamicMatrix<double> A, B, C; DiagonalMatrix< DynamicMatrix<double> > D; // ... 
Resizing and initialization isDiagonal( decldiag( A ) ); // Will always return true without runtime effort D = decldiag( A ); // Omit any runtime check for A being a diagonal matrix C = decldiag( A * B ); // Declare the result of the matrix multiplication as diagonal, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c decldiag() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-diagonal matrix // or matrix expression as diagonal via the \c decldiag() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declid declid() // // The \c declid() operation can be used to explicitly declare any matrix or matrix expression // as identity matrix: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declid( A ); \endcode // Any matrix or matrix expression that has been declared as identity matrix via \c declid() will // gain all the benefits of an identity matrix, which range from reduced runtime checking to a // considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::DiagonalMatrix; DynamicMatrix<double> A, B, C; DiagonalMatrix< DynamicMatrix<double> > D; // ... Resizing and initialization isIdentity( declid( A ) ); // Will always return true without runtime effort D = declid( A ); // Omit any runtime check for A being a diagonal matrix C = declid( A ) * B; // Declare the left operand of the matrix multiplication as an // identity matrix, i.e. perform an optimized matrix multiplication \endcode // \warning The \c declid() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-identity matrix // or matrix expression as identity matrix via the \c declid() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declzero declzero() // // The \c declzero() operation can be used to explicitly declare any matrix or matrix expression // as zero matrix: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declzero( A ); \endcode // Any matrix or matrix expression that has been declared as zero matrix via \c declzero() will // gain all the benefits of a zero matrix, which range from reduced runtime checking to a // considerable speed-up in computations: \code using blaze::DynamicMatrix; DynamicMatrix<double> A, B, C; // ... Resizing and initialization isZero( declzero( A ) ); // Will always return true without runtime effort C = declzero( A ) + B; // Declare the left operand of the matrix addition as a // zero matrix, i.e. no addition needs to be performed \endcode // \warning The \c declzero() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-zero matrix or // matrix expression as zero matrix via the \c declzero() operation leads to undefined behavior // (which can be violated invariants or wrong computation results)! // // // \n \section matrix_operations_matrix_generators Matrix Generators // <hr> // // \subsection matrix_operations_generate generate() // // The \c generate() function returns a dense matrix filled elementwise via the given custom // binary operation. 
By default, the returned matrix is a row-major matrix, but this setting can // be changed via the \c BLAZE_DEFAULT_STORAGE_ORDER switch (see \ref storage_order). Alternatively // it is possible to specify the storage order explicitly.\n // The following example demonstrates the use of the \c generate() function: \code using blaze::generate; using blaze::rowMajor; using blaze::columnMajor; // Generates the uniform integer matrix ( ( 2, 2, 2 ), ( 2, 2, 2 ) ) blaze::DynamicMatrix<int,rowMajor> A; A = generate( 2UL, 3UL, []( size_t i, size_t j ){ return 2; } ); // Generates the linearly spaced float matrix ( ( 2.1, 3.2, 4.3 ), ( 5.4, 6.5, 7.6 ) ) blaze::DynamicMatrix<float,rowMajor> B; B = generate( 2UL, 3UL, []( size_t i, size_t j ){ return 2.1F + 1.1F*(i*3UL+j); } ); // Generates the logarithmically spaced double matrix ( ( 10.0, 100.0 ), ( 1000.0, 10000.0 ) ) blaze::DynamicMatrix<double,rowMajor> C; C = generate<rowMajor>( 2UL, 2UL, []( size_t i, size_t j ) { return blaze::exp10( 1.0 + 1.0*(i*2UL+j) ); } ); // Generates the matrix of integer vectors ( ( 1, 2 ), ( 2, 3 ), ( 3, 4 ), ( 4, 5 ) ) using VT = blaze::StaticVector<int,2UL>; blaze::DynamicMatrix<VT,columnMajor> D; D = generate<columnMajor>( 2UL, 2UL, []( size_t i, size_t j ) { return evaluate( VT{ 1, 2 } + (i*2UL+j) ); } ); \endcode // \n \subsection matrix_operations_uniform uniform() // // The \c uniform() function creates a uniform matrix of the given size. By default, the // resulting uniform matrix is a row-major matrix, but this setting can be changed via the // \c BLAZE_DEFAULT_STORAGE_ORDER switch (see \ref storage_order). Alternatively it is // possible to specify the storage order explicitly.\n // The following example demonstrates the use of the \c uniform() function: \code using blaze::uniform; using blaze::rowMajor; using blaze::columnMajor; // Creates the uniform row-major matrix // ( 1, 1, 1, 1, 1 ) // ( 1, 1, 1, 1, 1 ) auto U1 = uniform( 2UL, 5UL, 1 ); // Creates the uniform row-major matrix // ( 1.2, 1.2 ) // ( 1.2, 1.2 ) // ( 1.2, 1.2 ) auto U2 = uniform<rowMajor>( 3UL, 2UL, 1.2 ); // Creates the uniform column-major matrix // ( 5U, 5U, 5U, 5U, 5U, 5U, 5U ) // ( 5U, 5U, 5U, 5U, 5U, 5U, 5U ) auto U3 = uniform<columnMajor>( 2UL, 7UL, 5U ); \endcode // \n \subsection matrix_operations_zero zero() // // The \c zero() function creates a zero matrix of the given element type and size. By default, // the resulting zero matrix is a row-major matrix, but this setting can be changed via the // \c BLAZE_DEFAULT_STORAGE_ORDER switch (see \ref storage_order). Alternatively it is possible // to specify the storage order explicitly.\n // The following example demonstrates the use of the \c zero() function: \code using blaze::zero; using blaze::rowMajor; using blaze::columnMajor; // Creates the row-major zero matrix // ( 0, 0, 0, 0, 0 ) // ( 0, 0, 0, 0, 0 ) auto Z1 = zero<int>( 2UL, 5UL ); // Creates the row-major zero matrix // ( 0.0, 0.0 ) // ( 0.0, 0.0 ) // ( 0.0, 0.0 ) auto Z2 = zero<double,rowMajor>( 3UL, 2UL ); // Creates the column-major zero matrix // ( 0U, 0U, 0U, 0U, 0U, 0U, 0U ) // ( 0U, 0U, 0U, 0U, 0U, 0U, 0U ) auto Z3 = zero<unsigned int,columnMajor>( 2UL, 7UL ); \endcode // \n \section matrix_operations_matrix_inversion Matrix Inversion // <hr> // // The inverse of a square dense matrix can be computed via the \c inv() function: \code blaze::DynamicMatrix<float,blaze::rowMajor> A, B; // ...
Resizing and initialization B = inv( A ); // Compute the inverse of A \endcode // Alternatively, an in-place inversion of a dense matrix can be performed via the \c invert() // function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization invert( A ); // In-place matrix inversion \endcode // Both the \c inv() and the \c invert() functions will automatically select the most suited matrix // inversion algorithm depending on the size and type of the given matrix. For small matrices of // up to 6x6, both functions use manually optimized kernels for maximum performance. For matrices // larger than 6x6 the inversion is performed by means of the most suited matrix decomposition // method: In case of a general matrix the LU decomposition is used, for symmetric matrices the // LDLT decomposition is applied, for Hermitian matrices the LDLH decomposition is performed, and // for triangular matrices the inverse is computed via a forward or back substitution. // // In case the type of the matrix does not provide additional compile time information about its // structure (symmetric, lower, upper, diagonal, ...), the information can be provided manually // by means of \ref matrix_operations_declaration_operations when calling the \c invert() function: \code invert( declsym( A ) ); // In-place inversion of a symmetric matrix invert( declherm( A ) ); // In-place inversion of an Hermitian matrix invert( decllow( A ) ); // In-place inversion of a lower triangular matrix invert( declunilow( A ) ); // In-place inversion of a lower unitriangular matrix invert( declupp( A ) ); // In-place inversion of an upper triangular matrix invert( decluniupp( A ) ); // In-place inversion of an upper unitriangular matrix invert( decldiag( A ) ); // In-place inversion of a diagonal matrix \endcode // Alternatively, via the \c invert() function it is possible to explicitly specify the inversion // algorithm: \code using blaze::byLU; using blaze::byLDLT; using blaze::byLDLH; using blaze::byLLH; // In-place inversion of a general matrix by means of an LU decomposition invert<byLU>( A ); // In-place inversion of a symmetric indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLT>( A ); // In-place inversion of an Hermitian indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLH>( A ); // In-place inversion of a positive definite matrix by means of a Cholesky decomposition invert<byLLH>( A ); \endcode // Whereas the inversion by means of an LU decomposition works for every general square matrix, // the inversion by LDLT only works for symmetric indefinite matrices, the inversion by LDLH is // restricted to Hermitian indefinite matrices and the Cholesky decomposition (LLH) only works // for Hermitian positive definite matrices. Please note that it is in the responsibility of the // function caller to guarantee that the selected algorithm is suited for the given matrix. In // case this precondition is violated the result can be wrong and might not represent the inverse // of the given matrix! // // For both the \c inv() and \c invert() function the matrix inversion fails if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // In all failure cases either a compilation error is created if the failure can be predicted at // compile time or a \c std::invalid_argument exception is thrown. 
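//
// A minimal sketch of reacting to the failure case at runtime (the try/catch structure shown here
// is just one possible way to handle it):

   \code
   blaze::DynamicMatrix<double> A;
   // ... Resizing and initialization

   try {
      invert( A );  // In-place inversion; throws std::invalid_argument if A is singular
   }
   catch( const std::invalid_argument& ) {
      // React to the non-invertible matrix; note that A may already have been modified,
      // since the inversion functions do not provide an exception safety guarantee
   }
   \endcode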
// // \note The matrix inversion can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions invert the dense matrix by means of LAPACK kernels. Thus the functions can // only be used if a fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created. // // \note It is not possible to use any kind of view on the expression object returned by the // \c inv() function. Also, it is not possible to access individual elements via the function call // operator on the expression object: \code row( inv( A ), 2UL ); // Compilation error: Views cannot be used on an inv() expression! inv( A )(1,2); // Compilation error: It is not possible to access individual elements! \endcode // \note The inversion functions do not provide any exception safety guarantee, i.e. in case an // exception is thrown the matrix may already have been modified. // // // \n \section matrix_operations_matrix_exponential Matrix Exponential // <hr> // // The matrix exponential of a \f$N \times N\f$ matrix \f$ X \f$ is defined as \f[ e^X = \sum\limits_{k=0}^\infty \frac{1}{k!} X^k. \f] // In order to compute the matrix exponential of a square dense matrix, the \c matexp() function // can be used: \code blaze::DynamicMatrix<float,blaze::rowMajor> A, B; // ... Resizing and initialization B = matexp( A ); // Compute the exponential of A \endcode // \note The matrix exponential can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type results in a compile time error! // // \note It is not possible to use any kind of view on the expression object returned by the // \c matexp() function. Also, it is not possible to access individual elements via the function // call operator on the expression object: \code row( matexp( A ), 2UL ); // Compilation error: Views cannot be used on an matexp() expression! matexp( A )(1,2); // Compilation error: It is not possible to access individual elements! \endcode // \n \section matrix_operations_decomposition Matrix Decomposition // <hr> // // \note All decomposition functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions decompose a dense matrix by means of LAPACK kernels. Thus the functions can // only be used if a fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created. // // \subsection matrix_operations_decomposition_lu LU Decomposition // // The LU decomposition of a dense matrix can be computed via the \c lu() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a row-major matrix assert( A == L * U * P ); \endcode \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... 
Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a column-major matrix assert( A == P * L * U ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices. Note, however, that the // three matrices \c A, \c L and \c U are required to have the same storage order. Also, please // note that the way the permutation matrix \c P needs to be applied differs between row-major and // column-major matrices, since the algorithm uses column interchanges for row-major matrices and // row interchanges for column-major matrices. // // Furthermore, \c lu() can be used with adaptors. For instance, the following example demonstrates // the LU decomposition of a symmetric matrix into a lower and upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > U; blaze::DynamicMatrix<double,blaze::columnMajor> P; lu( A, L, U, P ); // LU decomposition of A \endcode // \n \subsection matrix_operations_decomposition_llh Cholesky Decomposition // // The Cholesky (LLH) decomposition of a dense matrix can be computed via the \c llh() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; llh( A, L ); // LLH decomposition of a row-major matrix assert( A == L * ctrans( L ) ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the two matrices \c A // and \c L can have any storage order. // // Furthermore, \c llh() can be used with adaptors. For instance, the following example demonstrates // the LLH decomposition of a symmetric matrix into a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; llh( A, L ); // Cholesky decomposition of A \endcode // \n \subsection matrix_operations_decomposition_qr QR Decomposition // // The QR decomposition of a dense matrix can be computed via the \c qr() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> Q; blaze::DynamicMatrix<double,blaze::rowMajor> R; qr( A, Q, R ); // QR decomposition of a row-major matrix assert( A == Q * R ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c R can have any storage order. // // Furthermore, \c qr() can be used with adaptors. For instance, the following example demonstrates // the QR decomposition of a symmetric matrix into a general matrix and an upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > R; qr( A, Q, R ); // QR decomposition of A \endcode // \n \subsection matrix_operations_decomposition_rq RQ Decomposition // // Similar to the QR decomposition, the RQ decomposition of a dense matrix can be computed via // the \c rq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> R; blaze::DynamicMatrix<double,blaze::columnMajor> Q; rq( A, R, Q ); // RQ decomposition of a row-major matrix assert( A == R * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c R and \c Q can have any storage order. // // Also the \c rq() function can be used in combination with matrix adaptors. For instance, the // following example demonstrates the RQ decomposition of an Hermitian matrix into a general // matrix and an upper triangular matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... Resizing and initialization blaze::UpperMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > R; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; rq( A, R, Q ); // RQ decomposition of A \endcode // \n \subsection matrix_operations_decomposition_ql QL Decomposition // // The QL decomposition of a dense matrix can be computed via the \c ql() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::DynamicMatrix<double,blaze::columnMajor> L; ql( A, Q, L ); // QL decomposition of a row-major matrix assert( A == Q * L ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c L can have any storage order. // // Also the \c ql() function can be used in combination with matrix adaptors. For instance, the // following example demonstrates the QL decomposition of a symmetric matrix into a general // matrix and a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; ql( A, Q, L ); // QL decomposition of A \endcode // \n \subsection matrix_operations_decomposition_lq LQ Decomposition // // The LQ decomposition of a dense matrix can be computed via the \c lq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; blaze::DynamicMatrix<double,blaze::columnMajor> Q; lq( A, L, Q ); // LQ decomposition of a row-major matrix assert( A == L * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c L and \c Q can have any storage order. // // Furthermore, \c lq() can be used with adaptors. For instance, the following example demonstrates // the LQ decomposition of an Hermitian matrix into a lower triangular matrix and a general matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... 
Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > L; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; lq( A, L, Q ); // LQ decomposition of A \endcode // \n \section matrix_operations_linear_systems Linear Systems // <hr> // // The \c solve() function computes a solution for the given dense linear system of equations (LSE) // \f$ A*x=b \f$, where \c A is the given system matrix, \c x is the solution vector, and \c b is // the given dense right-hand side vector: \code blaze::DynamicMatrix<double> A; // The square general system matrix blaze::DynamicVector<double> b; // The right-hand side vector // ... Resizing and initialization blaze::DynamicVector<double> x; // The solution vector solve( A, x, b ); // Computing the solution x x = solve( A, b ); // Alternative syntax \endcode // Alternatively, \c solve() computes a solution for the given dense LSE \f$ A*X=B \f$, where \c A // is the given dense system matrix, the columns of \c X are the solution vectors, and the columns // of \c B are the given right-hand side vectors: \code blaze::DynamicMatrix<double> A; // The square general system matrix blaze::DynamicMatrix<double> B; // The right-hand side matrix // ... Resizing and initialization blaze::DynamicMatrix<double> X; // The solution matrix solve( A, X, B ); // Computing the solutions X X = solve( A, B ); // Alternative syntax \endcode // Both \c solve() functions will automatically select the most suited direct solver algorithm // depending on the size and type of the given system matrix. For small matrices of up to 6x6, // both functions use manually optimized kernels for maximum performance. For matrices larger // than 6x6 the computation is performed by means of the most suited LAPACK solver method (see // \ref lapack_linear_system_solver). // // In case the type of the matrix does not provide additional compile time information about // its structure (symmetric, lower, upper, diagonal, ...), the information can be provided // manually by means of \ref matrix_operations_declaration_operations when calling the \c solve() // functions: \code blaze::DynamicMatrix<double> A; // The square lower system matrix blaze::DynamicVector<double> b; // The right-hand side vector // ... Resizing and initialization blaze::DynamicVector<double> x; // The solution vector solve( declsym( A ), x, b ); // Solving the LSE with a symmetric system matrix solve( declherm( A ), x, b ); // Solving the LSE with an Hermitian system matrix solve( decllow( A ), x, b ); // Solving the LSE with a lower system matrix solve( declunilow( A ), x, b ); // Solving the LSE with an unilower system matrix solve( declupp( A ), x, b ); // Solving the LSE with an upper system matrix solve( decluniupp( A ), x, b ); // Solving the LSE with an uniupper system matrix solve( decldiag( A ), x, b ); // Solving the LSE with a diagonal system matrix \endcode // For both \c solve() functions the computation fails if ... // // - ... the given matrix is not a square matrix; // - ... the size of the right-hand side vector doesn't match the dimensions of the system matrix; // - ... the number of rows of the right-hand side matrix doesn't match the dimensions of the system matrix; // - ... the given matrix is singular and not invertible. // // In all failure cases either a compilation error is created if the failure can be predicted at // compile time or a \c std::invalid_argument exception is thrown. 
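//
// As a small usage sketch (the concrete 2x2 values below are made up purely for illustration),
// solving a dense LSE and checking the computed solution could look like this:

   \code
   blaze::DynamicMatrix<double> A{ { 4.0, 1.0 },
                                   { 1.0, 3.0 } };   // Hypothetical square system matrix
   blaze::DynamicVector<double> b{ 1.0, 2.0 };       // Hypothetical right-hand side vector

   blaze::DynamicVector<double> x;
   solve( A, x, b );                            // Computing the solution x

   const double residual = norm( A * x - b );   // Should be close to zero (up to rounding errors)
   \endcode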
// // \note The \c solve() functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions may make use of LAPACK kernels. Thus the functions can only be used if a // fitting LAPACK library is available and linked to the executable. Otherwise a linker error will // be created. // // \note It is not possible to use any kind of view on the expression object returned by the // two-argument \c solve() function. Also, it is not possible to access individual elements via // the function call operator on the expression object: \code row( solve( A, b ), 2UL ); // Compilation error: Views cannot be used on an solve() expression! solve( A, b )[2]; // Compilation error: It is not possible to access individual elements! rows( solve( A, B ), { 2UL, 4UL } ); // Compilation error: Views cannot be used on an solve() expression! solve( A, B )(1,2); // Compilation error: It is not possible to access individual elements! \endcode // \note The \c solve() functions do not provide any exception safety guarantee, i.e. in case an // exception is thrown the solution vector or matrix may already have been modified. // // // \n \section matrix_operations_eigenvalues Eigenvalues/Eigenvectors // <hr> // // The eigenvalues and eigenvectors of a dense matrix can be computed via the \c eigen() functions. // The following examples give an impression of the computation of eigenvalues and eigenvectors // for a general, a symmetric, and an Hermitian matrix: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix<double,rowMajor> A( 5UL, 5UL ); // The general matrix A // ... Initialization DynamicVector<complex<double>,columnVector> w( 5UL ); // The vector for the complex eigenvalues DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors w = eigen( A ); // Computing only the eigenvalues of A (one argument) eigen( A, w ); // Computing only the eigenvalues of A (two arguments) eigen( A, w, V ); // Computing both the eigenvalues and eigenvectors of A (three arguments) \endcode \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 5UL ); // The symmetric matrix A // ... Initialization DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues DynamicMatrix<double,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors w = eigen( A ); // Computing only the eigenvalues of A (one argument) eigen( A, w ); // Computing only the eigenvalues of A (two arguments) eigen( A, w, V ); // Computing both the eigenvalues and eigenvectors of A (three arguments) \endcode \code using blaze::HermitianMatrix; using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; HermitianMatrix< DynamicMatrix<complex<double>,rowMajor> > A( 5UL ); // The Hermitian matrix A // ... 
Initialization DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors w = eigen( A ); // Computing only the eigenvalues of A (one argument) eigen( A, w ); // Computing only the eigenvalues of A (two arguments) eigen( A, w, V ); // Computing both the eigenvalues and eigenvectors of A (three arguments) \endcode // The one- and two-argument functions compute only the eigenvalues of the given \a n-by-\a n // matrix, the three-argument function additionally computes the eigenvectors. The eigenvalues // are returned in the given vector \a w and the eigenvectors are returned in the given matrix // \a V, which are both resized to the correct dimensions (if possible and necessary). // // Depending on the given matrix type, the resulting eigenvalues are either of floating point // or complex type: In case the given matrix is either a compile time symmetric matrix with // floating point elements or an Hermitian matrix with complex elements, the resulting eigenvalues // will be of floating point type and therefore the elements of the given eigenvalue vector are // expected to be of floating point type. In all other cases they are expected to be of complex // type. Please note that for complex eigenvalues no order of eigenvalues can be assumed, except // that complex conjugate pairs of eigenvalues appear consecutively with the eigenvalue having // the positive imaginary part first. // // In case \a A is a row-major matrix, \a V will contain the left eigenvectors, otherwise \a V // will contain the right eigenvectors. In case \a V is a row-major matrix the eigenvectors are // returned in the rows of \a V, in case \a V is a column-major matrix the eigenvectors are // returned in the columns of \a V. In case the given matrix is a compile time symmetric matrix // with floating point elements, the resulting eigenvectors will be of floating point type and // therefore the elements of the given eigenvector matrix are expected to be of floating point // type. In all other cases they are expected to be of complex type. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a V is a fixed size matrix and the dimensions don't match; // - ... the eigenvalue computation fails. // // In all failure cases an exception is thrown. // // \note All \c eigen() functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions compute the eigenvalues and/or eigenvectors of a dense matrix by means of // LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is available // and linked to the executable. Otherwise a linker error will be created. // // // \n \section matrix_operations_singularvalues Singular Values/Singular Vectors // <hr> // // The singular value decomposition (SVD) of a dense matrix can be computed via the \c svd() // functions. 
The following two examples give an impression of the computation of singular values // and singular vectors for a general dense matrix with \c double and \c complex<double> element // type, respectively: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix<double,rowMajor> A( 5UL, 8UL ); // The general matrix A // ... Initialization DynamicMatrix<double,rowMajor> U; // The matrix for the left singular vectors DynamicVector<double,columnVector> s; // The vector for the singular values DynamicMatrix<double,rowMajor> V; // The matrix for the right singular vectors s = svd( A ); // (1) Computing only the singular values of A svd( A, s ); // (2) Computing only the singular values of A svd( A, U, s, V ); // (3) Computing the singular values and vectors of A svd( A, s, 0.0, 1.0 ); // (4) Computing all singular values in the floating point range [0.0..1.0) svd( A, U, s, V, 0, 2 ); // (5) Computing the singular values and vectors in the index range [0..2] \endcode \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix<complex<double>,rowMajor> A( 5UL, 8UL ); // The general matrix A // ... Initialization DynamicMatrix<complex<double>,rowMajor> U; // The matrix for the left singular vectors DynamicVector<double,columnVector> s; // The vector for the singular values DynamicMatrix<complex<double>,rowMajor> V; // The matrix for the right singular vectors s = svd( A ); // (1) Computing only the singular values of A svd( A, s ); // (2) Computing only the singular values of A svd( A, U, s, V ); // (3) Computing the singular values and vectors of A svd( A, s, 0.0, 1.0 ); // (4) Computing all singular values in the floating point range [0.0..1.0) svd( A, U, s, V, 0, 2 ); // (5) Computing the singular values and vectors in the index range [0..2] \endcode // Functions (1), (2) and (4) compute only singular values of the given general \a m-by-\a n // matrix, functions (3) and (5) additionally compute singular vectors. The resulting singular // values are returned in the given vector \a s, the left singular vectors are returned in the // given matrix \a U, and the right singular vectors are returned in the matrix \a V. \a s, \a U, // and \a V are resized to the correct dimensions (if possible and necessary). // // Functions (4) and (5) allow for the specification of a subset of singular values and/or // vectors. The number of singular values and vectors to be computed is specified by the lower // bound \a low and the upper bound \a upp, which either form an integral or a floating point // range. // // In case \a low and \a upp form are of integral type, the function computes all singular values // in the index range \f$[low..upp]\f$. The \a num resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a \a num-dimensional vector. The resulting left singular vectors are stored // in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V, // which is either resized (if possible) or expected to be a \a num-by-\a n matrix. // // In case \a low and \a upp are of floating point type, the function computes all singular values // in the half-open interval \f$(low..upp]\f$. 
The resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are // stored in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given // matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n // matrix. // // The functions fail if ... // // - ... the given matrix \a U is a fixed size matrix and the dimensions don't match; // - ... the given vector \a s is a fixed size vector and the size doesn't match; // - ... the given matrix \a V is a fixed size matrix and the dimensions don't match; // - ... the given scalar values don't form a proper range; // - ... the singular value decomposition fails. // // In all failure cases an exception is thrown. // // \note All \c svd() functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions compute the singular values and/or singular vectors of a dense matrix by // means of LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is // available and linked to the executable. Otherwise a linker error will be created. // // // \n Previous: \ref matrix_types &nbsp; &nbsp; Next: \ref adaptors */ //************************************************************************************************* //**Adaptors*************************************************************************************** /*!\page adaptors Adaptors // // \tableofcontents // // // \section adaptors_general General Concepts // <hr> // // Adaptors act as wrappers around the general \ref matrix_types. They adapt the interface of the // matrices such that certain invariants are preserved. Due to this adaptors can provide a compile // time guarantee of certain properties, which can be exploited for optimized performance. // // The \b Blaze library provides a total of 9 different adaptors: // // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices // <ul> // <li> \ref adaptors_triangular_matrices "Lower Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_lowermatrix </li> // <li> \ref adaptors_triangular_matrices_unilowermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlylowermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Upper Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_uppermatrix </li> // <li> \ref adaptors_triangular_matrices_uniuppermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlyuppermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Diagonal Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_diagonalmatrix </li> // </ul> // </li> // </ul> // </li> // </ul> // // In combination with the general matrix types, \b Blaze provides a total of 40 different matrix // types that make it possible to exactly adapt the type of matrix to every specific problem. // // // \n \section adaptors_examples Examples // <hr> // // The following code examples give an impression on the use of adaptors. 
The first example shows // the multiplication between two lower matrices: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnMajor; LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the // fact that either the lower or upper part of the matrix contains only default elements and // restrict the algorithm to the non-zero elements. Thus the adaptor provides a significant // performance advantage in comparison to a general matrix multiplication, especially for large // matrices. // // The second example shows the \c SymmetricMatrix adaptor in a row-major dense matrix/sparse // vector multiplication: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which significantly increases the performance. // // \n Previous: \ref matrix_operations &nbsp; &nbsp; Next: \ref adaptors_symmetric_matrices */ //************************************************************************************************* //**Symmetric Matrices***************************************************************************** /*!\page adaptors_symmetric_matrices Symmetric Matrices // // \tableofcontents // // // \n \section adaptors_symmetric_matrices_general Symmetric Matrices // <hr> // // In contrast to general matrices, which have no restriction in their number of rows and columns // and whose elements can have any value, symmetric matrices provide the compile time guarantee // to be square matrices with pair-wise identical values. Mathematically, this means that a // symmetric matrix is always equal to its transpose (\f$ A = A^T \f$) and that all non-diagonal // values have an identical counterpart (\f$ a_{ij} == a_{ji} \f$). This symmetry property can // be exploited to provide higher efficiency and/or lower memory consumption. Within the \b Blaze // library, symmetric matrices are realized by the \ref adaptors_symmetric_matrices_symmetricmatrix // class template. // // // \n \section adaptors_symmetric_matrices_symmetricmatrix SymmetricMatrix // <hr> // // The SymmetricMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it // by enforcing the additional invariant of symmetry (i.e. the matrix is always equal to its // transpose \f$ A = A^T \f$). 
It can be included via the header file \code #include <blaze/math/SymmetricMatrix.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code template< typename MT > class SymmetricMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. SymmetricMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible symmetric matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense symmetric matrix with static memory blaze::SymmetricMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense symmetric matrix based on HybridMatrix blaze::SymmetricMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense symmetric matrix based on DynamicMatrix blaze::SymmetricMatrix< blaze::DynamicMatrix<double,rowMajor> > C; // Definition of a fixed size row-major dense symmetric matrix based on CustomMatrix blaze::SymmetricMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision symmetric matrix blaze::SymmetricMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > E; \endcode // The storage order of a symmetric matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the symmetric matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the symmetric matrix // will also be a column-major matrix. // // // \n \section adaptors_symmetric_matrices_special_properties Special Properties of Symmetric Matrices // <hr> // // A symmetric matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the symmetry constraint: // // -# <b>\ref adaptors_symmetric_matrices_square</b> // -# <b>\ref adaptors_symmetric_matrices_symmetry</b> // -# <b>\ref adaptors_symmetric_matrices_initialization</b> // // \n \subsection adaptors_symmetric_matrices_square Symmetric Matrices Must Always be Square!
// // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 symmetric static matrix SymmetricMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type SymmetricMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_symmetric_matrices_symmetry The Symmetric Property is Always Enforced! // // This means that modifying the element \f$ a_{ij} \f$ of a symmetric matrix also modifies its // counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that are // symmetric themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, row-major 3x3 symmetric compressed matrix SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); // Initializing three elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(0,2) = 2.0; // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function A.insert( 1, 1, 3.0 ); // Inserting the diagonal element (1,1) A.insert( 1, 2, 4.0 ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator *A.begin(1UL) = 10.0; // Modifies both elements (1,0) and (0,1) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from a symmetric dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 8.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-symmetric dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 7.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; symmetric invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up a symmetric sparse matrix. 
In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should to be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Setup of the symmetric matrix // // ( 0 1 3 ) // A = ( 1 2 0 ) // ( 3 0 0 ) // SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, 1.0 ); // Appending the value 1 at position (0,1) and (1,0) A.append( 1, 1, 2.0 ); // Appending the value 2 at position (1,1) A.append( 2, 0, 3.0 ); // Appending the value 3 at position (2,0) and (0,2) \endcode // The symmetry property is also enforced for symmetric custom matrices: In case the given array // of elements does not represent a symmetric matrix, a \c std::invalid_argument exception is // thrown: \code using blaze::CustomMatrix; using blaze::SymmetricMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using CustomSymmetric = SymmetricMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >; // Creating a 3x3 symmetric custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomSymmetric A( array, 3UL ); // OK // Attempt to create a second 3x3 symmetric custom matrix from an uninitialized array std::unique_ptr<double[]> memory( new double[9UL] ); CustomSymmetric B( memory.get(), 3UL ); // Throws an exception \endcode // Finally, the symmetry property is enforced for views (rows, columns, submatrices, ...) on the // symmetric matrix. The following example demonstrates that modifying the elements of an entire // row of the symmetric matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of the symmetric matrix // // ( 0 1 0 2 ) // A = ( 1 3 4 0 ) // ( 0 4 0 5 ) // ( 2 0 5 0 ) // SymmetricMatrix< DynamicMatrix<int> > A( 4 ); A(0,1) = 1; A(0,3) = 2; A(1,1) = 3; A(1,2) = 4; A(2,3) = 5; // Setting all elements in the 1st row to 0 results in the matrix // // ( 0 0 0 2 ) // A = ( 0 0 0 0 ) // ( 0 0 0 5 ) // ( 2 0 5 0 ) // row( A, 1 ) = 0; \endcode // The next example demonstrates the (compound) assignment to submatrices of symmetric matrices. // Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously! 
// // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_symmetric_matrices_initialization The Elements of a Dense Symmetric Matrix are Always Default Initialized! // // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the symmetric property of // dense symmetric matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_symmetric_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A SymmetricMatrix matrix can participate in numerical operations in any way any other dense // or sparse matrix can participate. It can also be combined with any other dense or sparse vector // or matrix. The following code example gives an impression of the use of SymmetricMatrix within // arithmetic operations: \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; DynamicMatrix<double,rowMajor> A( 3, 3 ); CompressedMatrix<double,rowMajor> B( 3, 3 ); SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( 3 ); SymmetricMatrix< CompressedMatrix<double,rowMajor> > D( 3 ); SymmetricMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E; SymmetricMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major symmetric matrix (includes runtime check) F = C - D; // Matrix subtraction and assignment to a column-major symmetric matrix (only compile time check) F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check) C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C (only compile time check) E += A - B; // Addition assignment (includes runtime check) F -= C + D; // Subtraction assignment (only compile time check) F *= A * D; // Multiplication assignment (includes runtime check) \endcode // Note that it is possible to assign any kind of matrix to a symmetric matrix. In case the matrix // to be assigned is not symmetric at compile time, a runtime check is performed. // // // \n \section adaptors_symmetric_matrices_block_matrices Symmetric Block Matrices // <hr> // // It is also possible to use symmetric block matrices: \code using blaze::CompressedMatrix; using blaze::StaticMatrix; using blaze::SymmetricMatrix; // Definition of a 3x3 symmetric block matrix based on CompressedMatrix SymmetricMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > A( 3 ); \endcode // Also in this case, the SymmetricMatrix class template enforces the invariant of symmetry and // guarantees that a modifications of element \f$ a_{ij} \f$ of the adapted matrix is also // applied to element \f$ a_{ji} \f$: \code // Inserting the elements (2,4) and (4,2) A.insert( 2, 4, StaticMatrix<int,3UL,3UL>{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } } ); // Manipulating the elements (2,4) and (4,2) A(2,4)(1,1) = -5; \endcode // For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices. 
// // // \n \section adaptors_symmetric_matrices_performance Performance Considerations // <hr> // // When the symmetric property of a matrix is known beforehands using the SymmetricMatrix adaptor // instead of a general matrix can be a considerable performance advantage. The \b Blaze library // tries to exploit the properties of symmetric matrices whenever possible. However, there are // also situations when using a symmetric matrix introduces some overhead. The following examples // demonstrate several situations where symmetric matrices can positively or negatively impact // performance. // // \n \subsection adaptors_symmetric_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; using blaze::columnMajor; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; SymmetricMatrix< CompressedMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using the // SymmetricMatrix adapter is obviously an advantage. // // \n \subsection adaptors_symmetric_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar optimization is possible in case of matrix/vector multiplications: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which also significantly increases the performance. // // \n \subsection adaptors_symmetric_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices // // Another example is the optimization of a row view on a column-major symmetric matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::columnMajor; SymmetricMatrix< DynamicMatrix<double,columnMajor> > A( 10UL ); auto row5 = row( A, 5UL ); \endcode // Usually, a row view on a column-major matrix results in a considerable performance decrease in // comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix // elements. 
However, in case of symmetric matrices, \b Blaze instead uses the according column of // the matrix, which provides the same performance as if the matrix would be row-major. Note that // this also works for column views on row-major matrices, where \b Blaze can use the according // row instead of a column in order to provide maximum performance. // // \n \subsection adaptors_symmetric_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a symmetric matrix on the right-hand side of an assignment (i.e. for read // access), which introduces absolutely no performance penalty, using a symmetric matrix on the // left-hand side of an assignment (i.e. for write access) may introduce additional overhead when // it is assigned a general matrix, which is not symmetric at compile time: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; SymmetricMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the symmetric matrix; no performance penalty C = A; // Assignment of a symmetric matrix to another symmetric matrix; no runtime overhead C = B; // Assignment of a general matrix to a symmetric matrix; some runtime overhead \endcode // When assigning a general, potentially not symmetric matrix to a symmetric matrix it is necessary // to check whether the matrix is symmetric at runtime in order to guarantee the symmetry property // of the symmetric matrix. In case it turns out to be symmetric, it is assigned as efficiently as // possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is // therefore generally advisable to assign symmetric matrices to other symmetric matrices.\n // In this context it is especially noteworthy that in contrast to additions and subtractions the // multiplication of two symmetric matrices does not necessarily result in another symmetric matrix: \code SymmetricMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a symmetric matrix; no runtime overhead C = A - B; // Results in a symmetric matrix; no runtime overhead C = A * B; // Is not guaranteed to result in a symmetric matrix; some runtime overhead \endcode // \n Previous: \ref adaptors &nbsp; &nbsp; Next: \ref adaptors_hermitian_matrices */ //************************************************************************************************* //**Hermitian Matrices***************************************************************************** /*!\page adaptors_hermitian_matrices Hermitian Matrices // // \tableofcontents // // // \n \section adaptors_hermitian_matrices_general Hermitian Matrices // <hr> // // In addition to symmetric matrices, \b Blaze also provides an adaptor for Hermitian matrices. // Hermitian matrices provide the compile time guarantee to be square matrices with pair-wise // conjugate complex values. Mathematically, this means that an Hermitian matrix is always equal // to its conjugate transpose (\f$ A = \overline{A^T} \f$) and that all non-diagonal values have // a complex conjugate counterpart (\f$ a_{ij} == \overline{a_{ji}} \f$). Within the \b Blaze // library, Hermitian matrices are realized by the \ref adaptors_hermitian_matrices_hermitianmatrix // class template. // // // \n \section adaptors_hermitian_matrices_hermitianmatrix HermitianMatrix // <hr> // // The HermitianMatrix class template is an adapter for existing dense and sparse matrix types. 
// It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant of Hermitian symmetry (i.e. the matrix is always equal to // its conjugate transpose \f$ A = \overline{A^T} \f$). It can be included via the header file \code #include <blaze/math/HermitianMatrix.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code template< typename MT > class HermitianMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. HermitianMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). // // The following examples give an impression of several possible Hermitian matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense Hermitian matrix with static memory blaze::HermitianMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense Hermitian matrix based on HybridMatrix blaze::HermitianMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense Hermitian matrix based on DynamicMatrix blaze::HermitianMatrix< blaze::DynamicMatrix<std::complex<double>,rowMajor> > C; // Definition of a fixed size row-major dense Hermitian matrix based on CustomMatrix blaze::HermitianMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision complex Hermitian matrix blaze::HermitianMatrix< blaze::CompressedMatrix<std::complex<float>,rowMajor> > E; \endcode // The storage order of an Hermitian matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the Hermitian matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the Hermitian matrix // will also be a column-major matrix. // // // \n \section adaptors_hermitian_matrices_vs_symmetric_matrices Hermitian Matrices vs. Symmetric Matrices // // The blaze::HermitianMatrix adaptor and the blaze::SymmetricMatrix adaptor share several traits. // However, there are a couple of differences, both from a mathematical point of view as well as // from an implementation point of view. // // From a mathematical point of view, a matrix is called symmetric when it is equal to its // transpose (\f$ A = A^T \f$) and it is called Hermitian when it is equal to its conjugate // transpose (\f$ A = \overline{A^T} \f$). For matrices of real values, however, these two // conditions coincide, which means that symmetric matrices of real values are also Hermitian // and Hermitian matrices of real values are also symmetric. // // From an implementation point of view, \b Blaze restricts Hermitian matrices to numeric data // types (i.e. all integral types except \c bool, floating point and complex types), whereas // symmetric matrices can also be block matrices (i.e. can have vector or matrix elements).
// For built-in element types, the HermitianMatrix adaptor behaves exactly like the according // SymmetricMatrix implementation. For complex element types, however, the Hermitian property // is enforced (see also \ref adaptors_hermitian_matrices_hermitian). \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::HermitianMatrix; using blaze::SymmetricMatrix; // The following two matrices provide an identical experience (including performance) HermitianMatrix< DynamicMatrix<double> > A; // Both Hermitian and symmetric SymmetricMatrix< DynamicMatrix<double> > B; // Both Hermitian and symmetric // The following two matrices will behave differently HermitianMatrix< DynamicMatrix< complex<double> > > C; // Only Hermitian SymmetricMatrix< DynamicMatrix< complex<double> > > D; // Only symmetric // Hermitian block matrices are not allowed HermitianMatrix< DynamicMatrix< DynamicVector<double> > > E; // Compilation error! SymmetricMatrix< DynamicMatrix< DynamicVector<double> > > F; // Symmetric block matrix \endcode // \n \section adaptors_hermitian_matrices_special_properties Special Properties of Hermitian Matrices // <hr> // // An Hermitian matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the Hermitian symmetry constraint: // // -# <b>\ref adaptors_hermitian_matrices_square</b> // -# <b>\ref adaptors_hermitian_matrices_hermitian</b> // -# <b>\ref adaptors_hermitian_matrices_initialization</b> // // \n \subsection adaptors_hermitian_matrices_square Hermitian Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<std::complex<double>,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 Hermitian static matrix HermitianMatrix< StaticMatrix<std::complex<float>,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type HermitianMatrix< StaticMatrix<std::complex<float>,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_hermitian_matrices_hermitian The Hermitian Property is Always Enforced! // // This means that the following properties of an Hermitian matrix are always guaranteed: // // - The diagonal elements are real numbers, i.e. the imaginary part is zero // - Element \f$ a_{ij} \f$ is always the complex conjugate of element \f$ a_{ji} \f$ // // Thus modifying the element \f$ a_{ij} \f$ of an Hermitian matrix also modifies its // counterpart element \f$ a_{ji} \f$. 
Also, it is only possible to assign matrices that // are Hermitian themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using cplx = std::complex<double>; // Default constructed, row-major 3x3 Hermitian compressed matrix HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); // Initializing the matrix via the function call operator // // ( (1, 0) (0,0) (2,1) ) // ( (0, 0) (0,0) (0,0) ) // ( (2,-1) (0,0) (0,0) ) // A(0,0) = cplx( 1.0, 0.0 ); // Initialization of the diagonal element (0,0) A(0,2) = cplx( 2.0, 1.0 ); // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function // // ( (1, 0) (0,0) (2, 1) ) // ( (0, 0) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // A.insert( 1, 1, cplx( 2.0, 0.0 ) ); // Inserting the diagonal element (1,1) A.insert( 1, 2, cplx( 4.0, -2.0 ) ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator // // ( (1, 0) (8,1) (2, 1) ) // ( (8,-1) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // *A.begin(1UL) = cplx( 8.0, -1.0 ); // Modifies both elements (1,0) and (0,1) // Erasing elements via the erase() function // // ( (0, 0) (8,1) (0, 0) ) // ( (8,-1) (2,0) (4,-2) ) // ( (0, 0) (4,2) (0, 0) ) // A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from an Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> B{ { cplx( 3.0, 0.0 ), cplx( 8.0, 2.0 ), cplx( -2.0, 2.0 ) }, { cplx( 8.0, -2.0 ), cplx( 0.0, 0.0 ), cplx( -1.0, -1.0 ) }, { cplx( -2.0, -2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( B ); // OK // Assignment of a non-Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> D{ { cplx( 3.0, 0.0 ), cplx( 7.0, 2.0 ), cplx( 3.0, 2.0 ) }, { cplx( 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( 6.0, 4.0 ) }, { cplx( -2.0, 2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; C = D; // Throws an exception; Hermitian invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up an Hermitian sparse matrix.
In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using cplx = std::complex<double>; // Setup of the Hermitian matrix // // ( (0, 0) (1,2) (3,-4) ) // A = ( (1,-2) (2,0) (0, 0) ) // ( (3, 4) (0,0) (0, 0) ) // HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, cplx( 1.0, 2.0 ) ); // Appending an element at position (0,1) and (1,0) A.append( 1, 1, cplx( 2.0, 0.0 ) ); // Appending an element at position (1,1) A.append( 2, 0, cplx( 3.0, 4.0 ) ); // Appending an element at position (2,0) and (0,2) \endcode // The Hermitian property is also enforced for Hermitian custom matrices: In case the given array // of elements does not represent an Hermitian matrix, a \c std::invalid_argument exception is // thrown: \code using blaze::CustomMatrix; using blaze::HermitianMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using CustomHermitian = HermitianMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >; // Creating a 3x3 Hermitian custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomHermitian A( array, 3UL ); // OK // Attempt to create a second 3x3 Hermitian custom matrix from an uninitialized array std::unique_ptr<double[]> memory( new double[9UL] ); CustomHermitian B( memory.get(), 3UL ); // Throws an exception \endcode // Finally, the Hermitian property is enforced for views (rows, columns, submatrices, ...) on the // Hermitian matrix. The following example demonstrates that modifying the elements of an entire // row of the Hermitian matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using cplx = std::complex<double>; // Setup of the Hermitian matrix // // ( (0, 0) (1,-1) (0,0) (2, 1) ) // A = ( (1, 1) (3, 0) (4,2) (0, 0) ) // ( (0, 0) (4,-2) (0,0) (5,-3) ) // ( (2,-1) (0, 0) (5,3) (0, 0) ) // HermitianMatrix< DynamicMatrix<cplx> > A( 4 ); A(0,1) = cplx( 1.0, -1.0 ); A(0,3) = cplx( 2.0, 1.0 ); A(1,1) = cplx( 3.0, 0.0 ); A(1,2) = cplx( 4.0, 2.0 ); A(2,3) = cplx( 5.0, 3.0 ); // Setting all elements in the 1st row to 0 results in the matrix // // ( (0, 0) (0,0) (0,0) (2, 1) ) // A = ( (0, 0) (0,0) (0,0) (0, 0) ) // ( (0, 0) (0,0) (0,0) (5,-3) ) // ( (2,-1) (0,0) (5,3) (0, 0) ) // row( A, 1 ) = cplx( 0.0, 0.0 ); \endcode // The next example demonstrates the (compound) assignment to submatrices of Hermitian matrices. // Since the modification of element \f$ a_{ij} \f$ of an Hermitian matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the Hermitian // symmetry of the matrix is preserved.
Otherwise a \c std::invalid_argument exception is thrown: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using cplx = std::complex<double>; // Setup of two default 4x4 Hermitian matrices HermitianMatrix< DynamicMatrix<cplx> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( (1,-1) (2, 5) ) // B = ( (3, 0) (4,-6) ) // ( (5, 0) (6, 0) ) // DynamicMatrix<cplx> B( 3UL, 2UL ); B(0,0) = cplx( 1.0, -1.0 ); B(0,1) = cplx( 2.0, 5.0 ); B(1,0) = cplx( 3.0, 0.0 ); B(1,1) = cplx( 4.0, -6.0 ); B(2,0) = cplx( 5.0, 0.0 ); B(2,1) = cplx( 6.0, 0.0 ); // OK: Assigning B to a submatrix of A1 such that the Hermitian property is preserved // // ( (0, 0) (0, 0) (1,-1) (2, 5) ) // A1 = ( (0, 0) (0, 0) (3, 0) (4,-6) ) // ( (1, 1) (3, 0) (5, 0) (6, 0) ) // ( (2,-5) (4, 6) (6, 0) (0, 0) ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the Hermitian property isn't preserved! // The elements marked with X cannot be assigned unambiguously! // // ( (0, 0) (1,-1) (2,5) (0,0) ) // A2 = ( (1, 1) (3, 0) (X,X) (0,0) ) // ( (2,-5) (X, X) (6,0) (0,0) ) // ( (0, 0) (0, 0) (0,0) (0,0) ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_hermitian_matrices_initialization The Elements of a Dense Hermitian Matrix are Always Default Initialized! // // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the Hermitian property of // dense Hermitian matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_hermitian_matrices_arithmetic_operations Arithmetic Operations // <hr> // // An HermitianMatrix can be used within all numerical operations in any way any other dense or // sparse matrix can be used. It can also be combined with any other dense or sparse vector or // matrix.
The following code example gives an impression of the use of HermitianMatrix within // arithmetic operations: \code using blaze::HermitianMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; using cplx = std::complex<float>; DynamicMatrix<cplx,rowMajor> A( 3, 3 ); CompressedMatrix<cplx,rowMajor> B( 3, 3 ); HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( 3 ); HermitianMatrix< CompressedMatrix<cplx,rowMajor> > D( 3 ); HermitianMatrix< HybridMatrix<cplx,3UL,3UL,rowMajor> > E; HermitianMatrix< StaticMatrix<cplx,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major Hermitian matrix (includes runtime check) F = C - D; // Matrix subtraction and assignment to a column-major Hermitian matrix (only compile time check) F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check) C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C (only compile time check) E += A - B; // Addition assignment (includes runtime check) F -= C + D; // Subtraction assignment (only compile time check) F *= A * D; // Multiplication assignment (includes runtime check) \endcode // Note that it is possible to assign any kind of matrix to an Hermitian matrix. In case the matrix // to be assigned is not Hermitian at compile time, a runtime check is performed. // // // \n \section adaptors_hermitian_matrices_performance Performance Considerations // <hr> // // When the Hermitian property of a matrix is known beforehand, using the HermitianMatrix adaptor // instead of a general matrix can be a considerable performance advantage. This is particularly // true in case the Hermitian matrix is also symmetric (i.e. has built-in element types). The // \b Blaze library tries to exploit the properties of Hermitian (symmetric) matrices whenever // possible. However, there are also situations when using an Hermitian matrix introduces some // overhead. The following examples demonstrate several situations where Hermitian matrices can // positively or negatively impact performance. // // \n \subsection adaptors_hermitian_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using blaze::columnMajor; HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Both Hermitian and symmetric HermitianMatrix< CompressedMatrix<double,columnMajor> > B; // Both Hermitian and symmetric DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using a // symmetric matrix is obviously an advantage.
// // \n \subsection adaptors_hermitian_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar optimization is possible in case of matrix/vector multiplications: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::HermitianMatrix; using blaze::rowMajor; using blaze::columnVector; HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Hermitian and symmetric CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance, since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which also significantly increases the performance. // // \n \subsection adaptors_hermitian_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices // // Another example is the optimization of a row view on a column-major symmetric matrix: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::columnMajor; HermitianMatrix< DynamicMatrix<double,columnMajor> > A( 10UL ); // Both Hermitian and symmetric auto row5 = row( A, 5UL ); \endcode // Usually, a row view on a column-major matrix results in a considerable performance decrease in // comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix // elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of // the matrix, which provides the same performance as if the matrix would be row-major. Note that // this also works for column views on row-major matrices, where \b Blaze can use the according // row instead of a column in order to provide maximum performance. // // \n \subsection adaptors_hermitian_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using an Hermitian matrix on the right-hand side of an assignment (i.e. for read // access), which introduces absolutely no performance penalty, using an Hermitian matrix on the // left-hand side of an assignment (i.e. for write access) may introduce additional overhead when // it is assigned a general matrix, which is not Hermitian at compile time: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; HermitianMatrix< DynamicMatrix< complex<double> > > A, C; DynamicMatrix< complex<double> > B; B = A; // Only read-access to the Hermitian matrix; no performance penalty C = A; // Assignment of an Hermitian matrix to another Hermitian matrix; no runtime overhead C = B; // Assignment of a general matrix to an Hermitian matrix; some runtime overhead \endcode // When assigning a general, potentially not Hermitian matrix to an Hermitian matrix it is necessary // to check whether the matrix is Hermitian at runtime in order to guarantee the Hermitian property // of the Hermitian matrix. In case it turns out to be Hermitian, it is assigned as efficiently as // possible; if it is not, an exception is thrown.
In order to prevent this runtime overhead it is // therefore generally advisable to assign Hermitian matrices to other Hermitian matrices.\n // In this context it is especially noteworthy that in contrast to additions and subtractions the // multiplication of two Hermitian matrices does not necessarily result in another Hermitian matrix: \code HermitianMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in an Hermitian matrix; no runtime overhead C = A - B; // Results in an Hermitian matrix; no runtime overhead C = A * B; // Is not guaranteed to result in an Hermitian matrix; some runtime overhead \endcode // \n Previous: \ref adaptors_symmetric_matrices &nbsp; &nbsp; Next: \ref adaptors_triangular_matrices */ //************************************************************************************************* //**Triangular Matrices**************************************************************************** /*!\page adaptors_triangular_matrices Triangular Matrices // // \tableofcontents // // // \n \section adaptors_triangular_matrices_general Triangular Matrices // <hr> // // Triangular matrices come in three flavors: Lower triangular matrices provide the compile time // guarantee to be square matrices and that the upper part of the matrix contains only default // elements that cannot be modified. Upper triangular matrices on the other hand provide the // compile time guarantee to be square and that the lower part of the matrix contains only fixed // default elements. Finally, diagonal matrices provide the compile time guarantee to be square // and that both the lower and upper part of the matrix contain only immutable default elements. // These properties can be exploited to gain higher performance and/or to save memory. Within the // \b Blaze library, several kinds of lower and upper triangular and diagonal matrices are realized // by the following class templates: // // Lower triangular matrices: // - <b>\ref adaptors_triangular_matrices_lowermatrix</b> // - <b>\ref adaptors_triangular_matrices_unilowermatrix</b> // - <b>\ref adaptors_triangular_matrices_strictlylowermatrix</b> // // Upper triangular matrices: // - <b>\ref adaptors_triangular_matrices_uppermatrix</b> // - <b>\ref adaptors_triangular_matrices_uniuppermatrix</b> // - <b>\ref adaptors_triangular_matrices_strictlyuppermatrix</b> // // Diagonal matrices // - <b>\ref adaptors_triangular_matrices_diagonalmatrix</b> // // // \n \section adaptors_triangular_matrices_lowermatrix LowerMatrix // <hr> // // The blaze::LowerMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant that all matrix elements above the diagonal are 0 (lower // triangular matrix): \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 \\ l_{1,0} & l_{1,1} & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & l_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/LowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class LowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::LowerMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. 
Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible lower matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense lower matrix with static memory blaze::LowerMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense lower matrix based on HybridMatrix blaze::LowerMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense lower matrix based on DynamicMatrix blaze::LowerMatrix< blaze::DynamicMatrix<double,rowMajor> > C; // Definition of a fixed size row-major dense lower matrix based on CustomMatrix blaze::LowerMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision lower matrix blaze::LowerMatrix< blaze::CompressedMatrix<float,rowMajor> > E; \endcode // The storage order of a lower matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified // as blaze::rowMajor), the lower matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the lower matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_unilowermatrix UniLowerMatrix // <hr> // // The blaze::UniLowerMatrix class template is an adapter for existing dense and sparse matrix // types. It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix // elements above the diagonal are 0 (lower unitriangular matrix): \f[\left(\begin{array}{*{5}{c}} 1 & 0 & 0 & \cdots & 0 \\ l_{1,0} & 1 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 1 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 1 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UniLowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UniLowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UniLowerMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix).
// // The following examples give an impression of several possible lower unitriangular matrices: \code // Definition of a 3x3 row-major dense unilower matrix with static memory blaze::UniLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense unilower matrix based on HybridMatrix blaze::UniLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense unilower matrix based on DynamicMatrix blaze::UniLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision unilower matrix blaze::UniLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a lower unitriangular matrix depends on the storage order of the // adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the unilower matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the unilower matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_strictlylowermatrix StrictlyLowerMatrix // <hr> // // The blaze::StrictlyLowerMatrix class template is an adapter for existing dense and sparse matrix // types. It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements and all matrix // elements above the diagonal are 0 (strictly lower triangular matrix): \f[\left(\begin{array}{*{5}{c}} 0 & 0 & 0 & \cdots & 0 \\ l_{1,0} & 0 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 0 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 0 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/StrictlyLowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class StrictlyLowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::StrictlyLowerMatrix can be used // with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix // type. Note that the given matrix type must be either resizable (as for instance // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance // blaze::StaticMatrix). // // The following examples give an impression of several possible strictly lower triangular matrices: \code // Definition of a 3x3 row-major dense strictly lower matrix with static memory blaze::StrictlyLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense strictly lower matrix based on HybridMatrix blaze::StrictlyLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense strictly lower matrix based on DynamicMatrix blaze::StrictlyLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision strictly lower matrix blaze::StrictlyLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a strictly lower triangular matrix depends on the storage order of // the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the strictly lower matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the strictly lower matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_uppermatrix UpperMatrix // <hr> // // The blaze::UpperMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant that all matrix elements below the diagonal are 0 (upper // triangular matrix): \f[\left(\begin{array}{*{5}{c}} u_{0,0} & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & u_{1,1} & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & u_{2,2} & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & u_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UpperMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible upper matrices: \code // Definition of a 3x3 row-major dense upper matrix with static memory blaze::UpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense upper matrix based on HybridMatrix blaze::UpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense upper matrix based on DynamicMatrix blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision upper matrix blaze::UpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of an upper matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified // as blaze::rowMajor), the upper matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the upper matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_uniuppermatrix UniUpperMatrix // <hr> // // The blaze::UniUpperMatrix class template is an adapter for existing dense and sparse matrix // types.
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix // elements below the diagonal are 0 (upper unitriangular matrix): \f[\left(\begin{array}{*{5}{c}} 1 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & 1 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & 1 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & 1 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UniUpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UniUpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UniUpperMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). // // The following examples give an impression of several possible upper unitriangular matrices: \code // Definition of a 3x3 row-major dense uniupper matrix with static memory blaze::UniUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense uniupper matrix based on HybridMatrix blaze::UniUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense uniupper matrix based on DynamicMatrix blaze::UniUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision uniupper matrix blaze::UniUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of an upper unitriangular matrix depends on the storage order of the // adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the uniupper matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the uniupper matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_strictlyuppermatrix StrictlyUpperMatrix // <hr> // // The blaze::StrictlyUpperMatrix class template is an adapter for existing dense and sparse matrix // types. It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements and all matrix // elements below the diagonal are 0 (strictly upper triangular matrix): \f[\left(\begin{array}{*{5}{c}} 0 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & 0 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & 0 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & 0 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/StrictlyUpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class StrictlyUpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted.
blaze::StrictlyUpperMatrix can be used // with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix // type. Note that the given matrix type must be either resizable (as for instance // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance // blaze::StaticMatrix). // // The following examples give an impression of several possible strictly upper triangular matrices: \code // Definition of a 3x3 row-major dense strictly upper matrix with static memory blaze::StrictlyUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense strictly upper matrix based on HybridMatrix blaze::StrictlyUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense strictly upper matrix based on DynamicMatrix blaze::StrictlyUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision strictly upper matrix blaze::StrictlyUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a strictly upper triangular matrix depends on the storage order of // the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the strictly upper matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the strictly upper matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_diagonalmatrix DiagonalMatrix // <hr> // // The blaze::DiagonalMatrix class template is an adapter for existing dense and sparse matrix // types. It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all matrix elements above and below the diagonal // are 0 (diagonal matrix): \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 \\ 0 & l_{1,1} & 0 & \cdots & 0 \\ 0 & 0 & l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & l_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/DiagonalMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class DiagonalMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::DiagonalMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
// // The following examples give an impression of several possible diagonal matrices: \code // Definition of a 3x3 row-major dense diagonal matrix with static memory blaze::DiagonalMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense diagonal matrix based on HybridMatrix blaze::DiagonalMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense diagonal matrix based on DynamicMatrix blaze::DiagonalMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision diagonal matrix blaze::DiagonalMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a diagonal matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified // as blaze::rowMajor), the diagonal matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the diagonal matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_special_properties Special Properties of Triangular Matrices // <hr> // // A triangular matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the triangular matrix constraint: // // -# <b>\ref adaptors_triangular_matrices_square</b> // -# <b>\ref adaptors_triangular_matrices_triangular</b> // -# <b>\ref adaptors_triangular_matrices_initialization</b> // -# <b>\ref adaptors_triangular_matrices_storage</b> // -# <b>\ref adaptors_triangular_matrices_scaling</b> // // \n \subsection adaptors_triangular_matrices_square Triangular Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 lower dynamic matrix LowerMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 lower static matrix LowerMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type LowerMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_triangular_matrices_triangular The Triangular Property is Always Enforced! // // This means that it is only allowed to modify elements in the lower part or the diagonal of // a lower triangular matrix and in the upper part or the diagonal of an upper triangular matrix.
// Unitriangular and strictly triangular matrices are even more restrictive and don't allow the // modification of diagonal elements. Also, triangular matrices can only be assigned matrices that // don't violate their triangular property. The following example demonstrates this restriction // by means of the blaze::LowerMatrix adaptor. For examples with other triangular matrix types // see the according class documentations. \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using CompressedLower = LowerMatrix< CompressedMatrix<double,rowMajor> >; // Default constructed, row-major 3x3 lower compressed matrix CompressedLower A( 3 ); // Initializing elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(2,0) = 2.0; // Initialization of the lower element (2,0) A(1,2) = 9.0; // Throws an exception; invalid modification of upper element // Inserting two more elements via the insert() function A.insert( 1, 0, 3.0 ); // Inserting the lower element (1,0) A.insert( 2, 1, 4.0 ); // Inserting the lower element (2,1) A.insert( 0, 2, 9.0 ); // Throws an exception; invalid insertion of upper element // Appending an element via the append() function A.reserve( 1, 3 ); // Reserving enough capacity in row 1 A.append( 1, 1, 5.0 ); // Appending the diagonal element (1,1) A.append( 1, 2, 9.0 ); // Throws an exception; appending an element in the upper part // Access via a non-const iterator CompressedLower::Iterator it = A.begin(1); *it = 6.0; // Modifies the lower element (1,0) ++it; *it = 9.0; // Modifies the diagonal element (1,1) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 2, 0 ); // Erasing the lower element (2,0) // Construction from a lower dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 0.0, 0.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; LowerMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-lower dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 0.0, -2.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; lower matrix invariant would be violated! \endcode // The triangular property is also enforced during the construction of triangular custom matrices: // In case the given array of elements does not represent the according triangular matrix type, a // \c std::invalid_argument exception is thrown: \code using blaze::CustomMatrix; using blaze::LowerMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using CustomLower = LowerMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >; // Creating a 3x3 lower custom matrix from a properly initialized array double array[9] = { 1.0, 0.0, 0.0, 2.0, 3.0, 0.0, 4.0, 5.0, 6.0 }; CustomLower A( array, 3UL ); // OK // Attempt to create a second 3x3 lower custom matrix from an uninitialized array std::unique_ptr<double[]> memory( new double[9UL] ); CustomLower B( memory.get(), 3UL ); // Throws an exception \endcode // Finally, the triangular matrix property is enforced for views (rows, columns, submatrices, ...) // on the triangular matrix. The following example demonstrates that modifying the elements of an // entire row and submatrix of a lower matrix only affects the lower and diagonal matrix elements. // Again, this example uses blaze::LowerMatrix, for examples with other triangular matrix types // see the according class documentations. 
\code using blaze::DynamicMatrix; using blaze::LowerMatrix; // Setup of the lower matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 0 3 0 0 ) // ( 4 0 5 0 ) // LowerMatrix< DynamicMatrix<int> > A( 4 ); A(1,0) = 1; A(1,1) = 2; A(2,1) = 3; A(3,0) = 4; A(3,2) = 5; // Setting the lower and diagonal elements in the 2nd row to 9 results in the matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 9 9 9 0 ) // ( 4 0 5 0 ) // row( A, 2 ) = 9; // Setting the lower and diagonal elements in the 1st and 2nd column to 7 results in // // ( 0 0 0 0 ) // A = ( 1 7 0 0 ) // ( 9 7 7 0 ) // ( 4 7 7 0 ) // submatrix( A, 0, 1, 4, 2 ) = 7; \endcode // The next example demonstrates the (compound) assignment to rows/columns and submatrices of // triangular matrices. Since only lower/upper and potentially diagonal elements may be modified // the matrix to be assigned must be structured such that the triangular matrix invariant of the // matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::LowerMatrix; using blaze::rowVector; // Setup of two default 4x4 lower matrices LowerMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of a 4-dimensional vector // // v = ( 1 2 3 0 ) // DynamicVector<int,rowVector> v{ 1, 2, 3, 0 }; // OK: Assigning v to the 2nd row of A1 preserves the lower matrix invariant // // ( 0 0 0 0 ) // A1 = ( 0 0 0 0 ) // ( 1 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 2 ) = v; // OK // Error: Assigning v to the 1st row of A1 violates the lower matrix invariant! The element // marked with X cannot be assigned and triggers an exception. // // ( 0 0 0 0 ) // A1 = ( 1 2 X 0 ) // ( 1 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 1 ) = v; // Assignment throws an exception! // Setup of the 3x2 dynamic matrix // // ( 0 0 ) // B = ( 7 0 ) // ( 8 9 ) // DynamicMatrix<int> B( 3UL, 2UL, 0 ); B(1,0) = 7; B(2,0) = 8; B(2,1) = 9; // OK: Assigning B to a submatrix of A2 such that the lower matrix invariant can be preserved // // ( 0 0 0 0 ) // A2 = ( 0 7 0 0 ) // ( 0 8 9 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the lower matrix invariant cannot be // preserved! The elements marked with X cannot be assigned without violating the invariant! // // ( 0 0 0 0 ) // A2 = ( 0 7 X 0 ) // ( 0 8 8 X ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 2UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_triangular_matrices_initialization The Elements of a Dense Triangular Matrix are Always Default Initialized! // // Although this results in a small loss of efficiency during the creation of a dense lower or // upper matrix this initialization is important since otherwise the lower/upper matrix property // of dense lower matrices would not be guaranteed: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // 5x5 row-major lower dynamic matrix with default initialized upper matrix LowerMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); // 7x7 column-major upper dynamic matrix with default initialized lower matrix UpperMatrix< DynamicMatrix<int,columnMajor> > C( 7 ); // 3x3 row-major diagonal dynamic matrix with default initialized lower and upper matrix DiagonalMatrix< DynamicMatrix<int,rowMajor> > D( 3 ); \endcode // \n \subsection adaptors_triangular_matrices_storage Dense Triangular Matrices Store All Elements! 
// // All dense triangular matrices store all \f$ N \times N \f$ elements, including the immutable // elements in the lower or upper part, respectively. Therefore dense triangular matrices don't // provide any kind of memory reduction! There are two main reasons for this: First, storing also // the zero elements guarantees maximum performance for many algorithms that perform vectorized // operations on the triangular matrices, which is especially true for small dense matrices. // Second, conceptually all triangular adaptors merely restrict the interface to the matrix type // \c MT and do not change the data layout or the underlying matrix type. // // This property matters most for diagonal matrices. In order to achieve the perfect combination // of performance and memory consumption for a diagonal matrix it is recommended to use dense // matrices for small diagonal matrices and sparse matrices for large diagonal matrices: \code // Recommendation 1: use dense matrices for small diagonal matrices using SmallDiagonalMatrix = blaze::DiagonalMatrix< blaze::StaticMatrix<float,3UL,3UL> >; // Recommendation 2: use sparse matrices for large diagonal matrices using LargeDiagonalMatrix = blaze::DiagonalMatrix< blaze::CompressedMatrix<float> >; \endcode // \n \subsection adaptors_triangular_matrices_scaling Unitriangular Matrices Cannot Be Scaled! // // Since the diagonal elements of a unitriangular matrix have a fixed value of 1 it is not possible // to self-scale such a matrix: \code using blaze::DynamicMatrix; using blaze::UniLowerMatrix; UniLowerMatrix< DynamicMatrix<int> > A( 4 ); A *= 2; // Compilation error; Scale operation is not available on an unilower matrix A /= 2; // Compilation error; Scale operation is not available on an unilower matrix A.scale( 2 ); // Compilation error; Scale function is not available on an unilower matrix A = A * 2; // Throws an exception; Invalid assignment of non-unilower matrix A = A / 2; // Throws an exception; Invalid assignment of non-unilower matrix \endcode // \n \section adaptors_triangular_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A lower and upper triangular matrix can participate in numerical operations in any way any other // dense or sparse matrix can participate. It can also be combined with any other dense or sparse // vector or matrix. 
The following code example gives an impression of the use of blaze::LowerMatrix // within arithmetic operations: \code using blaze::LowerMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; DynamicMatrix<double,rowMajor> A( 3, 3 ); CompressedMatrix<double,rowMajor> B( 3, 3 ); LowerMatrix< DynamicMatrix<double,rowMajor> > C( 3 ); LowerMatrix< CompressedMatrix<double,rowMajor> > D( 3 ); LowerMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E; LowerMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major lower matrix (includes runtime check) F = C - D; // Matrix subtraction and assignment to a column-major lower matrix (only compile time check) F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check) C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C (only compile time check) E += A - B; // Addition assignment (includes runtime check) F -= C + D; // Subtraction assignment (only compile time check) F *= A * D; // Multiplication assignment (includes runtime check) \endcode // Note that it is possible to assign any kind of matrix to a triangular matrix. In case the // matrix to be assigned does not satisfy the invariants of the triangular matrix at compile // time, a runtime check is performed. Also note that upper triangular, diagonal, unitriangular // and strictly triangular matrix types can be used in the same way, but may pose some additional // restrictions (see the according class documentations). // // // \n \section adaptors_triangular_matrices_block_matrices Triangular Block Matrices // <hr> // // It is also possible to use triangular block matrices: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; // Definition of a 5x5 lower block matrix based on DynamicMatrix LowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); // Definition of a 7x7 upper block matrix based on CompressedMatrix UpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); \endcode // Also in this case the triangular matrix invariant is enforced, i.e. it is not possible to // manipulate elements in the upper part (lower triangular matrix) or the lower part (upper // triangular matrix) of the matrix: \code const StaticMatrix<int,3UL,3UL> C{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } }; A(2,4)(1,1) = -5; // Invalid manipulation of upper matrix element; Results in an exception B.insert( 4, 2, C ); // Invalid insertion of the elements (4,2); Results in an exception \endcode // Note that unitriangular matrices are restricted to numeric element types and therefore cannot // be used for block matrices: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::UniLowerMatrix; using blaze::UniUpperMatrix; // Compilation error: lower unitriangular matrices are restricted to numeric element types UniLowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); // Compilation error: upper unitriangular matrices are restricted to numeric element types UniUpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); \endcode // For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices. 
// // // \n \section adaptors_triangular_matrices_performance Performance Considerations // <hr> // // The \b Blaze library tries to exploit the properties of lower and upper triangular matrices // whenever and wherever possible. Therefore using triangular matrices instead of a general // matrices can result in a considerable performance improvement. However, there are also // situations when using a triangular matrix introduces some overhead. The following examples // demonstrate several common situations where triangular matrices can positively or negatively // impact performance. // // \n \subsection adaptors_triangular_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the // fact that either the lower or upper part of the matrix contains only default elements and // restrict the algorithm to the non-zero elements. The following example demonstrates this by // means of a dense matrix/dense matrix multiplication with lower triangular matrices: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnMajor; LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // In comparison to a general matrix multiplication, the performance advantage is significant, // especially for large matrices. Therefore is it highly recommended to use the blaze::LowerMatrix // and blaze::UpperMatrix adaptors when a matrix is known to be lower or upper triangular, // respectively. Note however that the performance advantage is most pronounced for dense matrices // and much less so for sparse matrices. // // \n \subsection adaptors_triangular_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar performance improvement can be gained when using a triangular matrix in a matrix/vector // multiplication: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; LowerMatrix< DynamicMatrix<double,rowMajor> > A; DynamicVector<double,columnVector> x, y; // ... Resizing and initialization y = A * x; \endcode // In this example, \b Blaze also exploits the structure of the matrix and approx. halves the // runtime of the multiplication. Also in case of matrix/vector multiplications the performance // improvement is most pronounced for dense matrices and much less so for sparse matrices. // // \n \subsection adaptors_triangular_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a triangular matrix on the right-hand side of an assignment (i.e. for // read access), which introduces absolutely no performance penalty, using a triangular matrix // on the left-hand side of an assignment (i.e. 
for write access) may introduce additional // overhead when it is assigned a general matrix, which is not triangular at compile time: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; LowerMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the lower matrix; no performance penalty C = A; // Assignment of a lower matrix to another lower matrix; no runtime overhead C = B; // Assignment of a general matrix to a lower matrix; some runtime overhead \endcode // When assigning a general (potentially not lower triangular) matrix to a lower matrix or a // general (potentially not upper triangular) matrix to an upper matrix it is necessary to check // whether the matrix is lower or upper at runtime in order to guarantee the triangular property // of the matrix. In case it turns out to be lower or upper, respectively, it is assigned as // efficiently as possible, if it is not, an exception is thrown. In order to prevent this runtime // overhead it is therefore generally advisable to assign lower or upper triangular matrices to // other lower or upper triangular matrices.\n // In this context it is especially noteworthy that the addition, subtraction, and multiplication // of two triangular matrices of the same structure always results in another triangular matrix: \code LowerMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a lower matrix; no runtime overhead C = A - B; // Results in a lower matrix; no runtime overhead C = A * B; // Results in a lower matrix; no runtime overhead \endcode \code UpperMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in an upper matrix; no runtime overhead C = A - B; // Results in an upper matrix; no runtime overhead C = A * B; // Results in an upper matrix; no runtime overhead \endcode // \n Previous: \ref adaptors_hermitian_matrices &nbsp; &nbsp; Next: \ref views */ //************************************************************************************************* //**Views****************************************************************************************** /*!\page views Views // // \tableofcontents // // // \section views_general General Concepts // <hr> // // Views represents parts of a vector or matrix, such as a subvector, a submatrix, or a specific // row, column, or band of a matrix. As such, views act as a reference to specific elements of // a vector or matrix. This reference is valid and can be used in every way as any other vector // or matrix can be used as long as the referenced vector or matrix is not resized or entirely // destroyed. Views also act as alias to the elements of the vector or matrix: Changes made to the // elements (e.g. modifying values, inserting or erasing elements) via the view are immediately // visible in the vector or matrix and changes made via the vector or matrix are immediately // visible in the view. // // It is also possible to create nested views (compound views), such as for instance bands of // submatrices or row selections on column selections. A compound view also acts as reference // to specific elements of the underlying vector or matrix and is valid as long as the underlying, // referenced vector or matrix is not resized or entirely destroyed. 
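//
// The following sketch shows two such compound views, a band of a submatrix and a row selection
// on a column selection. It is only an illustration; the matrix size and all indices are
// arbitrary choices (see \ref views_bands, \ref views_row_selections, and
// \ref views_column_selections for the individual view types):

   \code
   blaze::DynamicMatrix<int> A( 8UL, 8UL );
   // ... Resizing and initialization

   // Band view on the first upper diagonal of the top-left 4x4 submatrix of A (compound view)
   auto sm = submatrix( A, 0UL, 0UL, 4UL, 4UL );
   auto bd = band( sm, 1L );

   // Row selection on a column selection of A (compound view)
   auto cs = columns( A, { 0UL, 2UL, 4UL } );
   auto rs = rows( cs, { 1UL, 3UL } );

   // Changes made via the compound views are immediately visible in A ...
   bd[0]   = 2;   // Modifies the element A(0,1)
   rs(0,1) = 5;   // Modifies the element A(1,2)

   // ... and changes made to A are immediately visible in the views
   A(3,4) = -1;   // Affects the element rs(1,2)
   \endcode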
// // The \b Blaze library provides the following views on vectors and matrices: // // Vector views: // - \ref views_subvectors // - \ref views_element_selections // // Matrix views: // - \ref views_submatrices // - \ref views_rows // - \ref views_row_selections // - \ref views_columns // - \ref views_column_selections // - \ref views_bands // // // \n \section views_examples Examples \code using blaze::DynamicMatrix; using blaze::StaticVector; // Setup of the 3x5 row-major matrix DynamicMatrix<int> A{ { 1, 0, -2, 3, 0 }, { 0, 2, 5, -1, -1 }, { 1, 0, 0, 2, 1 } }; // Setup of the 2-dimensional row vector StaticVector<int,2UL,rowVector> vec{ 18, 19 }; // Assigning to the elements (1,2) and (1,3) via a subvector of a row // // ( 1 0 -2 3 0 ) // ( 0 2 18 19 -1 ) // ( 1 0 0 2 1 ) // subvector( row( A, 1UL ), 2UL, 2UL ) = vec; // Switching rows 0 and 2 of A // // ( 1 0 0 2 1 ) // ( 0 2 18 19 -1 ) // ( 1 0 -2 3 0 ) // rows<0,2>( A ) = rows<2,0>( A ); // Warning: It is the programmer's responsibility to ensure the view does not outlive // the viewed vector or matrix (dangling reference)! auto row1 = row<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 } } ); \endcode // \n Previous: \ref adaptors_triangular_matrices &nbsp; &nbsp; Next: \ref views_subvectors */ //************************************************************************************************* //**Subvectors************************************************************************************* /*!\page views_subvectors Subvectors // // \tableofcontents // // // Subvectors provide views on a specific part of a dense or sparse vector. As such, subvectors // act as a reference to a specific range within a vector. This reference is valid and can be // used in every way any other dense or sparse vector can be used as long as the vector containing // the subvector is not resized or entirely destroyed. The subvector also acts as an alias to the // vector elements in the specified range: Changes made to the elements (e.g. modifying values, // inserting or erasing elements) are immediately visible in the vector and changes made via the // vector are immediately visible in the subvector. // // // \n \section views_subvectors_setup Setup of Subvectors // <hr> // // A view on a dense or sparse subvector can be created very conveniently via the \c subvector() // function. It can be included via the header file \code #include <blaze/math/Subvector.h> \endcode // The first parameter specifies the offset of the subvector within the underlying dense or sparse // vector, the second parameter specifies the size of the subvector. The two parameters can be // specified either at compile time or at runtime: \code blaze::DynamicVector<double,blaze::rowVector> x; // ... Resizing and initialization // Create a subvector from index 4 with a size of 12 (i.e. in the range [4..15]) (compile time arguments) auto sv1 = subvector<4UL,12UL>( x ); // Create a subvector from index 8 with a size of 16 (i.e. in the range [8..23]) (runtime arguments) auto sv2 = subvector( x, 8UL, 16UL ); \endcode // The \c subvector() function returns an expression representing the subvector view. The type of // this expression depends on the given subvector arguments, primarily the type of the vector and // the compile time arguments. 
If the type is required, it can be determined via the \c decltype // specifier: \code using VectorType = blaze::DynamicVector<int>; using SubvectorType = decltype( blaze::subvector<4UL,12UL>( std::declval<VectorType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse vector, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. A subvector created // from a row vector can be used as any other row vector, a subvector created from a column vector // can be used as any other column vector. The view can also be used on both sides of an assignment: // The subvector can either be used as an alias to grant write access to a specific subvector of a // vector primitive on the left-hand side of an assignment or to grant read-access to a specific // subvector of a vector primitive or expression on the right-hand side of an assignment. The // following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Create a subvector from index 0 with a size of 10 (i.e. in the range [0..9]) auto sv = subvector( x, 0UL, 10UL ); // Setting the first ten elements of x to the 2nd row of matrix A sv = row( A, 2UL ); // Setting the second ten elements of x to y subvector( x, 10UL, 10UL ) = y; // Setting the 3rd row of A to a subvector of x row( A, 3UL ) = subvector( x, 3UL, 10UL ); // Setting x to a subvector of the result of the addition between y and the 1st row of A x = subvector( y + row( A, 1UL ), 2UL, 5UL ); \endcode // \warning It is the programmer's responsibility to ensure the subvector does not outlive the // viewed vector: \code // Creating a subvector on a temporary vector; results in a dangling reference! auto sv = subvector<1UL,3UL>( DynamicVector<int>{ 1, 2, 3, 4, 5 } ); \endcode // \n \section views_subvectors_element_access Element Access // <hr> // // The elements of a subvector can be directly accessed via the subscript operator: \code blaze::DynamicVector<double,blaze::rowVector> v; // ... Resizing and initialization // Creating an 8-dimensional subvector, starting from index 4 auto sv = subvector( v, 4UL, 8UL ); // Setting the 1st element of the subvector, which corresponds to // the element at index 5 in vector v sv[1] = 2.0; \endcode // The numbering of the subvector elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the specified size of the subvector. Alternatively, the elements of a subvector can // be traversed via iterators. Just as with vectors, in case of non-const subvectors, \c begin() // and \c end() return an iterator, which allows to manipulate the elements, in case of constant // subvectors an iterator to immutable elements is returned: \code blaze::DynamicVector<int,blaze::rowVector> v( 256UL ); // ... Resizing and initialization // Creating a reference to a specific subvector of vector v auto sv = subvector( v, 16UL, 64UL ); // Traversing the elements via iterators to non-const elements for( auto it=sv.begin(); it!=sv.end(); ++it ) { *it = ...; // OK: Write access to the dense subvector value. ... = *it; // OK: Read access to the dense subvector value. } // Traversing the elements via iterators to const elements for( auto it=sv.cbegin(); it!=sv.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... 
= *it; // OK: Read access to the dense subvector value. } \endcode \code blaze::CompressedVector<int,blaze::rowVector> v( 256UL ); // ... Resizing and initialization // Creating a reference to a specific subvector of vector v auto sv = subvector( v, 16UL, 64UL ); // Traversing the elements via iterators to non-const elements for( auto it=sv.begin(); it!=sv.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=sv.cbegin(); it!=sv.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_subvectors_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse subvector can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // Non-initialized vector of size 256 auto sv = subvector( v, 10UL, 60UL ); // View on the range [10..69] of v // The subscript operator provides access to all possible elements of the sparse subvector, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse subvector, the element is inserted into the // subvector. sv[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the subvector it is inserted into the subvector, if it is already contained // in the subvector its value is modified. sv.set( 45UL, -1.2 ); // An alternative for inserting elements into the subvector is the insert() function. However, // it inserts the element only in case the element is not already contained in the subvector. sv.insert( 50UL, 3.7 ); // Just as in case of vectors, elements can also be inserted via the append() function. In // case of subvectors, append() also requires that the appended element's index is strictly // larger than the currently largest non-zero index of the subvector and that the subvector's // capacity is large enough to hold the new element. Note however that due to the nature of // a subvector, which may be an alias to the middle of a sparse vector, the append() function // does not work as efficiently for a subvector as it does for a vector. sv.reserve( 10UL ); sv.append( 51UL, -2.1 ); \endcode // \n \section views_subvectors_common_operations Common Operations // <hr> // // A subvector view can be used like any other dense or sparse vector. This means that with // only a few exceptions all \ref vector_operations and \ref arithmetic_operations can be used. // For instance, the current number of elements can be obtained via the \c size() function, the // current capacity via the \c capacity() function, and the number of non-zero elements via the // \c nonZeros() function. However, since subvectors are references to a specific range of a // vector, several operations are not possible, such as resizing and swapping. 
The following // example shows this by means of a dense subvector view: \code blaze::DynamicVector<int,blaze::rowVector> v( 42UL ); // ... Resizing and initialization // Creating a view on the range [5..15] of vector v auto sv = subvector( v, 5UL, 10UL ); sv.size(); // Returns the number of elements in the subvector sv.capacity(); // Returns the capacity of the subvector sv.nonZeros(); // Returns the number of non-zero elements contained in the subvector sv.resize( 84UL ); // Compilation error: Cannot resize a subvector of a vector auto sv2 = subvector( v, 15UL, 10UL ); swap( sv, sv2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_subvectors_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse subvectors can be used in all arithmetic operations that any other dense // or sparse vector can be used in. The following example gives an impression of the use of dense // subvectors within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse subvectors with // fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3; blaze::CompressedVector<double,blaze::rowVector> s1, s2; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> A; auto sv( subvector( d1, 0UL, 10UL ) ); // View on the range [0..9] of vector d1 sv = d2; // Dense vector initialization of the range [0..9] subvector( d1, 10UL, 10UL ) = s1; // Sparse vector initialization of the range [10..19] d3 = sv + d2; // Dense vector/dense vector addition s2 = s1 + subvector( d1, 10UL, 10UL ); // Sparse vector/dense vector addition d2 = sv * subvector( d1, 20UL, 10UL ); // Component-wise vector multiplication subvector( d1, 3UL, 4UL ) *= 2.0; // In-place scaling of the range [3..6] d2 = subvector( d1, 7UL, 3UL ) * 2.0; // Scaling of the range [7..9] d2 = 2.0 * subvector( d1, 7UL, 3UL ); // Scaling of the range [7..9] subvector( d1, 0UL , 10UL ) += d2; // Addition assignment subvector( d1, 10UL, 10UL ) -= s2; // Subtraction assignment subvector( d1, 20UL, 10UL ) *= sv; // Multiplication assignment double scalar = subvector( d1, 5UL, 10UL ) * trans( s1 ); // Scalar/dot/inner product between two vectors A = trans( s1 ) * subvector( d1, 4UL, 16UL ); // Outer product between two vectors \endcode // \n \section views_aligned_subvectors Aligned Subvectors // <hr> // // Usually subvectors can be defined anywhere within a vector. They may start at any position and // may have an arbitrary size (only restricted by the size of the underlying vector). However, in // contrast to vectors themselves, which are always properly aligned in memory and therefore can // provide maximum performance, this means that subvectors in general have to be considered to be // unaligned. This can be made explicit by the \c blaze::unaligned flag: \code using blaze::unaligned; blaze::DynamicVector<double,blaze::rowVector> x; // ... Resizing and initialization // Identical creations of an unaligned subvector in the range [8..23] auto sv1 = subvector ( x, 8UL, 16UL ); auto sv2 = subvector<unaligned>( x, 8UL, 16UL ); auto sv3 = subvector<8UL,16UL> ( x ); auto sv4 = subvector<unaligned,8UL,16UL>( x ); \endcode // All of these calls to the \c subvector() function are identical. Whether the alignment flag is // explicitly specified or not, it always returns an unaligned subvector. 
Whereas this may provide // full flexibility in the creation of subvectors, this might result in performance disadvantages // in comparison to vector primitives (even in case the specified subvector could be aligned). // Whereas vector primitives are guaranteed to be properly aligned and therefore provide maximum // performance in all operations, a general view on a vector might not be properly aligned. This // may cause a performance penalty on some platforms and/or for some operations. // // However, it is also possible to create aligned subvectors. Aligned subvectors are identical to // unaligned subvectors in all aspects, except that they may pose additional alignment restrictions // and therefore have less flexibility during creation, but don't suffer from performance penalties // and provide the same performance as the underlying vector. Aligned subvectors are created by // explicitly specifying the \c blaze::aligned flag: \code using blaze::aligned; // Creating an aligned subvector in the range [8..23] auto sv1 = subvector<aligned>( x, 8UL, 16UL ); auto sv2 = subvector<aligned,8UL,16UL>( x ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the // first element of the subvector must be aligned. The following source code gives some examples // for a double precision dynamic vector, assuming that AVX is available, which packs 4 \c double // values into a SIMD vector: \code using blaze::aligned; blaze::DynamicVector<double,blaze::columnVector> d( 17UL ); // ... Resizing and initialization // OK: Starts at the beginning, i.e. the first element is aligned auto dsv1 = subvector<aligned>( d, 0UL, 13UL ); // OK: Start index is a multiple of 4, i.e. the first element is aligned auto dsv2 = subvector<aligned>( d, 4UL, 7UL ); // OK: The start index is a multiple of 4 and the subvector includes the last element auto dsv3 = subvector<aligned>( d, 8UL, 9UL ); // Error: Start index is not a multiple of 4, i.e. the first element is not aligned auto dsv4 = subvector<aligned>( d, 5UL, 8UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense subvectors. // In contrast, aligned sparse subvectors at this time don't pose any additional restrictions. // Therefore aligned and unaligned sparse subvectors are truly fully identical. Still, in case // the \c blaze::aligned flag is specified during setup, an aligned subvector is created: \code using blaze::aligned; blaze::CompressedVector<double,blaze::rowVector> x; // ... Resizing and initialization // Creating an aligned subvector in the range [8..23] auto sv1 = subvector<aligned>( x, 8UL, 16UL ); auto sv2 = subvector<aligned,8UL,16UL>( x ); \endcode // \n Previous: \ref views &nbsp; &nbsp; Next: \ref views_element_selections */ //************************************************************************************************* //**Element Selections***************************************************************************** /*!\page views_element_selections Element Selections // // \tableofcontents // // // Element selections provide views on arbitrary compositions of elements of dense and sparse // vectors. These views act as a reference to the selected elements and represent them as another // dense or sparse vector. 
This reference is valid and can be used in every way any other dense // or sparse vector can be used as long as the vector containing the elements is not resized or // entirely destroyed. The element selection also acts as an alias to the vector elements in the // specified range: Changes made to the elements (e.g. modifying values, inserting or erasing // elements) are immediately visible in the vector and changes made via the vector are immediately // visible in the elements. // // // \n \section views_element_selections_setup Setup of Element Selections // // An element selection can be created very conveniently via the \c elements() function. It can // be included via the header file \code #include <blaze/math/Elements.h> \endcode // The indices of the elements to be selected can be specified either at compile time or at runtime // (by means of an initializer list, array or vector): \code blaze::DynamicVector<double,blaze::rowVector> x; // ... Resizing and initialization // Selecting the elements 4, 6, 8, and 10 (compile time arguments) auto e1 = elements<4UL,6UL,8UL,10UL>( x ); // Selecting the elements 3, 2, and 1 (runtime arguments via an initializer list) const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL }; auto e2 = elements( x, { 3UL, 2UL, 1UL } ); auto e3 = elements( x, list ); // Selecting the elements 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array) const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL }; auto e4 = elements( x, array ); auto e5 = elements( x, array.data(), array.size() ); // Selecting the element 4 five times (runtime arguments via a std::vector) const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL }; auto e6 = elements( x, vector ); auto e7 = elements( x, vector.data(), vector.size() ); \endcode // Note that it is possible to alias the elements of the underlying vector in any order. Also note // that it is possible to use the same index multiple times. // // Alternatively it is possible to pass a callable such as a lambda or functor that produces the // indices: \code blaze::DynamicVector<double,blaze::rowVector> x{ 0, 1, 2, 3, 4, 5, 6, 7, 8 }; // Selecting all even elements of the vector, i.e. selecting (0,2,4,6,8) auto e1 = elements( x, []( size_t i ){ return i*2UL; }, 5UL ); // Selecting all odd elements of the vector, i.e. selecting (1,3,5,7) auto e2 = elements( x, []( size_t i ){ return i*2UL+1UL; }, 4UL ); // Reversing the elements of the vector, i.e. selecting (8,7,6,5,4,3,2,1,0) auto e3 = elements( x, [max=x.size()-1UL]( size_t i ){ return max-i; }, 9UL ); \endcode // The \c elements() function returns an expression representing the view on the selected elements. // The type of this expression depends on the given arguments, primarily the type of the vector and // the compile time arguments. If the type is required, it can be determined via the \c decltype // specifier: \code using VectorType = blaze::DynamicVector<int>; using ElementsType = decltype( blaze::elements<4UL,12UL>( std::declval<VectorType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse vector, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. An element selection // created from a row vector can be used as any other row vector, an element selection created // from a column vector can be used as any other column vector.
The view can also be used on both // sides of an assignment: It can either be used as an alias to grant write access to specific // elements of a vector primitive on the left-hand side of an assignment or to grant read-access // to specific elements of a vector primitive or expression on the right-hand side of an assignment. // The following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Selecting the elements 1, 3, 5, and 7 auto e = elements( x, { 1UL, 3UL, 5UL, 7UL } ); // Setting the elements 1, 3, 5, and 7 of x to the 2nd row of matrix A e = row( A, 2UL ); // Setting the elements 2, 4, 6, and 8 of x to y elements( x, { 2UL, 4UL, 6UL, 8UL } ) = y; // Setting the 3rd row of A to the elements 5, 4, 3, and 2 of x row( A, 3UL ) = elements( x, { 5UL, 4UL, 3UL, 2UL } ); // Rotating the result of the addition between y and the 1st row of A x = elements( y + row( A, 1UL ), { 2UL, 3UL, 0UL, 1UL } ) \endcode // Please note that using an element selection, which refers to an index multiple times, on the // left-hand side of an assignment leads to undefined behavior: \code blaze::DynamicVector<int,blaze::rowVector> a{ 1, 2, 3 }; blaze::DynamicVector<int,blaze::rowVector> b{ 1, 2, 3, 4 }; auto e = elements( a, { 1, 1, 1, 1 } ); // Selecting the element 1 four times e = b; // Undefined behavior \endcode // In this example both vectors have the same size, which results in a correct vector assignment, // but the final value of the element at index 1 is unspecified. // // \warning It is the programmer's responsibility to ensure the element selection does not outlive // the viewed vector: \code // Creating an element selection on a temporary vector; results in a dangling reference! auto e = elements<1UL,3UL>( DynamicVector<int>{ 1, 2, 3, 4, 5 } ); \endcode // \n \section views_element_selections_element_access Element Access // // The elements of an element selection can be directly accessed via the subscript operator: \code blaze::DynamicVector<double,blaze::rowVector> v; // ... Resizing and initialization // Selecting the elements 2, 4, 6, and 8 auto e = elements( v, { 2UL, 4UL, 6UL, 8UL } ); // Setting the 1st element of the element selection, which corresponds to // the element at index 4 in vector v e[1] = 2.0; \endcode // The numbering of the selected elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of selected elements. Alternatively, the elements of an element selection // can be traversed via iterators. Just as with vectors, in case of non-const element selections, // \c begin() and \c end() return an iterator, which allows to manipulate the elements, in case of // constant element selections an iterator to immutable elements is returned: \code blaze::DynamicVector<int,blaze::rowVector> v( 256UL ); // ... Resizing and initialization // Creating an element selection including specific elements of dense vector v auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } ); // Traversing the elements via iterators to non-const elements for( auto it=e.begin(); it!=e.end(); ++it ) { *it = ...; // OK: Write access to the dense vector value. ... = *it; // OK: Read access to the dense vector value. 
} // Traversing the elements via iterators to const elements for( auto it=e.cbegin(); it!=e.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense vector value. } \endcode \code blaze::CompressedVector<int,blaze::rowVector> v( 256UL ); // ... Resizing and initialization // Creating an element selection including specific elements of sparse vector v auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } ); // Traversing the elements via iterators to non-const elements for( auto it=e.begin(); it!=e.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=e.cbegin(); it!=e.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_element_selections_element_insertion Element Insertion // // Inserting/accessing elements in a sparse element selection can be done by several alternative // functions. The following example demonstrates all options: \code blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // Non-initialized vector of size 256 std::vector<size_t> indices; // ... Selecting indices of the sparse vector auto e = elements( v, indices ); // The subscript operator provides access to the selected elements of the sparse vector, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse vector, the element is inserted. e[42] = 2.0; // The second operation for inserting elements via the element selection is the set() function. // In case the element is not contained in the vector it is inserted into the vector, if it is // already contained in the vector its value is modified. e.set( 45UL, -1.2 ); // An alternative for inserting elements into the vector is the insert() function. However, it // inserts the element only in case the element is not already contained in the vector. e.insert( 50UL, 3.7 ); // Just as in case of vectors, elements can also be inserted via the append() function. In case // of element selections, append() also requires that the appended element's index is strictly // larger than the currently largest non-zero index of the selection and that the selections's // capacity is large enough to hold the new element. Note however that due to the nature of an // element selection, which is an alias to arbitrary elements of a sparse vector, the append() // function does not work as efficiently for an element selection as it does for a vector. e.reserve( 10UL ); e.append( 51UL, -2.1 ); \endcode // \n \section views_element_selections_common_operations Common Operations // // An element selection can be used like any other dense or sparse vector. 
For instance, the // number of selected elements can be obtained via the \c size() function, the current capacity // via the \c capacity() function, and the number of non-zero elements via the \c nonZeros() // function. However, since element selections are references to a specific range of a vector, // several operations are not possible, such as resizing and swapping. The following example // shows this by means of an element selection on a dense vector: \code blaze::DynamicVector<int,blaze::rowVector> v( 42UL ); // ... Resizing and initialization // Selecting the elements 5 and 10 auto e = elements( v, { 5UL, 10UL } ); e.size(); // Returns the number of elements in the element selection e.capacity(); // Returns the capacity of the element selection e.nonZeros(); // Returns the number of non-zero elements contained in the element selection e.resize( 84UL ); // Compilation error: Cannot resize an element selection auto e2 = elements( v, { 15UL, 10UL } ); swap( e, e2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_element_selections_arithmetic_operations Arithmetic Operations // // Both dense and sparse element selections can be used in all arithmetic operations that any other // dense or sparse vector can be used in. The following example gives an impression of the use of // dense element selections within arithmetic operations. All operations (addition, subtraction, // multiplication, scaling, ...) can be performed on all possible combinations of dense and sparse // element selections with fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3; blaze::CompressedVector<double,blaze::rowVector> s1, s2; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> A; std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL }; std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL }; std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL }; auto e( elements( d1, indices1 ) ); // Selecting the every third element of d1 in the range [0..21] e = d2; // Dense vector assignment to the selected elements elements( d1, indices2 ) = s1; // Sparse vector assignment to the selected elements d3 = e + d2; // Dense vector/dense vector addition s2 = s1 + elements( d1, indices2 ); // Sparse vector/dense vector addition d2 = e * elements( d1, indices3 ); // Component-wise vector multiplication elements( d1, indices2 ) *= 2.0; // In-place scaling of the second selection of elements d2 = elements( d1, indices3 ) * 2.0; // Scaling of the elements in the third selection of elements d2 = 2.0 * elements( d1, indices3 ); // Scaling of the elements in the third selection of elements elements( d1, indices1 ) += d2; // Addition assignment elements( d1, indices2 ) -= s2; // Subtraction assignment elements( d1, indices3 ) *= e; // Multiplication assignment double scalar = elements( d1, indices2 ) * trans( s1 ); // Scalar/dot/inner product between two vectors A = trans( s1 ) * elements( d1, { 3UL, 6UL } ); // Outer product between two vectors \endcode // \n Previous: \ref views_subvectors &nbsp; &nbsp; Next: \ref views_submatrices */ //************************************************************************************************* //**Submatrices************************************************************************************ /*!\page views_submatrices Submatrices // // \tableofcontents // // // Submatrices provide views on a specific part of a dense or 
sparse matrix just as subvectors // provide views on specific parts of vectors. As such, submatrices act as a reference to a // specific block within a matrix. This reference is valid and can be used in every way any // other dense or sparse matrix can be used as long as the matrix containing the submatrix is // not resized or entirely destroyed. The submatrix also acts as an alias to the matrix elements // in the specified block: Changes made to the elements (e.g. modifying values, inserting or // erasing elements) are immediately visible in the matrix and changes made via the matrix are // immediately visible in the submatrix. // // // \n \section views_submatrices_setup Setup of Submatrices // <hr> // // A view on a dense or sparse submatrix can be created very conveniently via the \c submatrix() // function. It can be included via the header file \code #include <blaze/math/Submatrix.h> \endcode // The first and second parameters specify the row and column of the first element of the submatrix. // The third and fourth parameters specify the number of rows and columns, respectively. The four // parameters can be specified either at compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a dense submatrix of size 4x8, starting in row 3 and column 0 (compile time arguments) auto sm1 = submatrix<3UL,0UL,4UL,8UL>( A ); // Creating a dense submatrix of size 8x16, starting in row 0 and column 4 (runtime arguments) auto sm2 = submatrix( A, 0UL, 4UL, 8UL, 16UL ); \endcode // The \c submatrix() function returns an expression representing the submatrix view. The type of // this expression depends on the given submatrix arguments, primarily the type of the matrix and // the compile time arguments. If the type is required, it can be determined via the \c decltype // specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using SubmatrixType = decltype( blaze::submatrix<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. A submatrix created from // a row-major matrix will itself be a row-major matrix, a submatrix created from a column-major // matrix will be a column-major matrix. The view can also be used on both sides of an assignment: // The submatrix can either be used as an alias to grant write access to a specific submatrix // of a matrix primitive on the left-hand side of an assignment or to grant read-access to // a specific submatrix of a matrix primitive or expression on the right-hand side of an // assignment. The following example demonstrates this in detail: \code blaze::DynamicMatrix<double,blaze::columnMajor> A, B; blaze::CompressedMatrix<double,blaze::rowMajor> C; // ...
Resizing and initialization // Creating a dense submatrix of size 8x4, starting in row 0 and column 2 auto sm = submatrix( A, 0UL, 2UL, 8UL, 4UL ); // Setting the submatrix of A to an 8x4 submatrix of B sm = submatrix( B, 0UL, 0UL, 8UL, 4UL ); // Copying the sparse matrix C into another 8x4 submatrix of A submatrix( A, 8UL, 2UL, 8UL, 4UL ) = C; // Assigning part of the result of a matrix addition to the first submatrix sm = submatrix( B + C, 0UL, 0UL, 8UL, 4UL ); \endcode // \warning It is the programmer's responsibility to ensure the submatrix does not outlive the // viewed matrix: \code // Creating a submatrix on a temporary matrix; results in a dangling reference! auto sm = submatrix<1UL,0UL,2UL,3UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_submatrices_element_access Element Access // <hr> // // The elements of a submatrix can be directly accessed with the function call operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating an 8x8 submatrix, starting from position (4,4) auto sm = submatrix( A, 4UL, 4UL, 8UL, 8UL ); // Setting the element (0,0) of the submatrix, which corresponds to // the element at position (4,4) in matrix A sm(0,0) = 2.0; \endcode // Alternatively, the elements of a submatrix can be traversed via (const) iterators. Just as // with matrices, in case of non-const submatrices, \c begin() and \c end() return an iterator, // which allows to manipulate the elements, in case of constant submatrices an iterator to // immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a specific submatrix of matrix A auto sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=sm.begin(0); it!=sm.end(0); ++it ) { *it = ...; // OK: Write access to the dense submatrix value. ... = *it; // OK: Read access to the dense submatrix value. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense submatrix value. } \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a specific submatrix of matrix A auto sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=sm.begin(0); it!=sm.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element.
} \endcode // \n \section views_submatrices_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse submatrix can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL ); // Non-initialized matrix of size 256x512 auto sm = submatrix( A, 10UL, 10UL, 16UL, 16UL ); // View on a 16x16 submatrix of A // The function call operator provides access to all possible elements of the sparse submatrix, // including the zero elements. In case the function call operator is used to access an element // that is currently not stored in the sparse submatrix, the element is inserted into the // submatrix. sm(2,4) = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the submatrix it is inserted into the submatrix, if it is already contained // in the submatrix its value is modified. sm.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the submatrix is the insert() function. However, // it inserts the element only in case the element is not already contained in the submatrix. sm.insert( 2UL, 6UL, 3.7 ); // Just as in the case of sparse matrices, elements can also be inserted via the append() // function. In case of submatrices, append() also requires that the appended element's // index is strictly larger than the currently largest non-zero index in the according row // or column of the submatrix and that the according row's or column's capacity is large // enough to hold the new element. Note however that due to the nature of a submatrix, which // may be an alias to the middle of a sparse matrix, the append() function does not work as // efficiently for a submatrix as it does for a matrix. sm.reserve( 2UL, 10UL ); sm.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_submatrices_common_operations Common Operations // <hr> // // A submatrix view can be used like any other dense or sparse matrix. This means that with only // a few exceptions all \ref matrix_operations and \ref arithmetic_operations can be used. For // instance, the current size of the matrix, i.e. the number of rows or columns can be obtained // via the \c rows() and \c columns() functions, the current total capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. However, since // submatrices are views on a specific submatrix of a matrix, several operations are not possible, // such as resizing and swapping: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... Resizing and initialization // Creating a view on the a 8x12 submatrix of matrix A auto sm = submatrix( A, 0UL, 0UL, 8UL, 12UL ); sm.rows(); // Returns the number of rows of the submatrix sm.columns(); // Returns the number of columns of the submatrix sm.capacity(); // Returns the capacity of the submatrix sm.nonZeros(); // Returns the number of non-zero elements contained in the submatrix sm.resize( 10UL, 8UL ); // Compilation error: Cannot resize a submatrix of a matrix auto sm2 = submatrix( A, 8UL, 0UL, 12UL, 8UL ); swap( sm, sm2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_submatrices_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse submatrices can be used in all arithmetic operations that any other dense // or sparse matrix can be used in. 
The following example gives an impression of the use of dense // submatrices within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse matrices with // fitting element types: \code blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3; blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2; blaze::CompressedVector<double,blaze::columnVector> a, b; // ... Resizing and initialization auto sm = submatrix( D1, 0UL, 0UL, 8UL, 8UL ); // View on the 8x8 submatrix of matrix D1 // starting from row 0 and column 0 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) = D2; // Dense matrix initialization of the 8x8 submatrix // starting in row 0 and column 8 sm = S1; // Sparse matrix initialization of the second 8x8 submatrix D3 = sm + D2; // Dense matrix/dense matrix addition S2 = S1 - submatrix( D1, 8UL, 0UL, 8UL, 8UL ); // Sparse matrix/dense matrix subtraction D2 = sm * submatrix( D1, 8UL, 8UL, 8UL, 8UL ); // Dense matrix/dense matrix multiplication submatrix( D1, 8UL, 0UL, 8UL, 8UL ) *= 2.0; // In-place scaling of a submatrix of D1 D2 = submatrix( D1, 8UL, 8UL, 8UL, 8UL ) * 2.0; // Scaling of a submatrix of D1 D2 = 2.0 * sm; // Scaling of a submatrix of D1 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) += D2; // Addition assignment submatrix( D1, 8UL, 0UL, 8UL, 8UL ) -= S1; // Subtraction assignment submatrix( D1, 8UL, 8UL, 8UL, 8UL ) *= sm; // Multiplication assignment a = submatrix( D1, 4UL, 4UL, 8UL, 8UL ) * b; // Dense matrix/sparse vector multiplication \endcode // \n \section views_aligned_submatrices Aligned Submatrices // <hr> // // Usually submatrices can be defined anywhere within a matrix. They may start at any position and // may have an arbitrary extension (only restricted by the extension of the underlying matrix). // However, in contrast to matrices themselves, which are always properly aligned in memory and // therefore can provide maximum performance, this means that submatrices in general have to be // considered to be unaligned. This can be made explicit by the \c blaze::unaligned flag: \code using blaze::unaligned; blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Identical creations of an unaligned submatrix of size 8x8, starting in row 0 and column 0 auto sm1 = submatrix ( A, 0UL, 0UL, 8UL, 8UL ); auto sm2 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL ); auto sm3 = submatrix<0UL,0UL,8UL,8UL> ( A ); auto sm4 = submatrix<unaligned,0UL,0UL,8UL,8UL>( A ); \endcode // All of these calls to the \c submatrix() function are identical. Whether the alignment flag is // explicitly specified or not, it always returns an unaligned submatrix. Whereas this may provide // full flexibility in the creation of submatrices, this might result in performance disadvantages // in comparison to matrix primitives (even in case the specified submatrix could be aligned). // Whereas matrix primitives are guaranteed to be properly aligned and therefore provide maximum // performance in all operations, a general view on a matrix might not be properly aligned. This // may cause a performance penalty on some platforms and/or for some operations. // // However, it is also possible to create aligned submatrices.
Aligned submatrices are identical to // unaligned submatrices in all aspects, except that they may pose additional alignment restrictions // and therefore have less flexibility during creation, but don't suffer from performance penalties // and provide the same performance as the underlying matrix. Aligned submatrices are created by // explicitly specifying the \c blaze::aligned flag: \code using blaze::aligned; // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 auto sv1 = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); auto sv2 = submatrix<aligned,0UL,0UL,8UL,8UL>( A ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the // first element of each row/column of the submatrix must be aligned. The following source code // gives some examples for a double precision row-major dynamic matrix, assuming that padding is // enabled and that AVX is available, which packs 4 \c double values into a SIMD vector: \code using blaze::aligned; blaze::DynamicMatrix<double,blaze::rowMajor> D( 13UL, 17UL ); // ... Resizing and initialization // OK: Starts at position (0,0), i.e. the first element of each row is aligned (due to padding) auto dsm1 = submatrix<aligned>( D, 0UL, 0UL, 7UL, 11UL ); // OK: First column is a multiple of 4, i.e. the first element of each row is aligned (due to padding) auto dsm2 = submatrix<aligned>( D, 3UL, 12UL, 8UL, 16UL ); // OK: First column is a multiple of 4 and the submatrix includes the last row and column auto dsm3 = submatrix<aligned>( D, 4UL, 0UL, 9UL, 17UL ); // Error: First column is not a multiple of 4, i.e. the first element is not aligned auto dsm4 = submatrix<aligned>( D, 2UL, 3UL, 12UL, 12UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense submatrices. // In contrast, aligned sparse submatrices at this time don't pose any additional restrictions. // Therefore aligned and unaligned sparse submatrices are truly fully identical. Still, in case // the \c blaze::aligned flag is specified during setup, an aligned submatrix is created: \code using blaze::aligned; blaze::CompressedMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 auto sv = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // \n \section views_submatrices_on_symmetric_matrices Submatrices on Symmetric Matrices // // Submatrices can also be created on symmetric matrices (see the \c SymmetricMatrix class template): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of a 16x16 symmetric matrix SymmetricMatrix< DynamicMatrix<int> > A( 16UL ); // Creating a dense submatrix of size 8x12, starting in row 2 and column 4 auto sm = submatrix( A, 2UL, 4UL, 8UL, 12UL ); \endcode // It is important to note, however, that (compound) assignments to such submatrices have a // special restriction: The symmetry of the underlying symmetric matrix must not be broken! // Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. 
Otherwise a \a std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n Previous: \ref views_element_selections &nbsp; &nbsp; Next: \ref views_rows */ //************************************************************************************************* //**Rows******************************************************************************************* /*!\page views_rows Rows // // \tableofcontents // // // Rows provide views on a specific row of a dense or sparse matrix. As such, rows act as a // reference to a specific row. This reference is valid and can be used in every way any other // row vector can be used as long as the matrix containing the row is not resized or entirely // destroyed. The row also acts as an alias to the row elements: Changes made to the elements // (e.g. modifying values, inserting or erasing elements) are immediately visible in the matrix // and changes made via the matrix are immediately visible in the row. // // // \n \section views_rows_setup Setup of Rows // <hr> // // \image html row.png // \image latex row.eps "Row view" width=250pt // // A reference to a dense or sparse row can be created very conveniently via the \c row() function. // It can be included via the header file \code #include <blaze/math/Row.h> \endcode // The row index must be in the range from \f$[0..M-1]\f$, where \c M is the total number of rows // of the matrix, and can be specified both at compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a reference to the 1st row of matrix A (compile time index) auto row1 = row<1UL>( A ); // Creating a reference to the 2nd row of matrix A (runtime index) auto row2 = row( A, 2UL ); \endcode // The \c row() function returns an expression representing the row view. The type of this // expression depends on the given row arguments, primarily the type of the matrix and the compile // time arguments. If the type is required, it can be determined via the \c decltype specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using RowType = decltype( blaze::row<1UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other row vector, i.e. it can be assigned to, it can // be copied from, and it can be used in arithmetic operations. The reference can also be used on // both sides of an assignment: The row can either be used as an alias to grant write access to a // specific row of a matrix primitive on the left-hand side of an assignment or to grant read-access // to a specific row of a matrix primitive or expression on the right-hand side of an assignment. 
// The following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A, B; blaze::CompressedMatrix<double,blaze::rowMajor> C, D; // ... Resizing and initialization // Setting the 2nd row of matrix A to x auto row2 = row( A, 2UL ); row2 = x; // Setting the 3rd row of matrix B to y row( B, 3UL ) = y; // Setting x to the 4th row of the result of the matrix multiplication x = row( A * B, 4UL ); // Setting y to the 2nd row of the result of the sparse matrix multiplication y = row( C * D, 2UL ); \endcode // \warning It is the programmer's responsibility to ensure the row does not outlive the viewed // matrix: \code // Creating a row on a temporary matrix; results in a dangling reference! auto row1 = row<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_rows_element_access Element Access // <hr> // // The elements of a row can be directly accessed with the subscript operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a view on the 4th row of matrix A auto row4 = row( A, 4UL ); // Setting the 1st element of the dense row, which corresponds // to the 1st element in the 4th row of matrix A row4[1] = 2.0; \endcode // The numbering of the row elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of columns of the referenced matrix. Alternatively, the elements of a // row can be traversed via iterators. Just as with vectors, in case of non-const rows, \c begin() // and \c end() return an iterator, which allows to manipulate the elements, in case of constant // rows an iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st row of matrix A auto row31 = row( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=row31.begin(); it!=row31.end(); ++it ) { *it = ...; // OK; Write access to the dense row value ... = *it; // OK: Read access to the dense row value. } // Traversing the elements via iterators to const elements for( auto it=row31.cbegin(); it!=row31.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense row value. } \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st row of matrix A auto row31 = row( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=row31.begin(); it!=row31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=row31.cbegin(); it!=row31.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... 
= it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_rows_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse row can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // Non-initialized 10x100 matrix auto row0( row( A, 0UL ) ); // Reference to the 0th row of A // The subscript operator provides access to all possible elements of the sparse row, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse row, the element is inserted into the row. row0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the row it is inserted into the row, if it is already contained in // the row its value is modified. row0.set( 45UL, -1.2 ); // An alternative for inserting elements into the row is the insert() function. However, // it inserts the element only in case the element is not already contained in the row. row0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse row is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the row and that the row's capacity is large // enough to hold the new element. row0.reserve( 10UL ); row0.append( 51UL, -2.1 ); \endcode // \n \section views_rows_common_operations Common Operations // <hr> // // A row view can be used like any other row vector. This means that with only a few exceptions // all \ref vector_operations and \ref arithmetic_operations can be used. For instance, the // current number of elements can be obtained via the \c size() function, the current capacity // via the \c capacity() function, and the number of non-zero elements via the \c nonZeros() // function. However, since rows are references to specific rows of a matrix, several operations // are not possible on views, such as resizing and swapping. The following example shows this by // means of a dense row view: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... Resizing and initialization // Creating a reference to the 2nd row of matrix A auto row2 = row( A, 2UL ); row2.size(); // Returns the number of elements in the row row2.capacity(); // Returns the capacity of the row row2.nonZeros(); // Returns the number of non-zero elements contained in the row row2.resize( 84UL ); // Compilation error: Cannot resize a single row of a matrix auto row3 = row( A, 3UL ); swap( row2, row3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_rows_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse rows can be used in all arithmetic operations that any other dense or // sparse row vector can be used in. The following example gives an impression of the use of // dense rows within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) 
can be performed on all possible combinations of dense and sparse rows with // fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::rowVector> c( 2UL ); c[1] = 3.0; blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL ); // Non-initialized 4x2 matrix auto row0( row( A, 0UL ) ); // Reference to the 0th row of A row0[0] = 0.0; // Manual initialization of the 0th row of A row0[1] = 0.0; row( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st row of A row( A, 2UL ) = a; // Dense vector initialization of the 2nd row of A row( A, 3UL ) = c; // Sparse vector initialization of the 3rd row of A b = row0 + a; // Dense vector/dense vector addition b = c + row( A, 1UL ); // Sparse vector/dense vector addition b = row0 * row( A, 2UL ); // Component-wise vector multiplication row( A, 1UL ) *= 2.0; // In-place scaling of the 1st row b = row( A, 1UL ) * 2.0; // Scaling of the 1st row b = 2.0 * row( A, 1UL ); // Scaling of the 1st row row( A, 2UL ) += a; // Addition assignment row( A, 2UL ) -= c; // Subtraction assignment row( A, 2UL ) *= row( A, 0UL ); // Multiplication assignment double scalar = row( A, 1UL ) * trans( c ); // Scalar/dot/inner product between two vectors A = trans( c ) * row( A, 1UL ); // Outer product between two vectors \endcode // \n \section views_rows_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that row views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. For // instance: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 1st row of a column-major matrix A auto row1 = row( A, 1UL ); for( auto it=row1.begin(); it!=row1.end(); ++it ) { // ... } \endcode // However, please note that creating a row view on a matrix stored in a column-major fashion // can result in a considerable performance decrease in comparison to a row view on a matrix // with row-major storage format. This is due to the non-contiguous storage of the matrix // elements. Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two column-major matrices blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL ); blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th row of the multiplication between A and B ... blaze::DynamicVector<double,blaze::rowVector> x = row( A * B, 15UL ); // ... is essentially the same as the following computation, which multiplies // the 15th row of the column-major matrix A with B. blaze::DynamicVector<double,blaze::rowVector> x = row( A, 15UL ) * B; \endcode // Although \b Blaze performs the resulting vector/matrix multiplication as efficiently as possible, // using a row-major storage order for matrix \c A would result in a more efficient evaluation.
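// A minimal sketch of this more favorable setup (reusing the 128x128 dimensions from the example
// above, which are otherwise arbitrary): declaring \c A with row-major storage lets the row view
// refer to a contiguous range of memory:

   \code
   // Setup with a row-major matrix A
   blaze::DynamicMatrix<double,blaze::rowMajor>    A( 128UL, 128UL );
   blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL );
   // ... Resizing and initialization

   // The 15th row of the row-major matrix A is stored contiguously, which allows
   // the row view to be traversed efficiently in the multiplication.
   blaze::DynamicVector<double,blaze::rowVector> x = row( A, 15UL ) * B;
   \endcode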
// // \n Previous: \ref views_submatrices &nbsp; &nbsp; Next: \ref views_row_selections */ //************************************************************************************************* //**Row Selections********************************************************************************* /*!\page views_row_selections Row Selections // // \tableofcontents // // // Row selections provide views on arbitrary compositions of rows of dense and sparse matrices. // These views act as a reference to the selected rows and represent them as another dense or // sparse matrix. This reference is valid and can be used in every way any other dense or sparse // matrix can be used as long as the matrix containing the rows is not resized or entirely // destroyed. The row selection also acts as an alias to the matrix elements in the specified // range: Changes made to the rows (e.g. modifying values, inserting or erasing elements) are // immediately visible in the matrix and changes made via the matrix are immediately visible // in the rows. // // // \n \section views_row_selections_setup Setup of Row Selections // // A row selection can be created very conveniently via the \c rows() function. It can be included // via the header file \code #include <blaze/math/Rows.h> \endcode // The indices of the rows to be selected can be specified either at compile time or at runtime // (by means of an initializer list, array or vector): \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Selecting the rows 4, 6, 8, and 10 (compile time arguments) auto rs1 = rows<4UL,6UL,8UL,10UL>( A ); // Selecting the rows 3, 2, and 1 (runtime arguments via an initializer list) const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL }; auto rs2 = rows( A, { 3UL, 2UL, 1UL } ); auto rs3 = rows( A, list ); // Selecting the rows 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array) const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL }; auto rs4 = rows( A, array ); auto rs5 = rows( A, array.data(), array.size() ); // Selecting the row 4 five times (runtime arguments via a std::vector) const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL }; auto rs6 = rows( A, vector ); auto rs7 = rows( A, vector.data(), vector.size() ); \endcode // Note that it is possible to alias the rows of the underlying matrix in any order. Also note // that it is possible to use the same index multiple times. // // Alternatively it is possible to pass a callable such as a lambda or functor that produces the // indices: \code blaze::DynamicMatrix<double,blaze::rowMajor> A( 9UL, 18UL ); // Selecting all even rows of the matrix, i.e. selecting the rows 0, 2, 4, 6, and 8 auto rs1 = rows( A, []( size_t i ){ return i*2UL; }, 5UL ); // Selecting all odd rows of the matrix, i.e. selecting the rows 1, 3, 5, and 7 auto rs2 = rows( A, []( size_t i ){ return i*2UL+1UL; }, 4UL ); // Reversing the rows of the matrix, i.e. selecting the rows 8, 7, 6, 5, 4, 3, 2, 1, and 0 auto rs3 = rows( A, [max=A.rows()-1UL]( size_t i ){ return max-i; }, 9UL ); \endcode // The \c rows() function returns an expression representing the view on the selected rows. The // type of this expression depends on the given arguments, primarily the type of the matrix and // the compile time arguments.
If the type is required, it can be determined via the \c decltype // specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using RowsType = decltype( blaze::rows<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. Note, however, that a // row selection will always be treated as a row-major matrix, regardless of the storage order of // the matrix containing the rows. The view can also be used on both sides of an assignment: It // can either be used as an alias to grant write access to specific rows of a matrix primitive // on the left-hand side of an assignment or to grant read-access to specific rows of a matrix // primitive or expression on the right-hand side of an assignment. The following example // demonstrates this in detail: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; blaze::DynamicMatrix<double,blaze::columnMajor> B; blaze::CompressedMatrix<double,blaze::rowMajor> C; // ... Resizing and initialization // Selecting the rows 1, 3, 5, and 7 of A auto rs = rows( A, { 1UL, 3UL, 5UL, 7UL } ); // Setting rows 1, 3, 5, and 7 of A to row 4 of B rs = rows( B, { 4UL, 4UL, 4UL, 4UL } ); // Setting the rows 2, 4, 6, and 8 of A to C rows( A, { 2UL, 4UL, 6UL, 8UL } ) = C; // Setting the first 4 rows of A to the rows 5, 4, 3, and 2 of C submatrix( A, 0UL, 0UL, 4UL, A.columns() ) = rows( C, { 5UL, 4UL, 3UL, 2UL } ); // Rotating the result of the addition between rows 1, 3, 5, and 7 of A and C B = rows( rs + C, { 2UL, 3UL, 0UL, 1UL } ); \endcode // \warning It is the programmer's responsibility to ensure the row selection does not outlive the // viewed matrix: \code // Creating a row selection on a temporary matrix; results in a dangling reference! auto rs = rows<2UL,0UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_row_selections_element_access Element Access // // The elements of a row selection can be directly accessed via the function call operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a view on the first four rows of A in reverse order auto rs = rows( A, { 3UL, 2UL, 1UL, 0UL } ); // Setting the element (0,0) of the row selection, which corresponds // to the element at position (3,0) in matrix A rs(0,0) = 2.0; \endcode // Alternatively, the elements of a row selection can be traversed via (const) iterators. Just as // with matrices, in case of non-const row selection, \c begin() and \c end() return an iterator, // which allows to manipuate the elements, in case of constant row selection an iterator to // immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a selection of rows of matrix A auto rs = rows( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=rs.begin(0); it!=rs.end(0); ++it ) { *it = ...; // OK: Write access to the dense value. ... = *it; // OK: Read access to the dense value. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense value. 
} \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a selection of rows of matrix A auto rs = rows( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=rs.begin(0); it!=rs.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_row_selections_element_insertion Element Insertion // // Inserting/accessing elements in a sparse row selection can be done by several alternative // functions. The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL ); // Non-initialized matrix of size 256x512 auto rs = rows( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the rows 10, 20, 30, and 40 of A // The function call operator provides access to all possible elements of the sparse row // selection, including the zero elements. In case the function call operator is used to // access an element that is currently not stored in the sparse row selection, the element // is inserted into the row selection. rs(2,4) = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the row selection it is inserted into the row selection, if it is already // contained in the row selection its value is modified. rs.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the row selection is the insert() function. // However, it inserts the element only in case the element is not already contained in the // row selection. rs.insert( 2UL, 6UL, 3.7 ); // Just as in the case of sparse matrices, elements can also be inserted via the append() // function. In case of row selections, append() also requires that the appended element's // index is strictly larger than the currently largest non-zero index in the according row // of the row selection and that the according row's capacity is large enough to hold the new // element. Note however that due to the nature of a row selection, which may be an alias to // an arbitrary collection of rows, the append() function does not work as efficiently for // a row selection as it does for a matrix. rs.reserve( 2UL, 10UL ); rs.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_row_selections_common_operations Common Operations // // A view on specific rows of a matrix can be used like any other dense or sparse matrix. For // instance, the current size of the matrix, i.e. the number of rows or columns can be obtained // via the \c rows() and \c columns() functions, the current total capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. 
However, since // row selections are views on specific rows of a matrix, several operations are not possible, // such as resizing and swapping: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... Resizing and initialization // Creating a view on the rows 8, 16, 24, and 32 of matrix A auto rs = rows( A, { 8UL, 16UL, 24UL, 32UL } ); rs.rows(); // Returns the number of rows of the row selection rs.columns(); // Returns the number of columns of the row selection rs.capacity(); // Returns the capacity of the row selection rs.nonZeros(); // Returns the number of non-zero elements contained in the row selection rs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a row selection auto rs2 = rows( A, { 9UL, 17UL, 25UL, 33UL } ); swap( rs, rs2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_row_selections_arithmetic_operations Arithmetic Operations // // Both dense and sparse row selections can be used in all arithmetic operations that any other // dense or sparse matrix can be used in. The following example gives an impression of the use // of dense row selections within arithmetic operations. All operations (addition, subtraction, // multiplication, scaling, ...) can be performed on all possible combinations of dense and // sparse matrices with fitting element types: \code blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3; blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2; blaze::CompressedVector<double,blaze::columnVector> a, b; // ... Resizing and initialization std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL }; std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL }; std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL }; auto rs = rows( D1, indices1 ); // Selecting every third row of D1 in the range [0..21] rs = D2; // Dense matrix assignment to the selected rows rows( D1, indices2 ) = S1; // Sparse matrix assignment to the selected rows D3 = rs + D2; // Dense matrix/dense matrix addition S2 = S1 - rows( D1, indices2 ); // Sparse matrix/dense matrix subtraction D2 = rs % rows( D1, indices3 ); // Dense matrix/dense matrix Schur product D2 = rows( D1, indices2 ) * D1; // Dense matrix/dense matrix multiplication rows( D1, indices2 ) *= 2.0; // In-place scaling of the second selection of rows D2 = rows( D1, indices3 ) * 2.0; // Scaling of the elements in the third selection of rows D2 = 2.0 * rows( D1, indices3 ); // Scaling of the elements in the third selection of rows rows( D1, indices1 ) += D2; // Addition assignment rows( D1, indices2 ) -= S1; // Subtraction assignment rows( D1, indices3 ) %= rs; // Schur product assignment a = rows( D1, indices1 ) * b; // Dense matrix/sparse vector multiplication \endcode // \n \section views_row_selections_on_column_major_matrix Row Selections on Column-Major Matrices // // Especially noteworthy is that row selections can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. For // instance: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL ); // ...
Resizing and initialization // Creating a reference to the 1st and 3rd row of a column-major matrix A auto rs = rows( A, { 1UL, 3UL } ); // Traversing row 0 of the selection, which corresponds to the 1st row of matrix A for( auto it=rs.begin( 0UL ); it!=rs.end( 0UL ); ++it ) { // ... } \endcode // However, please note that creating a row selection on a matrix stored in a column-major fashion // can result in a considerable performance decrease in comparison to a row selection on a matrix // with row-major storage format. This is due to the non-contiguous storage of the matrix elements. // Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two column-major matrices blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL ); blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th, 30th, and 45th row of the multiplication between A and B ... blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A * B, { 15UL, 30UL, 45UL } ); // ... is essentially the same as the following computation, which multiplies // the 15th, 30th, and 45th row of the column-major matrix A with B. blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A, { 15UL, 30UL, 45UL } ) * B; \endcode // Although \b Blaze performs the resulting matrix/matrix multiplication as efficiently as possible // using a row-major storage order for matrix \c A would result in a more efficient evaluation. // // \n Previous: \ref views_rows &nbsp; &nbsp; Next: \ref views_columns */ //************************************************************************************************* //**Columns**************************************************************************************** /*!\page views_columns Columns // // \tableofcontents // // // Just as rows provide a view on a specific row of a matrix, columns provide views on a specific // column of a dense or sparse matrix. As such, columns act as a reference to a specific column. // This reference is valid an can be used in every way any other column vector can be used as long // as the matrix containing the column is not resized or entirely destroyed. Changes made to the // elements (e.g. modifying values, inserting or erasing elements) are immediately visible in the // matrix and changes made via the matrix are immediately visible in the column. // // // \n \section views_colums_setup Setup of Columns // <hr> // // \image html column.png // \image latex column.eps "Column view" width=250pt // // A reference to a dense or sparse column can be created very conveniently via the \c column() // function. It can be included via the header file \code #include <blaze/math/Column.h> \endcode // The column index must be in the range from \f$[0..N-1]\f$, where \c N is the total number of // columns of the matrix, and can be specified both at compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization // Creating a reference to the 1st column of matrix A (compile time index) auto col1 = column<1UL>( A ); // Creating a reference to the 2nd column of matrix A (runtime index) auto col2 = column( A, 2UL ); \endcode // The \c column() function returns an expression representing the column view. The type of this // expression depends on the given column arguments, primarily the type of the matrix and the // compile time arguments. 
If the type is required, it can be determined via the \c decltype // specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using ColumnType = decltype( blaze::column<1UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other column vector, i.e. it can be assigned to, it // can be copied from, and it can be used in arithmetic operations. The reference can also be used // on both sides of an assignment: The column can either be used as an alias to grant write access // to a specific column of a matrix primitive on the left-hand side of an assignment or to grant // read-access to a specific column of a matrix primitive or expression on the right-hand side // of an assignment. The following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::columnVector> x; blaze::CompressedVector<double,blaze::columnVector> y; blaze::DynamicMatrix<double,blaze::columnMajor> A, B; blaze::CompressedMatrix<double,blaze::columnMajor> C, D; // ... Resizing and initialization // Setting the 1st column of matrix A to x auto col1 = column( A, 1UL ); col1 = x; // Setting the 4th column of matrix B to y column( B, 4UL ) = y; // Setting x to the 2nd column of the result of the matrix multiplication x = column( A * B, 2UL ); // Setting y to the 2nd column of the result of the sparse matrix multiplication y = column( C * D, 2UL ); \endcode // \warning It is the programmer's responsibility to ensure the column does not outlive the // viewed matrix: \code // Creating a column on a temporary matrix; results in a dangling reference! auto col1 = column<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_columns_element_access Element Access // <hr> // // The elements of a column can be directly accessed with the subscript operator. \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization // Creating a view on the 4th column of matrix A auto col4 = column( A, 4UL ); // Setting the 1st element of the dense column, which corresponds // to the 1st element in the 4th column of matrix A col4[1] = 2.0; \endcode // The numbering of the column elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of rows of the referenced matrix. Alternatively, the elements of a column // can be traversed via iterators. Just as with vectors, in case of non-const columns, \c begin() // and \c end() return an iterator, which allows to manipulate the elements, in case of constant // columns an iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st column of matrix A auto col31 = column( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=col31.begin(); it!=col31.end(); ++it ) { *it = ...; // OK; Write access to the dense column value ... = *it; // OK: Read access to the dense column value. } // Traversing the elements via iterators to const elements for( auto it=col31.cbegin(); it!=col31.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense column value. } \endcode \code blaze::CompressedMatrix<int,blaze::columnMajor> A( 128UL, 256UL ); // ... 
Resizing and initialization // Creating a reference to the 31st column of matrix A auto col31 = column( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=col31.begin(); it!=col31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=col31.cbegin(); it!=col31.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_columns_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse column can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::columnMajor> A( 100UL, 10UL ); // Non-initialized 100x10 matrix auto col0( column( A, 0UL ) ); // Reference to the 0th column of A // The subscript operator provides access to all possible elements of the sparse column, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse column, the element is inserted into the column. col0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the column it is inserted into the column, if it is already contained // in the column its value is modified. col0.set( 45UL, -1.2 ); // An alternative for inserting elements into the column is the insert() function. However, // it inserts the element only in case the element is not already contained in the column. col0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse column is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the column and that the column's capacity is // large enough to hold the new element. col0.reserve( 10UL ); col0.append( 51UL, -2.1 ); \endcode // \n \section views_columns_common_operations Common Operations // <hr> // // A column view can be used like any other column vector. This means that with only a few // exceptions all \ref vector_operations and \ref arithmetic_operations can be used. For instance, // the current number of elements can be obtained via the \c size() function, the current capacity // via the \c capacity() function, and the number of non-zero elements via the \c nonZeros() // function. However, since columns are references to specific columns of a matrix, several // operations are not possible on views, such as resizing and swapping. The following example // shows this by means of a dense column view: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL ); // ... 
Resizing and initialization // Creating a reference to the 2nd column of matrix A auto col2 = column( A, 2UL ); col2.size(); // Returns the number of elements in the column col2.capacity(); // Returns the capacity of the column col2.nonZeros(); // Returns the number of non-zero elements contained in the column col2.resize( 84UL ); // Compilation error: Cannot resize a single column of a matrix auto col3 = column( A, 3UL ); swap( col2, col3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_columns_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse columns can be used in all arithmetic operations that any other dense or // sparse column vector can be used in. The following example gives an impression of the use of // dense columns within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse columns with // fitting element types: \code blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::columnVector> c( 2UL ); c[1] = 3.0; blaze::DynamicMatrix<double,blaze::columnMajor> A( 2UL, 4UL ); // Non-initialized 2x4 matrix auto col0( column( A, 0UL ) ); // Reference to the 0th column of A col0[0] = 0.0; // Manual initialization of the 0th column of A col0[1] = 0.0; column( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st column of A column( A, 2UL ) = a; // Dense vector initialization of the 2nd column of A column( A, 3UL ) = c; // Sparse vector initialization of the 3rd column of A b = col0 + a; // Dense vector/dense vector addition b = c + column( A, 1UL ); // Sparse vector/dense vector addition b = col0 * column( A, 2UL ); // Component-wise vector multiplication column( A, 1UL ) *= 2.0; // In-place scaling of the 1st column b = column( A, 1UL ) * 2.0; // Scaling of the 1st column b = 2.0 * column( A, 1UL ); // Scaling of the 1st column column( A, 2UL ) += a; // Addition assignment column( A, 2UL ) -= c; // Subtraction assignment column( A, 2UL ) *= column( A, 0UL ); // Multiplication assignment double scalar = trans( c ) * column( A, 1UL ); // Scalar/dot/inner product between two vectors A = column( A, 1UL ) * trans( c ); // Outer product between two vectors \endcode // \n \section views_columns_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that column views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. For // instance: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 1st column of a column-major matrix A auto col1 = column( A, 1UL ); for( auto it=col1.begin(); it!=col1.end(); ++it ) { // ... } \endcode // However, please note that creating a column view on a matrix stored in a row-major fashion // can result in a considerable performance decrease in comparison to a column view on a matrix // with column-major storage format. This is due to the non-contiguous storage of the matrix // elements. 
Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two row-major matrices blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL ); blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th column of the multiplication between A and B ... blaze::DynamicVector<double,blaze::columnVector> x = column( A * B, 15UL ); // ... is essentially the same as the following computation, which multiplies // A with the 15th column of the row-major matrix B. blaze::DynamicVector<double,blaze::columnVector> x = A * column( B, 15UL ); \endcode // Although \b Blaze performs the resulting matrix/vector multiplication as efficiently as possible, // using a column-major storage order for matrix \c B would result in a more efficient evaluation. // // \n Previous: \ref views_row_selections &nbsp; &nbsp; Next: \ref views_column_selections */ //************************************************************************************************* //**Column Selections****************************************************************************** /*!\page views_column_selections Column Selections // // \tableofcontents // // // Column selections provide views on arbitrary compositions of columns of dense and sparse // matrices. These views act as a reference to the selected columns and represent them as another // dense or sparse matrix. This reference is valid and can be used in every way any other dense // or sparse matrix can be used as long as the matrix containing the columns is not resized or // entirely destroyed. The column selection also acts as an alias to the matrix elements in the // specified range: Changes made to the columns (e.g. modifying values, inserting or erasing // elements) are immediately visible in the matrix and changes made via the matrix are immediately // visible in the columns. // // // \n \section views_column_selections_setup Setup of Column Selections // // A column selection can be created very conveniently via the \c columns() function. It can be // included via the header file \code #include <blaze/math/Columns.h> \endcode // The indices of the columns to be selected can be specified either at compile time or at runtime // (by means of an initializer list, array or vector): \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization // Selecting the columns 4, 6, 8, and 10 (compile time arguments) auto cs1 = columns<4UL,6UL,8UL,10UL>( A ); // Selecting the columns 3, 2, and 1 (runtime arguments via an initializer list) const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL }; auto cs2 = columns( A, { 3UL, 2UL, 1UL } ); auto cs3 = columns( A, list ); // Selecting the columns 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array) const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL }; auto cs4 = columns( A, array ); auto cs5 = columns( A, array.data(), array.size() ); // Selecting the column 4 five times (runtime arguments via a std::vector) const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL }; auto cs6 = columns( A, vector ); auto cs7 = columns( A, vector.data(), vector.size() ); \endcode // Note that it is possible to alias the columns of the underlying matrix in any order. Also note // that it is possible to use the same index multiple times.
// // Alternatively it is possible to pass a callable such as a lambda or functor that produces the // indices: \code blaze::DynamicMatrix<double,blaze::columnMajor> A( 18UL, 9UL ); // Selecting all even columns of the matrix, i.e. selecting the columns 0, 2, 4, 6, and 8 auto cs1 = columns( A, []( size_t i ){ return i*2UL; }, 5UL ); // Selecting all odd columns of the matrix, i.e. selecting the columns 1, 3, 5, and 7 auto cs2 = columns( A, []( size_t i ){ return i*2UL+1UL; }, 4UL ); // Reversing the columns of the matrix, i.e. selecting the columns 8, 7, 6, 5, 4, 3, 2, 1, and 0 auto cs3 = columns( A, [max=A.columns()-1UL]( size_t i ){ return max-i; }, 9UL ); \endcode // The \c columns() function returns an expression representing the view on the selected columns. // The type of this expression depends on the given arguments, primarily the type of the matrix // and the compile time arguments. If the type is required, it can be determined via the \c decltype // specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using ColumnsType = decltype( blaze::columns<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. Note, however, that a // column selection will always be treated as a column-major matrix, regardless of the storage // order of the matrix containing the columns. The view can also be used on both sides of an // assignment: It can either be used as an alias to grant write access to specific columns of a // matrix primitive on the left-hand side of an assignment or to grant read-access to specific // columns of a matrix primitive or expression on the right-hand side of an assignment. The // following example demonstrates this in detail: \code blaze::DynamicMatrix<double,blaze::columnMajor> A; blaze::DynamicMatrix<double,blaze::rowMajor> B; blaze::CompressedMatrix<double,blaze::columnMajor> C; // ... Resizing and initialization // Selecting the columns 1, 3, 5, and 7 of A auto cs = columns( A, { 1UL, 3UL, 5UL, 7UL } ); // Setting columns 1, 3, 5, and 7 of A to column 4 of B cs = columns( B, { 4UL, 4UL, 4UL, 4UL } ); // Setting the columns 2, 4, 6, and 8 of A to C columns( A, { 2UL, 4UL, 6UL, 8UL } ) = C; // Setting the first 4 columns of A to the columns 5, 4, 3, and 2 of C submatrix( A, 0UL, 0UL, A.rows(), 4UL ) = columns( C, { 5UL, 4UL, 3UL, 2UL } ); // Rotating the result of the addition between columns 1, 3, 5, and 7 of A and C B = columns( cs + C, { 2UL, 3UL, 0UL, 1UL } ); \endcode // \warning It is the programmer's responsibility to ensure the column selection does not outlive // the viewed matrix: \code // Creating a column selection on a temporary matrix; results in a dangling reference! auto cs = columns<2UL,0UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_column_selections_element_access Element Access // // The elements of a column selection can be directly accessed via the function call operator: \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization // Creating a view on the first four columns of A in reverse order auto cs = columns( A, { 3UL, 2UL, 1UL, 0UL } ); // Setting the element (0,0) of the column selection, which corresponds // to the element at position (0,3) in matrix A cs(0,0) = 2.0; \endcode // Alternatively, the elements of a column selection can be traversed via (const) iterators. 
// Just as with matrices, in case of non-const column selection, \c begin() and \c end() return // an iterator, which allows to manipuate the elements, in case of constant column selection an // iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 512UL, 256UL ); // ... Resizing and initialization // Creating a reference to a selection of columns of matrix A auto cs = columns( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th column via iterators to non-const elements for( auto it=cs.begin(0); it!=cs.end(0); ++it ) { *it = ...; // OK: Write access to the dense value. ... = *it; // OK: Read access to the dense value. } // Traversing the elements of the 1st column via iterators to const elements for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense value. } \endcode \code blaze::CompressedMatrix<int,blaze::columnMajor> A( 512UL, 256UL ); // ... Resizing and initialization // Creating a reference to a selection of columns of matrix A auto cs = columns( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th column via iterators to non-const elements for( auto it=cs.begin(0); it!=cs.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements of the 1st column via iterators to const elements for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_column_selections_element_insertion Element Insertion // // Inserting/accessing elements in a sparse column selection can be done by several alternative // functions. The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::columnMajor> A( 512UL, 256UL ); // Non-initialized matrix of size 512x256 auto cs = columns( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the columns 10, 20, 30, and 40 of A // The function call operator provides access to all possible elements of the sparse column // selection, including the zero elements. In case the function call operator is used to // access an element that is currently not stored in the sparse column selection, the element // is inserted into the column selection. cs(2,4) = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the column selection it is inserted into the column selection, if it is // already contained in the column selection its value is modified. cs.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the column selection is the insert() function. // However, it inserts the element only in case the element is not already contained in the // column selection. cs.insert( 2UL, 6UL, 3.7 ); // Just as in the case of sparse matrices, elements can also be inserted via the append() // function. 
In case of column selections, append() also requires that the appended element's // index is strictly larger than the currently largest non-zero index in the according column // of the column selection and that the according column's capacity is large enough to hold the // new element. Note however that due to the nature of a column selection, which may be an alias // to an arbitrary collection of columns, the append() function does not work as efficiently // for a column selection as it does for a matrix. cs.reserve( 2UL, 10UL ); cs.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_column_selections_common_operations Common Operations // // A view on specific columns of a matrix can be used like any other dense or sparse matrix. For // instance, the current size of the matrix, i.e. the number of rows or columns can be obtained // via the \c rows() and \c columns() functions, the current total capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. However, since // column selections are views on specific columns of a matrix, several operations are not possible, // such as resizing and swapping: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL ); // ... Resizing and initialization // Creating a view on the columns 8, 16, 24, and 32 of matrix A auto cs = columns( A, { 8UL, 16UL, 24UL, 32UL } ); cs.rows(); // Returns the number of rows of the column selection cs.columns(); // Returns the number of columns of the column selection cs.capacity(); // Returns the capacity of the column selection cs.nonZeros(); // Returns the number of non-zero elements contained in the column selection cs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a column selection auto cs2 = columns( A, { 9UL, 17UL, 25UL, 33UL } ); swap( cs, cs2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_column_selections_arithmetic_operations Arithmetic Operations // // Both dense and sparse column selections can be used in all arithmetic operations that any other // dense or sparse matrix can be used in. The following example gives an impression of the use of // dense column selections within arithmetic operations. All operations (addition, subtraction, // multiplication, scaling, ...) can be performed on all possible combinations of dense and // sparse matrices with fitting element types: \code blaze::DynamicMatrix<double,blaze::columnMajor> D1, D2, D3; blaze::CompressedMatrix<double,blaze::columnMajor> S1, S2; blaze::CompressedVector<double,blaze::columnVector> a, b; // ...
Resizing and initialization std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL }; std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL }; std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL }; auto cs = columns( D1, indices1 ); // Selecting every third column of D1 in the range [0..21] cs = D2; // Dense matrix assignment to the selected columns columns( D1, indices2 ) = S1; // Sparse matrix assignment to the selected columns D3 = cs + D2; // Dense matrix/dense matrix addition S2 = S1 - columns( D1, indices2 ); // Sparse matrix/dense matrix subtraction D2 = cs % columns( D1, indices3 ); // Dense matrix/dense matrix Schur product D2 = columns( D1, indices2 ) * D1; // Dense matrix/dense matrix multiplication columns( D1, indices2 ) *= 2.0; // In-place scaling of the second selection of columns D2 = columns( D1, indices3 ) * 2.0; // Scaling of the elements in the third selection of columns D2 = 2.0 * columns( D1, indices3 ); // Scaling of the elements in the third selection of columns columns( D1, indices1 ) += D2; // Addition assignment columns( D1, indices2 ) -= S1; // Subtraction assignment columns( D1, indices3 ) %= cs; // Schur product assignment a = columns( D1, indices1 ) * b; // Dense matrix/sparse vector multiplication \endcode // \n \section views_column_selections_on_row_major_matrix Column Selections on a Row-Major Matrix // // Especially noteworthy is that column selections can be created for both row-major and // column-major matrices. Whereas the interface of a row-major matrix only allows to traverse a // row directly and the interface of a column-major matrix only allows to traverse a column, via // views it is possible to traverse a row of a column-major matrix or a column of a row-major // matrix. For instance: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 1st and 3rd column of the row-major matrix A auto cs = columns( A, { 1UL, 3UL } ); // Traversing column 0 of the selection, which corresponds to the 1st column of matrix A for( auto it=cs.begin( 0UL ); it!=cs.end( 0UL ); ++it ) { // ... } \endcode // However, please note that creating a column selection on a matrix stored in a row-major fashion // can result in a considerable performance decrease in comparison to a column selection on a // matrix with column-major storage format. This is due to the non-contiguous storage of the // matrix elements. Therefore care has to be taken in the choice of the most suitable storage // order: \code // Setup of two row-major matrices blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL ); blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th, 30th, and 45th column of the multiplication between A and B ... blaze::DynamicMatrix<double,blaze::columnMajor> x = columns( A * B, { 15UL, 30UL, 45UL } ); // ... is essentially the same as the following computation, which multiplies // A with the 15th, 30th, and 45th column of the row-major matrix B. blaze::DynamicMatrix<double,blaze::columnMajor> x = A * columns( B, { 15UL, 30UL, 45UL } ); \endcode // Although \b Blaze performs the resulting matrix/matrix multiplication as efficiently as possible, // using a column-major storage order for matrix \c B would result in a more efficient evaluation.
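// As a small, hedged sketch in addition to the example above (reusing the row-major matrices \c A and \c B from the previous snippet; the temporary \c Bc is an illustrative addition, not part of the original example set): if the selected columns of \c B are needed repeatedly, explicitly copying \c B into a column-major temporary avoids the repeated non-contiguous accesses of a column selection on a row-major matrix: \code // One-time conversion of the row-major matrix B into a column-major copy blaze::DynamicMatrix<double,blaze::columnMajor> Bc( B ); // The column selection on the column-major copy now accesses contiguous memory blaze::DynamicMatrix<double,blaze::columnMajor> x = A * columns( Bc, { 15UL, 30UL, 45UL } ); \endcode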
// // \n Previous: \ref views_columns &nbsp; &nbsp; Next: \ref views_bands */ //************************************************************************************************* //**Bands****************************************************************************************** /*!\page views_bands Bands // // \tableofcontents // // // Bands provide views on a specific band of a dense or sparse matrix (e.g. the diagonal, the // subdiagonal, ...). As such, bands act as a reference to a specific band. This reference // is valid and can be used in every way any other vector can be used as long as the matrix // containing the band is not resized or entirely destroyed. The band also acts as an alias to // the band elements: Changes made to the elements (e.g. modifying values, inserting or erasing // elements) are immediately visible in the matrix and changes made via the matrix are immediately // visible in the band. // // // \n \section views_bands_setup Setup of Bands // <hr> // // \image html band.png // \image latex band.eps "Band view" width=250pt // // A reference to a dense or sparse band can be created very conveniently via the \c band() // function. It can be included via the header file \code #include <blaze/math/Band.h> \endcode // The band index must be in the range from \f$[min(0,1-M)..max(0,N-1)]\f$, where \c M is the // total number of rows and \c N is the total number of columns, and can be specified both at // compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a reference to the 1st lower band of matrix A (compile time index) auto band1 = band<-1L>( A ); // Creating a reference to the 2nd upper band of matrix A (runtime index) auto band2 = band( A, 2L ); \endcode // In addition, the \c diagonal() function provides a convenient shortcut for the setup of a view // on the diagonal of a dense or sparse matrix. It has the same effect as calling the \c band() // function with a compile time index of 0: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a reference to the diagonal of matrix A via the band() and diagonal() functions auto diag1 = band<0L>( A ); auto diag2 = diagonal( A ); static_assert( blaze::IsSame< decltype(diag1), decltype(diag2) >::value, "Non-identical types detected" ); \endcode // Both the \c band() and the \c diagonal() function return an expression representing the band // view. The type of this expression depends on the given arguments, primarily the type of the // matrix and the compile time arguments. If the type is required, it can be determined via // \c decltype specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using BandType = decltype( blaze::band<1L>( std::declval<MatrixType>() ) ); using DiagonalType = decltype( blaze::diagonal( std::declval<MatrixType>() ) ); \endcode // This resulting view can be treated as any other vector, i.e. it can be assigned to, it can // be copied from, and it can be used in arithmetic operations. By default, bands are considered // column vectors, but this setting can be changed via the \c BLAZE_DEFAULT_TRANSPOSE_FLAG switch // (see \ref transpose_flag). The reference can also be used on both sides of an assignment: The // band can either be used as an alias to grant write access to a specific band of a matrix // primitive on the left-hand side of an assignment or to grant read-access to a specific band of // a matrix primitive or expression on the right-hand side of an assignment. 
The following example // demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A, B; blaze::CompressedMatrix<double,blaze::rowMajor> C, D; // ... Resizing and initialization // Setting the 2nd upper band of matrix A to x auto band2 = band( A, 2L ); band2 = x; // Setting the 3rd upper band of matrix B to y band( B, 3L ) = y; // Setting x to the 2nd lower band of the result of the matrix multiplication x = band( A * B, -2L ); // Setting y to the 2nd upper band of the result of the sparse matrix multiplication y = band( C * D, 2L ); \endcode // \warning It is the programmer's responsibility to ensure the band does not outlive the viewed // matrix: \code // Creating a band on a temporary matrix; results in a dangling reference! auto band1 = band<1L>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_bands_element_access Element Access // <hr> // // The elements of a band can be directly accessed with the subscript operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a view on the 4th upper band of matrix A auto band4 = band( A, 4L ); // Setting the 1st element of the dense band, which corresponds // to the 1st element in the 4th upper band of matrix A band4[1] = 2.0; \endcode // The numbering of the band elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of elements of the referenced band. Alternatively, the elements of a band // can be traversed via iterators. Just as with vectors, in case of non-const band, \c begin() and // \c end() return an iterator, which allows to manipulate the elements, in case of constant bands // an iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 5th upper band of matrix A auto band5 = band( A, 5L ); // Traversing the elements via iterators to non-const elements for( auto it=band5.begin(); it!=band5.end(); ++it ) { *it = ...; // OK; Write access to the dense band value ... = *it; // OK: Read access to the dense band value. } // Traversing the elements via iterators to const elements for( auto it=band5.cbegin(); it!=band5.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense band value. } \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 5th band of matrix A auto band5 = band( A, 5L ); // Traversing the elements via iterators to non-const elements for( auto it=band5.begin(); it!=band5.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=band5.cbegin(); it!=band5.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. 
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_bands_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse band can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // Non-initialized 10x100 matrix auto diag( band( A, 0L ) ); // Reference to the diagonal of A // The subscript operator provides access to all possible elements of the sparse band, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse band, the element is inserted into the band. diag[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the band it is inserted into the band, if it is already contained in // the band its value is modified. diag.set( 45UL, -1.2 ); // An alternative for inserting elements into the band is the insert() function. However, // it inserts the element only in case the element is not already contained in the band. diag.insert( 50UL, 3.7 ); \endcode // \n \section views_bands_common_operations Common Operations // <hr> // // A band view can be used like any other column vector. This means that with only a few // exceptions all \ref vector_operations and \ref arithmetic_operations can be used. For instance, // the current number of band elements can be obtained via the \c size() function, the current // capacity via the \c capacity() function, and the number of non-zero elements via the // \c nonZeros() function. However, since bands are references to specific bands of a matrix, // several operations are not possible, such as resizing and swapping. The following example // shows this by means of a dense band view: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... Resizing and initialization // Creating a reference to the 2nd upper band of matrix A auto band2 = band( A, 2L ); band2.size(); // Returns the number of elements in the band band2.capacity(); // Returns the capacity of the band band2.nonZeros(); // Returns the number of non-zero elements contained in the band band2.resize( 84UL ); // Compilation error: Cannot resize a single band of a matrix auto band3 = band( A, 3L ); swap( band2, band3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_bands_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse bands can be used in all arithmetic operations that any other dense or // sparse vector can be used in. The following example gives an impression of the use of dense // bands within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) 
can be performed on all possible combinations of dense and sparse bands with // fitting element types: \code blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::columnVector> c( 2UL ); c[1] = 3.0; blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL ); // Non-initialized 4x2 matrix auto band1( band( A, 1L ) ); // Reference to the 1st upper band of A auto diag ( band( A, 0L ) ); // Reference to the diagonal of A band1[0] = 0.0; // Manual initialization of the 1st upper band of A diag = 1.0; // Homogeneous initialization of the diagonal of A band( A, -1L ) = a; // Dense vector initialization of the 1st lower band of A band( A, -2L ) = c; // Sparse vector initialization of the 2nd lower band of A b = diag + a; // Dense vector/dense vector addition b = c + band( A, -1L ); // Sparse vector/dense vector addition b = diag * band( A, -2L ); // Component-wise vector multiplication band( A, -1L ) *= 2.0; // In-place scaling of the 1st lower band b = band( A, -1L ) * 2.0; // Scaling of the 1st lower band b = 2.0 * band( A, -1L ); // Scaling of the 1st lower band band( A, -2L ) += a; // Addition assignment band( A, -2L ) -= c; // Subtraction assignment band( A, -2L ) *= band( A, 0L ); // Multiplication assignment double scalar = trans( c ) * band( A, -1L ); // Scalar/dot/inner product between two vectors A = band( A, -1L ) * trans( c ); // Outer product between two vectors \endcode // \n Previous: \ref views_column_selections &nbsp; &nbsp; Next: \ref arithmetic_operations */ //************************************************************************************************* //**Arithmetic Operations************************************************************************** /*!\page arithmetic_operations Arithmetic Operations // // \tableofcontents // // // \b Blaze provides the following arithmetic operations for vectors and matrices: // // <ul> // <li> \ref addition // <ul> // <li> \ref vector_vector_addition </li> // <li> \ref matrix_matrix_addition </li> // <li> \ref scalar_addition </li> // </ul> // </li> // <li> \ref subtraction // <ul> // <li> \ref vector_vector_subtraction </li> // <li> \ref matrix_matrix_subtraction </li> // <li> \ref scalar_subtraction </li> // </ul> // </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // <li> \ref componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // <li> \ref vector_kronecker_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication // <ul> // <li> \ref schur_product </li> // <li> \ref matrix_product </li> // <li> \ref matrix_kronecker_product </li> // </ul> // </li> // </ul> // // \n Previous: \ref views_bands &nbsp; &nbsp; Next: \ref addition */ //************************************************************************************************* //**Addition*************************************************************************************** /*!\page addition Addition // // \n \section vector_vector_addition Vector/Vector Addition // <hr> // // The addition of vectors is as intuitive as the addition of scalar values. For the addition of // any two vectors the addition operator (i.e. \c operator+()) can be used. It even enables the // addition of dense and sparse vectors: \code blaze::DynamicVector<int> v1( 5UL ), v3; blaze::CompressedVector<float> v2( 5UL ); // ...
Initializing the vectors v3 = v1 + v2; // Addition of a dense and a sparse column vector of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that it is only possible to add vectors with // the same transpose flag: \code using blaze::columnVector; using blaze::rowVector; blaze::DynamicVector<int,columnVector> v1( 5UL ); blaze::CompressedVector<float,rowVector> v2( 5UL ); v1 + v2; // Compilation error: Cannot add a column vector and a row vector v1 + trans( v2 ); // OK: Addition of two column vectors \endcode // Also note that the addition of two vectors with the same element type is favorable due to // possible vectorization of the operation: \code blaze::DynamicVector<double> v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 + v2; // Vectorized addition of two double precision vectors \endcode // \n \section outer_sum Outer Sum // <hr> // // The addition between a column vector and a row vector results in the outer sum of the two // vectors: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 }; // Results in the matrix // // ( 1 5 0 6 ) // A = ( 4 8 3 9 ) // ( -2 2 -3 3 ) // blaze::StaticMatrix<int,3UL,4UL> M1 = v1 + v2; \endcode // The \c trans() function can be used to transpose a vector as necessary: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 }; blaze::StaticMatrix<int,3UL,4UL> M1 = trans( v1 ) + v2; \endcode // \n \section matrix_matrix_addition Matrix/Matrix Addition // <hr> // // For the addition of any two matrices the addition operator (i.e. \c operator+()) can be used. // It even enables the addition of dense and sparse matrices: \code using blaze::rowMajor; using blaze::columnMajor; blaze::CompressedMatrix<size_t,columnMajor> M1( 7UL, 3UL ); blaze::DynamicMatrix<float,rowMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 + M2; // Addition of a sparse column-major and a dense row-major matrix of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. It is possible to add row-major and column-major matrices. // Note however that in favor of performance the addition of two matrices with the same storage // order is favorable. The same argument holds for the element type: In case two matrices with // the same element type are added, the performance can be much higher due to vectorization of // the operation. \code blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... Initialization of the matrices M3 = M1 + M2; // Vectorized addition of two row-major, single precision dense matrices \endcode // \n \section scalar_addition Scalar Addition // <hr> // // For convenience it is also possible to add a scalar value to a dense vector or dense matrix, // which has the same effect as adding a uniform vector or matrix. In \b Blaze it is possible to // use all built-in/fundamental data types except bool as scalar values. Additionally, it is // possible to use \c std::complex values with the same built-in data types as element type. 
// Examples:

\code
blaze::StaticVector<int,6UL> v1{ 3, 2, 5, -4, 1, 6 };

blaze::DynamicVector<int>    v2 = v1 + 2;  // Results in { 5, 4, 7, -2, 3, 8 }
blaze::CompressedVector<int> v3 = 3 + v1;  // Results in { 6, 5, 8, -1, 4, 9 }
\endcode

\code
blaze::StaticMatrix<int,2UL,3UL> M1{ { 3, 2, 5 }, { -4, 1, 6 } };

blaze::DynamicMatrix<int>    M2 = M1 + 2;  // Results in { { 5, 4, 7 }, { -2, 3, 8 } }
blaze::CompressedMatrix<int> M3 = 3 + M1;  // Results in { { 6, 5, 8 }, { -1, 4, 9 } }
\endcode

// \n Previous: \ref arithmetic_operations &nbsp; &nbsp; Next: \ref subtraction
*/
//*************************************************************************************************


//**Subtraction************************************************************************************
/*!\page subtraction Subtraction
//
// \n \section vector_vector_subtraction Vector/Vector Subtraction
// <hr>
//
// The subtraction of vectors works just as intuitively as the addition, but with the subtraction
// operator (i.e. \c operator-()). It also enables the subtraction of dense and sparse vectors:

\code
blaze::DynamicVector<int> v1( 5UL ), v3;
blaze::CompressedVector<float> v2( 5UL );

// ... Initializing the vectors

v3 = v1 - v2;  // Subtraction of a dense and a sparse column vector of different data type
\endcode

// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that in case of vectors it is only possible to
// subtract vectors with the same transpose flag:

\code
blaze::DynamicVector<int,columnVector>   v1( 5UL );
blaze::CompressedVector<float,rowVector> v2( 5UL );

v1 - v2;           // Compilation error: Cannot subtract a row vector from a column vector
v1 - trans( v2 );  // OK: Subtraction of two column vectors
\endcode

// Also note that the subtraction of two vectors with the same element type is favorable due to
// possible vectorization of the operation:

\code
blaze::DynamicVector<double> v1( 100UL ), v2( 100UL ), v3;

// ... Initialization of the vectors

v3 = v1 - v2;  // Vectorized subtraction of two double precision vectors
\endcode

// \n \section outer_difference Outer Difference
// <hr>
//
// The subtraction between a column vector and a row vector results in the outer difference of
// the two vectors:

\code
blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 };

// Results in the matrix
//
//        (  3 -1  4 -2 )
//    A = (  6  2  7  1 )
//        (  0 -4  1 -5 )
//
blaze::StaticMatrix<int,3UL,4UL> M1 = v1 - v2;
\endcode

// The \c trans() function can be used to transpose a vector as necessary:

\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 };

blaze::StaticMatrix<int,3UL,4UL> M1 = trans( v1 ) - v2;
\endcode

// \n \section matrix_matrix_subtraction Matrix/Matrix Subtraction
// <hr>
//
// For the subtraction of any two matrices the subtraction operator (i.e. \c operator-()) can be
// used. It even enables the subtraction of dense and sparse matrices:

\code
blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL );
blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3;

// ... Initializing the matrices

M3 = M1 - M2;  // Subtraction of a row-major and a column-major matrix of different data type
\endcode

// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. It is possible to subtract row-major and column-major
// matrices.
// Note however that in favor of performance the subtraction of two matrices with the
// same storage order is favorable. The same argument holds for the element type: In case two
// matrices with the same element type are subtracted, the performance can be much higher due
// to vectorization of the operation.

\code
blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;

// ... Initialization of the matrices

M3 = M1 - M2;  // Vectorized subtraction of two row-major, single precision dense matrices
\endcode

// \n \section scalar_subtraction Scalar Subtraction
// <hr>
//
// For convenience it is also possible to subtract a scalar value from a dense vector or dense
// matrix, which has the same effect as subtracting a uniform vector or matrix. In \b Blaze it is
// possible to use all built-in/fundamental data types except bool as scalar values. Additionally,
// it is possible to use \c std::complex values with the same built-in data types as element type.
// Examples:

\code
blaze::StaticVector<int,6UL> v1{ 3, 2, 5, -4, 1, 6 };

blaze::DynamicVector<int>    v2 = v1 - 2;  // Results in { 1, 0, 3, -6, -1, 4 }
blaze::CompressedVector<int> v3 = 3 - v1;  // Results in { 0, 1, -2, 7, 2, -3 }
\endcode

\code
blaze::StaticMatrix<int,2UL,3UL> M1{ { 3, 2, 5 }, { -4, 1, 6 } };

blaze::DynamicMatrix<int>    M2 = M1 - 2;  // Results in { { 1, 0, 3 }, { -6, -1, 4 } }
blaze::CompressedMatrix<int> M3 = 3 - M1;  // Results in { { 0, 1, -2 }, { 7, 2, -3 } }
\endcode

// \n Previous: \ref addition &nbsp; &nbsp; Next: \ref scalar_multiplication
*/
//*************************************************************************************************


//**Scalar Multiplication**************************************************************************
/*!\page scalar_multiplication Scalar Multiplication
//
// The scalar multiplication is the multiplication of a vector or a matrix with a scalar value.
// Alternatively it is also possible to divide a vector or a matrix by a scalar value. In \b Blaze
// it is possible to use all built-in/fundamental data types except bool as scalar values.
// Additionally, it is possible to use \c std::complex values with the same built-in data types
// as element type.

\code
blaze::StaticVector<int,3UL> v1{ 1, 2, 3 };

blaze::DynamicVector<double>   v2 = v1 * 1.2;    // Scalar multiplication
blaze::CompressedVector<float> v3 = -0.3F * v1;  // Scalar multiplication
blaze::DynamicVector<double>   v4 = v1 / 1.2;    // Scalar division
blaze::CompressedVector<float> v5 = 12.0F / v1;  // Scalar division (only dense vectors)
\endcode

\code
blaze::StaticMatrix<int,3UL,2UL> M1{ { 1, 2 }, { 3, 4 }, { 5, 6 } };

blaze::DynamicMatrix<double>   M2 = M1 * 1.2;    // Scalar multiplication
blaze::CompressedMatrix<float> M3 = -0.3F * M1;  // Scalar multiplication
blaze::DynamicMatrix<double>   M4 = M1 / 1.2;    // Scalar division
blaze::CompressedMatrix<float> M5 = 12.0F / M1;  // Scalar division (only dense matrices)
\endcode

// Vectors and matrices cannot be used as scalar values for scalar multiplications or divisions
// (see the following example).
However, each vector and matrix provides the \c scale() function, // which can be used to scale a vector or matrix element-wise with arbitrary scalar data types: \code blaze::CompressedMatrix< blaze::StaticMatrix<int,3UL,3UL> > M1; blaze::StaticMatrix<int,3UL,3UL> scalar; M1 * scalar; // No scalar multiplication, but matrix/matrix multiplication M1.scale( scalar ); // Scalar multiplication \endcode // \n Previous: \ref subtraction &nbsp; &nbsp; Next: \ref componentwise_multiplication */ //************************************************************************************************* //**Vector/Vector Multiplication******************************************************************* /*!\page vector_vector_multiplication Vector/Vector Multiplication // // \n \section componentwise_multiplication Componentwise Multiplication // <hr> // // Multiplying two vectors with the same transpose flag (i.e. either blaze::columnVector or // blaze::rowVector) via the multiplication operator results in a componentwise multiplication // of the two vectors: \code using blaze::DynamicVector; using blaze::CompressedVector; CompressedVector<int,columnVector> v1( 17UL ); DynamicVector<int,columnVector> v2( 17UL ); StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> v4( 10UL ); // ... Initialization of the vectors CompressedVector<int,columnVector> v5( v1 * v2 ); // Componentwise multiplication of a sparse and // a dense column vector. The result is a sparse // column vector. DynamicVector<double,rowVector> v6( v3 * v4 ); // Componentwise multiplication of two dense row // vectors. The result is a dense row vector. \endcode // \n \section inner_product Inner Product / Scalar Product / Dot Product // <hr> // // The multiplication between a row vector and a column vector results in an inner product between // the two vectors: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 }; int result = v1 * v2; // Results in the value 15 \endcode // The \c trans() function can be used to transpose a vector as necessary: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; int result = v1 * trans( v2 ); // Also results in the value 15 \endcode // Alternatively, either the \c inner() function, the \c dot() function or the comma operator can // be used for any combination of vectors (row or column vectors) to perform an inner product: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; // All alternatives for the inner product between a column vector and a row vector int result1 = trans( v1 ) * trans( v2 ); int result2 = inner( v1, v2 ); int result3 = dot( v1, v2 ); int result4 = (v1,v2); \endcode // When using the comma operator, please note the brackets embracing the inner product expression. // Due to the low precedence of the comma operator (lower even than the assignment operator) these // brackets are strictly required for a correct evaluation of the inner product. 
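//
// As a self-contained illustration of the alternatives above, the following minimal sketch
// (assuming only that the convenience header <tt><blaze/Blaze.h></tt> is included) computes the
// same inner product in all four ways; with the values used here every variant yields
// 2*(-1) + 5*3 + (-1)*(-2) = 15:

\code
#include <blaze/Blaze.h>
#include <iostream>

int main()
{
   blaze::StaticVector<int,3UL,blaze::columnVector> v1{ 2, 5, -1 };
   blaze::StaticVector<int,3UL,blaze::rowVector>    v2{ -1, 3, -2 };

   const int r1 = trans( v1 ) * trans( v2 );  // Row vector times column vector
   const int r2 = inner( v1, v2 );            // inner() accepts any combination of transpose flags
   const int r3 = dot( v1, v2 );              // dot() computes the same scalar as inner()
   const int r4 = ( v1, v2 );                 // Comma operator; the surrounding brackets are mandatory

   std::cout << r1 << " " << r2 << " " << r3 << " " << r4 << "\n";  // Prints "15 15 15 15"
}
\endcode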
//
//
// \n \section outer_product Outer Product
// <hr>
//
// The multiplication between a column vector and a row vector results in the outer product of
// the two vectors:

\code
blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 };

// Results in the matrix
//
//        ( -2  6  -4  8 )
//    A = ( -5 15 -10 20 )
//        (  1 -3   2 -4 )
//
blaze::StaticMatrix<int,3UL,4UL> M1 = v1 * v2;
\endcode

// The \c trans() function can be used to transpose a vector as necessary:

\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 };

blaze::StaticMatrix<int,3UL,4UL> M1 = trans( v1 ) * v2;
\endcode

// Alternatively, the \c outer() function can be used for any combination of vectors (row or column
// vectors) to perform an outer product:

\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 };

blaze::StaticMatrix<int,3UL,4UL> M1 = outer( v1, v2 );  // Outer product between two row vectors
\endcode

// \n \section cross_product Cross Product
// <hr>
//
// Two vectors with the same transpose flag can be multiplied via the cross product. The cross
// product between two vectors \f$ a \f$ and \f$ b \f$ is defined as

   \f[
   \left(\begin{array}{*{1}{c}}
   c_0 \\
   c_1 \\
   c_2 \\
   \end{array}\right)
   =
   \left(\begin{array}{*{1}{c}}
   a_1 b_2 - a_2 b_1 \\
   a_2 b_0 - a_0 b_2 \\
   a_0 b_1 - a_1 b_0 \\
   \end{array}\right).
   \f]

// Due to the absence of a \f$ \times \f$ operator in the C++ language, the cross product is
// realized via the \c cross() function. Alternatively, the modulo operator (i.e. \c operator%)
// can be used in case infix notation is required:

\code
blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 };

blaze::StaticVector<int,3UL,columnVector> v3( cross( v1, v2 ) );
blaze::StaticVector<int,3UL,columnVector> v4( v1 % v2 );
\endcode

// Please note that the cross product is restricted to three dimensional (dense and sparse)
// column vectors.
//
//
// \n \section vector_kronecker_product Kronecker Product
// <hr>
//
// The Kronecker product of two vectors with the same transpose flag can be computed via the
// \a kron() function:

\code
using blaze::DynamicVector;
using blaze::CompressedVector;

DynamicVector<double> v1( 28UL );
CompressedVector<float> v2( 17UL );

// ... Initialization of the vectors

CompressedVector<double> v3 = kron( v1, v2 );
\endcode

// Both dense and sparse vectors can be used for a Kronecker product. It is possible to multiply
// two vectors with different element type, as long as the element types themselves can be
// multiplied.
//
// \n Previous: \ref scalar_multiplication &nbsp; &nbsp; Next: \ref vector_vector_division
*/
//*************************************************************************************************


//**Vector/Vector Division*************************************************************************
/*!\page vector_vector_division Vector/Vector Division
//
// \n \section componentwise_division Componentwise Division
// <hr>
//
// Dividing a vector by a dense vector with the same transpose flag (i.e. either blaze::columnVector
// or blaze::rowVector) via the division operator results in a componentwise division:

\code
using blaze::DynamicVector;
using blaze::CompressedVector;

CompressedVector<int,columnVector> v1( 17UL );
DynamicVector<int,columnVector>    v2( 17UL );
StaticVector<double,10UL,rowVector> v3;
DynamicVector<double,rowVector>     v4( 10UL );
// ... Initialization of the vectors

CompressedVector<int,columnVector> v5( v1 / v2 );  // Componentwise division of a sparse and a
                                                   // dense column vector. The result is a sparse
                                                   // column vector.
DynamicVector<double,rowVector> v6( v3 / v4 );     // Componentwise division of two dense row
                                                   // vectors. The result is a dense row vector.
\endcode

// Note that all values of the divisor must be non-zero and that no checks are performed to assert
// this precondition!
//
//
// \n \section outer_quotient Outer Quotient
// <hr>
//
// The division between a column vector and a row vector results in the outer quotient of the
// two vectors:

\code
blaze::StaticVector<double,3UL,columnVector> v1{ 2, 5, -1 };
blaze::DynamicVector<double,rowVector> v2{ -1, 5, -2, 4 };

// Results in the matrix
//
//        ( -2  0.4 -1    0.5   )
//    A = ( -5  1   -2.5  1.25  )
//        (  1 -0.2  0.5 -0.25  )
//
blaze::StaticMatrix<double,3UL,4UL> M1 = v1 / v2;
\endcode

// The \c trans() function can be used to transpose a vector as necessary:

\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,rowVector> v2{ -1, 5, -2, 4 };

blaze::StaticMatrix<int,3UL,4UL> M1 = trans( v1 ) / v2;
\endcode

// Note that all values of the divisor must be non-zero and that no checks are performed to assert
// this precondition!
//
// \n Previous: \ref vector_vector_multiplication &nbsp; &nbsp; Next: \ref matrix_vector_multiplication
*/
//*************************************************************************************************


//**Matrix/Vector Multiplication*******************************************************************
/*!\page matrix_vector_multiplication Matrix/Vector Multiplication
//
// In \b Blaze matrix/vector multiplications can be as intuitively formulated as in mathematical
// textbooks. Just as in textbooks there are two different multiplications between a matrix and
// a vector: a matrix/column vector multiplication and a row vector/matrix multiplication:

\code
using blaze::StaticVector;
using blaze::DynamicVector;
using blaze::DynamicMatrix;

DynamicMatrix<int> M1( 39UL, 12UL );
StaticVector<int,12UL,columnVector> v1;

// ... Initialization of the matrix and the vector

DynamicVector<int,columnVector> v2 = M1 * v1;           // Matrix/column vector multiplication
DynamicVector<int,rowVector>    v3 = trans( v2 ) * M1;  // Row vector/matrix multiplication
\endcode

// Note that the storage order of the matrix poses no restrictions on the operation. Also note
// that the highest performance for a multiplication between a dense matrix and a dense vector can
// be achieved if both the matrix and the vector have the same scalar element type.
//
// \n Previous: \ref vector_vector_division &nbsp; &nbsp; Next: \ref matrix_matrix_multiplication
*/
//*************************************************************************************************


//**Matrix/Matrix Multiplication*******************************************************************
/*!\page matrix_matrix_multiplication Matrix/Matrix Multiplication
//
// \n \section schur_product Componentwise Multiplication / Schur Product
// <hr>
//
// Multiplying two matrices with the same dimensions (i.e. the same number of rows and columns)
// via the modulo operator results in a componentwise multiplication (Schur product) of the two
// matrices:

\code
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;

DynamicMatrix<double> M1( 28UL, 35UL );
CompressedMatrix<float> M2( 28UL, 35UL );

// ...
Initialization of the matrices DynamicMatrix<double> M3 = M1 % M2; \endcode // Both dense and sparse matrices can be used for a Schur product. The storage order of the two // matrices poses no restrictions on the operation, all variations are possible. It is also // possible to multiply two matrices with different element type, as long as the element types // themselves can be multiplied. // // // \n \section matrix_product Matrix Product // <hr> // // The matrix/matrix product can be formulated exactly as in mathematical textbooks: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<double> M1( 45UL, 85UL ); CompressedMatrix<float> M2( 85UL, 37UL ); // ... Initialization of the matrices DynamicMatrix<double> M3 = M1 * M2; \endcode // The storage order of the two matrices poses no restrictions on the operation, all variations // are possible. It is also possible to multiply two matrices with different element type, as // long as the element types themselves can be multiplied and added. Note however that the // highest performance for a multiplication between two matrices can be expected for two // matrices with the same scalar element type. // // In case the resulting matrix is known to be symmetric, Hermitian, lower triangular, upper // triangular, or diagonal, the computation can be optimized by explicitly declaring the // multiplication as symmetric, Hermitian, lower triangular, upper triangular, or diagonal by // means of the \ref matrix_operations_declaration_operations : \code using blaze::DynamicMatrix; DynamicMatrix<double> M1, M2, M3; // ... Initialization of the square matrices M3 = declsym ( M1 * M2 ); // Declare the result of the matrix multiplication as symmetric M3 = declherm( M1 * M2 ); // Declare the result of the matrix multiplication as Hermitian M3 = decllow ( M1 * M2 ); // Declare the result of the matrix multiplication as lower triangular M3 = declupp ( M1 * M2 ); // Declare the result of the matrix multiplication as upper triangular M3 = decldiag( M1 * M2 ); // Declare the result of the matrix multiplication as diagonal \endcode // Using a declaration operation on the a multiplication expression can speed up the computation // by a factor of 2. Note however that the caller of the according declaration operation takes // full responsibility for the correctness of the declaration. Falsely declaring a multiplication // as symmetric, Hermitian, lower triangular, upper triangular, or diagonal leads to undefined // behavior! // // // \n \section matrix_kronecker_product Kronecker Product // <hr> // // The Kronecker product of two matrices can be computed via the \a kron() function: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<double> M1( 28UL, 35UL ); CompressedMatrix<float> M2( 17UL, 11UL ); // ... Initialization of the matrices CompressedMatrix<double> M3 = kron( M1, M2 ); \endcode // Both dense and sparse matrices can be used for a Kronecker product. The storage order of the // two matrices poses no restrictions on the operation, all variations are possible. It is also // possible to multiply two matrices with different element type, as long as the element types // themselves can be multiplied. 
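//
// As a small worked illustration (the values are chosen arbitrarily for this sketch), the
// Kronecker product replaces every element \f$ a_{ij} \f$ of the left-hand side matrix by the
// block \f$ a_{ij} \cdot B \f$:

\code
blaze::StaticMatrix<int,2UL,2UL> A{ { 1, 2 }, { 3, 4 } };
blaze::StaticMatrix<int,2UL,2UL> B{ { 0, 1 }, { 1, 0 } };

// Results in the 4x4 matrix
//
//    ( 0 1 0 2 )
//    ( 1 0 2 0 )
//    ( 0 3 0 4 )
//    ( 3 0 4 0 )
//
blaze::StaticMatrix<int,4UL,4UL> C = kron( A, B );
\endcode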
// // \n Previous: \ref matrix_vector_multiplication &nbsp; &nbsp; Next: \ref bitwise_operations */ //************************************************************************************************* //**Bitwise Operations***************************************************************************** /*!\page bitwise_operations Bitwise Operations // // \tableofcontents // // // \b Blaze provides the following bitwise operations for vectors and matrices: // // <ul> // <li> \ref bitwise_shift // <ul> // <li> \ref vector_vector_shift </li> // <li> \ref matrix_matrix_shift </li> // <li> \ref scalar_shift </li> // </ul> // </li> // <li> \ref bitwise_and // <ul> // <li> \ref vector_vector_bitand </li> // <li> \ref matrix_matrix_bitand </li> // <li> \ref scalar_bitand </li> // </ul> // </li> // <li> \ref bitwise_or // <ul> // <li> \ref vector_vector_bitor </li> // <li> \ref matrix_matrix_bitor </li> // <li> \ref scalar_bitor </li> // </ul> // </li> // <li> \ref bitwise_xor // <ul> // <li> \ref vector_vector_bitxor </li> // <li> \ref matrix_matrix_bitxor </li> // <li> \ref scalar_bitxor </li> // </ul> // </li> // </ul> // // \n Previous: \ref matrix_matrix_multiplication &nbsp; &nbsp; Next: \ref bitwise_shift */ //************************************************************************************************* //**Bitwise Shift********************************************************************************** /*!\page bitwise_shift Bitwise Shift // // \n \section vector_vector_shift Vector/Vector Shift // <hr> // // Via the left-shift operator (i.e. operator<<()) and the right-shift operator (i.e. operator>>()) // it is possible to perform an elementwise shift of a dense vector: \code blaze::DynamicVector<unsigned int> v1( 5UL ), v3; blaze::DynamicVector<unsigned short> v2( 5UL ); // ... Initializing the vectors v3 = v1 << v2; // Elementwise left-shift of a dense column vector v3 = v1 >> v2; // Elementwise right-shift of a dense column vector \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that it is only possible to shift vectors with // the same transpose flag: \code using blaze::columnVector; using blaze::rowVector; blaze::DynamicVector<unsigned int,columnVector> v1( 5UL ); blaze::DynamicVector<unsigned int,rowVector> v2( 5UL ); v1 << v2; // Compilation error: Cannot shift a column vector by a row vector v1 << trans( v2 ); // OK: Shifting a column vector by another column vector \endcode // Furthermore, it is possible to use different element types in the two vector operands, but // shifting two vectors with the same element type is favorable due to possible vectorization // of the operation: \code blaze::DynamicVector<unsigned int> v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 << v2; // Vectorized left-shift of an unsigned int vector \endcode // \n \section matrix_matrix_shift Matrix/Matrix Shift // <hr> // // The left-shift operator (i.e. operator<<()) and the right-shift operator (i.e. operator>>()) // can also be used to perform an elementwise shift of a dense matrix: \code using blaze::rowMajor; using blaze::columnMajor; blaze::DynamicMatrix<unsigned int,columnMajor> M1( 7UL, 3UL ); blaze::DynamicMatrix<unsigned short,rowMajor> M2( 7UL, 3UL ), M3; // ... 
Initializing the matrices M3 = M1 << M2; // Elementwise left-shift of a dense column-major matrix M3 = M1 >> M2; // Elementwise right-shift of a dense column-major matrix \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. It is possible to use any combination of row-major and // column-major matrices. Note however that in favor of performance using two matrices with the // same storage order is favorable. The same argument holds for the element type: While it is // possible to use matrices with different element type, using two matrices with the same element // type potentially leads to better performance due to vectorization of the operation. \code blaze::DynamicMatrix<unsigned int> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... Initialization of the matrices M3 = M1 << M2; // Vectorized left-shift of an unsigned int matrix \endcode // \n \section scalar_shift Scalar Shift // <hr> // // It is also possible to uniformly shift all elements of a dense vector or dense matrix by means // of a scalar, which has the same effect as shifting by means of a uniform vector or matrix (see // \ref vector_types_uniform_vector and \ref matrix_types_uniform_matrix). In \b Blaze it is // possible to use all built-in/fundamental data types except bool as scalar values. Examples: \code blaze::DynamicVector<unsigned int> v1{ 3, 2, 5, 4, 1, 6 }; // Uniform left-shift by one bit of all elements of v1; Results in // // ( 6, 4, 10, 8, 2, 12 ) // blaze::DynamicVector<int> v2( v1 << 1U ); \endcode \code blaze::DynamicMatrix<unsigned int> M1{ { 3, 2, 5 }, { 4, 1, 6 } }; // Uniform left-shift by one bit of all elements of M1; Results in // // ( 6, 4, 10 ) // ( 8, 2, 12 ) // blaze::DynamicMatrix<unsigned int> M2( M1 << 1U ); \endcode // \n Previous: \ref bitwise_operations &nbsp; &nbsp; Next: \ref bitwise_and */ //************************************************************************************************* //**Bitwise AND************************************************************************************ /*!\page bitwise_and Bitwise AND // // \n \section vector_vector_bitand Vector/Vector Bitwise AND // <hr> // // Via the bitwise AND operator (i.e. operator&()) it is possible to perform an elementwise // bitwise AND with dense vectors: \code blaze::DynamicVector<unsigned int> v1( 5UL ), v3; blaze::DynamicVector<unsigned short> v2( 5UL ); // ... Initializing the vectors v3 = v1 & v2; // Elementwise bitwise AND of two dense column vectors of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that it is only possible to use vectors with // the same transpose flag: \code using blaze::columnVector; using blaze::rowVector; blaze::DynamicVector<unsigned int,columnVector> v1( 5UL ); blaze::DynamicVector<unsigned int,rowVector> v2( 5UL ); v1 & v2; // Compilation error: Cannot AND a column vector and a row vector v1 & trans( v2 ); // OK: Bitwise AND of two column vectors \endcode // Furthermore, it is possible to use different element types in the two vector operands, but a // bitwise AND of two vectors with the same element type is favorable due to possible vectorization // of the operation: \code blaze::DynamicVector<unsigned int> v1( 100UL ), v2( 100UL ), v3; // ... 
Initialization of the vectors v3 = v1 & v2; // Vectorized bitwise AND of an unsigned int vector \endcode // \n \section matrix_matrix_bitand Matrix/Matrix Bitwise AND // <hr> // // The bitwise AND operator (i.e. operator&()) can also be used to perform an elementwise bitwise // AND with dense matrices: \code using blaze::rowMajor; using blaze::columnMajor; blaze::DynamicMatrix<unsigned int,columnMajor> M1( 7UL, 3UL ); blaze::DynamicMatrix<unsigned short,rowMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 & M2; // Elementwise bitwise AND of two dense matrices of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. It is possible to use any combination of row-major and // column-major matrices. Note however that in favor of performance using two matrices with the // same storage order is favorable. The same argument holds for the element type: While it is // possible to use matrices with different element type, using two matrices with the same element // type potentially leads to better performance due to vectorization of the operation. \code blaze::DynamicMatrix<unsigned int> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... Initialization of the matrices M3 = M1 & M2; // Vectorized bitwise AND of two row-major, unsigned int dense matrices \endcode // \n \section scalar_bitand Scalar Bitwise AND // <hr> // // Is is also possible to perform a bitwise AND between a dense vector or dense matrix and a // scalar value, which has the same effect as performing a bitwise AND by means of a uniform // vector or matrix (see \ref vector_types_uniform_vector and \ref matrix_types_uniform_matrix). // In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar // values. Examples: \code blaze::DynamicVector<unsigned int> v1{ 3U, 2U, 5U, 4U, 1U, 6U }; // Perform a bitwise AND with all elements of v1; Results in // // ( 3, 2, 1, 0, 1, 2 ) // blaze::DynamicVector<int> v2( v1 & 3U ); \endcode \code blaze::DynamicMatrix<unsigned int> M1{ { 3U, 2U, 5U }, { 4U, 1U, 6U } }; // Perform a bitwise AND with all elements of M1; Results in // // ( 3, 2, 1 ) // ( 0, 1, 2 ) // blaze::DynamicMatrix<unsigned int> M2( M1 & 3U ); \endcode // \n Previous: \ref bitwise_shift &nbsp; &nbsp; Next: \ref bitwise_or */ //************************************************************************************************* //**Bitwise OR************************************************************************************* /*!\page bitwise_or Bitwise OR // // \n \section vector_vector_bitor Vector/Vector Bitwise OR // <hr> // // Via the bitwise OR operator (i.e. operator|()) it is possible to perform an elementwise // bitwise OR with dense vectors: \code blaze::DynamicVector<unsigned int> v1( 5UL ), v3; blaze::DynamicVector<unsigned short> v2( 5UL ); // ... Initializing the vectors v3 = v1 | v2; // Elementwise bitwise OR of two dense column vectors of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. 
Also note that it is only possible to use vectors with
// the same transpose flag:

\code
using blaze::columnVector;
using blaze::rowVector;

blaze::DynamicVector<unsigned int,columnVector> v1( 5UL );
blaze::DynamicVector<unsigned int,rowVector>    v2( 5UL );

v1 | v2;           // Compilation error: Cannot OR a column vector and a row vector
v1 | trans( v2 );  // OK: Bitwise OR of two column vectors
\endcode

// Furthermore, it is possible to use different element types in the two vector operands, but a
// bitwise OR of two vectors with the same element type is favorable due to possible vectorization
// of the operation:

\code
blaze::DynamicVector<unsigned int> v1( 100UL ), v2( 100UL ), v3;

// ... Initialization of the vectors

v3 = v1 | v2;  // Vectorized bitwise OR of an unsigned int vector
\endcode

// \n \section matrix_matrix_bitor Matrix/Matrix Bitwise OR
// <hr>
//
// The bitwise OR operator (i.e. operator|()) can also be used to perform an elementwise bitwise
// OR with dense matrices:

\code
using blaze::rowMajor;
using blaze::columnMajor;

blaze::DynamicMatrix<unsigned int,columnMajor> M1( 7UL, 3UL );
blaze::DynamicMatrix<unsigned short,rowMajor>  M2( 7UL, 3UL ), M3;

// ... Initializing the matrices

M3 = M1 | M2;  // Elementwise bitwise OR of two dense matrices of different data type
\endcode

// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. It is possible to use any combination of row-major and
// column-major matrices. Note however that in favor of performance using two matrices with the
// same storage order is favorable. The same argument holds for the element type: While it is
// possible to use matrices with different element type, using two matrices with the same element
// type potentially leads to better performance due to vectorization of the operation.

\code
blaze::DynamicMatrix<unsigned int> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;

// ... Initialization of the matrices

M3 = M1 | M2;  // Vectorized bitwise OR of two row-major, unsigned int dense matrices
\endcode

// \n \section scalar_bitor Scalar Bitwise OR
// <hr>
//
// It is also possible to perform a bitwise OR between a dense vector or dense matrix and a
// scalar value, which has the same effect as performing a bitwise OR by means of a uniform
// vector or matrix (see \ref vector_types_uniform_vector and \ref matrix_types_uniform_matrix).
// In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar
// values. Examples:

\code
blaze::DynamicVector<unsigned int> v1{ 3U, 2U, 5U, 4U, 1U, 6U };

// Perform a bitwise OR with all elements of v1; Results in
//
//    ( 3, 3, 7, 7, 3, 7 )
//
blaze::DynamicVector<int> v2( v1 | 3U );
\endcode

\code
blaze::DynamicMatrix<unsigned int> M1{ { 3U, 2U, 5U }, { 4U, 1U, 6U } };

// Perform a bitwise OR with all elements of M1; Results in
//
//    ( 3, 3, 7 )
//    ( 7, 3, 7 )
//
blaze::DynamicMatrix<unsigned int> M2( M1 | 3U );
\endcode

// \n Previous: \ref bitwise_and &nbsp; &nbsp; Next: \ref bitwise_xor
*/
//*************************************************************************************************


//**Bitwise XOR************************************************************************************
/*!\page bitwise_xor Bitwise XOR
//
// \n \section vector_vector_bitxor Vector/Vector Bitwise XOR
// <hr>
//
// Via the bitwise XOR operator (i.e.
operator^()) it is possible to perform an elementwise // bitwise XOR with dense vectors: \code blaze::DynamicVector<unsigned int> v1( 5UL ), v3; blaze::DynamicVector<unsigned short> v2( 5UL ); // ... Initializing the vectors v3 = v1 ^ v2; // Elementwise bitwise XOR of two dense column vectors of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that it is only possible to use vectors with // the same transpose flag: \code using blaze::columnVector; using blaze::rowVector; blaze::DynamicVector<unsigned int,columnVector> v1( 5UL ); blaze::DynamicVector<unsigned int,rowVector> v2( 5UL ); v1 ^ v2; // Compilation error: Cannot XOR a column vector and a row vector v1 ^ trans( v2 ); // OK: Bitwise XOR of two column vectors \endcode // Furthermore, it is possible to use different element types in the two vector operands, but a // bitwise XOR of two vectors with the same element type is favorable due to possible vectorization // of the operation: \code blaze::DynamicVector<unsigned int> v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 ^ v2; // Vectorized bitwise XOR of an unsigned int vector \endcode // \n \section matrix_matrix_bitxor Matrix/Matrix Bitwise XOR // <hr> // // The bitwise XOR operator (i.e. operator^()) can also be used to perform an elementwise bitwise // XOR with dense matrices: \code using blaze::rowMajor; using blaze::columnMajor; blaze::DynamicMatrix<unsigned int,columnMajor> M1( 7UL, 3UL ); blaze::DynamicMatrix<unsigned short,rowMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 ^ M2; // Elementwise bitwise XOR of two dense matrices of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. It is possible to use any combination of row-major and // column-major matrices. Note however that in favor of performance using two matrices with the // same storage order is favorable. The same argument holds for the element type: While it is // possible to use matrices with different element type, using two matrices with the same element // type potentially leads to better performance due to vectorization of the operation. \code blaze::DynamicMatrix<unsigned int> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... Initialization of the matrices M3 = M1 ^ M2; // Vectorized bitwise XOR of two row-major, unsigned int dense matrices \endcode // \n \section scalar_bitxor Scalar Bitwise XOR // <hr> // // Is is also possible to perform a bitwise XOR between a dense vector or dense matrix and a // scalar value, which has the same effect as performing a bitwise XOR by means of a uniform // vector or matrix (see \ref vector_types_uniform_vector and \ref matrix_types_uniform_matrix). // In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar // values. 
Examples: \code blaze::DynamicVector<unsigned int> v1{ 3U, 2U, 5U, 4U, 1U, 6U }; // Perform a bitwise XOR with all elements of v1; Results in // // ( 0, 1, 6, 7, 2, 5 ) // blaze::DynamicVector<int> v2( v1 ^ 3U ); \endcode \code blaze::DynamicMatrix<unsigned int> M1{ { 3U, 2U, 5U }, { 4U, 1U, 6U } }; // Perform a bitwise XOR with all elements of M1; Results in // // ( 0, 1, 6 ) // ( 7, 2, 5 ) // blaze::DynamicMatrix<unsigned int> M2( M1 ^ 3U ); \endcode // \n Previous: \ref bitwise_or &nbsp; &nbsp; Next: \ref logical_operations */ //************************************************************************************************* //**Logical Operations***************************************************************************** /*!\page logical_operations Logical Operations // // \tableofcontents // // // \b Blaze provides the following logical operations for vectors and matrices: // // <ul> // <li> \ref logical_not // <ul> // <li> \ref vector_vector_not </li> // <li> \ref matrix_matrix_not </li> // </ul> // </li> // <li> \ref logical_and // <ul> // <li> \ref vector_vector_and </li> // <li> \ref matrix_matrix_and </li> // </ul> // </li> // <li> \ref logical_or // <ul> // <li> \ref vector_vector_or </li> // <li> \ref matrix_matrix_or </li> // </ul> // </li> // </ul> // // \n Previous: \ref bitwise_xor &nbsp; &nbsp; Next: \ref logical_not */ //************************************************************************************************* //**Logical NOT************************************************************************************ /*!\page logical_not Logical NOT // // \n \section vector_vector_not Vector/Vector Logical NOT // <hr> // // Via the logical NOT operator (i.e. operator!()) it is possible to compute an elementwise // logical NOT of a dense vector: \code blaze::DynamicVector<bool> v1( 5UL ), v2; // ... Initializing the vectors v2 = !v1; // Elementwise logical NOT of a dense column vector \endcode // \n \section matrix_matrix_not Matrix/Matrix Logical NOT // <hr> // // The logical NOT operator (i.e. operator!()) can also be used to compute an elementwise logical // NOT with dense matrices: \code using blaze::rowMajor; using blaze::columnMajor; blaze::DynamicMatrix<bool,rowMajor> M1( 7UL, 3UL ), M2; // ... Initializing the matrices M2 = !M1; // Elementwise logical NOT of a dense row-major matrix \endcode // \n Previous: \ref logical_operations &nbsp; &nbsp; Next: \ref logical_and */ //************************************************************************************************* //**Logical AND************************************************************************************ /*!\page logical_and Logical AND // // \n \section vector_vector_and Vector/Vector Logical AND // <hr> // // Via the logical AND operator (i.e. operator&&()) it is possible to compute an elementwise // logical AND with dense vectors: \code blaze::DynamicVector<bool> v1( 5UL ), v3; blaze::DynamicVector<bool> v2( 5UL ); // ... Initializing the vectors v3 = v1 && v2; // Elementwise logical AND of two dense column vectors \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. 
Also note that it is only possible to use vectors with // the same transpose flag: \code using blaze::columnVector; using blaze::rowVector; blaze::DynamicVector<bool,columnVector> v1( 5UL ); blaze::DynamicVector<bool,rowVector> v2( 5UL ); v1 && v2; // Compilation error: Cannot AND a column vector and a row vector v1 && trans( v2 ); // OK: Logical AND of two column vectors \endcode // \n \section matrix_matrix_and Matrix/Matrix Logical AND // <hr> // // The logical AND operator (i.e. operator&&()) can also be used to compute an elementwise logical // AND with dense matrices: \code using blaze::rowMajor; using blaze::columnMajor; blaze::DynamicMatrix<bool,columnMajor> M1( 7UL, 3UL ); blaze::DynamicMatrix<bool,rowMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 && M2; // Elementwise logical AND of two dense matrices \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. It is possible to use any combination of row-major and // column-major matrices. Note however that in favor of performance using two matrices with the // same storage order is favorable. // // \n Previous: \ref logical_not &nbsp; &nbsp; Next: \ref logical_or */ //************************************************************************************************* //**Logical OR************************************************************************************* /*!\page logical_or Logical OR // // \n \section vector_vector_or Vector/Vector Logical OR // <hr> // // Via the logical OR operator (i.e. operator||()) it is possible to perform an elementwise // logical OR with dense vectors: \code blaze::DynamicVector<bool> v1( 5UL ), v3; blaze::DynamicVector<bool> v2( 5UL ); // ... Initializing the vectors v3 = v1 || v2; // Elementwise logical OR of two dense column vectors \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that it is only possible to use vectors with // the same transpose flag: \code using blaze::columnVector; using blaze::rowVector; blaze::DynamicVector<unsigned int,columnVector> v1( 5UL ); blaze::DynamicVector<unsigned int,rowVector> v2( 5UL ); v1 || v2; // Compilation error: Cannot OR a column vector and a row vector v1 || trans( v2 ); // OK: Logical OR of two column vectors \endcode // \n \section matrix_matrix_or Matrix/Matrix Logical OR // <hr> // // The logical OR operator (i.e. operator||()) can also be used to perform an elementwise logical // OR with dense matrices: \code using blaze::rowMajor; using blaze::columnMajor; blaze::DynamicMatrix<bool,columnMajor> M1( 7UL, 3UL ); blaze::DynamicMatrix<bool,rowMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 || M2; // Elementwise logical OR of two dense matrices \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. It is possible to use any combination of row-major and // column-major matrices. Note however that in favor of performance using two matrices with the // same storage order is favorable. 
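//
// By way of a small worked recap of the logical operations (the values below are chosen
// arbitrarily for this sketch), the elementwise logical operators combine boolean vectors as
// follows:

\code
blaze::DynamicVector<bool> v1{ true, false, true, false };
blaze::DynamicVector<bool> v2{ true, true, false, false };

blaze::DynamicVector<bool> v3( v1 && v2 );  // Results in ( true, false, false, false )
blaze::DynamicVector<bool> v4( v1 || v2 );  // Results in ( true, true, true, false )
blaze::DynamicVector<bool> v5( !v1 );       // Results in ( false, true, false, true )
\endcode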
// // \n Previous: \ref logical_and &nbsp; &nbsp; Next: \ref shared_memory_parallelization */ //************************************************************************************************* //**Shared Memory Parallelization****************************************************************** /*!\page shared_memory_parallelization Shared Memory Parallelization // // For all possible operations \b Blaze tries to achieve maximum performance on a single CPU // core. However, today's CPUs are not single core anymore, but provide several (homogeneous // or heterogeneous) compute cores. In order to fully exploit the performance potential of a // multicore CPU, computations have to be parallelized across all available cores of a CPU. // For this purpose, \b Blaze provides four different shared memory parallelization techniques: // // - \ref hpx_parallelization // - \ref cpp_threads_parallelization // - \ref boost_threads_parallelization // - \ref openmp_parallelization // // When any of the shared memory parallelization techniques is activated, all arithmetic // operations on dense vectors and matrices (including additions, subtractions, multiplications, // divisions, and all componentwise arithmetic operations) and most operations on sparse vectors // and matrices are automatically run in parallel. However, in addition, \b Blaze provides means // to enforce the serial execution of specific operations: // // - \ref serial_execution // // \n Previous: \ref logical_or &nbsp; &nbsp; Next: \ref hpx_parallelization */ //************************************************************************************************* //**HPX Parallelization**************************************************************************** /*!\page hpx_parallelization HPX Parallelization // // \tableofcontents // // // The first shared memory parallelization provided with \b Blaze is based on // <a href="http://stellar.cct.lsu.edu/projects/hpx/">HPX</a>. // // // \n \section hpx_setup HPX Setup // <hr> // // In order to enable the HPX-based parallelization, the following steps have to be taken: First, // the \c BLAZE_USE_HPX_THREADS command line argument has to be explicitly specified during // compilation: \code ... -DBLAZE_USE_HPX_THREADS ... \endcode // Second, the HPX library and depending libraries such as Boost, hwloc, etc. have to be linked. // And third, the HPX threads have to be initialized by a call to the \c hpx::init() function (see // the <a href="http://stellar.cct.lsu.edu/files/hpx_0.9.0/docs/hpx/tutorial.html">HPX tutorial</a> // for further details). These three actions will cause the \b Blaze library to automatically try // to run all operations in parallel with the specified number of HPX threads. // // Note that the HPX-based parallelization has priority over the OpenMP-based, C++11 thread-based, // and Boost thread-based parallelizations, i.e. is preferred in case multiple parallelizations // are enabled in combination with the HPX thread parallelization. // // The number of threads used by the HPX backend has to be specified via the command line: \code ... --hpx:threads 4 ... \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. 
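//
// For orientation, a rough sketch of a Blaze program running on the HPX runtime is given below.
// The bootstrap via \c hpx_main(), \c hpx::init() and \c hpx::finalize() is part of HPX, not of
// \b Blaze, and its exact form differs between HPX releases; please refer to the HPX documentation
// for the variant matching your installation:

\code
// Sketch only: compiled with -DBLAZE_USE_HPX_THREADS and linked against HPX and its dependencies
#include <blaze/Blaze.h>
#include <hpx/hpx_init.hpp>

int hpx_main()
{
   blaze::DynamicMatrix<double> A( 2000UL, 2000UL, 1.0 ), B( 2000UL, 2000UL, 2.0 ), C;
   C = A * B;  // Sufficiently large operations are automatically run on the available HPX threads
   return hpx::finalize();
}

int main( int argc, char* argv[] )
{
   return hpx::init( argc, argv );  // Thread count taken from the "--hpx:threads" command line option
}
\endcode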
// // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of HPX threads, the function will return the actual number of threads used by // the HPX subsystem. // // // \n \section hpx_configuration HPX Configuration // <hr> // // As in case of the other shared memory parallelizations \b Blaze is not unconditionally running // an operation in parallel (see for instance \ref openmp_parallelization). Only in case a given // operation is large enough and exceeds a certain threshold the operation is executed in parallel. // All thresholds related to the HPX-based parallelization are contained within the configuration // file <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitiv to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. They merely provide a // reasonable standard for the current CPU generation. Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaption for // the HPX-based parallelization. // // \n Previous: \ref shared_memory_parallelization &nbsp; &nbsp; Next: \ref cpp_threads_parallelization */ //************************************************************************************************* //**C++11 Thread Parallelization******************************************************************* /*!\page cpp_threads_parallelization C++11 Thread Parallelization // // \tableofcontents // // // In addition to the HPX-based shared memory parallelization, starting with \b Blaze 2.1, // \b Blaze also provides a shared memory parallelization based on C++11 threads. // // // \n \section cpp_threads_setup C++11 Thread Setup // <hr> // // In order to enable the C++11 thread-based parallelization, first the according C++11-specific // compiler flags have to be used and second the \c BLAZE_USE_CPP_THREADS command line argument // has to be explicitly specified. For instance, in case of the GNU C++ and Clang compilers the // compiler flags have to be extended by \code ... -std=c++11 -DBLAZE_USE_CPP_THREADS ... \endcode // This simple action will cause the \b Blaze library to automatically try to run all operations // in parallel with the specified number of C++11 threads. Note that in case both HPX and C++11 // threads are enabled on the command line, the HPX-based parallelization has priority and is // preferred. // // The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 // Windows systems \endcode // or alternatively via the \c setNumThreads() function provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. 
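//
// Putting these pieces together, a minimal sketch of a program using the C++11 thread backend
// could look as follows (the file name, compiler flags, and problem size are merely illustrative):

\code
// Compiled e.g. via:  g++ -std=c++11 -DBLAZE_USE_CPP_THREADS -O3 -o example example.cpp
#include <blaze/Blaze.h>

int main()
{
   blaze::setNumThreads( 4 );  // Alternative to setting the BLAZE_NUM_THREADS environment variable

   blaze::DynamicVector<double> a( 10000000UL, 1.0 ), b( 10000000UL, 2.0 ), c;
   c = a + b;  // Executed by up to 4 C++11 threads, provided the operation exceeds the configured threshold
}
\endcode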
// // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of C++11 threads, the function will return the previously specified number of // threads. // // // \n \section cpp_threads_configuration C++11 Thread Configuration // <hr> // // As in case of the OpenMP-based parallelization \b Blaze is not unconditionally running an // operation in parallel. In case \b Blaze deems the parallel execution as counterproductive for // the overall performance, the operation is executed serially. One of the main reasons for not // executing an operation in parallel is the size of the operands. For instance, a vector addition // is only executed in parallel if the size of both vector operands exceeds a certain threshold. // Otherwise, the performance could seriously decrease due to the overhead caused by the thread // setup. However, in order to be able to adjust the \b Blaze library to a specific system, it // is possible to configure these thresholds manually. All thresholds are contained within the // configuration file <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitiv to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. They merely provide a // reasonable standard for the current CPU generation. Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaption for // the C++11 thread parallelization. // // // \n \section cpp_threads_known_issues Known Issues // <hr> // // There is a known issue in Visual Studio 2012 and 2013 that may cause C++11 threads to hang // if their destructor is executed after the \c main() function: // // http://connect.microsoft.com/VisualStudio/feedback/details/747145 // // Unfortunately, the C++11 parallelization of the \b Blaze library is affected from this bug. // In order to circumvent this problem, \b Blaze provides the \c shutDownThreads() function, // which can be used to manually destroy all threads at the end of the \c main() function: \code int main() { // ... Using the C++11 thread parallelization of Blaze shutDownThreads(); } \endcode // Please note that this function may only be used at the end of the \c main() function. After // this function no further computation may be executed! Also note that this function has an // effect for Visual Studio compilers only and doesn't need to be used with any other compiler. // // \n Previous: \ref hpx_parallelization &nbsp; &nbsp; Next: \ref boost_threads_parallelization */ //************************************************************************************************* //**Boost Thread Parallelization******************************************************************* /*!\page boost_threads_parallelization Boost Thread Parallelization // // \tableofcontents // // // The third available shared memory parallelization provided with \b Blaze is based // on <a href="https://www.boost.org/doc/libs/1_68_0/doc/html/thread.html">Boost threads</a>. 
// // // \n \section boost_threads_setup Boost Thread Setup // <hr> // // In order to enable the Boost thread-based parallelization, two steps have to be taken: First, // the \c BLAZE_USE_BOOST_THREADS command line argument has to be explicitly specified during // compilation: \code ... -DBLAZE_USE_BOOST_THREADS ... \endcode // Second, the according Boost libraries have to be linked. These two simple actions will cause // the \b Blaze library to automatically try to run all operations in parallel with the specified // number of Boost threads. Note that the HPX-based and C++11 thread-based parallelizations have // priority, i.e. are preferred in case either is enabled in combination with the Boost thread // parallelization. // // The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 // Windows systems \endcode // or alternatively via the \c setNumThreads() function provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of Boost threads, the function will return the previously specified number of // threads. // // // \n \section boost_threads_configuration Boost Thread Configuration // <hr> // // As in case of the other shared memory parallelizations \b Blaze is not unconditionally running // an operation in parallel (see \ref openmp_parallelization or \ref cpp_threads_parallelization). // All thresholds related to the Boost thread parallelization are also contained within the // configuration file <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitiv to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. They merely provide a // reasonable standard for the current CPU generation. Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaption for // the Boost thread parallelization. // // \n Previous: \ref cpp_threads_parallelization &nbsp; &nbsp; Next: \ref openmp_parallelization */ //************************************************************************************************* //**OpenMP Parallelization************************************************************************* /*!\page openmp_parallelization OpenMP Parallelization // // \tableofcontents // // // The fourth and final shared memory parallelization provided with \b Blaze is based on // <a href="https://www.openmp.org">OpenMP</a>. 
// // // \n \section openmp_setup OpenMP Setup // <hr> // // To enable the OpenMP-based parallelization, all that needs to be done is to explicitly specify // the use of OpenMP on the command line: \code -fopenmp // GNU/Clang C++ compiler -openmp // Intel C++ compiler /openmp // Visual Studio \endcode // This simple action will cause the \b Blaze library to automatically try to run all operations // in parallel with the specified number of threads. Note however that the HPX-based, the C++11 // thread-based, and the Boost thread-based parallelizations have priority, i.e. are preferred in // case any of them is enabled in combination with the OpenMP thread parallelization. // // As common for OpenMP, the number of threads can be specified either via an environment variable \code export OMP_NUM_THREADS=4 // Unix systems set OMP_NUM_THREADS=4 // Windows systems \endcode // or via an explicit call to the \c omp_set_num_threads() function: \code omp_set_num_threads( 4 ); \endcode // Alternatively, the number of threads can also be specified via the \c setNumThreads() function // provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of OpenMP, the function returns the maximum number of threads OpenMP will use // within a parallel region and is therefore equivalent to the \c omp_get_max_threads() function. // // // \n \section openmp_configuration OpenMP Configuration // <hr> // // Note that \b Blaze is not unconditionally running an operation in parallel. In case \b Blaze // deems the parallel execution as counterproductive for the overall performance, the operation // is executed serially. One of the main reasons for not executing an operation in parallel is // the size of the operands. For instance, a vector addition is only executed in parallel if the // size of both vector operands exceeds a certain threshold. Otherwise, the performance could // seriously decrease due to the overhead caused by the thread setup. However, in order to be // able to adjust the \b Blaze library to a specific system, it is possible to configure these // thresholds manually. All shared memory thresholds are contained within the configuration file // <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique (see also \ref cpp_threads_parallelization and // \ref boost_threads_parallelization). Therefore the default values cannot guarantee maximum // performance for all possible situations and configurations. They merely provide a reasonable // standard for the current CPU generation. // // // \n \section openmp_first_touch First Touch Policy // <hr> // // So far the \b Blaze library does not (yet) automatically initialize dynamic memory according // to the first touch principle.
Consider for instance the following vector triad example: \code using blaze::columnVector; const size_t N( 1000000UL ); blaze::DynamicVector<double,columnVector> a( N ), b( N ), c( N ), d( N ); // Initialization of the vectors b, c, and d for( size_t i=0UL; i<N; ++i ) { b[i] = rand<double>(); c[i] = rand<double>(); d[i] = rand<double>(); } // Performing a vector triad a = b + c * d; \endcode // If this code, which is prototypical for many OpenMP applications that have not been optimized // for ccNUMA architectures, is run across several locality domains (LD), it will not scale // beyond the maximum performance achievable on a single LD if the working set does not fit into // the cache. This is because the initialization loop is executed by a single thread, writing to // \c b, \c c, and \c d for the first time. Hence, all memory pages belonging to those arrays will // be mapped into a single LD. // // As mentioned above, this problem can be solved by performing vector initialization in parallel: \code // ... // Initialization of the vectors b, c, and d #pragma omp parallel for for( size_t i=0UL; i<N; ++i ) { b[i] = rand<double>(); c[i] = rand<double>(); d[i] = rand<double>(); } // ... \endcode // This simple modification makes a huge difference on ccNUMA in memory-bound situations (as for // instance in all BLAS level 1 operations and partially BLAS level 2 operations). Therefore, in // order to achieve the maximum possible performance, it is imperative to initialize the memory // according to the later use of the data structures. // // // \n \section openmp_limitations Limitations of the OpenMP Parallelization // <hr> // // There are a few important limitations to the current \b Blaze OpenMP parallelization. The first // one involves the explicit use of an OpenMP parallel region (see \ref openmp_parallel), the // other one the OpenMP \c sections directive (see \ref openmp_sections). // // // \n \subsection openmp_parallel The Parallel Directive // // In OpenMP threads are explicitly spawned via an OpenMP parallel directive: \code // Serial region, executed by a single thread #pragma omp parallel { // Parallel region, executed by the specified number of threads } // Serial region, executed by a single thread \endcode // Conceptually, the specified number of threads (see \ref openmp_setup) is created every time a // parallel directive is encountered. Therefore, from a performance point of view, it seems to be // beneficial to use a single OpenMP parallel directive for several operations: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; #pragma omp parallel { y1 = A * x; y2 = B * x; } \endcode // Unfortunately, this optimization approach is not allowed within the \b Blaze library. More // explicitly, it is not allowed to put an operation into a parallel region. The reason is that // the entire code contained within a parallel region is executed by all threads. Although this // appears to just comprise the contained computations, a computation (or more specifically the // assignment of an expression to a vector or matrix) can contain additional logic that must not // be handled by multiple threads (as for instance memory allocations, setup of temporaries, etc.). // Therefore it is not possible to manually start a parallel region for several operations, but // \b Blaze will spawn threads automatically, depending on the specifics of the operation at hand // and the given operands.
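//
// As a minimal sketch of the intended usage (reusing the vectors and matrices from the example
// above; not a verbatim excerpt from the library), the operations are simply written one after
// another in the serial region, and \b Blaze manages the threads for each assignment internally:

   \code
   blaze::DynamicVector<double> x, y1, y2;
   blaze::DynamicMatrix<double> A, B;

   // ... Resizing and initialization

   // No explicit parallel region: each assignment is parallelized internally by Blaze
   y1 = A * x;
   y2 = B * x;
   \endcode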
// // \n \subsection openmp_sections The Sections Directive // // OpenMP provides several work-sharing constructs to distribute work among threads. One of these // constructs is the \c sections directive: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization #pragma omp sections { #pragma omp section y1 = A * x; #pragma omp section y2 = B * x; } \endcode // In this example, two threads are used to compute two distinct matrix/vector multiplications // concurrently. Thereby each of the \c sections is executed by exactly one thread. // // Unfortunately \b Blaze does not support concurrent parallel computations and therefore this // approach does not work with any of the \b Blaze parallelization techniques. All techniques // (including the C++11 and Boost thread parallelizations; see \ref cpp_threads_parallelization // and \ref boost_threads_parallelization) are optimized for the parallel computation of an // operation within a single thread of execution. This means that \b Blaze tries to use all // available threads to compute the result of a single operation as efficiently as possible. // Therefore, for this special case, it is advisable to disable all \b Blaze parallelizations // and to let \b Blaze compute all operations within a \c sections directive in serial. This can // be done by either completely disabling the \b Blaze parallelization (see \ref serial_execution) // or by selectively serializing all operations within a \c sections directive via the \c serial() // function: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization #pragma omp sections { #pragma omp section y1 = serial( A * x ); #pragma omp section y2 = serial( B * x ); } \endcode // Please note that the use of the \c BLAZE_SERIAL_SECTION (see also \ref serial_execution) does // NOT work in this context! // // \n Previous: \ref boost_threads_parallelization &nbsp; &nbsp; Next: \ref serial_execution */ //************************************************************************************************* //**Serial Execution******************************************************************************* /*!\page serial_execution Serial Execution // // Sometimes it may be necessary to enforce the serial execution of specific operations. For this // purpose, the \b Blaze library offers three possible options: the serialization of a single // expression via the \c serial() function, the serialization of a block of expressions via the // \c BLAZE_SERIAL_SECTION, and the general deactivation of the parallel execution. // // // \n \section serial_execution_serial_expression Option 1: Serialization of a Single Expression // <hr> // // The first option is the serialization of a specific operation via the \c serial() function: \code blaze::DynamicMatrix<double> A, B, C; // ... Resizing and initialization C = serial( A + B ); \endcode // \c serial() enforces the serial evaluation of the enclosed expression. It can be used on any // kind of dense or sparse vector or matrix expression. // // // \n \section serial_execution_serial_section Option 2: Serialization of Multiple Expressions // <hr> // // The second option is the temporary and local enforcement of a serial execution via the // \c BLAZE_SERIAL_SECTION: \code using blaze::rowMajor; using blaze::columnVector; blaze::DynamicMatrix<double,rowMajor> A; blaze::DynamicVector<double,columnVector> b, c, d, x, y, z; // ...
Resizing and initialization // Parallel execution // If possible and beneficial for performance the following operation is executed in parallel. x = A * b; // Serial execution // All operations executed within the serial section are guaranteed to be executed in // serial (even if a parallel execution would be possible and/or beneficial). BLAZE_SERIAL_SECTION { y = A * c; z = A * d; } // Parallel execution continued // ... \endcode // Within the scope of the \c BLAZE_SERIAL_SECTION, all operations are guaranteed to run in serial. // Outside the scope of the serial section, all operations are run in parallel (if beneficial for // the performance). // // Note that the \c BLAZE_SERIAL_SECTION must only be used within a single thread of execution. // The use of the serial section within several concurrent threads will result in undefined behavior! // // // \n \section serial_execution_deactivate_parallelism Option 3: Deactivation of Parallel Execution // <hr> // // The third option is the general deactivation of the parallel execution (even in case OpenMP is // enabled on the command line). This can be achieved via the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION // switch in the <tt>./blaze/config/SMP.h</tt> configuration file: \code #define BLAZE_USE_SHARED_MEMORY_PARALLELIZATION 1 \endcode // In case the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION switch is set to 0, the shared memory // parallelization is deactivated altogether. // // \n Previous: \ref openmp_parallelization &nbsp; &nbsp; Next: \ref serialization */ //************************************************************************************************* //**Serialization********************************************************************************** /*!\page serialization Serialization // // Sometimes it is necessary to store vectors and/or matrices on disk, for instance for storing // results or for sharing specific setups with other people. The \b Blaze math serialization // module provides the according functionality to create platform independent, portable, binary // representations of vectors and matrices that can be used to store the \b Blaze data structures // without loss of precision and to reliably transfer them from one machine to another. // // The following two pages explain how to serialize vectors and matrices: // // - \ref vector_serialization // - \ref matrix_serialization // // \n Previous: \ref serial_execution &nbsp; &nbsp; Next: \ref vector_serialization */ //************************************************************************************************* //**Vector Serialization*************************************************************************** /*!\page vector_serialization Vector Serialization // // The following example demonstrates the (de-)serialization of dense and sparse vectors: \code using blaze::columnVector; using blaze::rowVector; // Serialization of both vectors { blaze::StaticVector<double,5UL,rowVector> d; blaze::CompressedVector<int,columnVector> s; // ... Resizing and initialization // Creating an archive that writes into the file "vectors.blaze" blaze::Archive<std::ofstream> archive( "vectors.blaze" ); // Serialization of both vectors into the same archive. Note that d lies before s! archive << d << s; } // Reconstitution of both vectors { blaze::DynamicVector<double,rowVector> d1; blaze::DynamicVector<int,rowVector> d2; // Creating an archive that reads from the file "vectors.blaze" blaze::Archive<std::ifstream> archive( "vectors.blaze" ); // Reconstituting the former d vector into d1.
Note that it is possible to reconstitute // the vector into a different kind of vector (StaticVector -> DynamicVector), but that // the type of elements has to be the same. archive >> d1; // Reconstituting the former s vector into d2. Note that it is even possible to reconstitute // a sparse vector as a dense vector (also the reverse is possible) and that a column vector // can be reconstituted as row vector (and vice versa). Note however that also in this case // the type of elements has to be the same! archive >> d2; } \endcode // The (de-)serialization of vectors is not restricted to vectors of built-in data type, but can // also be used for vectors with vector or matrix element type: \code // Serialization { blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec; // ... Resizing and initialization // Creating an archive that writes into the file "vector.blaze" blaze::Archive<std::ofstream> archive( "vector.blaze" ); // Serialization of the vector into the archive archive << vec; } // Deserialization { blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec; // Creating an archive that reads from the file "vector.blaze" blaze::Archive<std::ifstream> archive( "vector.blaze" ); // Reconstitution of the vector from the archive archive >> vec; } \endcode // As the examples demonstrate, the vector serialization offers an enormous flexibility. However, // several actions result in errors: // // - vectors cannot be reconstituted as matrices (and vice versa) // - the element type of the serialized and reconstituted vector must match, which means // that on the source and destination platform the general type (signed/unsigned integral // or floating point) and the size of the type must be exactly the same // - when reconstituting a \c StaticVector, its size must match the size of the serialized vector // // In case an error is encountered during (de-)serialization, a \c std::runtime_error is // thrown. // // \n Previous: \ref serialization &nbsp; &nbsp; Next: \ref matrix_serialization */ //************************************************************************************************* //**Matrix Serialization*************************************************************************** /*!\page matrix_serialization Matrix Serialization // // The serialization of matrices works in the same manner as the serialization of vectors. The // following example demonstrates the (de-)serialization of dense and sparse matrices: \code using blaze::rowMajor; using blaze::columnMajor; // Serialization of both matrices { blaze::StaticMatrix<double,3UL,5UL,rowMajor> D; blaze::CompressedMatrix<int,columnMajor> S; // ... Resizing and initialization // Creating an archive that writes into the file "matrices.blaze" blaze::Archive<std::ofstream> archive( "matrices.blaze" ); // Serialization of both matrices into the same archive. Note that D lies before S! archive << D << S; } // Reconstitution of both matrices { blaze::DynamicMatrix<double,rowMajor> D1; blaze::DynamicMatrix<int,rowMajor> D2; // Creating an archive that reads from the file "matrices.blaze" blaze::Archive<std::ifstream> archive( "matrices.blaze" ); // Reconstituting the former D matrix into D1. Note that it is possible to reconstitute // the matrix into a different kind of matrix (StaticMatrix -> DynamicMatrix), but that // the type of elements has to be the same. archive >> D1; // Reconstituting the former S matrix into D2.
Note that it is even possible to reconstitute // a sparse matrix as a dense matrix (also the reverse is possible) and that a column-major // matrix can be reconstituted as row-major matrix (and vice versa). Note however that also // in this case the type of elements has to be the same! archive >> D2; } \endcode // Note that also in case of matrices it is possible to (de-)serialize matrices with vector or // matrix elements: \code // Serialization { blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat; // ... Resizing and initialization // Creating an archive that writes into the file "matrix.blaze" blaze::Archive<std::ofstream> archive( "matrix.blaze" ); // Serialization of the matrix into the archive archive << mat; } // Deserialization { blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat; // Creating an archive that reads from the file "matrix.blaze" blaze::Archive<std::ifstream> archive( "matrix.blaze" ); // Reconstitution of the matrix from the archive archive >> mat; } \endcode // Note that just as the vector serialization, the matrix serialization is restricted by a // few important rules: // // - matrices cannot be reconstituted as vectors (and vice versa) // - the element type of the serialized and reconstituted matrix must match, which means // that on the source and destination platform the general type (signed/unsigned integral // or floating point) and the size of the type must be exactly the same // - when reconstituting a \c StaticMatrix, the number of rows and columns must match those // of the serialized matrix // // In case an error is encountered during (de-)serialization, a \c std::runtime_error is // thrown. // // \n Previous: \ref vector_serialization &nbsp; &nbsp; Next: \ref customization \n */ //************************************************************************************************* //**Customization********************************************************************************** /*!\page customization Customization // // Although \b Blaze tries to work out of the box for every possible setting, still it may be // necessary to adapt the library to specific requirements. The following three pages explain // how to customize the \b Blaze library to your own needs: // // - \ref configuration_files // - \ref vector_and_matrix_customization // - \ref error_reporting_customization // // \n Previous: \ref matrix_serialization &nbsp; &nbsp; Next: \ref configuration_files */ //************************************************************************************************* //**Configuration Files**************************************************************************** /*!\page configuration_files Configuration Files // // \tableofcontents // // // Sometimes it is necessary to adapt \b Blaze to specific requirements. For this purpose // \b Blaze provides several configuration files in the <tt>./blaze/config/</tt> subdirectory, // which provide ample opportunity to customize internal settings, behavior, and thresholds. // This chapter explains the most important of these configuration files. For a complete // overview of all customization opportunities, please go to the configuration files in the // <tt>./blaze/config/</tt> subdirectory or see the complete \b Blaze documentation.
// // // \n \section transpose_flag Default Vector Storage // <hr> // // The \b Blaze default is that all vectors are created as column vectors (if not specified // explicitly): \code blaze::StaticVector<double,3UL> x; // Creates a 3-dimensional static column vector \endcode // The header file <tt>./blaze/config/TransposeFlag.h</tt> allows the configuration of the default // vector storage (i.e. the default transpose flag) of all vectors within the \b Blaze library. // The default transpose flag is specified via the \c BLAZE_DEFAULT_TRANSPOSE_FLAG macro: \code #define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector \endcode // Alternatively the default transpose flag can be specified via command line or by defining this // symbol manually before including any \b Blaze header file: \code g++ ... -DBLAZE_DEFAULT_TRANSPOSE_FLAG=blaze::columnVector ... \endcode \code #define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector #include <blaze/Blaze.h> \endcode // Valid settings for \c BLAZE_DEFAULT_TRANSPOSE_FLAG are blaze::rowVector and blaze::columnVector. // // // \n \section storage_order Default Matrix Storage // <hr> // // Matrices are by default created as row-major matrices: \code blaze::StaticMatrix<double,3UL,3UL> A; // Creates a 3x3 row-major matrix \endcode // The header file <tt>./blaze/config/StorageOrder.h</tt> allows the configuration of the default // matrix storage order. Via the \c BLAZE_DEFAULT_STORAGE_ORDER macro the default storage order // for all matrices of the \b Blaze library can be specified. \code #define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor \endcode // Alternatively the default storage order can be specified via command line or by defining this // symbol manually before including any \b Blaze header file: \code g++ ... -DBLAZE_DEFAULT_STORAGE_ORDER=blaze::rowMajor ... \endcode \code #define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor #include <blaze/Blaze.h> \endcode // Valid settings for \c BLAZE_DEFAULT_STORAGE_ORDER are blaze::rowMajor and blaze::columnMajor. // // // \n \section blas_mode BLAS Mode // <hr> // // In order to achieve maximum performance for multiplications with dense matrices, \b Blaze can // be configured to use a BLAS library. Via the following compilation switch in the configuration // file <tt>./blaze/config/BLAS.h</tt> BLAS can be enabled: \code #define BLAZE_BLAS_MODE 1 \endcode // By default, \b Blaze assumes a 32-bit BLAS library. Via the \c BLAZE_BLAS_IS_64BIT compilation // switch, the 64-bit BLAS mode can be selected: \code #define BLAZE_BLAS_IS_64BIT 1 \endcode // Note that the \c BLAZE_BLAS_IS_64BIT switch also has an effect on the \ref lapack_functions. // Please also note that it might additionally be necessary to use a compilation switch to put // the BLAS/LAPACK library into 64-bit mode (e.g. \c MKL_ILP64 for the Intel MKL library). // // In case the selected BLAS library provides parallel execution, the \c BLAZE_BLAS_IS_PARALLEL // switch should be activated to prevent \b Blaze from parallelizing on its own: \code #define BLAZE_BLAS_IS_PARALLEL 1 \endcode // Additionally, it is possible to specify the name of the BLAS include file via the // \c BLAZE_BLAS_INCLUDE_FILE switch. The default setting is <tt><cblas.h></tt>: \code #define BLAZE_BLAS_INCLUDE_FILE <cblas.h> \endcode // Alternatively, all settings can be specified via command line or by defining the symbols // manually before including any \b Blaze header file: \code g++ ... 
-DBLAZE_BLAS_MODE=1 -DBLAZE_BLAS_IS_64BIT=1 -DBLAZE_BLAS_IS_PARALLEL=1 -DBLAZE_BLAS_INCLUDE_FILE='<cblas.h>' ... \endcode \code #define BLAZE_BLAS_MODE 1 #define BLAZE_BLAS_IS_64BIT 1 #define BLAZE_BLAS_IS_PARALLEL 1 #define BLAZE_BLAS_INCLUDE_FILE <cblas.h> #include <blaze/Blaze.h> \endcode // In case no BLAS library is available, \b Blaze will still work and will not be reduced in // functionality, but performance may be limited. // // // \n \section cache_size Cache Size // <hr> // // The optimization of several \b Blaze compute kernels depends on the cache size of the target // architecture. By default, \b Blaze assumes a cache size of 3 MiByte. However, for optimal // speed the exact cache size of the system should be provided via the \c BLAZE_CACHE_SIZE value in the // <tt>./blaze/config/CacheSize.h</tt> configuration file: \code #define BLAZE_CACHE_SIZE 3145728UL \endcode // The cache size can also be specified via command line or by defining this symbol manually // before including any \b Blaze header file: \code g++ ... -DBLAZE_CACHE_SIZE=3145728 ... \endcode \code #define BLAZE_CACHE_SIZE 3145728 #include <blaze/Blaze.h> \endcode // \n \section vectorization Vectorization // <hr> // // In order to achieve maximum performance and to exploit the compute power of a target platform // the \b Blaze library attempts to vectorize all linear algebra operations by SSE, AVX, and/or // AVX-512 intrinsics, depending on which instruction set is available. However, it is possible // to disable the vectorization entirely by the compile time switch in the configuration file // <tt>./blaze/config/Vectorization.h</tt>: \code #define BLAZE_USE_VECTORIZATION 1 \endcode // It is also possible to (de-)activate vectorization via command line or by defining this symbol // manually before including any \b Blaze header file: \code g++ ... -DBLAZE_USE_VECTORIZATION=1 ... \endcode \code #define BLAZE_USE_VECTORIZATION 1 #include <blaze/Blaze.h> \endcode // In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed // to use intrinsics to speed up computations. In case the switch is set to 0, vectorization is // disabled entirely and the \b Blaze library chooses default, non-vectorized functionality for // the operations. Note that deactivating the vectorization may pose a severe performance // limitation for a large number of operations! // // // \n \section thresholds Thresholds // <hr> // // For many computations \b Blaze distinguishes between small and large vectors and matrices. // This separation is especially important for the parallel execution of computations, since // the use of several threads only pays off for sufficiently large vectors and matrices. // Additionally, it also enables \b Blaze to select kernels that are optimized for a specific // size. // // In order to distinguish between small and large data structures \b Blaze provides several // thresholds that can be adapted to the characteristics of the target platform. For instance, // the \c DMATDVECMULT_THRESHOLD specifies the threshold between the application of the custom // \b Blaze kernels for small dense matrix/dense vector multiplications and the BLAS kernels // for large multiplications. All thresholds, including the thresholds for the OpenMP- and // thread-based parallelization, are contained within the configuration file // <tt><blaze/config/Thresholds.h></tt>.
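//
// As a hedged sketch (not a verbatim excerpt from the configuration file), a threshold can also
// be overridden on the command line or by defining the corresponding symbol before including any
// \b Blaze header file. The macro name used here (\c BLAZE_DMATDVECMULT_THRESHOLD, formed in
// analogy to the \c DMATDVECMULT_THRESHOLD setting mentioned above) and the chosen value are
// assumptions and should be verified against <tt><blaze/config/Thresholds.h></tt>:

   \code
   g++ ... -DBLAZE_DMATDVECMULT_THRESHOLD=10000 ...
   \endcode

   \code
   // Assumed macro name and value; check <blaze/config/Thresholds.h> for the actual defaults
   #define BLAZE_DMATDVECMULT_THRESHOLD 10000UL
   #include <blaze/Blaze.h>
   \endcode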
// // // \n \section alignment Alignment // <hr> // // For performance reasons, the vector types \ref vector_types_static_vector and // \ref vector_types_hybrid_vector and the matrix types \ref matrix_types_static_matrix and // \ref matrix_types_hybrid_matrix by default make use of aligned memory. Via the configuration // file <tt>./blaze/config/Alignment.h</tt> it is possible to define the default alignment flag: \code #define BLAZE_DEFAULT_ALIGNMENT_FLAG blaze::aligned \endcode // Alternatively it is possible to set the default alignment flag via command line or by defining // this symbol manually before including any \b Blaze header file: \code g++ ... -DBLAZE_DEFAULT_ALIGNMENT_FLAG=blaze::aligned ... \endcode \code #define BLAZE_DEFAULT_ALIGNMENT_FLAG blaze::aligned #include <blaze/Blaze.h> \endcode // If \c BLAZE_DEFAULT_ALIGNMENT_FLAG is set to \c blaze::aligned then \ref vector_types_static_vector, // \ref vector_types_hybrid_vector, \ref matrix_types_static_matrix, and \ref matrix_types_hybrid_matrix // use aligned memory by default. If it is set to \c blaze::unaligned they don't enforce aligned // memory. Note however that disabling alignment can considerably reduce the performance of all // operations with these vector and matrix types! // // // \n \section padding Padding // <hr> // // By default the \b Blaze library uses padding for the vector types \ref vector_types_static_vector // and \ref vector_types_hybrid_vector and the matrix types \ref matrix_types_static_matrix and // \ref matrix_types_hybrid_matrix in order to achieve maximum performance in all operations. Due // to padding, the proper alignment of data elements can be guaranteed and the need for remainder // loops is minimized. However, on the downside padding introduces an additional memory overhead, // which can be large depending on the used data type. // // The configuration file <tt>./blaze/config/Padding.h</tt> provides a compile time switch that // can be used to define the default padding flag: \code #define BLAZE_DEFAULT_PADDING_FLAG blaze::padded \endcode // Alternatively it is possible to define the default padding flag via command line or by defining // this symbol manually before including any \b Blaze header file: \code g++ ... -DBLAZE_DEFAULT_PADDING_FLAG=blaze::padded ... \endcode \code #define BLAZE_DEFAULT_PADDING_FLAG blaze::padded #include <blaze/Blaze.h> \endcode // If \c BLAZE_DEFAULT_PADDING_FLAG is set to \c blaze::padded, by default padding is enabled // for \ref vector_types_static_vector, \ref vector_types_hybrid_vector, \ref matrix_types_static_matrix // and \ref matrix_types_hybrid_matrix. If it is set to \c blaze::unpadded, then padding is by // default disabled. Note however that disabling padding can considerably reduce the performance // of all dense vector and matrix operations! // // // \n \section streaming Streaming (Non-Temporal Stores) // <hr> // // For vectors and matrices that don't fit into the cache anymore non-temporal stores can provide // a significant performance advantage of about 20%. However, this advantage is only in effect in // case the memory bandwidth of the target architecture is maxed out. If the target architecture's // memory bandwidth cannot be exhausted the use of non-temporal stores can decrease performance // instead of increasing it.
// // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate streaming: \code #define BLAZE_USE_STREAMING 1 \endcode // Alternatively streaming can be (de-)activated via command line or by defining this symbol // manually before including any \b Blaze header file: \code g++ ... -DBLAZE_USE_STREAMING=1 ... \endcode \code #define BLAZE_USE_STREAMING 1 #include <blaze/Blaze.h> \endcode // If \c BLAZE_USE_STREAMING is set to 1 streaming is enabled, if it is set to 0 streaming is // disabled. It is recommended to consult the target architecture's white papers to decide whether // streaming is beneficial or hurtful for performance. // // // \n Previous: \ref customization &nbsp; &nbsp; Next: \ref vector_and_matrix_customization \n */ //************************************************************************************************* //**Customization of Vectors and Matrices********************************************************** /*!\page vector_and_matrix_customization Customization of Vectors and Matrices // // \tableofcontents // // // \n \section custom_data_members Custom Data Members // <hr> // // So far the \b Blaze library does not provide a lot of flexibility to customize the data // members of existing \ref vector_types and \ref matrix_types. However, to some extent it is // possible to customize vectors and matrices by inheritance. The following example gives an // impression of how to create a simple variation of \ref matrix_types_custom_matrix, which // automatically takes care of acquiring and releasing custom memory. \code template< typename Type // Data type of the matrix , bool SO = defaultStorageOrder > // Storage order class MyCustomMatrix : public CustomMatrix< Type, unaligned, unpadded, SO > { public: explicit inline MyCustomMatrix( size_t m, size_t n ) : CustomMatrix<Type,unaligned,unpadded,SO>() , array_( new Type[m*n] ) { this->reset( array_.get(), m, n ); } private: std::unique_ptr<Type[]> array_; }; \endcode // Please note that this is a simplified example with the intent to show the general approach. // The number of constructors, the memory acquisition, and the kind of memory management can of // course be adapted to specific requirements. Also, please note that since none of the \b Blaze // vectors and matrices have virtual destructors, polymorphic destruction cannot be used. // // // \n \section custom_operations Custom Operations // <hr> // // There are two approaches to extend \b Blaze with custom operations. First, the \c map() // functions provide the possibility to execute componentwise custom operations on vectors and // matrices. Second, it is possible to add customized free functions. // // \n \subsection custom_operations_map The map() Functions // // Via the unary and binary \c map() functions it is possible to execute componentwise custom // operations on vectors and matrices. The unary \c map() function can be used to apply a custom // operation on each single element of a dense vector or matrix or each non-zero element of a // sparse vector or matrix. For instance, the following example demonstrates a custom square // root computation on a dense matrix: \code blaze::DynamicMatrix<double> A, B; B = map( A, []( double d ) { return std::sqrt( d ); } ); \endcode // The binary \c map() function can be used to apply an operation pairwise to the elements of // two dense vectors or two dense matrices.
The following example demonstrates the merging of // two matrices of double precision values into a matrix of double precision complex numbers: \code blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } }; blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } }; blaze::DynamicMatrix< complex<double> > cplx; // Creating the matrix // ( ( 2.1, 0.3) (-4.2, 1.4) ) // ( ( 1.0, 2.9) ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } ); \endcode // These examples demonstrate the most convenient way of defining a unary custom operation by // passing a lambda to the \c map() function. Alternatively, it is possible to pass a custom // functor: \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } }; B = map( A, Sqrt() ); \endcode // In order for the functor to work in a call to \c map() it must define a function call operator, // which accepts arguments of the type of the according vector or matrix elements. // // Although the operation is automatically parallelized depending on the size of the vector or // matrix, no automatic vectorization is possible. In order to enable vectorization, a \c load() // function can be added to the functor, which handles the vectorized computation. Depending on // the data type this function is passed one of the following \b Blaze SIMD data types: // // <ul> // <li>SIMD data types for fundamental data types // <ul> // <li>\c blaze::SIMDint8: Packed SIMD type for 8-bit signed integral data types</li> // <li>\c blaze::SIMDuint8: Packed SIMD type for 8-bit unsigned integral data types</li> // <li>\c blaze::SIMDint16: Packed SIMD type for 16-bit signed integral data types</li> // <li>\c blaze::SIMDuint16: Packed SIMD type for 16-bit unsigned integral data types</li> // <li>\c blaze::SIMDint32: Packed SIMD type for 32-bit signed integral data types</li> // <li>\c blaze::SIMDuint32: Packed SIMD type for 32-bit unsigned integral data types</li> // <li>\c blaze::SIMDint64: Packed SIMD type for 64-bit signed integral data types</li> // <li>\c blaze::SIMDuint64: Packed SIMD type for 64-bit unsigned integral data types</li> // <li>\c blaze::SIMDfloat: Packed SIMD type for single precision floating point data</li> // <li>\c blaze::SIMDdouble: Packed SIMD type for double precision floating point data</li> // </ul> // </li> // <li>SIMD data types for complex data types // <ul> // <li>\c blaze::SIMDcint8: Packed SIMD type for complex 8-bit signed integral data types</li> // <li>\c blaze::SIMDcuint8: Packed SIMD type for complex 8-bit unsigned integral data types</li> // <li>\c blaze::SIMDcint16: Packed SIMD type for complex 16-bit signed integral data types</li> // <li>\c blaze::SIMDcuint16: Packed SIMD type for complex 16-bit unsigned integral data types</li> // <li>\c blaze::SIMDcint32: Packed SIMD type for complex 32-bit signed integral data types</li> // <li>\c blaze::SIMDcuint32: Packed SIMD type for complex 32-bit unsigned integral data types</li> // <li>\c blaze::SIMDcint64: Packed SIMD type for complex 64-bit signed integral data types</li> // <li>\c blaze::SIMDcuint64: Packed SIMD type for complex 64-bit unsigned integral data types</li> // <li>\c blaze::SIMDcfloat: Packed SIMD type for complex single precision floating point data</li> // <li>\c blaze::SIMDcdouble: Packed SIMD type for complex double precision floating point data</li> // </ul> // </li> // </ul> // // All SIMD types provide the \c value data member for a direct access to the underlying intrinsic // data element. 
In the following example, this intrinsic element is passed to the AVX function // \c _mm256_sqrt_pd(): \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } SIMDdouble load( const SIMDdouble& a ) const { return _mm256_sqrt_pd( a.value ); } }; \endcode // In this example, whenever vectorization is generally applicable, the \c load() function is // called instead of the function call operator for as long as the number of remaining elements // is larger-or-equal to the width of the packed SIMD type. In all other cases (which also // includes peel-off and remainder loops) the scalar operation is used. // // Please note that this example has two drawbacks: First, it will only compile in case the // intrinsic \c _mm256_sqrt_pd() function is available (i.e. when AVX is active). Second, the // availability of AVX is not taken into account. The first drawback can be alleviated by making // the \c load() function a function template. The second drawback can be dealt with by adding a // \c simdEnabled() function template to the functor: \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } template< typename T > T load( const T& a ) const { return _mm256_sqrt_pd( a.value ); } template< typename T > static constexpr bool simdEnabled() { #if defined(__AVX__) return true; #else return false; #endif } }; \endcode // The \c simdEnabled() function must be a \c static, \c constexpr function and must return whether // or not vectorization is available for the given data type \c T. In case the function returns // \c true, the \c load() function is used for a vectorized evaluation, in case the function // returns \c false, \c load() is neither called nor instantiated. // // By default the \c map() function uses peel-off and remainder loops if the number of elements is // not a multiple of the width of the packed SIMD type. However, all dense vector and matrix types // in \b Blaze provide padding as an optimization. In case the custom operation preserves the // value zero of the padding elements, it is possible to omit the peel-off and remainder loops, // include the padding elements in the computation and by that increase performance. For that // purpose the \c paddingEnabled() function can be added to the functor: \code struct Sqrt { // ... static constexpr bool paddingEnabled() { return true; } }; \endcode // Also the \c paddingEnabled() function must be a \c static, \c constexpr function and must // return whether padding elements can be used in the custom operation. In case the function // returns \c true, the padding elements are used during a vectorized operation, in case the // function returns \c false, the padding elements are not used. // // Note that this is a simplified example that is only working when used for dense vectors and // matrices with double precision floating point elements. The following code shows the complete // implementation of the according functor that is used within the \b Blaze library. 
The \b Blaze // \c Sqrt functor is working for all data types that are providing a square root operation: \code namespace blaze { struct Sqrt { template< typename T > BLAZE_ALWAYS_INLINE auto operator()( const T& a ) const { return sqrt( a ); } template< typename T > static constexpr bool simdEnabled() { return HasSIMDSqrt<T>::value; } static constexpr bool paddingEnabled() { return true; } template< typename T > BLAZE_ALWAYS_INLINE auto load( const T& a ) const { BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T ); return sqrt( a ); } }; } // namespace blaze \endcode // The same approach can be taken for binary custom operations. The following code demonstrates // the \c Min functor of the \b Blaze library, which is working for all data types that provide // a \c min() operation: \code struct Min { explicit inline Min() {} template< typename T1, typename T2 > BLAZE_ALWAYS_INLINE decltype(auto) operator()( const T1& a, const T2& b ) const { return min( a, b ); } template< typename T1, typename T2 > static constexpr bool simdEnabled() { return HasSIMDMin<T1,T2>::value; } static constexpr bool paddingEnabled() { return true; } template< typename T1, typename T2 > BLAZE_ALWAYS_INLINE decltype(auto) load( const T1& a, const T2& b ) const { BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T1 ); BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T2 ); return min( a, b ); } }; \endcode // For more information on the available \b Blaze SIMD data types and functions, please see the // SIMD module in the complete \b Blaze documentation. // // \n \subsection custom_operations_free_functions Free Functions // // In order to extend \b Blaze with new functionality it is possible to add free functions. Free // functions can be used either as wrappers around calls to the map() function or to implement // general, non-componentwise operations. The following two examples will demonstrate both ideas. // // The first example shows the \c setToZero() function, which resets a sparse matrix to zero // without affecting the sparsity pattern. It is implemented as a convenience wrapper around // the map() function: \code template< typename MT // Type of the sparse matrix , bool SO > // Storage order void setToZero( blaze::SparseMatrix<MT,SO>& mat ) { (~mat) = blaze::map( ~mat, []( const auto& value ){ return decltype(value){}; } ); } \endcode // The blaze::SparseMatrix class template is the base class for all kinds of sparse matrices and // provides an abstraction from the actual type \c MT of the sparse matrix. However, due to the // <a href="https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern">Curiously Recurring Template Pattern (CRTP)</a> // it also enables a conversion back to the actual type. This downcast is performed via the tilde // operator (i.e. \c operator~()). The template parameter \c SO represents the storage order // (blaze::rowMajor or blaze::columnMajor) of the matrix. // // The second example shows the \c countZeros() function, which counts the number of values, which // are exactly zero, in a dense, row-major matrix: \code template< typename MT > size_t countZeros( blaze::DenseMatrix<MT,rowMajor>& mat ) { const size_t M( (~mat).rows() ); const size_t N( (~mat).columns() ); size_t count( 0UL ); for( size_t i=0UL; i<M; ++i ) { for( size_t j=0UL; j<N; ++j ) { if( blaze::isDefault<strict>( (~mat)(i,j) ) ) ++count; } } return count; } \endcode // The blaze::DenseMatrix class template is the base class for all kinds of dense matrices. 
Again, // it is possible to perform the conversion to the actual type via the tilde operator. // // The following two listings show the declarations of all vector and matrix base classes, which // can be used for custom free functions: \code template< typename VT // Concrete type of the dense or sparse vector , bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector) class Vector; template< typename VT // Concrete type of the dense vector , bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector) class DenseVector; template< typename VT // Concrete type of the sparse vector , bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector) class SparseVector; \endcode \code template< typename MT // Concrete type of the dense or sparse matrix , bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor) class Matrix; template< typename MT // Concrete type of the dense matrix , bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor) class DenseMatrix; template< typename MT // Concrete type of the sparse matrix , bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor) class SparseMatrix; \endcode // \n \section custom_data_types Custom Data Types // <hr> // // The \b Blaze library tries hard to make the use of custom data types as convenient, easy and // intuitive as possible. However, unfortunately it is not possible to meet the requirements of // all possible data types. Thus it might be necessary to provide \b Blaze with some additional // information about the data type. The following sections give an overview of the necessary steps // to enable the use of the hypothetical custom data type \c custom::double_t for vector and // matrix operations. For example: \code blaze::DynamicVector<custom::double_t> a, b, c; // ... Resizing and initialization c = a + b; \endcode // The \b Blaze library assumes that the \c custom::double_t data type provides \c operator+() // for additions, \c operator-() for subtractions, \c operator*() for multiplications and // \c operator/() for divisions. If any of these functions is missing it is necessary to implement // the operator to perform the according operation. For this example we assume that the custom // data type provides the four following functions instead of operators: \code namespace custom { double_t add ( const double_t& a, const double_t b ); double_t sub ( const double_t& a, const double_t b ); double_t mult( const double_t& a, const double_t b ); double_t div ( const double_t& a, const double_t b ); } // namespace custom \endcode // The following implementations will satisfy the requirements of the \b Blaze library: \code inline custom::double_t operator+( const custom::double_t& a, const custom::double_t& b ) { return add( a, b ); } inline custom::double_t operator-( const custom::double_t& a, const custom::double_t& b ) { return sub( a, b ); } inline custom::double_t operator*( const custom::double_t& a, const custom::double_t& b ) { return mult( a, b ); } inline custom::double_t operator/( const custom::double_t& a, const custom::double_t& b ) { return div( a, b ); } \endcode // \b Blaze will use all the information provided with these functions (for instance the return // type) to properly handle the operations. 
In the rare case that the return type cannot be // automatically determined from the operator it might be additionally necessary to provide a // specialization of the following four \b Blaze class templates: \code namespace blaze { template<> struct AddTrait<custom::double_t,custom::double_t> { using Type = custom::double_t; }; template<> struct SubTrait<custom::double_t,custom::double_t> { using Type = custom::double_t; }; template<> struct MultTrait<custom::double_t,custom::double_t> { using Type = custom::double_t; }; template<> struct DivTrait<custom::double_t,custom::double_t> { using Type = custom::double_t; }; } // namespace blaze \endcode // The same steps are necessary if several custom data types need to be combined (as for instance // \c custom::double_t and \c custom::float_t). Note that in this case both permutations need to // be taken into account: \code custom::double_t operator+( const custom::double_t& a, const custom::float_t& b ); custom::double_t operator+( const custom::float_t& a, const custom::double_t& b ); // ... \endcode // Please note that only built-in data types apply for vectorization and thus custom data types // cannot achieve maximum performance! // // // \n Previous: \ref configuration_files &nbsp; &nbsp; Next: \ref error_reporting_customization \n */ //************************************************************************************************* //**Customization of the Error Reporting Mechanism************************************************* /*!\page error_reporting_customization Customization of the Error Reporting Mechanism // // \tableofcontents // // // \n \section error_reporting_background Background // <hr> // // The default way of \b Blaze to report errors of any kind is to throw a standard exception. // However, although in general this approach works well, in certain environments and under // special circumstances exceptions may not be the mechanism of choice and a different error // reporting mechanism may be desirable. For this reason, \b Blaze provides several macros, // which enable the customization of the error reporting mechanism. Via these macros it is // possible to replace the standard exceptions by some other exception type or a completely // different approach to report errors. // // // \n \section error_reporting_general_customization Customization of the Reporting Mechanism // <hr> // // In some cases it might be necessary to adapt the entire error reporting mechanism and to // replace it by some other means to signal failure. The primary macro for this purpose is the // \c BLAZE_THROW macro: \code #define BLAZE_THROW( EXCEPTION ) \ throw EXCEPTION \endcode // This macro represents the default mechanism of the \b Blaze library to report errors of any // kind. In order to customize the error reporting mechanism all that needs to be done is to // define the macro prior to including any \b Blaze header file. This will cause the \b Blaze // specific mechanism to be overridden. The following example demonstrates this by replacing // exceptions by a call to a \c log() function and a direct call to abort: \code #define BLAZE_THROW( EXCEPTION ) \ log( "..." ); \ abort() #include <blaze/Blaze.h> \endcode // Doing this will trigger a call to \c log() and an abort instead of throwing an exception // whenever an error (such as an invalid argument) is detected. // // \note It is possible to execute several statements instead of executing a single statement to // throw an exception.
Also note that it is recommended to define the macro such that a subsequent // semicolon is required! // // \warning This macro is provided with the intention to assist in adapting \b Blaze to special // conditions and environments. However, the customization of the error reporting mechanism via // this macro can have a significant effect on the library. Thus be advised to use the macro // with due care! // // // \n \section error_reporting_exception_customization Customization of the Type of Exceptions // <hr> // // In addition to the customization of the entire error reporting mechanism it is also possible // to customize the type of exceptions being thrown. This can be achieved by customizing any // number of the following macros: \code #define BLAZE_THROW_BAD_ALLOC \ BLAZE_THROW( std::bad_alloc() ) #define BLAZE_THROW_LOGIC_ERROR( MESSAGE ) \ BLAZE_THROW( std::logic_error( MESSAGE ) ) #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( std::invalid_argument( MESSAGE ) ) #define BLAZE_THROW_LENGTH_ERROR( MESSAGE ) \ BLAZE_THROW( std::length_error( MESSAGE ) ) #define BLAZE_THROW_OUT_OF_RANGE( MESSAGE ) \ BLAZE_THROW( std::out_of_range( MESSAGE ) ) #define BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \ BLAZE_THROW( std::runtime_error( MESSAGE ) ) \endcode // In order to customize the type of exception the according macro has to be defined prior to // including any \b Blaze header file. This will override the \b Blaze default behavior. The // following example demonstrates this by replacing \c std::invalid_argument by a custom // exception type: \code class InvalidArgument { public: InvalidArgument(); explicit InvalidArgument( const std::string& message ); // ... }; #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( InvalidArgument( MESSAGE ) ) #include <blaze/Blaze.h> \endcode // By manually defining the macro, an \c InvalidArgument exception is thrown instead of a // \c std::invalid_argument exception. Note that it is recommended to define the macro such // that a subsequent semicolon is required! // // \warning These macros are provided with the intention to assist in adapting \b Blaze to // special conditions and environments. However, the customization of the type of an exception // via this macro may have an effect on the library. Thus be advised to use the macro with due // care! // // // \n \section error_reporting_special_errors Customization of Special Errors // <hr> // // Last but not least it is possible to customize the error reporting for special kinds of errors. // This can be achieved by customizing any number of the following macros: \code #define BLAZE_THROW_DIVISION_BY_ZERO( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) #define BLAZE_THROW_LAPACK_ERROR( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \endcode // As explained in the previous sections, in order to customize the handling of special errors // the according macro has to be defined prior to including any \b Blaze header file. This will // override the \b Blaze default behavior. 
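//
// The following sketch (a hedged example; the \c DivisionByZero class is hypothetical and not
// part of \b Blaze) replaces the division-by-zero error by a custom exception type, analogous
// to the \c InvalidArgument example above:

   \code
   // Hypothetical user-defined exception type; any type constructible from a message works
   class DivisionByZero
   {
    public:
      DivisionByZero();
      explicit DivisionByZero( const std::string& message );
      // ...
   };

   #define BLAZE_THROW_DIVISION_BY_ZERO( MESSAGE ) \
      BLAZE_THROW( DivisionByZero( MESSAGE ) )

   #include <blaze/Blaze.h>
   \endcode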
// // // \n Previous: \ref vector_and_matrix_customization &nbsp; &nbsp; Next: \ref blas_functions \n */ //************************************************************************************************* //**BLAS Functions********************************************************************************* /*!\page blas_functions BLAS Functions // // \tableofcontents // // // For vector/vector, matrix/vector and matrix/matrix multiplications with large dense matrices // \b Blaze relies on the efficiency of BLAS libraries. For this purpose, \b Blaze implements // several convenient C++ wrapper functions for several BLAS functions. The following sections // give a complete overview of all available BLAS level 1, 2 and 3 functions. // // // \n \section blas_level_1 BLAS Level 1 // <hr> // // \subsection blas_level_1_dotu Dot Product (dotu) // // The following wrapper functions provide a generic interface for the BLAS functions for the // dot product of two dense vectors (\c cblas_sdot(), \c cblas_ddot(), \c cblas_cdotu_sub(), and // \c cblas_zdotu_sub()): \code namespace blaze { float dotu( blas_int_t n, const float* x, blas_int_t incX, const float* y, blas_int_t incY ); double dotu( blas_int_t n, const double* x, blas_int_t incX, const double* y, blas_int_t incY ); complex<float> dotu( blas_int_t n, const complex<float>* x, blas_int_t incX, const complex<float>* y, blas_int_t incY ); complex<double> dotu( blas_int_t n, const complex<double>* x, blas_int_t incX, const complex<double>* y, blas_int_t incY ); template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> dotu( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); } // namespace blaze \endcode // \subsection blas_level_1_dotc Complex Conjugate Dot Product (dotc) // // The following wrapper functions provide a generic interface for the BLAS functions for the // complex conjugate dot product of two dense vectors (\c cblas_sdot(), \c cblas_ddot(), // \c cblas_cdotc_sub(), and \c cblas_zdotc_sub()): \code namespace blaze { float dotc( blas_int_t n, const float* x, blas_int_t incX, const float* y, blas_int_t incY ); double dotc( blas_int_t n, const double* x, blas_int_t incX, const double* y, blas_int_t incY ); complex<float> dotc( blas_int_t n, const complex<float>* x, blas_int_t incX, const complex<float>* y, blas_int_t incY ); complex<double> dotc( blas_int_t n, const complex<double>* x, blas_int_t incX, const complex<double>* y, blas_int_t incY ); template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> dotc( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); } // namespace blaze \endcode // \subsection blas_level_1_axpy Axpy Product (axpy) // // The following wrapper functions provide a generic interface for the BLAS functions for the // axpy product of two dense vectors (\c cblas_saxpy(), \c cblas_daxpy(), \c cblas_caxpy(), and // \c cblas_zaxpy()): \code namespace blaze { void axpy( blas_int_t n, float alpha, const float* x, blas_int_t incX, float* y, blas_int_t incY ); void axpy( blas_int_t n, double alpha, const double* x, blas_int_t incX, double* y, blas_int_t incY ); void axpy( blas_int_t n, complex<float> alpha, const complex<float>* x, blas_int_t incX, complex<float>* y, blas_int_t incY ); void axpy( blas_int_t n, complex<double> alpha, const complex<double>* x, blas_int_t incX, complex<double>* y, blas_int_t incY ); template< typename VT1, bool TF1, typename VT2, bool TF2, typename ST > void axpy( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y, ST alpha 
); } // namespace blaze \endcode // \n \section blas_level_2 BLAS Level 2 // <hr> // // \subsection blas_level_2_gemv General Matrix/Vector Multiplication (gemv) // // The following wrapper functions provide a generic interface for the BLAS functions for the // general matrix/vector multiplication (\c cblas_sgemv(), \c cblas_dgemv(), \c cblas_cgemv(), // and \c cblas_zgemv()): \code namespace blaze { void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, blas_int_t m, blas_int_t n, float alpha, const float* A, blas_int_t lda, const float* x, blas_int_t incX, float beta, float* y, blas_int_t incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, blas_int_t m, blas_int_t n, double alpha, const double* A, blas_int_t lda, const double* x, blas_int_t incX, double beta, double* y, blas_int_t incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, blas_int_t m, blas_int_t n, complex<float> alpha, const complex<float>* A, blas_int_t lda, const complex<float>* x, blas_int_t incX, complex<float> beta, complex<float>* y, blas_int_t incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, blas_int_t m, blas_int_t n, complex<double> alpha, const complex<double>* A, blas_int_t lda, const complex<double>* x, blas_int_t incX, complex<double> beta, complex<double>* y, blas_int_t incY ); } // namespace blaze \endcode // \n \subsection blas_level_2_trmv Triangular Matrix/Vector Multiplication (trmv) // // The following wrapper functions provide a generic interface for the BLAS functions for the // matrix/vector multiplication with a triangular matrix (\c cblas_strmv(), \c cblas_dtrmv(), // \c cblas_ctrmv(), and \c cblas_ztrmv()): \code namespace blaze { void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t n, const float* A, blas_int_t lda, float* x, blas_int_t incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t n, const double* A, blas_int_t lda, double* x, blas_int_t incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t n, const complex<float>* A, blas_int_t lda, complex<float>* x, blas_int_t incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t n, const complex<double>* A, blas_int_t lda, complex<double>* x, blas_int_t incX ); template< typename VT, typename MT, bool SO > void trmv( DenseVector<VT,false>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo ); template< typename VT, typename MT, bool SO > void trmv( DenseVector<VT,true>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo ); } // namespace blaze \endcode // \n \section blas_level_3 BLAS Level 3 // <hr> // // \subsection blas_level_3_gemm General Matrix/Matrix Multiplication (gemm) // // The following wrapper functions provide a generic interface for the BLAS functions for the // general matrix/matrix multiplication (\c cblas_sgemm(), \c cblas_dgemm(), \c cblas_cgemm(), // and \c cblas_zgemm()): \code namespace blaze { void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, blas_int_t m, blas_int_t n, blas_int_t k, float alpha, const float* A, blas_int_t lda, const float* B, blas_int_t ldb, float beta, float* C, blas_int_t ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, blas_int_t m, blas_int_t n, blas_int_t k, double alpha, const double* A, blas_int_t lda, const double* B, blas_int_t ldb, double beta, double* C, blas_int_t ldc ); void gemm( CBLAS_ORDER order,
CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, blas_int_t m, blas_int_t n, blas_int_t k, complex<float> alpha, const complex<float>* A, blas_int_t lda, const complex<float>* B, blas_int_t ldb, complex<float> beta, complex<float>* C, blas_int_t ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, blas_int_t m, blas_int_t n, blas_int_t k, complex<double> alpha, const complex<double>* A, blas_int_t lda, const complex<double>* B, blas_int_t ldb, complex<double> beta, complex<double>* C, blas_int_t ldc ); } // namespace blaze \endcode // \n \subsection blas_level_3_trmm Triangular Matrix/Matrix Multiplication (trmm) // // The following wrapper functions provide a generic interface for the BLAS functions for the // matrix/matrix multiplication with a triangular matrix (\c cblas_strmm(), \c cblas_dtrmm(), // \c cblas_ctrmm(), and \c cblas_ztrmm()): \code namespace blaze { void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t m, blas_int_t n, float alpha, const float* A, blas_int_t lda, float* B, blas_int_t ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t m, blas_int_t n, double alpha, const double* A, blas_int_t lda, double* B, blas_int_t ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t m, blas_int_t n, complex<float> alpha, const complex<float>* A, blas_int_t lda, complex<float>* B, blas_int_t ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t m, blas_int_t n, complex<double> alpha, const complex<double>* A, blas_int_t lda, complex<double>* B, blas_int_t ldb ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trmm( DenseMatrix<MT1,SO1>& B, const DenseMatrix<MT2,SO2>& A, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); } // namespace blaze \endcode // \n \subsection blas_level_3_trsm Triangular System Solver (trsm) // // The following wrapper functions provide a generic interface for the BLAS functions for solving // a triangular system of equations (\c cblas_strsm(), \c cblas_dtrsm(), \c cblas_ctrsm(), and // \c cblas_ztrsm()): \code namespace blaze { void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t m, blas_int_t n, float alpha, const float* A, blas_int_t lda, float* B, blas_int_t ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t m, blas_int_t n, double alpha, const double* A, blas_int_t lda, double* B, blas_int_t ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t m, blas_int_t n, complex<float> alpha, const complex<float>* A, blas_int_t lda, complex<float>* B, blas_int_t ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t m, blas_int_t n, complex<double> alpha, const complex<double>* A, blas_int_t lda, complex<double>* B, blas_int_t ldb ); template< typename MT, bool SO, typename VT, bool TF, typename ST > void trsm( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trsm( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); } //
namespace blaze \endcode // \n Previous: \ref error_reporting_customization &nbsp; &nbsp; Next: \ref lapack_functions \n */ //************************************************************************************************* //**LAPACK Functions******************************************************************************* /*!\page lapack_functions LAPACK Functions // // \tableofcontents // // // \n \section lapack_introduction Introduction // <hr> // // The \b Blaze library makes extensive use of the LAPACK functionality for various compute tasks // (including the decomposition, inversion and the computation of the determinant of dense matrices). // For this purpose, \b Blaze implements several convenient C++ wrapper functions for all required // LAPACK functions. The following sections give a complete overview of all available LAPACK wrapper // functions. For more details on the individual LAPACK functions see the \b Blaze function // documentation or the LAPACK online documentation browser: // // http://www.netlib.org/lapack/explore-html/ // // Most of the wrapper functions are implemented as thin wrappers around LAPACK functions. They // provide the parameters of the original LAPACK functions and thus provide maximum flexibility: \code using blaze::blas_int_t; constexpr size_t N( 100UL ); blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N ); // ... Initializing the matrix const blas_int_t m ( numeric_cast<blas_int_t>( A.rows() ) ); // == N const blas_int_t n ( numeric_cast<blas_int_t>( A.columns() ) ); // == N const blas_int_t lda ( numeric_cast<blas_int_t>( A.spacing() ) ); // >= N const blas_int_t lwork( n*lda ); const std::unique_ptr<blas_int_t[]> ipiv( new blas_int_t[N] ); // No initialization required const std::unique_ptr<double[]> work( new double[lwork] ); // No initialization required blas_int_t info( 0 ); getrf( m, n, A.data(), lda, ipiv.get(), &info ); // Reports failure via 'info' getri( n, A.data(), lda, ipiv.get(), work.get(), lwork, &info ); // Reports failure via 'info' \endcode // In this context, \c blas_int_t is either a 32-bit or 64-bit signed integral type, depending // on the setting of the \c BLAZE_BLAS_IS_64BIT compilation switch (see \ref blas_mode). // // Additionally, \b Blaze provides wrappers that provide a higher level of abstraction. These // wrappers provide a maximum of convenience: \code using blaze::blas_int_t; constexpr size_t N( 100UL ); blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N ); // ... Initializing the matrix const std::unique_ptr<blas_int_t[]> ipiv( new blas_int_t[N] ); // No initialization required getrf( A, ipiv.get() ); // Cannot fail getri( A, ipiv.get() ); // Reports failure via exception \endcode // \note All functions only work for general, non-adapted matrices with \c float, \c double, // \c complex<float>, or \c complex<double> element type. The attempt to call the function with // adaptors or matrices of any other element type results in a compile time error! // // \note All functions can only be used if a fitting LAPACK library is available and linked to // the final executable. Otherwise a call to these functions will result in a linker error. // // \note For performance reasons all functions provide only the basic exception safety guarantee, // i.e. in case an exception is thrown the given matrix may already have been modified. // // // \n \section lapack_decomposition Matrix Decomposition // <hr> // // The following functions decompose/factorize the given dense matrix.
Based on this decomposition // the matrix can be inverted or used to solve a linear system of equations. // // // \n \subsection lapack_lu_decomposition LU Decomposition // // The following functions provide an interface for the LAPACK functions \c sgetrf(), \c dgetrf(), // \c cgetrf(), and \c zgetrf(), which compute the LU decomposition for the given general matrix: \code namespace blaze { void getrf( blas_int_t m, blas_int_t n, float* A, blas_int_t lda, blas_int_t* ipiv, blas_int_t* info ); void getrf( blas_int_t m, blas_int_t n, double* A, blas_int_t lda, blas_int_t* ipiv, blas_int_t* info ); void getrf( blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, blas_int_t* ipiv, blas_int_t* info ); void getrf( blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, blas_int_t* ipiv, blas_int_t* info ); template< typename MT, bool SO > void getrf( DenseMatrix<MT,SO>& A, blas_int_t* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = P \cdot L \cdot U, \f]\n // where \c P is a permutation matrix, \c L is a lower unitriangular matrix, and \c U is an upper // triangular matrix. The resulting decomposition is stored within \a A: In case of a column-major // matrix, \c L is stored in the lower part of \a A and \c U is stored in the upper part. The unit // diagonal elements of \c L are not stored. In case \a A is a row-major matrix the result is // transposed. // // \note The LU decomposition will never fail, even for singular matrices. However, in case of a // singular matrix the resulting decomposition cannot be used for a matrix inversion or solving // a linear system of equations. // // // \n \subsection lapack_ldlt_decomposition LDLT Decomposition // // The following functions provide an interface for the LAPACK functions \c ssytrf(), \c dsytrf(), // \c csytrf(), and \c zsytrf(), which compute the LDLT (Bunch-Kaufman) decomposition for the given // symmetric indefinite matrix: \code namespace blaze { void sytrf( char uplo, blas_int_t n, float* A, blas_int_t lda, blas_int_t* ipiv, float* work, blas_int_t lwork, blas_int_t* info ); void sytrf( char uplo, blas_int_t n, double* A, blas_int_t lda, blas_int_t* ipiv, double* work, blas_int_t lwork, blas_int_t* info ); void sytrf( char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, blas_int_t* ipiv, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void sytrf( char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, blas_int_t* ipiv, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void sytrf( DenseMatrix<MT,SO>& A, char uplo, blas_int_t* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U D U^{T} \texttt{ (if uplo = 'U'), or } A = L D L^{T} \texttt{ (if uplo = 'L'), } \f] // where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices, // and \c D is symmetric and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting // decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in // the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to // \c 'U' the result is stored in the upper part and the lower part remains untouched. // // \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in // case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or // solving a linear system of equations. 
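//
// As an illustration, a minimal sketch of the high-level \c sytrf() wrapper (assuming an already
// initialized symmetric \c DynamicMatrix) could look like this:

   \code
   using blaze::blas_int_t;

   constexpr size_t N( 100UL );

   blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N );
   // ... Initializing the symmetric matrix

   const std::unique_ptr<blas_int_t[]> ipiv( new blas_int_t[N] );  // No initialization required

   sytrf( A, 'L', ipiv.get() );  // LDLT (Bunch-Kaufman) decomposition; cannot fail
   \endcode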
// // // \n \subsection lapack_ldlh_decomposition LDLH Decomposition // // The following functions provide an interface for the LAPACK functions \c chetrf() and \c zhetrf(), // which compute the LDLH (Bunch-Kaufman) decomposition for the given Hermitian indefinite matrix: \code namespace blaze { void hetrf( char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, blas_int_t* ipiv, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void hetrf( char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, blas_int_t* ipiv, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void hetrf( DenseMatrix<MT,SO>& A, char uplo, blas_int_t* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U D U^{H} \texttt{ (if uplo = 'U'), or } A = L D L^{H} \texttt{ (if uplo = 'L'), } \f] // where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices, // and \c D is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting // decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in // the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to // \c 'U' the result is stored in the upper part and the lower part remains untouched. // // \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in // case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or // solving a linear system of equations. // // // \n \subsection lapack_llh_decomposition Cholesky Decomposition // // The following functions provide an interface for the LAPACK functions \c spotrf(), \c dpotrf(), // \c cpotrf(), and \c zpotrf(), which compute the Cholesky (LLH) decomposition for the given // positive definite matrix: \code namespace blaze { void potrf( char uplo, blas_int_t n, float* A, blas_int_t lda, blas_int_t* info ); void potrf( char uplo, blas_int_t n, double* A, blas_int_t lda, blas_int_t* info ); void potrf( char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, blas_int_t* info ); void potrf( char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, blas_int_t* info ); template< typename MT, bool SO > void potrf( DenseMatrix<MT,SO>& A, char uplo ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U^{T} U \texttt{ (if uplo = 'U'), or } A = L L^{T} \texttt{ (if uplo = 'L'), } \f] // where \c U is an upper triangular matrix and \c L is a lower triangular matrix. The Cholesky // decomposition fails if the given matrix \a A is not a positive definite matrix. In this case // a \a std::invalid_argument exception is thrown.
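//
// A minimal usage sketch of the high-level \c potrf() wrapper (assuming an already initialized
// positive definite \c DynamicMatrix) could look like this:

   \code
   constexpr size_t N( 100UL );

   blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N );
   // ... Initializing the positive definite matrix

   potrf( A, 'L' );  // Throws a std::invalid_argument exception if A is not positive definite
   \endcode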
// // // \n \subsection lapack_qr_decomposition QR Decomposition // // The following functions provide an interface for the LAPACK functions \c sgeqrf(), \c dgeqrf(), // \c cgeqrf(), and \c zgeqrf(), which compute the QR decomposition of the given general matrix: \code namespace blaze { void geqrf( blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float* tau, float* work, blas_int_t lwork, blas_int_t* info ); void geqrf( blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double* tau, double* work, blas_int_t lwork, blas_int_t* info ); void geqrf( blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void geqrf( blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void geqrf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = Q \cdot R, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:m)</tt> is stored on exit in <tt>A(i+1:m,i)</tt>, and \c tau // in \c tau(i). Thus on exit the elements on and above the diagonal of the matrix contain the // min(\a m,\a n)-by-\a n upper trapezoidal matrix \c R (\c R is upper triangular if \a m >= \a n); // the elements below the diagonal, with the array \c tau, represent the orthogonal matrix \c Q as // a product of min(\a m,\a n) elementary reflectors. 
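//
// As an illustration, a minimal sketch of the high-level \c geqrf() wrapper (assuming an already
// initialized general \c DynamicMatrix) could look like this:

   \code
   constexpr size_t M( 100UL );
   constexpr size_t N(  80UL );

   blaze::DynamicMatrix<double,blaze::columnMajor> A( M, N );
   // ... Initializing the matrix

   const std::unique_ptr<double[]> tau( new double[N] );  // min(M,N) == N elementary reflectors

   geqrf( A, tau.get() );  // QR decomposition; R and the Householder reflectors are stored in A
   \endcode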
// // The following functions provide an interface for the LAPACK functions \c sorgqr(), \c dorgqr(), // \c sorg2r(), \c dorg2r(), \c cungqr(), \c zungqr(), \c cung2r(), and \c zung2r(), which // reconstruct the \c Q matrix from a QR decomposition: \code namespace blaze { void orgqr( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t lwork, blas_int_t* info ); void orgqr( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void orgqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void org2r( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t* info ); void org2r( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t* info ); template< typename MT, bool SO > void org2r( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void ungqr( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void ungqr( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void ungqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void ung2r( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t* info ); void ung2r( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t* info ); template< typename MT, bool SO > void ung2r( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormqr(), \c dormqr(), // \c cunmqr(), and \c zunmqr(), which can be used to multiply a matrix with the \c Q matrix from // a QR decomposition: \code namespace blaze { void ormqr( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const float* A, blas_int_t lda, const float* tau, float* C, blas_int_t ldc, float* work, blas_int_t lwork, blas_int_t* info ); void ormqr( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const double* A, blas_int_t lda, const double* tau, double* C, blas_int_t ldc, double* work, blas_int_t lwork, blas_int_t* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormqr( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); void unmqr( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* C, blas_int_t ldc, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void unmqr( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* C, blas_int_t ldc, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT1, bool SO, typename MT2 > void unmqr( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_rq_decomposition RQ Decomposition // // The
following functions provide an interface for the LAPACK functions \c sgerqf(), \c dgerqf(), // \c cgerqf(), and \c zgerqf(), which compute the RQ decomposition of the given general matrix: \code namespace blaze { void gerqf( blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float* tau, float* work, blas_int_t lwork, blas_int_t* info ); void gerqf( blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double* tau, double* work, blas_int_t lwork, blas_int_t* info ); void gerqf( blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void gerqf( blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void gerqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = R \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(n-k+i+1:n) = 0</tt> and // <tt>v(n-k+i) = 1</tt>. <tt>v(1:n-k+i-1)</tt> is stored on exit in <tt>A(m-k+i,1:n-k+i-1)</tt>, // and \c tau in \c tau(i). Thus in case \a m <= \a n, the upper triangle of the subarray // <tt>A(1:m,n-m+1:n)</tt> contains the \a m-by-\a m upper triangular matrix \c R and in case // \a m >= \a n, the elements on and above the (\a m-\a n)-th subdiagonal contain the \a m-by-\a n // upper trapezoidal matrix \c R; the remaining elements in combination with the array \c tau // represent the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. 
// // The following functions provide an interface for the LAPACK functions \c sorgrq(), \c dorgrq(), // \c sorgr2(), \c dorgr2(), \c cungrq(), \c zungrq(), \c cungr2(), and \c zungr2(), which // reconstruct the \c Q matrix from a RQ decomposition: \code namespace blaze { void orgrq( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t lwork, blas_int_t* info ); void orgrq( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void orgrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void orgr2( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t* info ); void orgr2( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t* info ); template< typename MT, bool SO > void orgr2( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void ungrq( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void ungrq( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void ungrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void ungr2( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t* info ); void ungr2( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t* info ); template< typename MT, bool SO > void ungr2( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormrq(), \c dormrq(), // \c cunmrq(), and \c zunmrq(), which can be used to multiply a matrix with the \c Q matrix from // a RQ decomposition: \code namespace blaze { void ormrq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const float* A, blas_int_t lda, const float* tau, float* C, blas_int_t ldc, float* work, blas_int_t lwork, blas_int_t* info ); void ormrq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const double* A, blas_int_t lda, const double* tau, double* C, blas_int_t ldc, double* work, blas_int_t lwork, blas_int_t* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormrq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); void unmrq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* C, blas_int_t ldc, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void unmrq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* C, blas_int_t ldc, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT1, bool SO, typename MT2 > void unmrq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_ql_decomposition QL Decomposition // // The
following functions provide an interface for the LAPACK functions \c sgeqlf(), \c dgeqlf(), // \c cgeqlf(), and \c zgeqlf(), which compute the QL decomposition of the given general matrix: \code namespace blaze { void geqlf( blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float* tau, float* work, blas_int_t lwork, blas_int_t* info ); void geqlf( blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double* tau, double* work, blas_int_t lwork, blas_int_t* info ); void geqlf( blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void geqlf( blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void geqlf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = Q \cdot L, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(m-k+i+1:m) = 0</tt> and // <tt>v(m-k+i) = 1</tt>. <tt>v(1:m-k+i-1)</tt> is stored on exit in <tt>A(1:m-k+i-1,n-k+i)</tt>, // and \c tau in \c tau(i). Thus in case \a m >= \a n, the lower triangle of the subarray // A(m-n+1:m,1:n) contains the \a n-by-\a n lower triangular matrix \c L and in case \a m <= \a n, // the elements on and below the (\a n-\a m)-th subdiagonal contain the \a m-by-\a n lower // trapezoidal matrix \c L; the remaining elements in combination with the array \c tau represent // the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. 
// // The following functions provide an interface for the LAPACK functions \c sorgql(), \c dorgql(), // \c sorg2l(), \c dorg2l(), \c cungql(), \c zungql(), \c cung2l(), and \c zung2l(), which // reconstruct the \c Q matrix from an QL decomposition: \code namespace blaze { void orgql( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t lwork, blas_int_t* info ); void orgql( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void orgql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void org2l( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t* info ); void org2l( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t* info ); template< typename MT, bool SO > void org2l( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void ungql( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void ungql( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void ungql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void ung2l( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t* info ); void ung2l( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t* info ); template< typename MT, bool SO > void ung2l( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormql(), \c dormql(), // \c cunmql(), and \c zunmql(), which can be used to multiply a matrix with the \c Q matrix from // a QL decomposition: \code namespace blaze { void ormql( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const float* A, blas_int_t lda, const float* tau, float* C, blas_int_t ldc, float* work, blas_int_t lwork, blas_int_t* info ); void ormql( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const double* A, blas_int_t lda, const double* tau, double* C, blas_int_t ldc, double* work, blas_int_t lwork, blas_int_t* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormql( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); void unmql( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* C, blas_int_t ldc, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void unmql( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* C, blas_int_t ldc, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT1, bool SO, typename MT2 > void unmql( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_lq_decomposition LQ Decomposition // // The 
following functions provide an interface for the LAPACK functions \c sgelqf(), \c dgelqf(), // \c cgelqf(), and \c zgelqf(), which compute the LQ decomposition of the given general matrix: \code namespace blaze { void gelqf( blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float* tau, float* work, blas_int_t lwork, blas_int_t* info ); void gelqf( blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double* tau, double* work, blas_int_t lwork, blas_int_t* info ); void gelqf( blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void gelqf( blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void gelqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = L \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:n)</tt> is stored on exit in <tt>A(i,i+1:n)</tt>, and \c tau // in \c tau(i). Thus on exit the elements on and below the diagonal of the matrix contain the // \a m-by-min(\a m,\a n) lower trapezoidal matrix \c L (\c L is lower triangular if \a m <= \a n); // the elements above the diagonal, with the array \c tau, represent the orthogonal matrix \c Q // as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorglq(), \c dorglq(), // \c sorgl2(), \c dorgl2(), \c cunglq(), \c zunglq(), \c cungl2(), and \c zungl2(), which // reconstruct the \c Q matrix from an LQ decomposition: \code namespace blaze { void orglq( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t lwork, blas_int_t* info ); void orglq( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void orglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void orgl2( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t* info ); void orgl2( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t* info ); template< typename MT, bool SO > void orgl2( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void unglq( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void unglq( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void unglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void ungl2( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t* info ); void ungl2( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t* info );
template< typename MT, bool SO > void ungl2( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormlq(), \c dormlq(), // \c cunmlq(), and \c zunmlq(), which can be used to multiply a matrix with the \c Q matrix from // a LQ decomposition: \code namespace blaze { void ormlq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const float* A, blas_int_t lda, const float* tau, float* C, blas_int_t ldc, float* work, blas_int_t lwork, blas_int_t* info ); void ormlq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const double* A, blas_int_t lda, const double* tau, double* C, blas_int_t ldc, double* work, blas_int_t lwork, blas_int_t* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormlq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); void unmlq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* C, blas_int_t ldc, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void unmlq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* C, blas_int_t ldc, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT1, bool SO, typename MT2 > void unmlq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \section lapack_inversion Matrix Inversion // <hr> // // Given a matrix that has already been decomposed, the following functions can be used to invert // the matrix in-place. // // // \n \subsection lapack_lu_inversion LU-based Inversion // // The following functions provide an interface for the LAPACK functions \c sgetri(), \c dgetri(), // \c cgetri(), and \c zgetri(), which invert a general matrix that has already been decomposed by // an \ref lapack_lu_decomposition : \code namespace blaze { void getri( blas_int_t n, float* A, blas_int_t lda, const blas_int_t* ipiv, float* work, blas_int_t lwork, blas_int_t* info ); void getri( blas_int_t n, double* A, blas_int_t lda, const blas_int_t* ipiv, double* work, blas_int_t lwork, blas_int_t* info ); void getri( blas_int_t n, complex<float>* A, blas_int_t lda, const blas_int_t* ipiv, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void getri( blas_int_t n, complex<double>* A, blas_int_t lda, const blas_int_t* ipiv, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void getri( DenseMatrix<MT,SO>& A, const blas_int_t* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_ldlt_inversion LDLT-based Inversion // // The following functions provide an interface for the LAPACK functions \c ssytri(), \c dsytri(), // \c csytri(), and \c zsytri(), which invert a symmetric indefinite matrix that has already been // decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytri( char uplo, blas_int_t n, float* A, blas_int_t lda, const blas_int_t* ipiv, float* work, blas_int_t* info ); void sytri( char uplo, blas_int_t n, double* A, blas_int_t lda, const blas_int_t* ipiv, double* work, blas_int_t* info ); void sytri( char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, const blas_int_t* ipiv, complex<float>* work, blas_int_t* info ); void sytri( char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, const blas_int_t* ipiv, complex<double>* work, blas_int_t* info ); template< typename MT, bool SO > void sytri( DenseMatrix<MT,SO>& A, char uplo, const blas_int_t* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlh_inversion LDLH-based Inversion // // The following functions provide an interface for the LAPACK functions \c chetri() and // \c zhetri(), which invert an Hermitian indefinite matrix that has already been decomposed by // an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetri( char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, const blas_int_t* ipiv, complex<float>* work, blas_int_t* info ); void hetri( char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, const blas_int_t* ipiv, complex<double>* work, blas_int_t* info ); template< typename MT, bool SO > void hetri( DenseMatrix<MT,SO>& A, char uplo, const blas_int_t* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first two functions report failure via the \c info argument, the third function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_llh_inversion Cholesky-based Inversion // // The following functions provide an interface for the LAPACK functions \c spotri(), \c dpotri(), // \c cpotri(), and \c zpotri(), which invert a positive definite matrix that has already been // decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potri( char uplo, blas_int_t n, float* A, blas_int_t lda, blas_int_t* info ); void potri( char uplo, blas_int_t n, double* A, blas_int_t lda, blas_int_t* info ); void potri( char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, blas_int_t* info ); void potri( char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, blas_int_t* info ); template< typename MT, bool SO > void potri( DenseMatrix<MT,SO>& A, char uplo ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error.
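//
// A minimal sketch combining the high-level \c potrf() and \c potri() wrappers (assuming an
// already initialized positive definite \c DynamicMatrix) could look like this:

   \code
   constexpr size_t N( 100UL );

   blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N );
   // ... Initializing the positive definite matrix

   potrf( A, 'L' );  // Cholesky (LLH) decomposition
   potri( A, 'L' );  // In-place inversion; reports failure via exception
   \endcode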
// // // \n \subsection lapack_triangular_inversion Inversion of Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtri(), \c dtrtri(), // \c ctrtri(), and \c ztrtri(), which invert the given triangular matrix in-place: \code namespace blaze { void trtri( char uplo, char diag, blas_int_t n, float* A, blas_int_t lda, blas_int_t* info ); void trtri( char uplo, char diag, blas_int_t n, double* A, blas_int_t lda, blas_int_t* info ); void trtri( char uplo, char diag, blas_int_t n, complex<float>* A, blas_int_t lda, blas_int_t* info ); void trtri( char uplo, char diag, blas_int_t n, complex<double>* A, blas_int_t lda, blas_int_t* info ); template< typename MT, bool SO > void trtri( DenseMatrix<MT,SO>& A, char uplo, char diag ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \section lapack_substitution Substitution // <hr> // // Given a matrix that has already been decomposed the following functions can be used to perform // the forward/backward substitution step to compute the solution to a system of linear equations. // Note that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is a n-by-n matrix that has already been // factorized by the according decomposition function, \a x and \a b are n-dimensional vectors // and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices. 
// // // \n \subsection lapack_lu_substitution LU-based Substitution // // The following functions provide an interface for the LAPACK functions \c sgetrs(), \c dgetrs(), // \c cgetrs(), and \c zgetrs(), which perform the substitution step for a general matrix that has // already been decomposed by an \ref lapack_lu_decomposition : \code namespace blaze { void getrs( char trans, blas_int_t n, blas_int_t nrhs, const float* A, blas_int_t lda, const blas_int_t* ipiv, float* B, blas_int_t ldb, blas_int_t* info ); void getrs( char trans, blas_int_t n, blas_int_t nrhs, const double* A, blas_int_t lda, const blas_int_t* ipiv, double* B, blas_int_t ldb, blas_int_t* info ); void getrs( char trans, blas_int_t n, blas_int_t nrhs, const complex<float>* A, blas_int_t lda, const blas_int_t* ipiv, complex<float>* B, blas_int_t ldb, blas_int_t* info ); void getrs( char trans, blas_int_t n, blas_int_t nrhs, const complex<double>* A, blas_int_t lda, const blas_int_t* ipiv, complex<double>* B, blas_int_t ldb, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void getrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char trans, const blas_int_t* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void getrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char trans, const blas_int_t* ipiv ); } // namespace blaze \endcode // Note that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems (see \ref lapack_substitution). If the function exits // successfully, the vector \a b or the matrix \a B contain the solution(s) of the linear system of // equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error.
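//
// A minimal sketch combining the high-level \c getrf() and \c getrs() wrappers for a single
// right-hand side (assuming an already initialized system matrix and right-hand side vector)
// could look like this:

   \code
   using blaze::blas_int_t;

   constexpr size_t N( 100UL );

   blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N );
   blaze::DynamicVector<double> b( N );
   // ... Initializing the system matrix and the right-hand side

   const std::unique_ptr<blas_int_t[]> ipiv( new blas_int_t[N] );  // No initialization required

   getrf( A, ipiv.get() );          // LU decomposition of the column-major matrix A
   getrs( A, b, 'N', ipiv.get() );  // Substitution step; b contains the solution afterwards
   \endcode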
// // // \n \subsection lapack_ldlt_substitution LDLT-based Substitution // // The following functions provide an interface for the LAPACK functions \c ssytrs(), \c dsytrs(), // \c csytrs(), and \c zsytrs(), which perform the substitution step for a symmetric indefinite // matrix that has already been decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytrs( char uplo, blas_int_t n, blas_int_t nrhs, const float* A, blas_int_t lda, const blas_int_t* ipiv, float* B, blas_int_t ldb, blas_int_t* info ); void sytrs( char uplo, blas_int_t n, blas_int_t nrhs, const double* A, blas_int_t lda, const blas_int_t* ipiv, double* B, blas_int_t ldb, blas_int_t* info ); void sytrs( char uplo, blas_int_t n, blas_int_t nrhs, const complex<float>* A, blas_int_t lda, const blas_int_t* ipiv, complex<float>* B, blas_int_t ldb, blas_int_t* info ); void sytrs( char uplo, blas_int_t n, blas_int_t nrhs, const complex<double>* A, blas_int_t lda, const blas_int_t* ipiv, complex<double>* B, blas_int_t ldb, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void sytrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const blas_int_t* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sytrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const blas_int_t* ipiv ); } // namespace blaze \endcode // Note that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems (see \ref lapack_substitution). If the function exits // successfully, the vector \a b or the matrix \a B contain the solution(s) of the linear system of // equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlh_substitution LDLH-based Substitution // // The following functions provide an interface for the LAPACK functions \c chetrs(), and \c zhetrs(), // which perform the substitution step for an Hermitian indefinite matrix that has already been // decomposed by an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetrs( char uplo, blas_int_t n, blas_int_t nrhs, const complex<float>* A, blas_int_t lda, const blas_int_t* ipiv, complex<float>* B, blas_int_t ldb, blas_int_t* info ); void hetrs( char uplo, blas_int_t n, blas_int_t nrhs, const complex<double>* A, blas_int_t lda, const blas_int_t* ipiv, complex<double>* B, blas_int_t ldb, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void hetrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const blas_int_t* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hetrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const blas_int_t* ipiv ); } // namespace blaze \endcode // Note that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems (see \ref lapack_substitution). If the function exits // successfully, the vector \a b or the matrix \a B contain the solution(s) of the linear system of // equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... 
the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first two functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_llh_substitution Cholesky-based Substitution // // The following functions provide an interface for the LAPACK functions \c spotrs(), \c dpotrs(), // \c cpotrs(), and \c zpotrs(), which perform the substitution step for a positive definite matrix // that has already been decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potrs( char uplo, blas_int_t n, blas_int_t nrhs, const float* A, blas_int_t lda, float* B, blas_int_t ldb, blas_int_t* info ); void potrs( char uplo, blas_int_t n, blas_int_t nrhs, const double* A, blas_int_t lda, double* B, blas_int_t ldb, blas_int_t* info ); void potrs( char uplo, blas_int_t n, blas_int_t nrhs, const complex<float>* A, blas_int_t lda, complex<float>* B, blas_int_t ldb, blas_int_t* info ); void potrs( char uplo, blas_int_t n, blas_int_t nrhs, const complex<double>* A, blas_int_t lda, complex<double>* B, blas_int_t ldb, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void potrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void potrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // Note that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems (see \ref lapack_substitution). If the function exits // successfully, the vector \a b or the matrix \a B contain the solution(s) of the linear system of // equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error.
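//
// A minimal sketch combining the high-level \c potrf() and \c potrs() wrappers for multiple
// right-hand sides (assuming an already initialized positive definite system matrix and
// right-hand side matrix) could look like this:

   \code
   constexpr size_t N( 100UL );
   constexpr size_t M(   5UL );

   blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N );
   blaze::DynamicMatrix<double,blaze::columnMajor> B( N, M );
   // ... Initializing the positive definite system matrix and the right-hand sides

   potrf( A, 'L' );     // Cholesky (LLH) decomposition
   potrs( A, B, 'L' );  // Substitution step; B contains the solutions afterwards
   \endcode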
// // // \n \subsection lapack_triangular_substitution Substitution for Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtrs(), \c dtrtrs(), // \c ctrtrs(), and \c ztrtrs(), which perform the substitution step for a triangular matrix: \code namespace blaze { void trtrs( char uplo, char trans, char diag, blas_int_t n, blas_int_t nrhs, const float* A, blas_int_t lda, float* B, blas_int_t ldb, blas_int_t* info ); void trtrs( char uplo, char trans, char diag, blas_int_t n, blas_int_t nrhs, const double* A, blas_int_t lda, double* B, blas_int_t ldb, blas_int_t* info ); void trtrs( char uplo, char trans, char diag, blas_int_t n, blas_int_t nrhs, const complex<float>* A, blas_int_t lda, complex<float>* B, blas_int_t ldb, blas_int_t* info ); void trtrs( char uplo, char trans, char diag, blas_int_t n, blas_int_t nrhs, const complex<double>* A, blas_int_t lda, complex<double>* B, blas_int_t ldb, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void trtrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void trtrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, char trans, char diag ); } // namespace blaze \endcode // Note that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems (see \ref lapack_substitution). If the function exits // successfully, the vector \a b or the matrix \a B contain the solution(s) of the linear system of // equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \section lapack_linear_system_solver Linear System Solver // <hr> // // The following functions represent compound functions that perform both the decomposition step // as well as the substitution step to compute the solution to a system of linear equations. Note // that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is a n-by-n matrix that has already been // factorized by the according decomposition function, \a x and \a b are n-dimensional vectors // and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices. 
// // // \subsection lapack_lu_linear_system_solver LU-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sgesv(), \c dgesv(), // \c cgesv(), and \c zgesv(), which combine an \ref lapack_lu_decomposition and the according // \ref lapack_lu_substitution : \code namespace blaze { void gesv( blas_int_t n, blas_int_t nrhs, float* A, blas_int_t lda, blas_int_t* ipiv, float* B, blas_int_t ldb, blas_int_t* info ); void gesv( blas_int_t n, blas_int_t nrhs, double* A, blas_int_t lda, blas_int_t* ipiv, double* B, blas_int_t ldb, blas_int_t* info ); void gesv( blas_int_t n, blas_int_t nrhs, complex<float>* A, blas_int_t lda, blas_int_t* ipiv, complex<float>* B, blas_int_t ldb, blas_int_t* info ); void gesv( blas_int_t n, blas_int_t nrhs, complex<double>* A, blas_int_t lda, blas_int_t* ipiv, complex<double>* B, blas_int_t ldb, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void gesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, blas_int_t* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void gesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, blas_int_t* ipiv ); } // namespace blaze \endcode // Note that depending on the storage order of the system matrix and the given right-hand side // the functions solve different equation systems (see \ref lapack_linear_system_solver). If // the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_lu_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_linear_system_solver LDLT-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c ssysv(), \c dsysv(), // \c csysv(), and \c zsysv(), which combine an \ref lapack_ldlt_decomposition and the according // \ref lapack_ldlt_substitution : \code namespace blaze { void sysv( char uplo, blas_int_t n, blas_int_t nrhs, float* A, blas_int_t lda, blas_int_t* ipiv, float* B, blas_int_t ldb, float* work, blas_int_t lwork, blas_int_t* info ); void sysv( char uplo, blas_int_t n, blas_int_t nrhs, double* A, blas_int_t lda, blas_int_t* ipiv, double* B, blas_int_t ldb, double* work, blas_int_t lwork, blas_int_t* info ); void sysv( char uplo, blas_int_t n, blas_int_t nrhs, complex<float>* A, blas_int_t lda, blas_int_t* ipiv, complex<float>* B, blas_int_t ldb, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void sysv( char uplo, blas_int_t n, blas_int_t nrhs, complex<double>* A, blas_int_t lda, blas_int_t* ipiv, complex<double>* B, blas_int_t ldb, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void sysv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, blas_int_t* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sysv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, blas_int_t* ipiv ); } // namespace blaze \endcode // Note that depending on the storage order of the system matrix and the given right-hand side // the functions solve different equation systems (see \ref lapack_linear_system_solver).
If // the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlt_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlh_linear_system_solver LDLH-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c chesv() and \c zhesv(), // which combine an \ref lapack_ldlh_decomposition and the according // \ref lapack_ldlh_substitution : \code namespace blaze { void hesv( char uplo, blas_int_t n, blas_int_t nrhs, complex<float>* A, blas_int_t lda, blas_int_t* ipiv, complex<float>* B, blas_int_t ldb, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void hesv( char uplo, blas_int_t n, blas_int_t nrhs, complex<double>* A, blas_int_t lda, blas_int_t* ipiv, complex<double>* B, blas_int_t ldb, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void hesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, blas_int_t* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, blas_int_t* ipiv ); } // namespace blaze \endcode // Note that depending on the storage order of the system matrix and the given right-hand side // the functions solve different equation systems (see \ref lapack_linear_system_solver). If // the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first two functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error.
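//
// All of the compound solvers introduced so far (\c gesv(), \c sysv(), and \c hesv()) follow the
// same calling convention. As a minimal sketch with assumed, illustrative values, the following
// example solves a general linear system via the LU-based \c gesv() wrapper:

\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::columnMajor;
using blaze::columnVector;

DynamicMatrix<double,columnMajor>  A( 4UL, 4UL );  // The general system matrix A (values assumed)
DynamicVector<double,columnVector> b( 4UL );       // The right-hand side vector b (values assumed)
// ... Initialization of A and b

std::unique_ptr<blaze::blas_int_t[]> ipiv( new blaze::blas_int_t[4UL] );  // Pivot indices of the LU decomposition

// Solves the linear system A*x=b; on exit A has been replaced by its LU decomposition and b by
// the solution. Throws std::invalid_argument in case of an error (e.g. if A is singular).
gesv( A, b, ipiv.get() );
\endcode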
// // // \n \subsection lapack_llh_linear_system_solver Cholesky-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sposv(), \c dposv(), // \c cposv(), and \c zposv(), which combine an \ref lapack_llh_decomposition and the according // \ref lapack_llh_substitution : \code namespace blaze { void posv( char uplo, blas_int_t n, blas_int_t nrhs, float* A, blas_int_t lda, float* B, blas_int_t ldb, blas_int_t* info ); void posv( char uplo, blas_int_t n, blas_int_t nrhs, double* A, blas_int_t lda, double* B, blas_int_t ldb, blas_int_t* info ); void posv( char uplo, blas_int_t n, blas_int_t nrhs, complex<float>* A, blas_int_t lda, complex<float>* B, blas_int_t ldb, blas_int_t* info ); void posv( char uplo, blas_int_t n, blas_int_t nrhs, complex<double>* A, blas_int_t lda, complex<double>* B, blas_int_t ldb, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void posv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void posv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // Note that depending on the storage order of the system matrix and the given right-hand side // the functions solve different equation systems (see \ref lapack_linear_system_solver). If // the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_llh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_triangular_linear_system_solver Linear System Solver for Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strsv(), \c dtrsv(), // \c ctrsv(), and \c ztrsv(): \code namespace blaze { void trsv( char uplo, char trans, char diag, blas_int_t n, const float* A, blas_int_t lda, float* x, blas_int_t incX ); void trsv( char uplo, char trans, char diag, blas_int_t n, const double* A, blas_int_t lda, double* x, blas_int_t incX ); void trsv( char uplo, char trans, char diag, blas_int_t n, const complex<float>* A, blas_int_t lda, complex<float>* x, blas_int_t incX ); void trsv( char uplo, char trans, char diag, blas_int_t n, const complex<double>* A, blas_int_t lda, complex<double>* x, blas_int_t incX ); template< typename MT, bool SO, typename VT, bool TF > void trsv( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); } // namespace blaze \endcode // Note that depending on the storage order of the system matrix and the given right-hand side // the functions solve different equation systems (see \ref lapack_linear_system_solver). If the // function exits successfully, the vector \a b contains the solution of the // linear system of equations. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ...
the given \a diag argument is neither 'U' nor 'N'. // // The last function throws a \a std::invalid_argument exception in case of an error. Note that // none of the functions does perform any test for singularity or near-singularity. Such tests // must be performed prior to calling this function! // // // \n \section lapack_eigenvalues Eigenvalues/Eigenvectors // // \subsection lapack_eigenvalues_general General Matrices // // The following functions provide an interface for the LAPACK functions \c sgeev(), \c dgeev(), // \c cgeev(), and \c zgeev(), which compute the eigenvalues and optionally the eigenvectors of // the given general matrix: \code namespace blaze { void geev( char jobvl, char jobvr, blas_int_t n, float* A, blas_int_t lda, float* wr, float* wi, float* VL, blas_int_t ldvl, float* VR, blas_int_t ldvr, float* work, blas_int_t lwork, blas_int_t* info ); void geev( char jobvl, char jobvr, blas_int_t n, double* A, blas_int_t lda, double* wr, double* wi, double* VL, blas_int_t ldvl, double* VR, blas_int_t ldvr, double* work, blas_int_t lwork, blas_int_t* info ); void geev( char jobvl, char jobvr, blas_int_t n, complex<float>* A, blas_int_t lda, complex<float>* w, complex<float>* VL, blas_int_t ldvl, complex<float>* VR, blas_int_t ldvr, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t* info ); void geev( char jobvl, char jobvr, blas_int_t n, complex<double>* A, blas_int_t lda, complex<double>* w, complex<double>* VL, blas_int_t ldvl, complex<double>* VR, blas_int_t ldvr, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void geev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF > void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > void geev( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& VR ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF, typename MT3, bool SO3 > void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w, DenseMatrix<MT3,SO3>& VR ); } // namespace blaze \endcode // The complex eigenvalues of the given matrix \a A are returned in the given vector \a w. // Please note that no order of eigenvalues can be assumed, except that complex conjugate pairs // of eigenvalues appear consecutively with the eigenvalue having the positive imaginary part // first. // // If \a VR is provided as an argument, the right eigenvectors are returned in the rows of \a VR // in case \a VR is a row-major matrix and in the columns of \a VR in case \a VR is a column-major // matrix. The right eigenvector \f$v[j]\f$ of \a A satisfies \f[ A * v[j] = lambda[j] * v[j], \f] // where \f$lambda[j]\f$ is its eigenvalue. // // If \a VL is provided as an argument, the left eigenvectors are returned in the rows of \a VL // in case \a VL is a row-major matrix and in the columns of \a VL in case \a VL is a column-major // matrix. The left eigenvector \f$u[j]\f$ of \a A satisfies \f[ u[j]^{H} * A = lambda[j] * u[j]^{H}, \f] // where \f$u[j]^{H}\f$ denotes the conjugate transpose of \f$u[j]\f$. // // \a w, \a VL, and \a VR are resized to the correct dimensions (if possible and necessary). The // functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... 
the given matrix \a VL is a fixed size matrix and the dimensions don't match; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a VR is a fixed size matrix and the dimensions don't match; // - ... the eigenvalue computation fails. // // The first four functions report failure via the \c info argument, the last four functions throw // an exception in case of an error. // // // \n \subsection lapack_eigenvalues_symmetric Symmetric Matrices // // The following functions provide an interface for the LAPACK functions \c ssyev() and \c dsyev(), // which compute the eigenvalues and eigenvectors of the given symmetric matrix: \code namespace blaze { void syev( char jobz, char uplo, blas_int_t n, float* A, blas_int_t lda, float* w, float* work, blas_int_t lwork, blas_int_t* info ); void syev( char jobz, char uplo, blas_int_t n, double* A, blas_int_t lda, double* w, double* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void syev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // Alternatively, the following functions can be used, which provide an interface to the LAPACK // functions \c ssyevd() and \c dsyevd(). In contrast to the \c syev() functions they use a // divide-and-conquer strategy for the computation of the left and right eigenvectors: \code namespace blaze { void syevd( char jobz, char uplo, blas_int_t n, float* A, blas_int_t lda, float* w, float* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t liwork, blas_int_t* info ); void syevd( char jobz, char uplo, blas_int_t n, double* A, blas_int_t lda, double* w, double* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t liwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void syevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized // to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left // eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right // eigenvectors are returned in the columns of \a A. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given \a jobz argument is neither \c 'V' nor \c 'N'; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last function throws an // exception in case of an error. 
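//
// The following minimal sketch (with assumed, illustrative values) demonstrates the use of
// \c syev():

\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;

DynamicMatrix<double,rowMajor>     A( 5UL, 5UL );  // The symmetric matrix A (values assumed)
// ... Initialization of A

DynamicVector<double,columnVector> w( 5UL );       // The vector for the real eigenvalues

// Computes both eigenvalues and eigenvectors ('V') using the lower part of A ('L'). On exit,
// w contains the eigenvalues in ascending order and the rows of A the corresponding eigenvectors.
syev( A, w, 'V', 'L' );
\endcode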
// // Via the following functions, which wrap the LAPACK functions \c ssyevx() and \c dsyevx(), it // is possible to compute a subset of eigenvalues and/or eigenvectors of a symmetric matrix: \code namespace blaze { void syevx( char jobz, char range, char uplo, blas_int_t n, float* A, blas_int_t lda, float vl, float vu, blas_int_t il, blas_int_t iu, float abstol, blas_int_t* m, float* w, float* Z, blas_int_t ldz, float* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t* ifail, blas_int_t* info ); void syevx( char jobz, char range, char uplo, blas_int_t n, double* A, blas_int_t lda, double vl, double vu, blas_int_t il, blas_int_t iu, double abstol, blas_int_t* m, double* w, double* Z, blas_int_t ldz, double* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t* ifail, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST > size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp ); } // namespace blaze \endcode // The number of eigenvalues to be computed is specified by the lower bound \c low and the upper // bound \c upp, which either form an integral or a floating point range. // // In case \a low and \a upp are of integral type, the function computes all eigenvalues in the // index range \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in ascending // order in the given vector \a w, which is either resized (if possible) or expected to be a // \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is // row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is // resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num // column-major matrix. // // In case \a low and \a upp are of floating point type, the function computes all eigenvalues // in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in // ascending order in the given vector \a w, which is either resized (if possible) or expected // to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case // \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. // \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last four functions throw // an exception in case of an error. 
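//
// As a minimal sketch with assumed, illustrative interval bounds, the following example computes
// all eigenvalues and eigenvectors within a floating point range via \c syevx():

\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;

DynamicMatrix<double,rowMajor>     A( 5UL, 5UL );  // The symmetric matrix A (values assumed)
// ... Initialization of A

DynamicVector<double,columnVector> w;  // The vector for the computed eigenvalues
DynamicMatrix<double,rowMajor>     Z;  // The matrix for the computed eigenvectors

// Computes all eigenvalues in the half-open interval (0..10] (interval bounds assumed) based on
// the lower part of A; w and Z are resized accordingly and num is the number of eigenvalues found.
const size_t num = syevx( A, w, Z, 'L', 0.0, 10.0 );
\endcode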
// // // \n \subsection lapack_eigenvalues_hermitian Hermitian Matrices // // The following functions provide an interface for the LAPACK functions \c cheev() and \c zheev(), // which compute the eigenvalues and eigenvectors of the given Hermitian matrix: \code namespace blaze { void heev( char jobz, char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, float* w, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t* info ); void heev( char jobz, char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, double* w, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void heev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // Alternatively, the following functions can be used, which provide an interface to the LAPACK // functions \c cheevd() and \c zheevd(). In contrast to the \c heev() functions they use a // divide-and-conquer strategy for the computation of the left and right eigenvectors: \code namespace blaze { void heevd( char jobz, char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, float* w, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t lrwork, blas_int_t* iwork, blas_int_t liwork, blas_int_t* info ); void heevd( char jobz, char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, double* w, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t lrwork, blas_int_t* iwork, blas_int_t liwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void heevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized // to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left // eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right // eigenvectors are returned in the columns of \a A. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given \a jobz argument is neither \c 'V' nor \c 'N'; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last function throws an // exception in case of an error.
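//
// The following minimal sketch (with assumed, illustrative values) demonstrates the use of
// \c heev() for a complex Hermitian matrix:

\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;

DynamicMatrix< std::complex<double>, rowMajor > A( 4UL, 4UL );  // The Hermitian matrix A (values assumed)
// ... Initialization of A

DynamicVector<double,columnVector> w( 4UL );  // The vector for the real eigenvalues

// Computes both eigenvalues and eigenvectors ('V') using the lower part of A ('L'); on exit
// w contains the eigenvalues in ascending order.
heev( A, w, 'V', 'L' );
\endcode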
// // Via the following functions, which wrap the LAPACK functions \c cheevx() and \c zheevx(), it // is possible to compute a subset of eigenvalues and/or eigenvectors of an Hermitian matrix: \code namespace blaze { void heevx( char jobz, char range, char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, float vl, float vu, blas_int_t il, blas_int_t iu, float abstol, blas_int_t* m, float* w, complex<float>* Z, blas_int_t ldz, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t* iwork, blas_int_t* ifail, blas_int_t* info ); void heevx( char jobz, char range, char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, double vl, double vu, blas_int_t il, blas_int_t iu, double abstol, blas_int_t* m, double* w, complex<double>* Z, blas_int_t ldz, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t* iwork, blas_int_t* ifail, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST > size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp ); } // namespace blaze \endcode // The number of eigenvalues to be computed is specified by the lower bound \c low and the upper // bound \c upp, which either form an integral or a floating point range. // // In case \a low and \a upp are of integral type, the function computes all eigenvalues in the // index range \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in ascending // order in the given vector \a w, which is either resized (if possible) or expected to be a // \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is // row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is // resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num // column-major matrix. // // In case \a low and \a upp are of floating point type, the function computes all eigenvalues // in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in // ascending order in the given vector \a w, which is either resized (if possible) or expected // to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case // \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. // \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last four functions throw // an exception in case of an error. 
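//
// As a minimal sketch with assumed, illustrative interval bounds, the following example computes
// all eigenvalues of a complex Hermitian matrix within a floating point range via \c heevx():

\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;

DynamicMatrix< std::complex<double>, rowMajor > A( 4UL, 4UL );  // The Hermitian matrix A (values assumed)
// ... Initialization of A

DynamicVector<double,columnVector> w;  // The vector for the computed eigenvalues

// Computes all eigenvalues in the half-open interval (-1..1] (interval bounds assumed) based on
// the lower part of A; w is resized accordingly and num is the number of eigenvalues found.
const size_t num = heevx( A, w, 'L', -1.0, 1.0 );
\endcode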
// // // \n \section lapack_singular_values Singular Values/Singular Vectors // // The following functions provide an interface for the LAPACK functions \c sgesvd(), \c dgesvd(), // \c cgesvd(), and \c zgesvd(), which perform a singular value decomposition (SVD) on the given // general matrix: \code namespace blaze { void gesvd( char jobu, char jobv, blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float* s, float* U, blas_int_t ldu, float* V, blas_int_t ldv, float* work, blas_int_t lwork, blas_int_t* info ); void gesvd( char jobu, char jobv, blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double* s, double* U, blas_int_t ldu, double* V, blas_int_t ldv, double* work, blas_int_t lwork, blas_int_t* info ); void gesvd( char jobu, char jobv, blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, float* s, complex<float>* U, blas_int_t ldu, complex<float>* V, blas_int_t ldv, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t* info ); void gesvd( char jobu, char jobv, blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, double* s, complex<double>* U, blas_int_t ldu, complex<double>* V, blas_int_t ldv, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void gesvd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, char jobu, char jobv ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobu, char jobv ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2 > void gesvd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobu, char jobv ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 > void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobu, char jobv ); } // namespace blaze \endcode // Alternatively, the following functions can be used, which provide an interface to the LAPACK // functions \c sgesdd(), \c dgesdd(), \c cgesdd(), and \c zgesdd(). 
In contrast to the \c gesvd() // functions they compute the singular value decomposition (SVD) of the given general matrix by // applying a divide-and-conquer strategy for the computation of the left and right singular // vectors: \code namespace blaze { void gesdd( char jobz, blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float* s, float* U, blas_int_t ldu, float* V, blas_int_t ldv, float* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t* info ); void gesdd( char jobz, blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double* s, double* U, blas_int_t ldu, double* V, blas_int_t ldv, double* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t* info ); void gesdd( char jobz, blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, float* s, complex<float>* U, blas_int_t ldu, complex<float>* V, blas_int_t ldv, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t* iwork, blas_int_t* info ); void gesdd( char jobz, blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, double* s, complex<double>* U, blas_int_t ldu, complex<double>* V, blas_int_t ldv, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t* iwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void gesdd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobz ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void gesdd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobz ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 > void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobz ); } // namespace blaze \endcode // The resulting decomposition has the form \f[ A = U \cdot S \cdot V, \f] // where \a S is a \a m-by-\a n matrix, which is zero except for its min(\a m,\a n) diagonal // elements, \a U is an \a m-by-\a m orthogonal matrix, and \a V is a \a n-by-\a n orthogonal // matrix. The diagonal elements of \a S are the singular values of \a A, the first min(\a m,\a n) // columns of \a U and rows of \a V are the left and right singular vectors of \a A, respectively. // // The resulting min(\a m,\a n) real and non-negative singular values are returned in descending // order in the vector \a s, which is resized to the correct size (if possible and necessary). 
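//
// The following minimal sketch (with assumed, illustrative values) computes only the singular
// values of a general matrix via the divide-and-conquer \c gesdd() wrapper:

\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;

DynamicMatrix<double,rowMajor>     A( 5UL, 8UL );  // The general matrix A (values assumed)
// ... Initialization of A

DynamicVector<double,columnVector> s;  // The vector for the singular values

// Computes the min(m,n) singular values of A; s is resized accordingly and contains the
// singular values in descending order.
gesdd( A, s );
\endcode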
// // Via the following functions, which wrap the LAPACK functions \c sgesvdx(), \c dgesvdx(), // \c cgesvdx(), and \c zgesvdx(), it is possible to compute a subset of singular values and/or // vectors: \code namespace blaze { void gesvdx( char jobu, char jobv, char range, blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float vl, float vu, blas_int_t il, blas_int_t iu, blas_int_t* ns, float* s, float* U, blas_int_t ldu, float* V, blas_int_t ldv, float* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t* info ); void gesvdx( char jobu, char jobv, char range, blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double vl, double vu, blas_int_t il, blas_int_t iu, blas_int_t* ns, double* s, double* U, blas_int_t ldu, double* V, blas_int_t ldv, double* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t* info ); void gesvdx( char jobu, char jobv, char range, blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, float vl, float vu, blas_int_t il, blas_int_t iu, blas_int_t* ns, float* s, complex<float>* U, blas_int_t ldu, complex<float>* V, blas_int_t ldv, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t* iwork, blas_int_t* info ); void gesvdx( char jobu, char jobv, char range, blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, double vl, double vu, blas_int_t il, blas_int_t iu, blas_int_t* ns, double* s, complex<double>* U, blas_int_t ldu, complex<double>* V, blas_int_t ldv, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t* iwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, ST low, ST upp ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2 > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, ST low, ST upp ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST low, ST upp ); } // namespace blaze \endcode // The number of singular values to be computed is specified by the lower bound \a low and the // upper bound \a upp, which either form an integral or a floating point range. // // In case \a low and \a upp form are of integral type, the function computes all singular values // in the index range \f$[low..upp]\f$. The \a num resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a \a num-dimensional vector. 
The resulting left singular vectors are stored // in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V, // which is either resized (if possible) or expected to be a \a num-by-\a n matrix. // // In case \a low and \a upp are of floating point type, the function computes all singular values // in the half-open interval \f$(low..upp]\f$. The resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are // stored in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given // matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n // matrix. // // The functions fail if ... // // - ... the given matrix \a U is a fixed size matrix and the dimensions don't match; // - ... the given vector \a s is a fixed size vector and the size doesn't match; // - ... the given matrix \a V is a fixed size matrix and the dimensions don't match; // - ... the given scalar values don't form a proper range; // - ... the singular value decomposition fails. // // The first four functions report failure via the \c info argument, the remaining functions throw // an exception in case of an error. // // // \n Previous: \ref blas_functions &nbsp; &nbsp; Next: \ref block_vectors_and_matrices \n */ //************************************************************************************************* //**Block Vectors and Matrices********************************************************************* /*!\page block_vectors_and_matrices Block Vectors and Matrices // // \tableofcontents // // // \n \section block_vectors_and_matrices_general General Concepts // <hr> // // In addition to fundamental element types, the \b Blaze library supports vectors and matrices // with non-fundamental element type. For instance, it is possible to define block matrices by // using a matrix type as the element type: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix< DynamicMatrix<double,rowMajor>, rowMajor > A; DynamicVector< DynamicVector<double,columnVector >, columnVector > x, y; // ... Resizing and initialization y = A * x; \endcode // The matrix/vector multiplication in this example runs fully parallel and uses vectorization // for every inner matrix/vector multiplication and vector addition. // // // \n \section block_vectors_and_matrices_pitfalls Pitfalls // <hr> // // The only thing to keep in mind when using non-fundamental element types is that all operations // between the elements have to be well defined. More specifically, the size of vector and matrix // elements has to match. The attempt to combine two non-matching elements results in either a // compilation error (in case of statically sized elements) or an exception (for dynamically sized // elements): \code DynamicVector< StaticVector<int,2UL> > a; DynamicVector< StaticVector<int,3UL> > b; DynamicVector< DynamicVector<int> > c( a + b ); // Compilation error: element size doesn't match \endcode // Therefore please don't forget that dynamically sized elements (e.g. 
\c blaze::DynamicVector, // \c blaze::HybridVector, \c blaze::DynamicMatrix, \c blaze::HybridMatrix, ...) need to be sized // accordingly upfront. // // // \n \section block_vectors_and_matrices_examples Examples // <hr> // // The first example demonstrates the multiplication between a statically sized block matrix // and a block vector: \code using namespace blaze; // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ) * ( ) = ( ) // ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) // ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) using M2x2 = StaticMatrix<int,2UL,2UL,rowMajor>; using V2 = StaticVector<int,2UL,columnVector>; DynamicMatrix<M2x2,rowMajor> A{ { M2x2(1), M2x2(2) }, { M2x2(3), M2x2(4) } }; DynamicVector<V2,columnVector> x{ V2(1), V2(2) }; DynamicVector<V2,columnVector> y( A * x ); \endcode // The second example shows the multiplication between a compressed block matrix with blocks of // varying size and a compressed block vector: \code using namespace blaze; // ( ( 1 -2 3 ) ( 5 -1 ) ) ( ( 1 ) ) ( ( -3 ) ) // ( ( 4 1 0 ) ( 1 2 ) ) ( ( 0 ) ) ( ( 7 ) ) // ( ( 0 2 4 ) ( 3 1 ) ) ( ( 1 ) ) ( ( 3 ) ) // ( ) ( ) ( ) // ( ( 1 ) ) * ( ( 2 ) ) = ( ( 2 ) ) // ( ) ( ) ( ) // ( ( 0 -1 1 ) ( 1 0 ) ) ( ( -1 ) ) ( ( 0 ) ) // ( ( 2 -1 2 ) ( 0 1 ) ) ( ( 2 ) ) ( ( 6 ) ) using M3x3 = HybridMatrix<int,3UL,3UL,rowMajor>; using V3 = HybridVector<int,3UL,columnVector>; CompressedMatrix<M3x3,rowMajor> A( 3UL, 3UL, 5UL ); A(0,0) = M3x3{ { 1, -2, 3 }, { 4, 1, 0 }, { 0, 2, 4 } }; A(0,2) = M3x3{ { 5, -1 }, { 1, 2 }, { 3, 1 } }; A(1,1) = M3x3{ { 1 } }; A(2,0) = M3x3{ { 0, -1, 1 }, { 2, -1, 2 } }; A(2,2) = M3x3{ { 1, 0 }, { 0, 1 } }; CompressedVector<V3,columnVector> x( 3UL, 3UL ); x[0] = V3{ 1, 0, 1 }; x[1] = V3{ 2 }; x[2] = V3{ -1, 2 }; CompressedVector<V3,columnVector> y( A * x ); \endcode // \n Previous: \ref lapack_functions &nbsp; &nbsp; Next: \ref intra_statement_optimization \n */ //************************************************************************************************* //**Intra-Statement Optimization******************************************************************* /*!\page intra_statement_optimization Intra-Statement Optimization // // One of the prime features of the \b Blaze library is the automatic intra-statement optimization. // In order to optimize the overall performance of every single statement \b Blaze attempts to // rearrange the operands based on their types. For instance, the following addition of dense and // sparse vectors \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1 + d2; \endcode // is automatically rearranged and evaluated as \code // ... d3 = d1 + d2 + s1; // <- Note that s1 and d2 have been rearranged \endcode // This order of operands is highly favorable for the overall performance since the addition of // the two dense vectors \c d1 and \c d2 can be handled much more efficiently in a vectorized // fashion. // // This intra-statement optimization can have a tremendous effect on the performance of a statement. // Consider for instance the following computation: \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... Resizing and initialization y = A * B * x; \endcode // Since multiplications are evaluated from left to right, this statement would result in a // matrix/matrix multiplication, followed by a matrix/vector multiplication. 
However, if the // right subexpression is evaluated first, the performance can be dramatically improved since the // matrix/matrix multiplication can be avoided in favor of a second matrix/vector multiplication. // The \b Blaze library exploits this by automatically restructuring the expression such that the // right multiplication is evaluated first: \code // ... y = A * ( B * x ); \endcode // Note however that although this intra-statement optimization may result in a measurable or // even significant performance improvement, this behavior may be undesirable for several reasons, // for instance because of numerical stability. Therefore, in case the order of evaluation matters, // the best solution is to be explicit and to separate a statement into several statements: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1; // Compute the dense vector/sparse vector addition first ... d3 += d2; // ... and afterwards add the second dense vector \endcode \code // ... blaze::DynamicMatrix<double> A, B, C; blaze::DynamicVector<double> x, y; // ... Resizing and initialization C = A * B; // Compute the left-hand side matrix-matrix multiplication first ... y = C * x; // ... before the right-hand side matrix-vector multiplication \endcode // Alternatively, it is also possible to use the \c eval() function to fix the order of evaluation: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + eval( s1 + d2 ); \endcode \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... Resizing and initialization y = eval( A * B ) * x; \endcode // \n Previous: \ref block_vectors_and_matrices &nbsp; &nbsp; Next: \ref faq \n */ //************************************************************************************************* //**FAQ******************************************************************************************** /*!\page faq Frequently Asked Questions (FAQ) // // \tableofcontents // // // <hr> // \section faq_padding A StaticVector/StaticMatrix is larger than expected. Is this a bug? // // The size of a \ref vector_types_static_vector, \ref matrix_types_static_matrix, // \ref vector_types_hybrid_vector, or \ref matrix_types_hybrid_matrix can indeed be larger // than expected: \code StaticVector<int,3> a; StaticMatrix<int,3,3> A; sizeof( a ); // Evaluates to 16, 32, or even 64, but not 12 sizeof( A ); // Evaluates to 48, 96, or even 144, but not 36 \endcode // In order to achieve the maximum possible performance the \b Blaze library tries to enable // SIMD vectorization even for small vectors. For that reason \b Blaze by default uses padding // elements for all dense vectors and matrices to guarantee that at least a single SIMD vector // can be loaded. 
Depending on the used SIMD technology that can significantly increase the size // of a \ref vector_types_static_vector, \ref matrix_types_static_matrix, // \ref vector_types_hybrid_vector, or \ref matrix_types_hybrid_matrix : \code StaticVector<int,3> a; StaticMatrix<int,3,3> A; sizeof( a ); // Evaluates to 16 in case of SSE, 32 in case of AVX, and 64 in case of AVX-512 // (under the assumption that an integer occupies 4 bytes) sizeof( A ); // Evaluates to 48 in case of SSE, 96 in case of AVX, and 144 in case of AVX-512 // (under the assumption that an integer occupies 4 bytes) \endcode // The configuration file <tt>./blaze/config/Padding.h</tt> provides a compile time switch // that can be used to (de-)activate padding: \code #define BLAZE_DEFAULT_PADDING_FLAG blaze::padded \endcode // Alternatively it is possible to (de-)activate padding via command line or by defining this // symbol manually before including any \b Blaze header file: \code g++ ... -DBLAZE_DEFAULT_PADDING_FLAG=blaze::padded ... \endcode \code #define BLAZE_DEFAULT_PADDING_FLAG blaze::padded #include <blaze/Blaze.h> \endcode // If \c BLAZE_DEFAULT_PADDING_FLAG is set to \c blaze::padded, by default padding is enabled // for \ref vector_types_static_vector, \ref vector_types_hybrid_vector, \ref matrix_types_static_matrix, // and \ref matrix_types_hybrid_matrix. If it is set to \c blaze::unpadded, then padding is by // default disabled. Note however that disabling padding can considerably reduce the performance // of all dense vector and matrix operations! // // // <hr> // \section faq_alignment Despite disabling padding, a StaticVector/StaticMatrix is still larger than expected. Is this a bug? // // Despite disabling padding via the \c BLAZE_DEFAULT_PADDING_FLAG compile time switch (see // \ref faq_padding), the size of a \ref vector_types_static_vector, \ref matrix_types_static_matrix, // \ref vector_types_hybrid_vector, or \ref matrix_types_hybrid_matrix can still be larger than // expected: \code #define BLAZE_DEFAULT_PADDING_FLAG blaze::unpadded #include <blaze/Blaze.h> StaticVector<int,3> a; StaticVector<int,5> b; sizeof( a ); // Always evaluates to 12 sizeof( b ); // Evaluates to 32 with SSE (larger than expected) and to 20 with AVX or AVX-512 (expected) \endcode // The reason for this behavior is the used SIMD technology. If SSE is used, which provides 128 // bit wide registers, a single SIMD pack can usually hold 4 integers (128 bit divided by 32 bit). // Since the second vector contains enough elements it is possible to benefit from vectorization. // However, SSE requires an alignment of 16 bytes, which ultimately results in a total size of // 32 bytes for the \c StaticVector (2 times 16 bytes due to 5 integer elements). If AVX or AVX-512 // is used, which provide 256 bit or 512 bit wide registers, a single SIMD vector can hold 8 or 16 // integers, respectively. Even the second vector does not hold enough elements to benefit from // vectorization, which is why \b Blaze does not enforce a 32 byte (for AVX) or even 64 byte // alignment (for AVX-512).
// // It is possible to disable the SIMD-specific alignment for \ref vector_types_static_vector, // \ref matrix_types_static_matrix, \ref vector_types_hybrid_vector, or \ref matrix_types_hybrid_matrix // via the compile time switch in the <tt>./blaze/config/Alignment.h</tt> configuration file: \code #define BLAZE_DEFAULT_ALIGNMENT_FLAG blaze::aligned \endcode // Alternatively it is possible set the default alignment flag via command line or by defining // this symbol manually before including any \b Blaze header file: \code g++ ... -DBLAZE_DEFAULT_ALIGNMENT_FLAG=blaze::aligned ... \endcode \code #define BLAZE_DEFAULT_ALIGNMENT_FLAG blaze::aligned #include <blaze/Blaze.h> \endcode // If \c BLAZE_DEFAULT_ALIGNMENT_FLAG is set to \c blaze::aligned then \ref vector_types_static_vector, // \ref vector_types_hybrid_vector, \ref matrix_types_static_matrix, and \ref matrix_types_hybrid_matrix // use aligned memory by default. If it is set to \c blaze::unaligned they don't enforce aligned // memory. Note however that disabling alignment can considerably reduce the performance of all // operations with these vector and matrix types! // // Alternatively it is possible to disable the vectorization entirely by the compile time switch // in the <tt>./blaze/config/Vectorization.h</tt> configuration file: \code #define BLAZE_USE_VECTORIZATION 1 \endcode // It is also possible to (de-)activate vectorization via command line or by defining this symbol // manually before including any \b Blaze header file: \code g++ ... -DBLAZE_USE_VECTORIZATION=1 ... \endcode \code #define BLAZE_USE_VECTORIZATION 1 #include <blaze/Blaze.h> \endcode // In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed // to use intrinsics and the necessary alignment to speed up computations. In case the switch is // set to 0, vectorization is disabled entirely and the \b Blaze library chooses default, // non-vectorized functionality for the operations. Note that deactivating the vectorization may // pose a severe performance limitation for a large number of operations! // // // <hr> // \section faq_std_vector I experience crashes when using StaticVector/StaticMatrix in a std::vector. Is this a bug? // // With active vectorization the elements of a \ref vector_types_static_vector, // \ref vector_types_hybrid_vector, \ref matrix_types_static_matrix, and \ref matrix_types_hybrid_matrix // are possibly over-aligned to meet the alignment requirements of the available instruction set // (SSE, AVX, AVX-512, ...). The alignment for fundamental types (\c short, \c int, \c float, // \c double, ...) and complex types (\c complex<float>, \c complex<double>, ...) is 16 bytes // for SSE, 32 bytes for AVX, and 64 bytes for AVX-512. 
All other types are aligned according to // their intrinsic alignment: \code struct Int { int i; }; using VT1 = blaze::StaticVector<double,3UL>; using VT2 = blaze::StaticVector<complex<float>,2UL>; using VT3 = blaze::StaticVector<Int,5UL>; alignof( VT1 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( VT2 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( VT3 ); // Evaluates to 'alignof( Int )' \endcode // For this reason \ref vector_types_static_vector, \ref vector_types_hybrid_vector, // \ref matrix_types_static_matrix, and \ref matrix_types_hybrid_matrix cannot be used in // containers using dynamic memory such as \c std::vector without additionally providing an // allocator that can provide over-aligned memory: \code using Type = blaze::StaticVector<double,3UL>; using Allocator = blaze::AlignedAllocator<Type>; std::vector<Type> v1; // Might be misaligned for AVX or AVX-512 std::vector<Type,Allocator> v2; // Properly aligned for AVX or AVX-512 \endcode // It is possible to disable the vectorization entirely by the compile time switch in the // <tt>./blaze/config/Vectorization.h</tt> configuration file: \code #define BLAZE_USE_VECTORIZATION 1 \endcode // It is also possible to (de-)activate vectorization via command line or by defining this symbol // manually before including any \b Blaze header file: \code #define BLAZE_USE_VECTORIZATION 1 #include <blaze/Blaze.h> \endcode // In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed // to use intrinsics and the necessary alignment to speed up computations. In case the switch is // set to 0, vectorization is disabled entirely and the \b Blaze library chooses default, // non-vectorized functionality for the operations. Note that deactivating the vectorization may // pose a severe performance limitation for a large number of operations! // // // <hr> // \section faq_blas To which extent does Blaze make use of BLAS functions under the hood? // // Currently the only BLAS functions that are utilized by \b Blaze are the \c gemm() functions // for the multiplication of two dense matrices (i.e. \c sgemm(), \c dgemm(), \c cgemm(), and // \c zgemm()). All other operations are always and unconditionally performed by native \b Blaze // kernels. // // The \c BLAZE_BLAS_MODE config switch (see <tt>./blaze/config/BLAS.h</tt>) determines whether // \b Blaze is allowed to use BLAS kernels. If \c BLAZE_BLAS_MODE is set to 0 then \b Blaze // does not utilize the BLAS kernels and unconditionally uses its own custom kernels. If // \c BLAZE_BLAS_MODE is set to 1 then \b Blaze is allowed to choose between using BLAS kernels // or its own custom kernels. In case of the dense matrix multiplication this decision is based // on the size of the dense matrices. For large matrices, \b Blaze uses the BLAS kernels, for // small matrices it uses its own custom kernels. The threshold for this decision can be // configured via the \c BLAZE_DMATDMATMULT_THRESHOLD, \c BLAZE_DMATTDMATMULT_THRESHOLD, // \c BLAZE_TDMATDMATMULT_THRESHOLD and \c BLAZE_TDMATTDMATMULT_THRESHOLD config switches // (see <tt>./blaze/config/Thresholds.h</tt>). // // Please note that the extent to which \b Blaze uses BLAS kernels can change in future releases // of \b Blaze! // // // <hr> // \section faq_lapack To which extent does Blaze make use of LAPACK functions under the hood? // // \b Blaze uses LAPACK functions for matrix decomposition, matrix inversion, computing the // determinants and eigenvalues, and the SVD.
In contrast to the BLAS functionality (see // \ref faq_blas), you cannot disable LAPACK or switch to custom kernels. In case you try to // use any of these functionalities, but do not provide (i.e. link) a LAPACK library you will // get link time errors. // // Please note that the extent to which \b Blaze uses LAPACK kernels can change in future releases // of \b Blaze! // // // <hr> // \section faq_compile_times The compile time is too high if I include <blaze/Blaze.h>. Can I reduce it? // // The include file <tt><blaze/Blaze.h></tt> includes the entire functionality of the \b Blaze // library, which by now is several hundred thousand lines of source code. That means that a lot // of source code has to be parsed whenever <tt><blaze/Blaze.h></tt> is encountered. However, it // is rare that everything is required within a single compilation unit. Therefore it is easily // possible to reduce compile times by including only those \b Blaze features that are used within // the compilation unit. For instance, instead of including <tt><blaze/Blaze.h></tt> it could be // enough to include <tt><blaze/math/DynamicVector.h></tt>, which would reduce the compilation // times by about 20%. // // Additionally we are taking care to implement new \b Blaze functionality such that compile times // do not explode and try to reduce the compile times of existing features. Thus newer releases of // \b Blaze can also improve compile times. // // // <hr> // \section faq_custom_operations Blaze does not provide feature XYZ. What can I do? // // In some cases you might be able to implement the required functionality very conveniently by // building on the existing \c map() functions (see \ref custom_operations_map). For instance, // the following code demonstrates the addition of a function that merges two vectors of floating // point type into a vector of complex numbers: \code template< typename VT1, typename VT2, bool TF > decltype(auto) zip( const blaze::DenseVector<VT1,TF>& lhs, const blaze::DenseVector<VT2,TF>& rhs ) { return blaze::map( ~lhs, ~rhs, []( const auto& r, const auto& i ) { using ET1 = ElementType_t<VT1>; using ET2 = ElementType_t<VT2>; return std::complex<std::common_type_t<ET1,ET2>>( r, i ); } ); } \endcode // You will find a summary of the necessary steps to create custom features in \ref customization. // // Sometimes, however, the available customization points might not be sufficient. In this case // you are cordially invited to create a pull request that provides the implementation of a // feature or to create an issue according to our \ref issue_creation_guidelines. Please try // to explain the feature as descriptively as possible, for instance by providing conceptual code // examples. // // \n Previous: \ref intra_statement_optimization &nbsp; &nbsp; Next: \ref issue_creation_guidelines \n */ //************************************************************************************************* //**Issue Creation Guidelines*********************************************************************** /*!\page issue_creation_guidelines Issue Creation Guidelines // // \tableofcontents // // // One of the most important aspects of the \b Blaze project is the // <a href="https://bitbucket.org/blaze-lib/blaze/issues">issue management</a> on the official // \b Blaze Bitbucket page. We cordially invite all \b Blaze users to submit feature requests // and bug reports, as we believe that this is a significant part of making \b Blaze a better // library.
However, we are asking to follow a small set of guidelines when creating an issue // to facilitate the issue management on our side and also to make issues more useful for users // of \b Blaze. // // // <hr> // \section issues_title Title // // The title is the most important detail of an issue. A well chosen title makes it easy to grasp // the idea of an issue and improves the discoverability. Therefore, please choose a title that // is ... // // - ... as descriptive as possible; // - ... as concise as possible; // - ... as unambiguous as possible. // // Also, please create a separate issue for each idea/problem/etc. A very general title or an // \"and\" in the title could be an indication that the issue is not specific enough and should // be split into several issues. // // \subsection issues_title_good_examples Good Examples // // - \"Provide support for AVX-512 SIMD operations\" // - \"Add support for the Boost Multiprecision Library\" // - \"Introduce reduction operations into Blaze\" // - \"Compilation error on KNL with -march=knl\" // // \subsection issues_title_bad_examples Bad Examples // // - \"Several requests\" (instead create separate issues for each single request) // - \"Improve the performance\" (instead specify which operation should perform better) // - \"Blaze library compilation error\" (instead try to be more specific) // // // <hr> // \section issues_description Description // // The description should help us to understand your idea or problem in as much detail as possible. // Also, it helps to clearly spell out your expectations (how a feature is supposed to work, how // the behavior should be, etc.). Please spend a couple of minutes to try to make the description // as comprehensive as possible. // // // <hr> // \section issues_assignee Assignee // // There is no need to assign the issue to a particular person. It is perfectly ok if you just // ignore this setting. // // // <hr> // \section issues_kind Kind of Issue // // There are four kinds of issues available in the Bitbucket issue tracker: \ref issues_kind_bug, // \ref issues_kind_enhancement, \ref issues_kind_proposal, and \ref issues_kind_task. In the // following we try to give guidelines on which kind to choose for a particular issue: // // \subsection issues_kind_bug Bug // // Please choose the category \ref issues_kind_bug if ... // // - ... you experience a compilation error despite your best efforts to get it right; // - ... you experience a crash/failure despite your best efforts to get it right; // - ... you experience problems when combining features; // - ... a feature does not work as specified/documented (i.e. can be considered broken). // // Please \b don't choose the category \ref issues_kind_bug if ... // // - ... you feel a feature should work differently than it currently does (instead create a // \ref issues_kind_proposal with a convincing title and description); // - ... you are not sure how to use a feature (instead create an \ref issues_kind_enhancement // issue to extend the documentation); // - ... you are missing a feature (instead create a \ref issues_kind_proposal or // \ref issues_kind_enhancement issue). // // If you select the category \ref issues_kind_bug, please also try to provide a minimum example // that fails. That helps us to minimize the time to resolve the bug. // // As we try to keep \b Blaze bug-free, we will always prioritize bug issues. However, we will // also quickly close bug issues as \"wontfix\" if the described issue is not a bug (i.e. 
one of // the problems mentioned above). We will \b not relabel a bug issue to \ref issues_kind_enhancement // or \ref issues_kind_proposal, even if they would be reasonable extensions to \b Blaze. // // \subsection issues_kind_enhancement Enhancement // // Please choose the category \ref issues_kind_enhancement if ... // // - ... you need an add-on to an existing feature; // - ... you need an extension of an existing feature; // - ... you need an extended documentation for an existing feature. // // \ref issues_kind_enhancement is very similar to \ref issues_kind_proposal, so we don't mind // if an \ref issues_kind_enhancement is labeled as a \ref issues_kind_proposal or vice versa. // Just make sure you don't request an extension or new feature as a \ref issues_kind_bug. // // \subsection issues_kind_proposal Proposal // // Please choose the category \ref issues_kind_proposal if ... // // - ... you want to request a new feature; // - ... you want to change an existing feature. // // \ref issues_kind_proposal is very similar to \ref issues_kind_enhancement, so we don't mind if // a \ref issues_kind_proposal is labeled as an \ref issues_kind_enhancement or vice versa. Just // make sure you don't request an extension or new feature as a \ref issues_kind_bug. // // \subsection issues_kind_task Task // // Please choose the category \ref issues_kind_task if ... // // - ... you want us to do something not feature related; // - ... you have something else in mind which does not fall in the other three categories. // // // <hr> // \section issues_priority Priority // // Via the priority of an issue you can tell us how important the issue is to you. Therefore the // priority can have an influence on when we will deal with the issue. However, unfortunately we // don't have an infinite amount of time and we can not deal with an arbitrary amount of issues // at the same time. We will therefore take the priority into account, but mainly schedule the // issues based on impact to all \b Blaze users and the estimated time to resolve it. // // You can choose between \ref issues_priority_blocker, \ref issues_priority_critical, // \ref issues_priority_major, \ref issues_priority_minor, and \ref issues_priority_trivial. // // \subsection issues_priority_blocker Blocker // // Please choose a \ref issues_priority_blocker priority if ... // // - ... you cannot work with \b Blaze due to the described \ref issues_kind_bug; // - ... the \ref issues_kind_bug likely has an influence on \b all \b Blaze users. // // Please note that the categories \ref issues_kind_enhancement or \ref issues_kind_proposal // should never be a \ref issues_priority_blocker! // // \subsection issues_priority_critical Critical // // Please choose a \ref issues_priority_critical priority if ... // // - ... you can work around a \ref issues_kind_bug, but the workaround is (much) slower or awful; // - ... you cannot use \b Blaze without the proposed feature; // - ... you consider it to be essential for \b all \b Blaze users. // // \subsection issues_priority_major Major // // Please choose a \ref issues_priority_major priority if ... // // - ... a \ref issues_kind_bug or feature request is not \ref issues_priority_critical, but // still very important to you; // - ... you consider it to have a \ref issues_priority_major impact on most \b Blaze users. // // The \ref issues_priority_major category is the default setting in Bitbucket and we therefore // consider it as the default priority for issues. 
// // \subsection issues_priority_minor Minor // // Please choose a \ref issues_priority_minor priority if ... // // - ... a \ref issues_kind_bug does not affect many \b Blaze users; // - ... a feature request would only be useful for a small number of \b Blaze users; // - ... a feature would be nice to have, but is not particularly important. // // \subsection issues_priority_trivial Trivial // // Please choose a \ref issues_priority_trivial priority if ... // // - ... a \ref issues_kind_bug hardly affects anyone; // - ... a feature request would only be useful for very few \b Blaze users; // - ... the expected time to resolve an issue is very small. // // // <hr> // \section issues_attachment Attachments // // You can always provide us with additional information in the form of attachments. Feel free // to attach something to the issue if ... // // - ... it can help us to analyze a \ref issues_kind_bug; // - ... you have some source code that demonstrates a problem; // - ... you already have a working prototype that sketches the idea; // - ... you have additional resources that could help us. // // We appreciate anything that simplifies our work and speeds up our progress. // // \n Previous: \ref faq &nbsp; &nbsp; Next: \ref blaze_references \n */ //************************************************************************************************* //**Blaze References******************************************************************************* /*!\page blaze_references Blaze References // // In case you need references to the \b Blaze library (for papers or other publications), please // feel free to use one of the following references: \code @misc{blazelib, author = "Klaus {Iglberger}", title = "Blaze C++ Linear Algebra Library", howpublished = "https://bitbucket.org/blaze-lib", year = 2012 } \endcode \code @article{iglberger2012_1, author = "Klaus {Iglberger} and Georg {Hager} and Jan {Treibig} and Ulrich {R{\"u}de}", title = "Expression Templates Revisited: A Performance Analysis of Current Methodologies", journal = "SIAM Journal on Scientific Computing", year = 2012, volume = 34(2), pages = C42--C69 } \endcode \code @inproceedings{iglberger2012_2, author = "Klaus {Iglberger} and Georg {Hager} and Jan {Treibig} and Ulrich {R{\"u}de}", title = "High Performance Smart Expression Template Math Libraries", booktitle = "Proceedings of the 2nd International Workshop on New Algorithms and Programming Models for the Manycore Era (APMM 2012) at HPCS 2012", year = 2012 } \endcode // \n Previous: \ref issue_creation_guidelines */ //************************************************************************************************* #endif