| source (string, 3-92 chars) | c (string, 26-2.25M chars) |
|---|---|
GB_unaryop__minv_int64_int64.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int64_int64
// op(A') function: GB_tran__minv_int64_int64
// C type: int64_t
// A type: int64_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 64)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 64) ;
// casting
#define GB_CASTING(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
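// Illustrative expansion (a sketch, not generated code): for this operator
// and type pair, GB_CAST_OP (p, p) expands to roughly
//
//      int64_t aij = Ax [p] ;              // GB_GETA
//      int64_t z = (int64_t) aij ;         // GB_CASTING (a no-op cast here)
//      Cx [p] = GB_IMINV_SIGNED (z, 64) ;  // GB_OP applied to GB_CX (p)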
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_int64_int64
(
int64_t *Cx, // Cx and Ax may be aliased
int64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_int64_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
sumProductSparseC.c
|
#include <mex.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
double* prediction = mxGetPr(prhs[0]); /* contains evaluated appearance models for all pixels in that BScan and all boundary models */
double* mu = mxGetPr(prhs[1]);
double* WML = mxGetPr(prhs[2]);
double sigmaML = (double) mxGetScalar(prhs[3]);
int* idxA = (int*) mxGetData(prhs[4]);
double *hashTable = mxGetPr(prhs[5]);
int* boundsPred = (int*) mxGetData(prhs[6]);
/* int* idxRows = (int*) mxGetPr(prhs[4]); */
int M = mxGetN(prhs[2]); /* number of eigenmodes of the shape prior */
int N = mxGetM(prhs[2]); /* number of rows in WML */
int numColumns = mxGetNumberOfElements(prhs[4]); /* for each column in the shape prior model there is one index */
/*insert debug message here*/
// mexPrintf("%d",numColumns);
const mwSize* dims = mxGetDimensions(prhs[0]);
int numBounds = dims[1]; /* number of columns of pObs */
int numRows = mxGetM(prhs[0]); /* number of rows of pObs */
int numColumnsShape = (int) ((double)N/(double)numBounds); /* number of entries per boundary in the shape prior model */
/* only calculate pairwise probabilities up to this precision */
double eps = pow(10,-25);
double* gamma = NULL;
/* return the probabilities for this B-Scan column */
mwSize dimArray[3] = {numRows,numBounds,numColumns};
plhs[0] = mxCreateNumericArray(3,dimArray,mxDOUBLE_CLASS,mxREAL);
gamma = mxGetPr(plhs[0]);
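/*
 * Sketch of the computation below (reconstructed from this code, not an
 * external reference): for each B-scan column the boundary positions form a
 * chain, and the loops implement a normalized forward-backward (sum-product)
 * pass,
 *
 *   alpha_k(a)    ~ prediction(a,k) * sum_b psi_k(a,b) * alpha_{k-1}(b)
 *   beta_{k-1}(b) = (1/c_k) * sum_a psi_k(a,b) * prediction(a,k) * beta_k(a)
 *   gamma_k(a)    = alpha_k(a) * beta_k(a)
 *
 * where psi_k(a,b) is a Gaussian pairwise factor between adjacent boundaries
 * derived from the shape prior (mu, WML, sigmaML), c_k are the per-boundary
 * normalizers, and exp() is replaced by a lookup in hashTable.
 */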
#pragma omp parallel
{
double var_a_b, factor, mu_a, mu_b, prec_a_b, prec_a_a, evalDens, aTilde;
double variance, varInv;
double* prec = malloc(2*(numBounds-1)*sizeof(double));
double var1, var2, var3;
double* alpha = malloc(numBounds*numRows*sizeof(double));
double* beta = malloc(numBounds*numRows*sizeof(double));
double* c = malloc(numBounds*sizeof(double));
double cInv, cTmp;
double* prodTmp = malloc(numRows*sizeof(double));
int rowStart, rowEnd;
int boundA, boundB;
int numNotZero;
int i,k,idx,column,predOffset;
int muFloor, startVal, stopVal;
double mu_b_a, mu_a_b, tmpVal, prec_a_aaTilde;
#pragma omp for
for (column=0; column < numColumns; column++) {
/* for (column = 0; column < 1; column++) {*/
rowStart = boundsPred[column*2]; rowEnd = boundsPred[column*2+1];
memset(alpha,0,numRows*numBounds*sizeof(double));
memset(beta,0,numRows*numBounds*sizeof(double));
predOffset = numBounds*numRows*column; /* shifts the pointer inside the prediction matrix to the next BScan column */
/* calculate for the first boundary; calculate the 1-d shape marginal p(a) for column j on the fly */
/* calculate the variance for the shape prior density: \Sigma = WW^T + sigma^2I */
variance = 0;
for (i=0; i < M; i++) {
variance += WML[idxA[column] + i*N]*WML[idxA[column] + i*N];
}
variance += sigmaML; varInv = -0.5/variance;
factor = 1/sqrt(2*3.1415926535897*variance);
cTmp = 0;
for (i=rowStart; i < rowEnd+1; i++) {
alpha[i] = factor*exp(varInv*(i+1 - mu[idxA[column]])*(i+1-mu[idxA[column]]))*prediction[i + predOffset];
cTmp += alpha[i];
}
c[0] = cTmp; cInv = 1/cTmp;
for (i=rowStart; i < rowEnd+1; i++) {
alpha[i] = alpha[i]*cInv;
}
/* calculate the precision matrices required for conditional densities p(a|b) */
for (k=0; k < numBounds-1; k++) {
var1 = 0; var2 = 0; var3 = 0;
for (i=0; i < M; i++) {
var1 += WML[idxA[column] + numColumnsShape*k + i*N]*WML[idxA[column] + numColumnsShape*k + i*N];
var2 += WML[idxA[column] + numColumnsShape*k + i*N]*WML[idxA[column] + numColumnsShape*(k+1) + i*N];
var3 += WML[idxA[column] + numColumnsShape*(k+1) + i*N]*WML[idxA[column] + numColumnsShape*(k+1) + i*N];
}
var1 += sigmaML;
var3 += sigmaML;
factor = 1/(var1*var3 - var2*var2);
prec[0 + k*2] = factor*(-var2);
prec[1 + k*2] = factor*var1;
}
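/*
 * Note (derived from the computation above): with the 2x2 prior covariance of
 * adjacent boundaries  Sigma = [var1 var2; var2 var3]  (from W*W' + sigmaML*I),
 * its inverse is
 *
 *   Sigma^-1 = 1/(var1*var3 - var2*var2) * [ var3 -var2; -var2 var1 ]
 *
 * Only the off-diagonal entry (prec[0 + k*2]) and the second diagonal entry
 * (prec[1 + k*2]) are stored, since these are the two quantities the
 * conditional densities below need.
 */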
/* calculate the remaining boundaries */
for (k=1; k < numBounds; k++) {
/* calculate parameters of distribution p(b|a); b is the boundary k-1; a is boundary k */
var_a_b = 1/prec[1 + (k-1)*2]; /* for the conditional density, variance is given by the inverse precision matrix */
factor = 1/sqrt(2*3.1415926535897*var_a_b);
/* calculate the number of elements larger than eps for each row */
cTmp = 0;
mu_b = mu[idxA[column] + numColumnsShape*(k-1)]; mu_a = mu[idxA[column] + numColumnsShape*k]; prec_a_b = prec[0 + (k-1)*2]; prec_a_a = prec[1 + (k-1)*2]; aTilde = prec_a_b/prec_a_a;
prec_a_aaTilde = 0.5*(1/prec_a_a)*prec_a_b*prec_a_b;
numNotZero = (int) ceil(fabs(sqrt(-log(eps*factor)*2*var_a_b)/aTilde));
/* value of boundary k */
for (boundA = rowStart+1; boundA <= rowEnd; boundA++) {
mu_b_a = ((mu_a - boundA)/aTilde + mu_b);
muFloor = (int) mu_b_a;
/* the position of boundary k-1 can lie between 1 and boundary k and is constrained to lie within 2*numNotZero + 1 around its mean mu_b_a */
startVal = (muFloor-numNotZero < rowStart+1) ? rowStart+1 : muFloor - numNotZero;
stopVal = (muFloor+numNotZero > boundA) ? boundA : muFloor + numNotZero;
tmpVal = 0;
/* value of boundary k-1 */
for (boundB = startVal; boundB <= stopVal; boundB++) {
/* evaluate the conditional density; but only the part inside the exp function */
evalDens = -(boundB-mu_b_a)*(boundB-mu_b_a)*prec_a_aaTilde;
evalDens = hashTable[(int)(-evalDens*1000 + 0.5)];
tmpVal += factor*evalDens*alpha[boundB-1+numRows*(k-1)];
}
alpha[boundA-1+numRows*k] = tmpVal*prediction[boundA-1+numRows*k + predOffset];
cTmp += alpha[boundA-1+numRows*k];
}
/* normalize alpha */
c[k] = cTmp; cInv = 1/cTmp;
for (i=rowStart; i < rowEnd+1; i++) {
alpha[i+numRows*k] = alpha[i+numRows*k]*cInv;
}
}
/* initialize beta */
for (i=rowStart; i < rowEnd+1; i++) {
idx = (numBounds-1)*numRows+i;
beta[idx] = 1;
gamma[idx + predOffset] = alpha[idx]*beta[idx];
}
for (k=numBounds-1; k > 0; k--) {
/* precalculate the product of pObs*beta(z_{n+1}) */
cInv = 1/c[k];
for (i=rowStart; i < rowEnd+1; i++) {
prodTmp[i] = cInv*prediction[numRows*k+i + predOffset]*beta[numRows*k+i];
}
var_a_b = 1/prec[1 + (k-1)*2]; /* for the conditional density, variance is given by the inverse precision matrix */
factor = 1/sqrt(2*3.1415926535897*var_a_b);
/* calculate the number of elements larger than eps for each row */
numNotZero = (int) ceil(sqrt(-log(eps*factor)*2*var_a_b));
cTmp = 0;
mu_b = mu[idxA[column] + numColumnsShape*(k-1)]; mu_a = mu[idxA[column] + numColumnsShape*k]; prec_a_b = prec[0 + (k-1)*2]; prec_a_a = prec[1 + (k-1)*2]*0.5;
for (boundB = rowStart+1; boundB <= rowEnd; boundB++) {
mu_a_b = mu_a - var_a_b*prec_a_b*(boundB-mu_b);
muFloor = (int) mu_a_b;
/* position of boundary k (called here boundA) */
/* check for all possible values of boundB in this row: has to be at least boundA, can be at most numRows and we limit it to be not further away from muFloor than numNotZero */
startVal = (muFloor-numNotZero < boundB) ? boundB : muFloor - numNotZero;
stopVal = (muFloor+numNotZero > rowEnd) ? rowEnd : muFloor + numNotZero;
tmpVal = 0;
for (boundA = startVal; boundA <= stopVal; boundA++) {
evalDens = -(boundA-mu_a_b)*(boundA-mu_a_b)*prec_a_a;
evalDens = hashTable[(int)(-evalDens*1000 + 0.5)];
tmpVal += factor*evalDens*prodTmp[boundA-1];
}
idx = boundB-1+numRows*(k-1);
beta[idx] = tmpVal;
gamma[idx + predOffset] = beta[idx]*alpha[idx];
}
}
}
free(alpha); free(beta); free(prodTmp); free(prec); free(c);
}
}
|
pi_omp_lock_4.c
|
/*
This program will numerically compute the integral of
4/(1+x*x)
from 0 to 1. The value of this integral is pi -- which
is great since it gives us an easy way to check the answer.
This version parallelizes the loop across OpenMP threads and combines
the partial sums under an OpenMP lock. It uses the timer from the
OpenMP runtime library.
History: Written by Tim Mattson, 11/99.
*/
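/*
   The loop below applies the midpoint rule: with step = 1/num_steps and
   x_i = (i + 0.5)*step,

       pi ~= step * sum_{i=0..num_steps-1} 4/(1 + x_i*x_i)

   Each thread accumulates a private partial sum over its strided subset of i,
   and the partial sums are combined into pi under an OpenMP lock.
*/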
#include <stdio.h>
#include <omp.h>
static long num_steps = 1024 * 1024 * 1024;
double step;
int main () {
const int MAX_T = 16;
int t;
double pi;
double start_time, run_time;
omp_lock_t lock;
omp_init_lock(&lock);
step = 1.0/(double) num_steps;
for(t = 1; t <= MAX_T; t*=2) {
start_time = omp_get_wtime();
omp_set_num_threads(t);
pi = 0.0;
#pragma omp parallel
{
int i, nt;
double x, sum = 0;
i = omp_get_thread_num();
nt = omp_get_num_threads();
for (; i < num_steps; i += nt){
x = (i + 0.5) * step;
sum += 4.0/(1.0+x*x);
}
omp_set_lock(&lock);
pi += sum;
omp_unset_lock(&lock);
}
pi = pi * step;
run_time = omp_get_wtime() - start_time;
printf("pi with %d threads: %.16lf in %lf seconds\n",t , pi,run_time);
}
omp_destroy_lock(&lock);
}
|
looop.c
|
#include<stdio.h>
#include<stdlib.h>
#include<omp.h>
#define TAM (64*127*5)
#define ITERACOES_TESTE 100000
int main(){
int i,contador;
long soma;
int *vetor = calloc(TAM,sizeof(int));
if(vetor == NULL){
printf("Falha ao alocar memória");
return -1;
}
for(contador=0; contador < ITERACOES_TESTE; contador++){
#pragma omp parallel num_threads(2)
{ // thread 1 handles the first half
if(omp_get_thread_num()==0){
for(int i=0; i<TAM/2; i++){
vetor[i] ++;
}
}else if(omp_get_thread_num()==1){ // thread 2 handles the second half
for(int i=TAM/2; i<TAM; i++){
vetor[i] ++;
}
}
}
}
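/*
 * Note: an equivalent, more idiomatic way to split the work (a sketch, not
 * the variant being benchmarked above) would be to let OpenMP partition the
 * iterations:
 *
 *   #pragma omp parallel for num_threads(2)
 *   for (int i = 0; i < TAM; i++)
 *       vetor[i]++;
 */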
soma = 0;
for(i=0; i<TAM; i++){
soma += vetor[i];
}
printf("%ld\n", soma);
free(vetor);
return 0;
}
|
DRB021-reductionmissing-orig-yes.c
|
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: [email protected], [email protected], [email protected],
[email protected], [email protected])
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A kernel with a two-level parallelizable loop with reduction:
if reduction(+:sum) is missing, there is a race condition.
Data race pairs: we allow multiple pairs to preserve the pattern.
sum@70:7 vs. sum@70:7
sum@70:7 vs. sum@70:13
*/
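/*
 * Background note (general OpenMP semantics, not part of the original
 * benchmark header): reduction(+:sum) gives each thread a private copy of
 * sum, initialized to 0, and adds the copies into the shared sum at the end
 * of the region. Without the clause, the shared update "sum = sum + temp*temp"
 * is an unsynchronized read-modify-write, which is the data race documented
 * above.
 */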
#include <stdio.h>
int main(int argc, char* argv[])
{
int i,j;
float temp, sum=0.0;
int len=100;
float u[100][100];
#pragma omp parallel for private(i ,j )
for (i = 0; i < len; i++)
#pragma omp parallel for private(j )
for (j = 0; j < len; j++)
u[i][j] = 0.5;
#pragma omp parallel for private(temp ,j ) reduction(+:sum)
for (i = 0; i < len; i++)
#pragma omp parallel for private(temp ,j ) reduction(+:sum)
for (j = 0; j < len; j++)
{
temp = u[i][j];
sum = sum + temp * temp;
}
printf ("sum = %f\n", sum);
return 0;
}
|
gamma_index_ivfpq.h
|
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This faiss source code is licensed under the MIT license.
* https://github.com/facebookresearch/faiss/blob/master/LICENSE
*
*
* The works below are modified based on faiss:
* 1. Replace the static batch indexing with real time indexing
* 2. Add the fine-grained sort after PQ coarse sort
* 3. Add the numeric field and bitmap filters in the process of searching
*
* Modified works copyright 2019 The Gamma Authors.
*
* The modified codes are licensed under the Apache License, Version 2.0 license
* found in the LICENSE file in the root directory of this source tree.
*
*/
#ifndef GAMMA_INDEX_IVFPQ_H_
#define GAMMA_INDEX_IVFPQ_H_
#include <unistd.h>
#include <atomic>
#include "faiss/IndexIVF.h"
#include "faiss/IndexIVFPQ.h"
#include "faiss/InvertedLists.h"
#include "faiss/impl/FaissAssert.h"
#include "faiss/impl/io.h"
#include "faiss/index_io.h"
#include "faiss/utils/Heap.h"
#include "faiss/utils/distances.h"
#include "faiss/utils/hamming.h"
#include "faiss/utils/utils.h"
#include "field_range_index.h"
#include "gamma_common_data.h"
#include "gamma_index.h"
#include "gamma_index_flat.h"
#include "log.h"
#include "raw_vector.h"
#include "realtime_invert_index.h"
namespace tig_gamma {
/// statistics are robust to internal threading, but not if
/// IndexIVFPQ::search_preassigned is called by multiple threads
struct IndexIVFPQStats {
size_t nrefine; // nb of refines (IVFPQR)
size_t n_hamming_pass;
// nb of passed Hamming distance tests (for polysemous)
// timings measured with the CPU RTC
// on all threads
size_t search_cycles;
size_t refine_cycles; // only for IVFPQR
IndexIVFPQStats() { reset(); }
void reset(){};
};
// global var that collects them all
extern IndexIVFPQStats indexIVFPQ_stats;
// namespace {
using idx_t = faiss::Index::idx_t;
static uint64_t get_cycles() {
#ifdef __x86_64__
uint32_t high, low;
asm volatile("rdtsc \n\t" : "=a"(low), "=d"(high));
return ((uint64_t)high << 32) | (low);
#else
return 0;
#endif
}
#define TIC t0 = get_cycles()
#define TOC get_cycles() - t0
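// Typical use of these cycle-counting helpers (as in precompute_list_tables
// below):
//      uint64_t t0;
//      TIC;                        // t0 = get_cycles()
//      /* ... timed work ... */
//      init_list_cycles += TOC;    // get_cycles() - t0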
/** QueryTables manages the various ways of searching an
* IndexIVFPQ. The code contains a lot of branches, depending on:
* - metric_type: are we computing L2 or Inner product similarity?
* - by_residual: do we encode raw vectors or residuals?
* - use_precomputed_table: are x_R|x_C tables precomputed?
* - polysemous_ht: are we filtering with polysemous codes?
*/
struct QueryTables {
/*****************************************************
* General data from the IVFPQ
*****************************************************/
const faiss::IndexIVFPQ &ivfpq;
const faiss::IVFSearchParameters *params;
// copied from IndexIVFPQ for easier access
int d;
const faiss::ProductQuantizer &pq;
faiss::MetricType metric_type;
bool by_residual;
int use_precomputed_table;
int polysemous_ht;
// pre-allocated data buffers
float *sim_table, *sim_table_2;
float *residual_vec, *decoded_vec;
// single data buffer
std::vector<float> mem;
// for table pointers
std::vector<const float *> sim_table_ptrs;
explicit QueryTables(const faiss::IndexIVFPQ &ivfpq,
const faiss::IVFSearchParameters *params)
: ivfpq(ivfpq),
d(ivfpq.d),
pq(ivfpq.pq),
metric_type(ivfpq.metric_type),
by_residual(ivfpq.by_residual),
use_precomputed_table(ivfpq.use_precomputed_table) {
mem.resize(pq.ksub * pq.M * 2 + d * 2);
sim_table = mem.data();
sim_table_2 = sim_table + pq.ksub * pq.M;
residual_vec = sim_table_2 + pq.ksub * pq.M;
decoded_vec = residual_vec + d;
// for polysemous
polysemous_ht = ivfpq.polysemous_ht;
if (auto ivfpq_params =
dynamic_cast<const faiss::IVFPQSearchParameters *>(params)) {
polysemous_ht = ivfpq_params->polysemous_ht;
}
if (polysemous_ht != 0) {
q_code.resize(pq.code_size);
}
init_list_cycles = 0;
sim_table_ptrs.resize(pq.M);
}
/*****************************************************
* What we do when query is known
*****************************************************/
// field specific to query
const float *qi;
// query-specific initialization
void init_query(const float *qi) {
this->qi = qi;
if (metric_type == faiss::METRIC_INNER_PRODUCT)
init_query_IP();
else
init_query_L2();
if (!by_residual && polysemous_ht != 0) pq.compute_code(qi, q_code.data());
}
void init_query_IP() {
// precompute some tables specific to the query qi
pq.compute_inner_prod_table(qi, sim_table);
}
void init_query_L2() {
if (!by_residual) {
pq.compute_distance_table(qi, sim_table);
} else if (use_precomputed_table) {
pq.compute_inner_prod_table(qi, sim_table_2);
}
}
/*****************************************************
* When inverted list is known: prepare computations
*****************************************************/
// fields specific to list
long key;
float coarse_dis;
std::vector<uint8_t> q_code;
uint64_t init_list_cycles;
/// once we know the query and the centroid, we can prepare the
/// sim_table that will be used for accumulation
/// and dis0, the initial value
float precompute_list_tables() {
float dis0 = 0;
uint64_t t0;
TIC;
if (by_residual) {
if (metric_type == faiss::METRIC_INNER_PRODUCT)
dis0 = precompute_list_tables_IP();
else
dis0 = precompute_list_tables_L2();
}
init_list_cycles += TOC;
return dis0;
}
float precompute_list_table_pointers() {
float dis0 = 0;
uint64_t t0;
TIC;
if (by_residual) {
if (metric_type == faiss::METRIC_INNER_PRODUCT)
FAISS_THROW_MSG("not implemented");
else
dis0 = precompute_list_table_pointers_L2();
}
init_list_cycles += TOC;
return dis0;
}
/*****************************************************
* compute tables for inner prod
*****************************************************/
float precompute_list_tables_IP() {
// prepare the sim_table that will be used for accumulation
// and dis0, the initial value
ivfpq.quantizer->reconstruct(key, decoded_vec);
// decoded_vec = centroid
float dis0 = faiss::fvec_inner_product(qi, decoded_vec, d);
if (polysemous_ht) {
for (int i = 0; i < d; i++) {
residual_vec[i] = qi[i] - decoded_vec[i];
}
pq.compute_code(residual_vec, q_code.data());
}
return dis0;
}
/*****************************************************
* compute tables for L2 distance
*****************************************************/
float precompute_list_tables_L2() {
float dis0 = 0;
if (use_precomputed_table == 0 || use_precomputed_table == -1) {
ivfpq.quantizer->compute_residual(qi, residual_vec, key);
pq.compute_distance_table(residual_vec, sim_table);
if (polysemous_ht != 0) {
pq.compute_code(residual_vec, q_code.data());
}
} else if (use_precomputed_table == 1) {
dis0 = coarse_dis;
faiss::fvec_madd(pq.M * pq.ksub,
&ivfpq.precomputed_table[key * pq.ksub * pq.M], -2.0,
sim_table_2, sim_table);
if (polysemous_ht != 0) {
ivfpq.quantizer->compute_residual(qi, residual_vec, key);
pq.compute_code(residual_vec, q_code.data());
}
} else if (use_precomputed_table == 2) {
dis0 = coarse_dis;
const faiss::MultiIndexQuantizer *miq =
dynamic_cast<const faiss::MultiIndexQuantizer *>(ivfpq.quantizer);
FAISS_THROW_IF_NOT(miq);
const faiss::ProductQuantizer &cpq = miq->pq;
int Mf = pq.M / cpq.M;
const float *qtab = sim_table_2; // query-specific table
float *ltab = sim_table; // (output) list-specific table
long k = key;
for (size_t cm = 0; cm < cpq.M; cm++) {
// compute PQ index
int ki = k & ((uint64_t(1) << cpq.nbits) - 1);
k >>= cpq.nbits;
// get corresponding table
const float *pc =
&ivfpq.precomputed_table[(ki * pq.M + cm * Mf) * pq.ksub];
if (polysemous_ht == 0) {
// sum up with query-specific table
faiss::fvec_madd(Mf * pq.ksub, pc, -2.0, qtab, ltab);
ltab += Mf * pq.ksub;
qtab += Mf * pq.ksub;
} else {
for (size_t m = cm * Mf; m < (cm + 1) * Mf; m++) {
q_code[m] =
faiss::fvec_madd_and_argmin(pq.ksub, pc, -2, qtab, ltab);
pc += pq.ksub;
ltab += pq.ksub;
qtab += pq.ksub;
}
}
}
}
return dis0;
}
float precompute_list_table_pointers_L2() {
float dis0 = 0;
if (use_precomputed_table == 1) {
dis0 = coarse_dis;
const float *s = &ivfpq.precomputed_table[key * pq.ksub * pq.M];
for (size_t m = 0; m < pq.M; m++) {
sim_table_ptrs[m] = s;
s += pq.ksub;
}
} else if (use_precomputed_table == 2) {
dis0 = coarse_dis;
const faiss::MultiIndexQuantizer *miq =
dynamic_cast<const faiss::MultiIndexQuantizer *>(ivfpq.quantizer);
FAISS_THROW_IF_NOT(miq);
const faiss::ProductQuantizer &cpq = miq->pq;
int Mf = pq.M / cpq.M;
long k = key;
int m0 = 0;
for (size_t cm = 0; cm < cpq.M; cm++) {
int ki = k & ((uint64_t(1) << cpq.nbits) - 1);
k >>= cpq.nbits;
const float *pc =
&ivfpq.precomputed_table[(ki * pq.M + cm * Mf) * pq.ksub];
for (int m = m0; m < m0 + Mf; m++) {
sim_table_ptrs[m] = pc;
pc += pq.ksub;
}
m0 += Mf;
}
} else {
FAISS_THROW_MSG("need precomputed tables");
}
if (polysemous_ht) {
FAISS_THROW_MSG("not implemented");
// Not clear that it makes sense to implement this,
// because it costs M * ksub, which is what we wanted to
// avoid with the table pointers.
}
return dis0;
}
};
template <class C>
struct KnnSearchResults {
idx_t key;
const idx_t *ids;
// heap params
size_t k;
float *heap_sim;
idx_t *heap_ids;
size_t nup;
inline void add(idx_t j, float dis) {
if (C::cmp(heap_sim[0], dis)) {
faiss::heap_pop<C>(k, heap_sim, heap_ids);
idx_t id = ids ? ids[j] : (key << 32 | j);
faiss::heap_push<C>(k, heap_sim, heap_ids, dis, id);
nup++;
}
}
};
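// Note on KnnSearchResults (a clarification added here, not faiss
// documentation): heap_sim/heap_ids hold the current best-k results as a heap
// ordered by the comparator C, with the current worst kept result at
// heap_sim[0]. add() replaces that worst entry (pop then push) only when
// C::cmp says the candidate distance beats it; when store_pairs is in effect
// (ids == nullptr) the stored id packs the list key into the high bits and
// the in-list offset j into the low bits.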
/*****************************************************
* Scanning the codes.
* The scanning functions call their favorite precompute_*
* function to precompute the tables they need.
*****************************************************/
template <typename IDType, faiss::MetricType METRIC_TYPE>
struct IVFPQScannerT : QueryTables {
const uint8_t *list_codes;
const IDType *list_ids;
size_t list_size;
explicit IVFPQScannerT(const faiss::IndexIVFPQ &ivfpq,
const faiss::IVFSearchParameters *params)
: QueryTables(ivfpq, params) {
FAISS_THROW_IF_NOT(pq.nbits == 8);
assert(METRIC_TYPE == metric_type);
}
float dis0;
void init_list(idx_t list_no, float coarse_dis, int mode) {
this->key = list_no;
this->coarse_dis = coarse_dis;
if (mode == 2) {
dis0 = precompute_list_tables();
} else if (mode == 1) {
dis0 = precompute_list_table_pointers();
}
}
/// tables are not precomputed, but pointers are provided to the
/// relevant X_c|x_r tables
template <class SearchResultType>
void scan_list_with_pointer(size_t ncode, const uint8_t *codes,
SearchResultType &res) const {
for (size_t j = 0; j < ncode; j++) {
float dis = dis0;
const float *tab = sim_table_2;
for (size_t m = 0; m < pq.M; m++) {
int ci = *codes++;
dis += sim_table_ptrs[m][ci] - 2 * tab[ci];
tab += pq.ksub;
}
res.add(j, dis);
}
}
/// nothing is precomputed: access residuals on-the-fly
template <class SearchResultType>
void scan_on_the_fly_dist(size_t ncode, const uint8_t *codes,
SearchResultType &res) const {
const float *dvec;
float dis0 = 0;
if (by_residual) {
if (METRIC_TYPE == faiss::METRIC_INNER_PRODUCT) {
ivfpq.quantizer->reconstruct(key, residual_vec);
dis0 = faiss::fvec_inner_product(residual_vec, qi, d);
} else {
ivfpq.quantizer->compute_residual(qi, residual_vec, key);
}
dvec = residual_vec;
} else {
dvec = qi;
dis0 = 0;
}
for (size_t j = 0; j < ncode; j++) {
pq.decode(codes, decoded_vec);
codes += pq.code_size;
float dis;
if (METRIC_TYPE == faiss::METRIC_INNER_PRODUCT) {
dis = dis0 + faiss::fvec_inner_product(decoded_vec, qi, d);
} else {
dis = faiss::fvec_L2sqr(decoded_vec, dvec, d);
}
res.add(j, dis);
}
}
/*****************************************************
* Scanning codes with polysemous filtering
*****************************************************/
template <class HammingComputer, class SearchResultType>
void scan_list_polysemous_hc(size_t ncode, const uint8_t *codes,
SearchResultType &res) const {
int ht = ivfpq.polysemous_ht;
size_t n_hamming_pass = 0;
int code_size = pq.code_size;
HammingComputer hc(q_code.data(), code_size);
for (size_t j = 0; j < ncode; j++) {
const uint8_t *b_code = codes;
int hd = hc.hamming(b_code);
if (hd < ht) {
n_hamming_pass++;
float dis = dis0;
const float *tab = sim_table;
for (size_t m = 0; m < pq.M; m++) {
dis += tab[*b_code++];
tab += pq.ksub;
}
res.add(j, dis);
}
codes += code_size;
}
#pragma omp critical
{ indexIVFPQ_stats.n_hamming_pass += n_hamming_pass; }
}
template <class SearchResultType>
void scan_list_polysemous(size_t ncode, const uint8_t *codes,
SearchResultType &res) const {
switch (pq.code_size) {
#define HANDLE_CODE_SIZE(cs) \
case cs: \
scan_list_polysemous_hc<faiss::HammingComputer##cs, SearchResultType>( \
ncode, codes, res); \
break
HANDLE_CODE_SIZE(4);
HANDLE_CODE_SIZE(8);
HANDLE_CODE_SIZE(16);
HANDLE_CODE_SIZE(20);
HANDLE_CODE_SIZE(32);
HANDLE_CODE_SIZE(64);
#undef HANDLE_CODE_SIZE
default:
if (pq.code_size % 8 == 0)
scan_list_polysemous_hc<faiss::HammingComputerM8, SearchResultType>(
ncode, codes, res);
else
scan_list_polysemous_hc<faiss::HammingComputerM4, SearchResultType>(
ncode, codes, res);
break;
}
}
};
struct GammaInvertedListScanner : faiss::InvertedListScanner {
GammaInvertedListScanner() {
docids_bitmap_ = nullptr;
raw_vec_ = nullptr;
range_index_ptr_ = nullptr;
}
virtual size_t scan_codes_pointer(size_t ncode, const uint8_t **codes,
const idx_t *ids, float *heap_sim,
idx_t *heap_ids, size_t k) = 0;
inline void SetVecFilter(const char *docids_bitmap,
const RawVector<float> *raw_vec) {
if (docids_bitmap == nullptr) {
LOG(ERROR) << "docids_bitmap is NULL!";
return;
}
if (!docids_bitmap_) {
docids_bitmap_ = docids_bitmap;
}
if (!raw_vec_) {
raw_vec_ = raw_vec;
}
return;
}
inline void set_search_condition(const GammaSearchCondition *condition) {
this->range_index_ptr_ = condition->range_query_result;
}
const char *docids_bitmap_;
const RawVector<float> *raw_vec_;
MultiRangeQueryResults *range_index_ptr_;
};
template <faiss::MetricType METRIC_TYPE, class C, int precompute_mode>
struct GammaIVFPQScanner : IVFPQScannerT<idx_t, METRIC_TYPE>,
GammaInvertedListScanner {
bool store_pairs_;
GammaIVFPQScanner(const faiss::IndexIVFPQ &ivfpq, bool store_pairs)
: IVFPQScannerT<idx_t, METRIC_TYPE>(ivfpq, nullptr) {
store_pairs_ = store_pairs;
}
template <class SearchResultType>
void scan_list_with_table(size_t ncode, const uint8_t *codes,
SearchResultType &res) const {
assert(this->pq.M % 4 == 0);
// set filter func
std::function<bool(int)> is_filterable;
if (range_index_ptr_ != nullptr) {
is_filterable = [this](int doc_id) -> bool {
return (bitmap::test(docids_bitmap_, doc_id) ||
(not range_index_ptr_->Has(doc_id)));
};
} else {
is_filterable = [this](int doc_id) -> bool {
return (bitmap::test(docids_bitmap_, doc_id));
};
}
// set compute distance func
std::function<float(const uint8_t *)> calc_dis;
if (this->pq.M % 4 == 0) {
calc_dis = [this](const uint8_t *codes) -> float {
float dis = this->dis0;
const float *tab = this->sim_table;
for (size_t m = 0; m < this->pq.M; m += 4) {
dis += tab[*codes++], tab += this->pq.ksub;
dis += tab[*codes++], tab += this->pq.ksub;
dis += tab[*codes++], tab += this->pq.ksub;
dis += tab[*codes++], tab += this->pq.ksub;
}
return dis;
};
} else {
calc_dis = [this](const uint8_t *codes) -> float {
float dis = this->dis0;
const float *tab = this->sim_table;
for (size_t m = 0; m < this->pq.M; m++) {
dis += tab[*codes++], tab += this->pq.ksub;
}
return dis;
};
}
#define HANDLE_ONE \
do { \
if (res.ids[j] & realtime::kDelIdxMask) { \
codes += this->pq.M; \
j++; \
continue; \
} \
int doc_id = \
raw_vec_->vid_mgr_->VID2DocID(res.ids[j] & realtime::kRecoverIdxMask); \
if ((range_index_ptr_ != nullptr && \
(not range_index_ptr_->Has(doc_id))) || \
bitmap::test(docids_bitmap_, doc_id)) { \
codes += this->pq.M; /* increment pointer */ \
j++; /* increment j*/ \
continue; \
} \
\
float dis = this->dis0; \
const float *tab = this->sim_table; \
for (size_t m = 0; m < this->pq.M; m += 4) { \
dis += tab[*codes++], tab += this->pq.ksub; \
dis += tab[*codes++], tab += this->pq.ksub; \
dis += tab[*codes++], tab += this->pq.ksub; \
dis += tab[*codes++], tab += this->pq.ksub; \
} \
\
res.add(j, dis); \
\
j++; /* increment j */ \
} while (0)
size_t j = 0;
size_t loops = ncode / 8;
for (size_t i = 0; i < loops; i++) {
HANDLE_ONE; // 1
HANDLE_ONE; // 2
HANDLE_ONE; // 3
HANDLE_ONE; // 4
HANDLE_ONE; // 5
HANDLE_ONE; // 6
HANDLE_ONE; // 7
HANDLE_ONE; // 8
}
switch (ncode % 8) {
case 7:
HANDLE_ONE;
case 6:
HANDLE_ONE;
case 5:
HANDLE_ONE;
case 4:
HANDLE_ONE;
case 3:
HANDLE_ONE;
case 2:
HANDLE_ONE;
case 1:
HANDLE_ONE;
}
assert(j == ncode);
#undef HANDLE_ONE
}
template <class SearchResultType>
void scan_list_with_table(size_t ncode, const uint8_t **codes,
SearchResultType &res) const {
assert(this->pq.M % 4 == 0);
#define HANDLE_ONE \
do { \
float dis = this->dis0; \
const float *tab = this->sim_table; \
const uint8_t *code = codes[j]; \
for (size_t m = 0; m < this->pq.M; m += 4) { \
dis += tab[*code++], tab += this->pq.ksub; \
dis += tab[*code++], tab += this->pq.ksub; \
dis += tab[*code++], tab += this->pq.ksub; \
dis += tab[*code++], tab += this->pq.ksub; \
} \
\
res.add(j, dis); \
\
j++; /* increment j */ \
} while (0)
size_t j = 0;
size_t loops = ncode / 8;
for (size_t i = 0; i < loops; i++) {
HANDLE_ONE; // 1
HANDLE_ONE; // 2
HANDLE_ONE; // 3
HANDLE_ONE; // 4
HANDLE_ONE; // 5
HANDLE_ONE; // 6
HANDLE_ONE; // 7
HANDLE_ONE; // 8
}
switch (ncode % 8) {
case 7:
HANDLE_ONE;
case 6:
HANDLE_ONE;
case 5:
HANDLE_ONE;
case 4:
HANDLE_ONE;
case 3:
HANDLE_ONE;
case 2:
HANDLE_ONE;
case 1:
HANDLE_ONE;
}
assert(j == ncode);
#undef HANDLE_ONE
}
inline void set_query(const float *query) override {
this->init_query(query);
}
inline void set_list(idx_t list_no, float coarse_dis) override {
this->init_list(list_no, coarse_dis, precompute_mode);
}
inline float distance_to_code(const uint8_t *code) const override {
assert(precompute_mode == 2);
float dis = this->dis0;
const float *tab = this->sim_table;
for (size_t m = 0; m < this->pq.M; m++) {
dis += tab[*code++];
tab += this->pq.ksub;
}
return dis;
}
inline size_t scan_codes(size_t ncode, const uint8_t *codes, const idx_t *ids,
float *heap_sim, idx_t *heap_ids,
size_t k) const override {
KnnSearchResults<C> res = {/* key */ this->key,
/* ids */ this->store_pairs_ ? nullptr : ids,
/* k */ k,
/* heap_sim */ heap_sim,
/* heap_ids */ heap_ids,
/* nup */ 0};
if (this->polysemous_ht > 0) {
assert(precompute_mode == 2);
this->scan_list_polysemous(ncode, codes, res);
} else if (precompute_mode == 2) {
this->scan_list_with_table(ncode, codes, res);
} else if (precompute_mode == 1) {
this->scan_list_with_pointer(ncode, codes, res);
} else if (precompute_mode == 0) {
this->scan_on_the_fly_dist(ncode, codes, res);
} else {
FAISS_THROW_MSG("bad precomp mode");
}
return 0;
}
inline size_t scan_codes_pointer(size_t ncode, const uint8_t **codes,
const idx_t *ids, float *heap_sim,
idx_t *heap_ids, size_t k) {
KnnSearchResults<C> res = {/* key */ this->key,
/* ids */ this->store_pairs_ ? nullptr : ids,
/* k */ k,
/* heap_sim */ heap_sim,
/* heap_ids */ heap_ids,
/* nup */ 0};
if (precompute_mode == 2) {
this->scan_list_with_table(ncode, codes, res);
} else {
FAISS_THROW_MSG("bad precomp mode");
}
return 0;
}
};
template<faiss::MetricType metric, class C>
struct GammaIVFFlatScanner: GammaInvertedListScanner {
size_t d;
GammaIVFFlatScanner(size_t d):d(d) {}
const float *xi;
void set_query (const float *query) override {
this->xi = query;
}
idx_t list_no;
void set_list (idx_t list_no, float /* coarse_dis */) override {
this->list_no = list_no;
}
float distance_to_code (const uint8_t *code) const override {
const float *yj = (float*)code;
float dis = metric == faiss::METRIC_INNER_PRODUCT ?
faiss::fvec_inner_product (xi, yj, d) : faiss::fvec_L2sqr (xi, yj, d);
return dis;
}
inline size_t scan_codes (size_t list_size,
const uint8_t *codes,
const idx_t *ids,
float *simi, idx_t *idxi,
size_t k) const override
{
// set filter func
std::function<bool(int)> is_filterable;
if (range_index_ptr_ != nullptr) {
is_filterable = [this](int doc_id) -> bool {
return (bitmap::test(docids_bitmap_, doc_id) ||
(not range_index_ptr_->Has(doc_id)));
};
} else {
is_filterable = [this](int doc_id) -> bool {
return (bitmap::test(docids_bitmap_, doc_id));
};
}
const float *list_vecs = (const float*)codes;
size_t nup = 0;
for (size_t j = 0; j < list_size; j++) {
if(ids[j] & realtime::kDelIdxMask) continue;
idx_t vid = ids[j] & realtime::kRecoverIdxMask;
if(vid < 0) continue;
int doc_id = raw_vec_->vid_mgr_->VID2DocID(vid);
if(doc_id < 0) continue;
if(is_filterable(doc_id)) continue;
const float *yj = list_vecs + d * vid;
float dis = metric == faiss::METRIC_INNER_PRODUCT ?
faiss::fvec_inner_product (xi, yj, d) : faiss::fvec_L2sqr (xi, yj, d);
if (C::cmp (simi[0], dis)) {
faiss::heap_pop<C> (k, simi, idxi);
faiss::heap_push<C> (k, simi, idxi, dis, doc_id);
nup++;
}
}
return nup;
}
size_t scan_codes_pointer(size_t ncode, const uint8_t **codes,
const idx_t *ids, float *heap_sim,
idx_t *heap_ids, size_t k) { return 0; }
};
struct GammaIVFPQIndex : GammaFLATIndex, faiss::IndexIVFPQ {
GammaIVFPQIndex(faiss::Index *quantizer, size_t d, size_t nlist, size_t M,
size_t nbits_per_idx, const char *docids_bitmap,
RawVector<float> *raw_vec,
GammaCounters *counters);
virtual ~GammaIVFPQIndex();
faiss::InvertedListScanner *get_InvertedListScanner(
bool store_pairs) const override;
GammaInvertedListScanner *GetGammaIVFFlatScanner(size_t d) const;
GammaInvertedListScanner *GetGammaInvertedListScanner(bool store_pairs) const;
int Indexing() override;
int AddRTVecsToIndex() override;
bool Add(int n, const float *vec);
int Update(int doc_id, const float *vec) { return -1; }
int AddUpdatedVecToIndex();
int Search(const VectorQuery *query, GammaSearchCondition *condition,
VectorResult &result) override;
void search_preassigned(int n, const float *x,
GammaSearchCondition *condition, const idx_t *keys,
const float *coarse_dis, float *distances,
idx_t *labels, int *total, bool store_pairs,
const faiss::IVFSearchParameters *params = nullptr);
void search_ivf_flat(int n, const float *x,
GammaSearchCondition *condition, const idx_t *keys,
const float *coarse_dis, float *distances,
idx_t *labels, int *total, bool store_pairs,
const faiss::IVFSearchParameters *params = nullptr);
// assign the vectors, then call search_preassigned
void SearchIVFPQ(int n, const float *x, GammaSearchCondition *condition,
float *distances, idx_t *labels, int *total);
long GetTotalMemBytes() override {
if (!rt_invert_index_ptr_) {
return 0;
}
return rt_invert_index_ptr_->GetTotalMemBytes();
}
int Dump(const std::string &dir, int max_vid) override;
int Load(const std::vector<std::string> &index_dirs);
virtual void copy_subset_to(faiss::IndexIVF &other, int subset_type, idx_t a1,
idx_t a2) const;
int Delete(int docid);
int indexed_vec_count_;
realtime::RTInvertIndex *rt_invert_index_ptr_;
bool compaction_;
size_t compact_bucket_no_;
uint64_t compacted_num_;
GammaCounters *gamma_counters_;
uint64_t updated_num_;
#ifdef PERFORMANCE_TESTING
std::atomic<uint64_t> search_count_;
int add_count_;
#endif
};
} // namespace tig_gamma
#endif
|
rose_v1_shared.c
|
/*
* dependence graph:
*/
#include <omp.h>
void foo()
{
int i;
int x;
int a[100];
#pragma omp parallel for private (i)
for (i = 0; i <= 99; i += 1) {
a[i] = a[i] + 1;
}
}
/*
non loop carried anti dependence for array accesses : level =1 > 0
dep SgExprStatement:a[i] =((a[i]) + 1); SgExprStatement:a[i] =((a[i]) + 1); 1*1 ANTI_DEP; commonlevel = 1 CarryLevel = 1 Is precise SgPntrArrRefExp:(a[i])@10:11->SgPntrArrRefExp:a[i]@10:9 == 0;||::
*/
|
3d25pt.c
|
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
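/* Worked example (hypothetical values): x = {2 s, 100000 us} minus
 * y = {1 s, 900000 us} stores {0 s, 200000 us} in result and returns 0,
 * i.e. 2.1 s - 1.9 s = 0.2 s. */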
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2;
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information; a trailing -1 sentinel marks the end of the list
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 4;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
coef0* A[t%2][i ][j ][k ] +
coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
composite.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE %
% C O O MM MM P P O O SS I T E %
% C O O M M M PPPP O O SSS I T EEE %
% C O O M M P O O SS I T E %
% CCCC OOO M M P OOO SSSSS IIIII T EEEEE %
% %
% %
% MagickCore Image Composite Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resample.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p o s i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompositeImage() returns the second image composited onto the first
% at the specified offset, using the specified composite method.
%
% The format of the CompositeImage method is:
%
% MagickBooleanType CompositeImage(Image *image,
% const Image *source_image,const CompositeOperator compose,
% const MagickBooleanType clip_to_self,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the canvas image, modified by the composition
%
% o source_image: the source image.
%
% o compose: This operator affects how the composite is applied to
% the image. The operators and how they are utilized are listed here
% http://www.w3.org/TR/SVG12/#compositing.
%
% o clip_to_self: set to MagickTrue to limit composition to area composed.
%
% o x_offset: the column offset of the composited image.
%
% o y_offset: the row offset of the composited image.
%
% Extra Controls from Image meta-data in 'image' (artifacts)
%
% o "compose:args"
% A string containing extra numerical arguments for specific compose
% methods, generally expressed as a 'geometry' or a comma separated list
% of numbers.
%
% Compose methods needing such arguments include "BlendCompositeOp" and
% "DisplaceCompositeOp".
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
Composition based on the SVG specification:
A Composition is defined by...
Color Function : f(Sc,Dc) where Sc and Dc are the normalized colors
Blending areas : X = 1 for area of overlap, ie: f(Sc,Dc)
Y = 1 for source preserved
Z = 1 for canvas preserved
Conversion to transparency (then optimized)
Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)
Where...
Sca = Sc*Sa normalized Source color multiplied by Source alpha
Dca = Dc*Da normalized Dest color multiplied by Dest alpha
Dc' = Dca'/Da' the desired color value for this channel.
Da' appears in the following formulas as 'gamma', the resulting alpha value.
Most functions use a blending mode of over (X=1,Y=1,Z=1) this results in
the following optimizations...
gamma = Sa+Da-Sa*Da;
gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma
The above SVG definitions also define that Mathematical Composition
methods should use a 'Over' blending mode for Alpha Channel.
It however was not applied for composition modes of 'Plus', 'Minus',
the modulus versions of 'Add' and 'Subtract'.
Mathematical operator changes to be applied from IM v6.7...
1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
'ModulusAdd' and 'ModulusSubtract' for clarity.
2) All mathematical compositions work as per the SVG specification
with regard to blending. This now includes 'ModulusAdd' and
'ModulusSubtract'.
3) When the special channel flag 'sync' (synchronize channel updates)
is turned off (enabled by default) then mathematical compositions are
only performed on the channels specified, and are applied
independently of each other. In other words, the mathematics is
performed as 'pure' mathematical operations, rather than as image
operations.
*/
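/*
  Illustrative numeric check of the 'Over' case used by CompositeOverImage()
  below (an added example, not part of the original derivation): with
  Sa = 0.5, Da = 1.0, and Sc = Dc = QuantumRange,

      alpha = Sa + Da - Sa*Da          = 1.0
      gamma = 1/alpha                  = 1.0
      Sca   = QuantumScale*Sa*Sc       = 0.5
      Dca   = QuantumScale*Da*Dc       = 1.0
      Dca'  = gamma*(Sca + Dca*(1-Sa)) = 1.0

  so compositing a half-transparent white source over an opaque white canvas
  leaves the canvas pixel at full intensity, as expected.
*/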
static void HCLComposite(const MagickRealType hue,const MagickRealType chroma,
const MagickRealType luma,MagickRealType *red,MagickRealType *green,
MagickRealType *blue)
{
MagickRealType
b,
c,
g,
h,
m,
r,
x;
/*
Convert HCL to RGB colorspace.
*/
assert(red != (MagickRealType *) NULL);
assert(green != (MagickRealType *) NULL);
assert(blue != (MagickRealType *) NULL);
h=6.0*hue;
c=chroma;
x=c*(1.0-fabs(fmod(h,2.0)-1.0));
r=0.0;
g=0.0;
b=0.0;
if ((0.0 <= h) && (h < 1.0))
{
r=c;
g=x;
}
else
if ((1.0 <= h) && (h < 2.0))
{
r=x;
g=c;
}
else
if ((2.0 <= h) && (h < 3.0))
{
g=c;
b=x;
}
else
if ((3.0 <= h) && (h < 4.0))
{
g=x;
b=c;
}
else
if ((4.0 <= h) && (h < 5.0))
{
r=x;
b=c;
}
else
if ((5.0 <= h) && (h < 6.0))
{
r=c;
b=x;
}
m=luma-(0.298839*r+0.586811*g+0.114350*b);
*red=QuantumRange*(r+m);
*green=QuantumRange*(g+m);
*blue=QuantumRange*(b+m);
}
static void CompositeHCL(const MagickRealType red,const MagickRealType green,
const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma,
MagickRealType *luma)
{
MagickRealType
b,
c,
g,
h,
max,
r;
/*
Convert RGB to HCL colorspace.
*/
assert(hue != (MagickRealType *) NULL);
assert(chroma != (MagickRealType *) NULL);
assert(luma != (MagickRealType *) NULL);
r=red;
g=green;
b=blue;
max=MagickMax(r,MagickMax(g,b));
c=max-(MagickRealType) MagickMin(r,MagickMin(g,b));
h=0.0;
if (c == 0)
h=0.0;
else
if (red == max)
h=fmod((g-b)/c+6.0,6.0);
else
if (green == max)
h=((b-r)/c)+2.0;
else
if (blue == max)
h=((r-g)/c)+4.0;
*hue=(h/6.0);
*chroma=QuantumScale*c;
*luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
}
static MagickBooleanType CompositeOverImage(Image *image,
const Image *source_image,const MagickBooleanType clip_to_self,
const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"
CacheView
*image_view,
*source_view;
const char
*value;
MagickBooleanType
clamp,
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Composite image.
*/
status=MagickTrue;
progress=0;
clamp=MagickTrue;
value=GetImageArtifact(image,"compose:clamp");
if (value != (const char *) NULL)
clamp=IsStringTrue(value);
status=MagickTrue;
progress=0;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*pixels;
PixelInfo
canvas_pixel,
source_pixel;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
if (clip_to_self != MagickFalse)
{
if (y < y_offset)
continue;
if ((y-y_offset) >= (ssize_t) source_image->rows)
continue;
}
/*
If pixels is NULL, y is outside overlay region.
*/
pixels=(Quantum *) NULL;
p=(Quantum *) NULL;
if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
{
p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
source_image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixels=p;
if (x_offset < 0)
p-=x_offset*GetPixelChannels(source_image);
}
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&canvas_pixel);
GetPixelInfo(source_image,&source_pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
MagickRealType
alpha,
Da,
Dc,
Dca,
Sa,
Sc,
Sca;
register ssize_t
i;
size_t
channels;
if (clip_to_self != MagickFalse)
{
if (x < x_offset)
{
q+=GetPixelChannels(image);
continue;
}
if ((x-x_offset) >= (ssize_t) source_image->columns)
break;
}
if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
((x-x_offset) >= (ssize_t) source_image->columns))
{
Quantum
source[MaxPixelChannels];
/*
Virtual composite:
Sc: source color.
Dc: canvas color.
*/
(void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
exception);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) q[i];
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
}
q+=GetPixelChannels(image);
continue;
}
/*
Authentic composite:
Sa: normalized source alpha.
Da: normalized canvas alpha.
*/
Sa=QuantumScale*GetPixelAlpha(source_image,p);
Da=QuantumScale*GetPixelAlpha(image,q);
alpha=Sa+Da-Sa*Da;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((source_traits == UndefinedPixelTrait) &&
(channel != AlphaPixelChannel))
continue;
if (channel == AlphaPixelChannel)
{
/*
Set alpha channel.
*/
pixel=QuantumRange*alpha;
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
continue;
}
/*
Sc: source color.
Dc: canvas color.
*/
Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
Dc=(MagickRealType) q[i];
if ((traits & CopyPixelTrait) != 0)
{
/*
Copy channel.
*/
q[i]=ClampToQuantum(Sc);
continue;
}
/*
Porter-Duff compositions:
Sca: source normalized color multiplied by alpha.
Dca: normalized canvas color multiplied by alpha.
*/
Sca=QuantumScale*Sa*Sc;
Dca=QuantumScale*Da*Dc;
gamma=PerceptibleReciprocal(alpha);
pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
}
p+=GetPixelChannels(source_image);
channels=GetPixelChannels(source_image);
if (p >= (pixels+channels*source_image->columns))
p=pixels;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CompositeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
MagickExport MagickBooleanType CompositeImage(Image *image,
const Image *composite,const CompositeOperator compose,
const MagickBooleanType clip_to_self,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"
CacheView
*source_view,
*image_view;
const char
*value;
GeometryInfo
geometry_info;
Image
*canvas_image,
*source_image;
MagickBooleanType
clamp,
status;
MagickOffsetType
progress;
MagickRealType
amount,
canvas_dissolve,
midpoint,
percent_luma,
percent_chroma,
source_dissolve,
threshold;
MagickStatusType
flags;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(composite != (Image *) NULL);
assert(composite->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
source_image=CloneImage(composite,0,0,MagickTrue,exception);
if (source_image == (const Image *) NULL)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
(void) SetImageColorspace(source_image,image->colorspace,exception);
if ((compose == OverCompositeOp) || (compose == SrcOverCompositeOp))
{
status=CompositeOverImage(image,source_image,clip_to_self,x_offset,
y_offset,exception);
source_image=DestroyImage(source_image);
return(status);
}
amount=0.5;
canvas_image=(Image *) NULL;
canvas_dissolve=1.0;
clamp=MagickTrue;
value=GetImageArtifact(image,"compose:clamp");
if (value != (const char *) NULL)
clamp=IsStringTrue(value);
SetGeometryInfo(&geometry_info);
percent_luma=100.0;
percent_chroma=100.0;
source_dissolve=1.0;
threshold=0.05f;
switch (compose)
{
case CopyCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*p;
register Quantum
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(source_image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if (traits == UndefinedPixelTrait)
continue;
if (source_traits != UndefinedPixelTrait)
SetPixelChannel(image,channel,p[i],q);
else if (channel == AlphaPixelChannel)
SetPixelChannel(image,channel,OpaqueAlpha,q);
}
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType)
y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case IntensityCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*p;
register Quantum
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (GetPixelReadMask(source_image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
SetPixelAlpha(image,clamp != MagickFalse ?
ClampPixel(GetPixelIntensity(source_image,p)) :
ClampToQuantum(GetPixelIntensity(source_image,p)),q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType)
y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case CopyAlphaCompositeOp:
case ChangeMaskCompositeOp:
{
/*
Modify canvas outside the overlaid region and require an alpha
channel to exist, to add transparency.
*/
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case BlurCompositeOp:
{
CacheView
*canvas_view;
MagickRealType
angle_range,
angle_start,
height,
width;
PixelInfo
pixel;
ResampleFilter
*resample_filter;
SegmentInfo
blur;
/*
Blur Image by resampling.
Blur Image dictated by an overlay gradient map: X = red_channel;
Y = green_channel; compose:args = x_scale[,y_scale[,angle]].
*/
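      /*
        Illustrative sketch (added, not original code): the gradient map and
        scales described above are normally configured by the caller through
        the "compose:args" image artifact, e.g. something like

          (void) SetImageArtifact(image,"compose:args","3x5+45");

        which ParseGeometry() below would read as x_scale=3, y_scale=5 and a
        45 degree rotation.  The exact call shown here is an assumption based
        on the documented x_scale[,y_scale[,angle]] format.
      */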
canvas_image=CloneImage(image,0,0,MagickTrue,
exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
/*
Gather the maximum blur sigma values from user.
*/
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (const char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & WidthValue) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"InvalidSetting","'%s' '%s'","compose:args",value);
source_image=DestroyImage(source_image);
canvas_image=DestroyImage(canvas_image);
return(MagickFalse);
}
/*
        The user's input sigma now needs to be converted to the EWA ellipse
        size.  The filter defaults to a sigma of 0.5, so to match the user's
        input the ellipse size needs to be doubled.
*/
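      /*
        Worked example (added for clarity): a user sigma of 2.0 (rho=2.0)
        therefore becomes an ellipse width and height of 4.0 below.
      */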
width=height=geometry_info.rho*2.0;
if ((flags & HeightValue) != 0 )
height=geometry_info.sigma*2.0;
/*
Default the unrotated ellipse width and height axis vectors.
*/
blur.x1=width;
blur.x2=0.0;
blur.y1=0.0;
blur.y2=height;
/* rotate vectors if a rotation angle is given */
if ((flags & XValue) != 0 )
{
MagickRealType
angle;
angle=DegreesToRadians(geometry_info.xi);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
      /* Otherwise let's set an angle range and calculate it in the loop */
angle_start=0.0;
angle_range=0.0;
if ((flags & YValue) != 0 )
{
angle_start=DegreesToRadians(geometry_info.xi);
angle_range=DegreesToRadians(geometry_info.psi)-angle_start;
}
/*
        Set up a Gaussian cylindrical filter for EWA blurring.
        As the minimum ellipse radius is support*1.0, the EWA algorithm can
        only produce a minimum blur of 0.5 for a Gaussian (support=2.0).
        This means that even 'No Blur' will still be a little blurry!
        The solution (as well as the problem of preventing any expert user
        filter settings) is to set our own filter settings, then restore
        them afterwards.
*/
resample_filter=AcquireResampleFilter(image,exception);
SetResampleFilter(resample_filter,GaussianFilter);
/* do the variable blurring of each pixel in image */
GetPixelInfo(image,&pixel);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
if (fabs((double) angle_range) > MagickEpsilon)
{
MagickRealType
angle;
angle=angle_start+angle_range*QuantumScale*
GetPixelBlue(source_image,p);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
#if 0
if ( x == 10 && y == 60 ) {
(void) fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n",blur.x1,
blur.x2,blur.y1, blur.y2);
(void) fprintf(stderr, "scaled by=%lf,%lf\n",QuantumScale*
GetPixelRed(p),QuantumScale*GetPixelGreen(p));
          }
#endif
ScaleResampleFilter(resample_filter,
blur.x1*QuantumScale*GetPixelRed(source_image,p),
blur.y1*QuantumScale*GetPixelGreen(source_image,p),
blur.x2*QuantumScale*GetPixelRed(source_image,p),
blur.y2*QuantumScale*GetPixelGreen(source_image,p) );
(void) ResamplePixelColor(resample_filter,(double) x_offset+x,
(double) y_offset+y,&pixel,exception);
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
resample_filter=DestroyResampleFilter(resample_filter);
source_view=DestroyCacheView(source_view);
canvas_view=DestroyCacheView(canvas_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DisplaceCompositeOp:
case DistortCompositeOp:
{
CacheView
*canvas_view;
MagickRealType
horizontal_scale,
vertical_scale;
PixelInfo
pixel;
PointInfo
center,
offset;
/*
Displace/Distort based on overlay gradient map:
X = red_channel; Y = green_channel;
compose:args = x_scale[,y_scale[,center.x,center.y]]
*/
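      /*
        Illustrative sketch (added, not original code): following the
        x_scale[,y_scale[,center.x,center.y]] format above, an artifact such as

          (void) SetImageArtifact(image,"compose:args","20x10");

        would scale the red channel of the overlay into a +/-20 pixel
        horizontal displacement and the green channel into a +/-10 pixel
        vertical displacement.  The call shown is an assumption for
        illustration only.
      */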
canvas_image=CloneImage(image,0,0,MagickTrue,
exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
SetGeometryInfo(&geometry_info);
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & (WidthValue | HeightValue)) == 0 )
{
if ((flags & AspectValue) == 0)
{
horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0;
vertical_scale=(MagickRealType) (source_image->rows-1)/2.0;
}
else
{
horizontal_scale=(MagickRealType) (image->columns-1)/2.0;
vertical_scale=(MagickRealType) (image->rows-1)/2.0;
}
}
else
{
horizontal_scale=geometry_info.rho;
vertical_scale=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
if ((flags & AspectValue) == 0)
{
horizontal_scale*=(source_image->columns-1)/200.0;
vertical_scale*=(source_image->rows-1)/200.0;
}
else
{
horizontal_scale*=(image->columns-1)/200.0;
vertical_scale*=(image->rows-1)/200.0;
}
}
if ((flags & HeightValue) == 0)
vertical_scale=horizontal_scale;
}
/*
Determine fixed center point for absolute distortion map
Absolute distort ==
Displace offset relative to a fixed absolute point
Select that point according to +X+Y user inputs.
default = center of overlay image
arg flag '!' = locations/percentage relative to background image
*/
center.x=(MagickRealType) x_offset;
center.y=(MagickRealType) y_offset;
if (compose == DistortCompositeOp)
{
if ((flags & XValue) == 0)
if ((flags & AspectValue) != 0)
center.x=(MagickRealType) ((image->columns-1)/2.0);
else
center.x=(MagickRealType) (x_offset+(source_image->columns-1)/
2.0);
else
if ((flags & AspectValue) != 0)
center.x=geometry_info.xi;
else
center.x=(MagickRealType) (x_offset+geometry_info.xi);
if ((flags & YValue) == 0)
if ((flags & AspectValue) != 0)
center.y=(MagickRealType) ((image->rows-1)/2.0);
else
center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0);
else
if ((flags & AspectValue) != 0)
center.y=geometry_info.psi;
else
center.y=(MagickRealType) (y_offset+geometry_info.psi);
}
/*
        Shift the pixel offset point as defined by the provided
        displacement/distortion map -- like a lens...
*/
GetPixelInfo(image,&pixel);
image_view=AcquireVirtualCacheView(image,exception);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
/*
Displace the offset.
*/
offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ?
x : 0);
offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ?
y : 0);
status=InterpolatePixelInfo(image,image_view,
UndefinedInterpolatePixel,(double) offset.x,(double) offset.y,
&pixel,exception);
if (status == MagickFalse)
break;
/*
Mask with the 'invalid pixel mask' in alpha channel.
*/
pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)*
(QuantumScale*GetPixelAlpha(source_image,p));
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
if (x < (ssize_t) source_image->columns)
break;
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
canvas_view=DestroyCacheView(canvas_view);
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DissolveCompositeOp:
{
/*
Geometry arguments to dissolve factors.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0;
if ((source_dissolve-MagickEpsilon) < 0.0)
source_dissolve=0.0;
if ((source_dissolve+MagickEpsilon) > 1.0)
{
canvas_dissolve=2.0-source_dissolve;
source_dissolve=1.0;
}
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
if ((canvas_dissolve-MagickEpsilon) < 0.0)
canvas_dissolve=0.0;
}
break;
}
case BlendCompositeOp:
{
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0-source_dissolve;
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
}
break;
}
case MathematicsCompositeOp:
{
/*
        Just collect the values from the "compose:args" setting.
        Unused values are set to zero automagically.
        Arguments are normally a comma separated list, so this probably should
        be changed to some 'general comma list' parser (with a minimum number
        of values).
*/
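      /*
        Worked example (added for clarity): given the MathematicsCompositeOp
        formula f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D used further below, a
        "compose:args" value of "0,0.5,0.5,0" yields A=0, B=0.5, C=0.5, D=0,
        i.e. f(Sc,Dc) = 0.5*Sc + 0.5*Dc, a 50/50 average of source and canvas.
      */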
SetGeometryInfo(&geometry_info);
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
(void) ParseGeometry(value,&geometry_info);
break;
}
case ModulateCompositeOp:
{
/*
Determine the luma and chroma scale.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
percent_luma=geometry_info.rho;
if ((flags & SigmaValue) != 0)
percent_chroma=geometry_info.sigma;
}
break;
}
case ThresholdCompositeOp:
{
/*
Determine the amount and threshold.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
amount=geometry_info.rho;
threshold=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
threshold=0.05f;
}
threshold*=QuantumRange;
break;
}
default:
break;
}
/*
Composite image.
*/
status=MagickTrue;
progress=0;
midpoint=((MagickRealType) QuantumRange+1.0)/2;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*pixels;
MagickRealType
blue,
chroma,
green,
hue,
luma,
red;
PixelInfo
canvas_pixel,
source_pixel;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
if (clip_to_self != MagickFalse)
{
if (y < y_offset)
continue;
if ((y-y_offset) >= (ssize_t) source_image->rows)
continue;
}
/*
If pixels is NULL, y is outside overlay region.
*/
pixels=(Quantum *) NULL;
p=(Quantum *) NULL;
if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
{
p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
source_image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixels=p;
if (x_offset < 0)
p-=x_offset*GetPixelChannels(source_image);
}
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
hue=0.0;
chroma=0.0;
luma=0.0;
GetPixelInfo(image,&canvas_pixel);
GetPixelInfo(source_image,&source_pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
MagickRealType
alpha,
Da,
Dc,
Dca,
DcaDa,
Sa,
SaSca,
Sc,
Sca;
register ssize_t
i;
size_t
channels;
if (clip_to_self != MagickFalse)
{
if (x < x_offset)
{
q+=GetPixelChannels(image);
continue;
}
if ((x-x_offset) >= (ssize_t) source_image->columns)
break;
}
if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
((x-x_offset) >= (ssize_t) source_image->columns))
{
Quantum
source[MaxPixelChannels];
/*
Virtual composite:
Sc: source color.
Dc: canvas color.
*/
(void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
exception);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
switch (compose)
{
case AlphaCompositeOp:
case ChangeMaskCompositeOp:
case CopyAlphaCompositeOp:
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case OutCompositeOp:
case SrcInCompositeOp:
case SrcOutCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) q[i];
break;
}
case ClearCompositeOp:
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=0.0;
break;
}
case BlendCompositeOp:
case DissolveCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=canvas_dissolve*GetPixelAlpha(source_image,source);
else
pixel=(MagickRealType) source[channel];
break;
}
default:
{
pixel=(MagickRealType) source[channel];
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
}
q+=GetPixelChannels(image);
continue;
}
/*
Authentic composite:
Sa: normalized source alpha.
Da: normalized canvas alpha.
*/
Sa=QuantumScale*GetPixelAlpha(source_image,p);
Da=QuantumScale*GetPixelAlpha(image,q);
switch (compose)
{
case BumpmapCompositeOp:
{
alpha=GetPixelIntensity(source_image,p)*Sa;
break;
}
case ColorBurnCompositeOp:
case ColorDodgeCompositeOp:
case DarkenCompositeOp:
case DifferenceCompositeOp:
case DivideDstCompositeOp:
case DivideSrcCompositeOp:
case ExclusionCompositeOp:
case HardLightCompositeOp:
case HardMixCompositeOp:
case LinearBurnCompositeOp:
case LinearDodgeCompositeOp:
case LinearLightCompositeOp:
case LightenCompositeOp:
case MathematicsCompositeOp:
case MinusDstCompositeOp:
case MinusSrcCompositeOp:
case ModulusAddCompositeOp:
case ModulusSubtractCompositeOp:
case MultiplyCompositeOp:
case OverlayCompositeOp:
case PegtopLightCompositeOp:
case PinLightCompositeOp:
case ScreenCompositeOp:
case SoftLightCompositeOp:
case VividLightCompositeOp:
{
alpha=RoundToUnity(Sa+Da-Sa*Da);
break;
}
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case SrcInCompositeOp:
{
alpha=Sa*Da;
break;
}
case DissolveCompositeOp:
{
alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+
canvas_dissolve*Da;
break;
}
case DstOverCompositeOp:
case OverCompositeOp:
case SrcOverCompositeOp:
{
alpha=Sa+Da-Sa*Da;
break;
}
case DstOutCompositeOp:
{
alpha=Da*(1.0-Sa);
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
alpha=Sa*(1.0-Da);
break;
}
case BlendCompositeOp:
case PlusCompositeOp:
{
alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da);
break;
}
case XorCompositeOp:
{
alpha=Sa+Da-2.0*Sa*Da;
break;
}
default:
{
alpha=1.0;
break;
}
}
switch (compose)
{
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case ModulateCompositeOp:
case SaturateCompositeOp:
{
GetPixelInfoPixel(source_image,p,&source_pixel);
GetPixelInfoPixel(image,q,&canvas_pixel);
break;
}
default:
break;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel,
sans;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits = GetPixelChannelTraits(source_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((channel == AlphaPixelChannel) &&
((traits & UpdatePixelTrait) != 0))
{
/*
Set alpha channel.
*/
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case CopyBlackCompositeOp:
case CopyBlueCompositeOp:
case CopyCyanCompositeOp:
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
case CopyRedCompositeOp:
case CopyYellowCompositeOp:
case SrcAtopCompositeOp:
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case ChangeMaskCompositeOp:
{
MagickBooleanType
equivalent;
if (Da < 0.5)
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
equivalent=IsFuzzyEquivalencePixel(source_image,p,image,q);
if (equivalent != MagickFalse)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) OpaqueAlpha;
break;
}
case ClearCompositeOp:
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Da;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Sa;
break;
}
if (Sa < Da)
{
pixel=QuantumRange*Da;
break;
}
pixel=QuantumRange*Sa;
break;
}
case CopyAlphaCompositeOp:
{
if (source_image->alpha_trait == UndefinedPixelTrait)
pixel=GetPixelIntensity(source_image,p);
else
pixel=QuantumRange*Sa;
break;
}
case CopyCompositeOp:
case DisplaceCompositeOp:
case DistortCompositeOp:
case DstAtopCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case DarkenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case DifferenceCompositeOp:
{
pixel=QuantumRange*fabs(Sa-Da);
break;
}
case LightenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case ModulateCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case MultiplyCompositeOp:
{
pixel=QuantumRange*Sa*Da;
break;
}
case StereoCompositeOp:
{
pixel=QuantumRange*(Sa+Da)/2;
break;
}
default:
{
pixel=QuantumRange*alpha;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
continue;
}
if (source_traits == UndefinedPixelTrait)
continue;
/*
Sc: source color.
Dc: canvas color.
*/
Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
Dc=(MagickRealType) q[i];
if ((traits & CopyPixelTrait) != 0)
{
/*
Copy channel.
*/
q[i]=ClampToQuantum(Dc);
continue;
}
/*
Porter-Duff compositions:
Sca: source normalized color multiplied by alpha.
Dca: normalized canvas color multiplied by alpha.
*/
Sca=QuantumScale*Sa*Sc;
Dca=QuantumScale*Da*Dc;
SaSca=Sa*PerceptibleReciprocal(Sca);
DcaDa=Dca*PerceptibleReciprocal(Da);
switch (compose)
{
case DarkenCompositeOp:
case LightenCompositeOp:
case ModulusSubtractCompositeOp:
{
gamma=PerceptibleReciprocal(1.0-alpha);
break;
}
default:
{
gamma=PerceptibleReciprocal(alpha);
break;
}
}
pixel=Dc;
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case SrcAtopCompositeOp:
{
pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa));
break;
}
case BlendCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc);
break;
}
case BlurCompositeOp:
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sca;
break;
}
case DisplaceCompositeOp:
case DistortCompositeOp:
{
pixel=Sc;
break;
}
case BumpmapCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc;
break;
}
case ChangeMaskCompositeOp:
{
pixel=Dc;
break;
}
case ClearCompositeOp:
{
pixel=0.0;
break;
}
case ColorBurnCompositeOp:
{
if ((Sca == 0.0) && (Dca == Da))
{
pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa));
break;
}
if (Sca == 0.0)
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-DcaDa)*
SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorDodgeCompositeOp:
{
if ((Sca*Da+Dca*Sa) >= Sa*Da)
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
else
pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&sans,&sans,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case CopyAlphaCompositeOp:
{
pixel=Dc;
break;
}
case CopyBlackCompositeOp:
{
if (channel == BlackPixelChannel)
pixel=(MagickRealType) (QuantumRange-
GetPixelBlack(source_image,p));
break;
}
case CopyBlueCompositeOp:
case CopyYellowCompositeOp:
{
if (channel == BluePixelChannel)
pixel=(MagickRealType) GetPixelBlue(source_image,p);
break;
}
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
{
if (channel == GreenPixelChannel)
pixel=(MagickRealType) GetPixelGreen(source_image,p);
break;
}
case CopyRedCompositeOp:
case CopyCyanCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case DarkenCompositeOp:
{
/*
Darken is equivalent to a 'Minimum' method
OR a greyscale version of a binary 'Or'
OR the 'Intersection' of pixel sets.
*/
if ((Sca*Da) < (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case DarkenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case DifferenceCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa));
break;
}
case DissolveCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa*
canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc);
break;
}
case DivideDstCompositeOp:
{
if ((fabs((double) Sca) < MagickEpsilon) &&
(fabs((double) Dca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (fabs((double) Dca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case DivideSrcCompositeOp:
{
if ((fabs((double) Dca) < MagickEpsilon) &&
(fabs((double) Sca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
if (fabs((double) Sca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*SaSca+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
case DstAtopCompositeOp:
{
pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da));
break;
}
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Dca;
break;
}
case DstInCompositeOp:
{
pixel=QuantumRange*(Dca*Sa);
break;
}
case DstOutCompositeOp:
{
pixel=QuantumRange*(Dca*(1.0-Sa));
break;
}
case DstOverCompositeOp:
{
pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da));
break;
}
case ExclusionCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case HardLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-
Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case HardMixCompositeOp:
{
pixel=gamma*(((Sca+Dca) < 1.0) ? 0.0 : QuantumRange);
break;
}
case HueCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&sans,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case InCompositeOp:
case SrcInCompositeOp:
{
pixel=QuantumRange*(Sca*Da);
break;
}
case LinearBurnCompositeOp:
{
/*
              LinearBurn: as defined by Adobe Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Sc + Dc - 1
*/
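            /*
              Derivation sketch (added for clarity): substituting
              f(Sc,Dc) = Sc + Dc - 1 into the SVG compositing formula
                Dca' = Sa*Da*f(Sc,Dc) + Sca*(1-Da) + Dca*(1-Sa)
              expands to Sca*Da + Dca*Sa - Sa*Da + Sca - Sca*Da + Dca - Dca*Sa,
              which simplifies to Sca + Dca - Sa*Da, matching the expression
              below.
            */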
pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da);
break;
}
case LinearDodgeCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc);
break;
}
case LinearLightCompositeOp:
{
/*
              LinearLight: as defined by Adobe Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Dc + 2*Sc - 1
*/
pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca);
break;
}
case LightenCompositeOp:
{
if ((Sca*Da) > (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case LightenIntensityCompositeOp:
{
/*
Lighten is equivalent to a 'Maximum' method
OR a greyscale version of a binary 'And'
OR the 'Union' of pixel sets.
*/
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case LuminizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&sans,&luma);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case MathematicsCompositeOp:
{
/*
              'Mathematics': a free-form, user-controlled mathematical composition
is defined as...
f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D
Where the arguments A,B,C,D are (currently) passed to composite
              as a comma separated 'geometry' string in the "compose:args" image
artifact.
A = a->rho, B = a->sigma, C = a->xi, D = a->psi
Applying the SVG transparency formula (see above), we get...
Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)
Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
Dca*(1.0-Sa)
*/
pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+
geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+
geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case MinusDstCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa);
break;
}
case MinusSrcCompositeOp:
{
/*
Minus source from canvas.
                f(Sc,Dc) = Dc - Sc
*/
pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da);
break;
}
case ModulateCompositeOp:
{
ssize_t
offset;
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint);
if (offset == 0)
{
pixel=Dc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
luma+=(0.01*percent_luma*offset)/midpoint;
chroma*=0.01*percent_chroma;
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ModulusAddCompositeOp:
{
pixel=Sc+Dc;
while (pixel > QuantumRange)
pixel-=QuantumRange;
while (pixel < 0.0)
pixel+=QuantumRange;
pixel=(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa));
break;
}
case ModulusSubtractCompositeOp:
{
pixel=Sc-Dc;
while (pixel > QuantumRange)
pixel-=QuantumRange;
while (pixel < 0.0)
pixel+=QuantumRange;
pixel=(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa));
break;
}
case MultiplyCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da));
break;
}
case OverCompositeOp:
case SrcOverCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
break;
}
case OverlayCompositeOp:
{
if ((2.0*Dca) < Da)
{
pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0-
Da));
break;
}
pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+
Sca*(1.0-Da));
break;
}
case PegtopLightCompositeOp:
{
/*
PegTop: A Soft-Light alternative: A continuous version of the
Softlight function, producing very similar results.
f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc
http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
*/
if (fabs((double) Da) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Sca);
break;
}
pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0-
Da)+Dca*(1.0-Sa));
break;
}
case PinLightCompositeOp:
{
/*
PinLight: A Photoshop 7 composition method
http://www.simplefilter.de/en/basics/mixmods.html
f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc
*/
if ((Dca*Sa) < (Da*(2.0*Sca-Sa)))
{
pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
break;
}
if ((Dca*Sa) > (2.0*Sca*Da))
{
pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca);
break;
}
case PlusCompositeOp:
{
pixel=QuantumRange*(Sca+Dca);
break;
}
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ScreenCompositeOp:
{
/*
Screen: a negated multiply:
f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
*/
pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca);
break;
}
case SoftLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-DcaDa))+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
{
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*DcaDa*
(4.0*DcaDa+1.0)*(DcaDa-1.0)+7.0*DcaDa)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow(DcaDa,0.5)-
DcaDa)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case StereoCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case ThresholdCompositeOp:
{
MagickRealType
delta;
delta=Sc-Dc;
if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
{
pixel=gamma*Dc;
break;
}
pixel=gamma*(Dc+delta*amount);
break;
}
case VividLightCompositeOp:
{
/*
VividLight: A Photoshop 7 composition method. See
http://www.simplefilter.de/en/basics/mixmods.html.
f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
*/
if ((fabs((double) Sa) < MagickEpsilon) ||
(fabs((double) (Sca-Sa)) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if ((2.0*Sca) <= Sa)
{
pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)*
PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(2.0*
(Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case XorCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
default:
{
pixel=Sc;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
}
p+=GetPixelChannels(source_image);
channels=GetPixelChannels(source_image);
if (p >= (pixels+channels*source_image->columns))
p=pixels;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CompositeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
if (canvas_image != (Image * ) NULL)
canvas_image=DestroyImage(canvas_image);
else
source_image=DestroyImage(source_image);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T e x t u r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TextureImage() repeatedly tiles the texture image across and down the image
% canvas.
%
% The format of the TextureImage method is:
%
% MagickBooleanType TextureImage(Image *image,const Image *texture,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o texture_image: This image is the texture to layer on the background.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
ExceptionInfo *exception)
{
#define TextureImageTag "Texture/Image"
CacheView
*image_view,
*texture_view;
Image
*texture_image;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (texture == (const Image *) NULL)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
texture_image=CloneImage(texture,0,0,MagickTrue,exception);
if (texture_image == (const Image *) NULL)
return(MagickFalse);
(void) TransformImageColorspace(texture_image,image->colorspace,exception);
(void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
exception);
status=MagickTrue;
if ((image->compose != CopyCompositeOp) &&
((image->compose != OverCompositeOp) ||
(image->alpha_trait != UndefinedPixelTrait) ||
(texture_image->alpha_trait != UndefinedPixelTrait)))
{
/*
Tile texture onto the image background.
*/
for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
{
register ssize_t
x;
if (status == MagickFalse)
continue;
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
{
MagickBooleanType
thread_status;
thread_status=CompositeImage(image,texture_image,image->compose,
MagickTrue,x+texture_image->tile_offset.x,y+
texture_image->tile_offset.y,exception);
if (thread_status == MagickFalse)
{
status=thread_status;
break;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
image->rows,image->rows);
texture_image=DestroyImage(texture_image);
return(status);
}
/*
Tile texture onto the image background (optimized).
*/
status=MagickTrue;
texture_view=AcquireVirtualCacheView(texture_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(texture_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*p,
*pixels;
register ssize_t
x;
register Quantum
*q;
size_t
width;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,
(y+texture_image->tile_offset.y) % texture_image->rows,
texture_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
{
register ssize_t
j;
p=pixels;
width=texture_image->columns;
if ((x+(ssize_t) width) > (ssize_t) image->columns)
width=image->columns-x;
for (j=0; j < (ssize_t) width; j++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(texture_image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait texture_traits=GetPixelChannelTraits(texture_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(texture_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(texture_image);
q+=GetPixelChannels(image);
}
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
texture_view=DestroyCacheView(texture_view);
image_view=DestroyCacheView(image_view);
texture_image=DestroyImage(texture_image);
return(status);
}
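/*
  Usage sketch (added for illustration; the variable names and setup are
  assumptions, not part of the original source):

    ExceptionInfo *exception = AcquireExceptionInfo();
    Image *canvas = ...;    // image to be tiled over
    Image *texture = ...;   // texture to tile across and down the canvas
    if (TextureImage(canvas,texture,exception) == MagickFalse)
      ; // inspect 'exception' for the reason the tiling failed
    exception=DestroyExceptionInfo(exception);
*/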
|
endif.c
|
/*
* test the handling of #endif at the end of a parallel region
* Extracted from BOTS
* Liao 1/15/2009
* */
#include <stdio.h>
#define FORCE_TIED_TASKS
void find_queens (int size)
{
int total_count=0;
#pragma omp parallel
{
#ifdef FORCE_TIED_TASKS
#pragma omp atomic
total_count += 1;
#endif
// printf("aa");
}
}
|
DRACC_OMP_024_MxV_Missing_Enter_Data_yes.c
|
/*
Matrix-vector multiplication with the matrix missing on the accelerator. Uses the target enter data construct.
*/
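/*
  Note added for clarity (based on the description above): the matrix b is only
  mapped with map(alloc:...) in the target enter data directive below, so its
  host values are never copied to the device and the device copy starts
  uninitialized.  A corrected mapping would presumably read:

    #pragma omp target enter data map(to:a[0:C],c[0:C],b[0:C*C]) device(0)
*/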
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#define C 512
int *a;
int *b;
int *c;
int init(){
for(int i=0; i<C; i++){
for(int j=0; j<C; j++){
b[j+i*C]=1;
}
a[i]=1;
c[i]=0;
}
return 0;
}
int Mult(){
#pragma omp target enter data map(to:a[0:C],c[0:C]) map(alloc:b[0:C*C]) device(0)
#pragma omp target device(0)
{
#pragma omp teams distribute parallel for
for(int i=0; i<C; i++){
for(int j=0; j<C; j++){
c[i]+=b[j+i*C]*a[j];
}
}
}
#pragma omp target exit data map(from:c[0:C]) map(release:a[0:C],b[0:C*C]) device(0)
return 0;
}
int check(){
bool test = false;
for(int i=0; i<C; i++){
if(c[i]!=C){
test = true;
}
}
printf("Memory Access Issue visible: %s\n",test ? "true" : "false");
return 0;
}
int main(){
a = malloc(C*sizeof(int));
b = malloc(C*C*sizeof(int));
c = malloc(C*sizeof(int));
init();
Mult();
check();
free(a);
free(b);
free(c);
return 0;
}
|
Types.h
|
//===---------- Types.h - OpenMP types ---------------------------- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
#ifndef OMPTARGET_TYPES_H
#define OMPTARGET_TYPES_H
// Tell the compiler that we do not have any "call-like" inline assembly in the
// device runtime. That means we cannot have inline assembly which will call
// another function but only inline assembly that performs some operation or
// side-effect and then continues execution with something on the existing call
// stack.
//
// TODO: Find a good place for this
#pragma omp assumes ext_no_call_asm
/// Base type declarations for freestanding mode
///
///{
using int8_t = char;
using uint8_t = unsigned char;
using int16_t = short;
using uint16_t = unsigned short;
using int32_t = int;
using uint32_t = unsigned int;
using int64_t = long;
using uint64_t = unsigned long;
static_assert(sizeof(int8_t) == 1, "type size mismatch");
static_assert(sizeof(uint8_t) == 1, "type size mismatch");
static_assert(sizeof(int16_t) == 2, "type size mismatch");
static_assert(sizeof(uint16_t) == 2, "type size mismatch");
static_assert(sizeof(int32_t) == 4, "type size mismatch");
static_assert(sizeof(uint32_t) == 4, "type size mismatch");
static_assert(sizeof(int64_t) == 8, "type size mismatch");
static_assert(sizeof(uint64_t) == 8, "type size mismatch");
///}
enum omp_proc_bind_t {
omp_proc_bind_false = 0,
omp_proc_bind_true = 1,
omp_proc_bind_master = 2,
omp_proc_bind_close = 3,
omp_proc_bind_spread = 4
};
enum omp_sched_t {
omp_sched_static = 1, /* chunkSize >0 */
omp_sched_dynamic = 2, /* chunkSize >0 */
omp_sched_guided = 3, /* chunkSize >0 */
omp_sched_auto = 4, /* no chunkSize */
};
enum kmp_sched_t {
kmp_sched_static_chunk = 33,
kmp_sched_static_nochunk = 34,
kmp_sched_dynamic = 35,
kmp_sched_guided = 36,
kmp_sched_runtime = 37,
kmp_sched_auto = 38,
kmp_sched_static_balanced_chunk = 45,
kmp_sched_static_ordered = 65,
kmp_sched_static_nochunk_ordered = 66,
kmp_sched_dynamic_ordered = 67,
kmp_sched_guided_ordered = 68,
kmp_sched_runtime_ordered = 69,
kmp_sched_auto_ordered = 70,
kmp_sched_distr_static_chunk = 91,
kmp_sched_distr_static_nochunk = 92,
kmp_sched_distr_static_chunk_sched_static_chunkone = 93,
kmp_sched_default = kmp_sched_static_nochunk,
kmp_sched_unordered_first = kmp_sched_static_chunk,
kmp_sched_unordered_last = kmp_sched_auto,
kmp_sched_ordered_first = kmp_sched_static_ordered,
kmp_sched_ordered_last = kmp_sched_auto_ordered,
kmp_sched_distribute_first = kmp_sched_distr_static_chunk,
kmp_sched_distribute_last =
kmp_sched_distr_static_chunk_sched_static_chunkone,
/* Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
* Since we need to distinguish the three possible cases (no modifier,
* monotonic modifier, nonmonotonic modifier), we need separate bits for
* each modifier. The absence of monotonic does not imply nonmonotonic,
* especially since 4.5 says that the behaviour of the "no modifier" case
* is implementation defined in 4.5, but will become "nonmonotonic" in 5.0.
*
* Since we're passing a full 32 bit value, we can use a couple of high
* bits for these flags; out of paranoia we avoid the sign bit.
*
* These modifiers can be or-ed into non-static schedules by the compiler
* to pass the additional information. They will be stripped early in the
* processing in __kmp_dispatch_init when setting up schedules, so
* most of the code won't ever see schedules with these bits set.
*/
kmp_sched_modifier_monotonic = (1 << 29),
/**< Set if the monotonic schedule modifier was present */
kmp_sched_modifier_nonmonotonic = (1 << 30),
/**< Set if the nonmonotonic schedule modifier was present */
#define SCHEDULE_WITHOUT_MODIFIERS(s) \
(enum kmp_sched_t)( \
(s) & ~(kmp_sched_modifier_nonmonotonic | kmp_sched_modifier_monotonic))
#define SCHEDULE_HAS_MONOTONIC(s) (((s)&kmp_sched_modifier_monotonic) != 0)
#define SCHEDULE_HAS_NONMONOTONIC(s) \
(((s)&kmp_sched_modifier_nonmonotonic) != 0)
#define SCHEDULE_HAS_NO_MODIFIERS(s) \
(((s) & (kmp_sched_modifier_nonmonotonic | kmp_sched_modifier_monotonic)) == \
0)
};
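// Illustrative example (added, not part of the original header): with the
// macros above, a schedule word s = (kmp_sched_dynamic |
// kmp_sched_modifier_nonmonotonic) satisfies SCHEDULE_HAS_NONMONOTONIC(s),
// and SCHEDULE_WITHOUT_MODIFIERS(s) strips the flag and yields
// kmp_sched_dynamic again.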
struct TaskDescriptorTy;
using TaskFnTy = int32_t (*)(int32_t global_tid, TaskDescriptorTy *taskDescr);
struct TaskDescriptorTy {
void *Payload;
TaskFnTy TaskFn;
};
#pragma omp begin declare variant match(device = {arch(amdgcn)})
using LaneMaskTy = uint64_t;
#pragma omp end declare variant
#pragma omp begin declare variant match( \
device = {arch(amdgcn)}, implementation = {extension(match_none)})
using LaneMaskTy = uint64_t;
#pragma omp end declare variant
namespace lanes {
enum : LaneMaskTy { All = ~(LaneMaskTy)0 };
} // namespace lanes
/// The ident structure that describes a source location. The struct is
/// identical to the one in the kmp.h file. We maintain the same data structure
/// for compatibility.
struct IdentTy {
int32_t reserved_1; /**< might be used in Fortran; see above */
int32_t flags; /**< also f.flags; KMP_IDENT_xxx flags; KMP_IDENT_KMPC
identifies this union member */
int32_t reserved_2; /**< not really used in Fortran any more; see above */
int32_t reserved_3; /**< source[4] in Fortran, do not use for C++ */
char const *psource; /**< String describing the source location.
The string is composed of semi-colon separated fields
which describe the source file, the function and a pair
of line numbers that delimit the construct. */
};
using __kmpc_impl_lanemask_t = LaneMaskTy;
using ParallelRegionFnTy = void *;
using CriticalNameTy = int32_t[8];
struct omp_lock_t {
void *Lock;
};
using InterWarpCopyFnTy = void (*)(void *src, int32_t warp_num);
using ShuffleReductFnTy = void (*)(void *rhsData, int16_t lane_id,
int16_t lane_offset, int16_t shortCircuit);
using ListGlobalFnTy = void (*)(void *buffer, int idx, void *reduce_data);
/// Macros for allocating variables in different address spaces.
///{
// Follows the pattern in interface.h
typedef enum omp_allocator_handle_t {
omp_null_allocator = 0,
omp_default_mem_alloc = 1,
omp_large_cap_mem_alloc = 2,
omp_const_mem_alloc = 3,
omp_high_bw_mem_alloc = 4,
omp_low_lat_mem_alloc = 5,
omp_cgroup_mem_alloc = 6,
omp_pteam_mem_alloc = 7,
omp_thread_mem_alloc = 8,
KMP_ALLOCATOR_MAX_HANDLE = ~(0U)
} omp_allocator_handle_t;
enum OMPTgtExecModeFlags : int8_t {
OMP_TGT_EXEC_MODE_GENERIC = 1 << 0,
OMP_TGT_EXEC_MODE_SPMD = 1 << 1,
};
#define __PRAGMA(STR) _Pragma(#STR)
#define OMP_PRAGMA(STR) __PRAGMA(omp STR)
#define SHARED(NAME) \
NAME [[clang::loader_uninitialized]]; \
OMP_PRAGMA(allocate(NAME) allocator(omp_pteam_mem_alloc))
// TODO: clang should use address space 5 for omp_thread_mem_alloc, but right
// now that's not the case.
#define THREAD_LOCAL(NAME) \
NAME [[clang::loader_uninitialized, clang::address_space(5)]]
// TODO: clang should use address space 4 for omp_const_mem_alloc, maybe it
// does?
#define CONSTANT(NAME) \
NAME [[clang::loader_uninitialized, clang::address_space(4)]]
///}
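// Usage sketch (added; the variable name is an assumption): the SHARED macro
// above is spliced into a declaration, e.g.
//   static uint32_t SHARED(Cnt);
// which expands to an uninitialized variable plus an
//   #pragma omp allocate(Cnt) allocator(omp_pteam_mem_alloc)
// so the storage lives in team-shared memory.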
#endif
|
ThreadSafeVec.h
|
//
// smarties
// Copyright (c) 2018 CSE-Lab, ETH Zurich, Switzerland. All rights reserved.
// Distributed under the terms of the MIT license.
//
// Created by Guido Novati ([email protected]).
//
#ifndef smarties_ThreadSafeVec_h
#define smarties_ThreadSafeVec_h
#include "../Settings/Definitions.h"
#include <cassert>
#include <memory>
#include <omp.h>
namespace smarties
{
template<typename T>
struct THRvec
{
Uint nThreads;
const T initial;
std::vector<std::unique_ptr<T>> m_v;
THRvec(const Uint size, const T init=T()) : nThreads(size), initial(init)
{
m_v.resize(nThreads);
#pragma omp parallel for num_threads(nThreads) schedule(static, 1)
for(Uint i=0; i<nThreads; ++i) m_v[i] = std::make_unique<T>(initial);
}
THRvec(const THRvec&c) = delete;
void resize(const Uint N)
{
if(N == nThreads) return;
m_v.resize(N);
nThreads = N;
#pragma omp parallel for schedule(static, 1)
for(Uint i=0; i<N; ++i) {
if(m_v[i]) continue;
m_v[i] = std::make_unique<T>(initial);
}
}
Uint size() const { return nThreads; }
T& operator[] (const Uint i) const
{
assert(m_v[i]);
return * m_v[i].get();
}
};
} // end namespace smarties
#endif // smarties_ThreadSafeVec_h
|
dataset.h
|
#ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/text_reader.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/meta.h>
#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>
#include <vector>
#include <utility>
#include <functional>
#include <string>
#include <unordered_set>
#include <mutex>
namespace LightGBM {
/*! \brief forward declaration */
class DatasetLoader;
/*!
* \brief This class is used to store some meta (non-feature) data for training data,
* e.g. labels, weights, initial scores, query-level information.
*
* Some details:
* 1. Label, used for training.
* 2. Weights, weights of records, optional.
* 3. Query Boundaries, necessary for lambdarank.
*    The documents of the i-th query are in [ query_boundaries[i], query_boundaries[i+1] ).
* 4. Query Weights, automatically calculated from weights and query_boundaries (if both exist);
*    the weight of the i-th query is sum(weights[j] for j in [query_boundaries[i], query_boundaries[i+1])) / (query_boundaries[i+1] - query_boundaries[i]).
* 5. Initial score, optional. If it exists, the model will boost from this score, otherwise it will start from 0.
*/
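/*
* Illustrative example (added, not part of the original header): with three
* queries of sizes 5, 3 and 4, query_boundaries would be {0, 5, 8, 12}; the
* documents of query 1 are then the rows in [5, 8), and its query weight is
* the sum of those rows' weights divided by 3.
*/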
class Metadata {
public:
/*!
  * \brief Null constructor
*/
Metadata();
/*!
  * \brief Initialization will load query-level information, since it is needed for sampling data
* \param data_filename Filename of data
  * \param initscore_file Filename of initial score
*/
void Init(const char* data_filename, const char* initscore_file);
/*!
* \brief init as subset
  * \param metadata Metadata to create the subset from
* \param used_indices
* \param num_used_indices
*/
void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
/*!
  * \brief Initialize from binary memory
* \param memory Pointer to memory
*/
void LoadFromMemory(const void* memory);
/*! \brief Destructor */
~Metadata();
/*!
  * \brief Initial work; will allocate space for label, weight (if it exists) and query (if it exists)
* \param num_data Number of training data
  * \param weight_idx Index of weight column, < 0 means it doesn't exist
  * \param query_idx Index of query id column, < 0 means it doesn't exist
*/
void Init(data_size_t num_data, int weight_idx, int query_idx);
/*!
* \brief Partition label by used indices
  * \param used_indices Indices of locally used data
*/
void PartitionLabel(const std::vector<data_size_t>& used_indices);
/*!
  * \brief Partition meta data according to locally used indices, if needed
  * \param num_all_data Number of total training data, including other machines' data in parallel learning
  * \param used_data_indices Indices of locally used training data
*/
void CheckOrPartition(data_size_t num_all_data,
const std::vector<data_size_t>& used_data_indices);
void SetLabel(const float* label, data_size_t len);
void SetWeights(const float* weights, data_size_t len);
void SetQuery(const data_size_t* query, data_size_t len);
/*!
* \brief Set initial scores
* \param init_score Initial scores, this class will manage memory for init_score.
*/
void SetInitScore(const double* init_score, data_size_t len);
/*!
* \brief Save binary data to file
  * \param file File to write to
*/
void SaveBinaryToFile(FILE* file) const;
/*!
* \brief Get sizes in byte of this object
*/
size_t SizesInByte() const;
/*!
* \brief Get pointer of label
* \return Pointer of label
*/
inline const float* label() const { return label_.data(); }
/*!
* \brief Set label for one record
* \param idx Index of this record
* \param value Label value of this record
*/
inline void SetLabelAt(data_size_t idx, float value)
{
label_[idx] = value;
}
/*!
* \brief Set Weight for one record
* \param idx Index of this record
* \param value Weight value of this record
*/
inline void SetWeightAt(data_size_t idx, float value)
{
weights_[idx] = value;
}
/*!
* \brief Set Query Id for one record
* \param idx Index of this record
* \param value Query Id value of this record
*/
inline void SetQueryAt(data_size_t idx, data_size_t value)
{
queries_[idx] = static_cast<data_size_t>(value);
}
/*!
  * \brief Get weights; if they do not exist, will return nullptr
* \return Pointer of weights
*/
inline const float* weights() const {
if (!weights_.empty()) {
return weights_.data();
} else {
return nullptr;
}
}
/*!
  * \brief Get data boundaries on queries; if they do not exist, will return nullptr.
  * We assume the data is ordered by query;
  * the interval [query_boundaries[i], query_boundaries[i+1])
  * holds the data indices for query i.
* \return Pointer of data boundaries on queries
*/
inline const data_size_t* query_boundaries() const {
if (!query_boundaries_.empty()) {
return query_boundaries_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get Number of queries
* \return Number of queries
*/
inline data_size_t num_queries() const { return num_queries_; }
/*!
  * \brief Get weights for queries; if they do not exist, will return nullptr
* \return Pointer of weights for queries
*/
inline const float* query_weights() const {
if (!query_weights_.empty()) {
return query_weights_.data();
} else {
return nullptr;
}
}
/*!
  * \brief Get initial scores; if they do not exist, will return nullptr
* \return Pointer of initial scores
*/
inline const double* init_score() const {
if (!init_score_.empty()) {
return init_score_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get size of initial scores
*/
inline int64_t num_init_score() const { return num_init_score_; }
/*! \brief Disable copy */
Metadata& operator=(const Metadata&) = delete;
/*! \brief Disable copy */
Metadata(const Metadata&) = delete;
private:
/*! \brief Load initial scores from file */
void LoadInitialScore(const char* initscore_file);
  /*! \brief Load weights from file */
void LoadWeights();
/*! \brief Load query boundaries from file */
void LoadQueryBoundaries();
  /*! \brief Load query weights */
void LoadQueryWeights();
/*! \brief Filename of current data */
const char* data_filename_;
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Number of weights, used to check correct weight file */
data_size_t num_weights_;
/*! \brief Label data */
std::vector<float> label_;
/*! \brief Weights data */
std::vector<float> weights_;
/*! \brief Query boundaries */
std::vector<data_size_t> query_boundaries_;
/*! \brief Query weights */
std::vector<float> query_weights_;
  /*! \brief Number of queries */
data_size_t num_queries_;
  /*! \brief Number of initial scores, used to check correct initial score file */
int64_t num_init_score_;
/*! \brief Initial score */
std::vector<double> init_score_;
/*! \brief Queries data */
std::vector<data_size_t> queries_;
  /*! \brief Mutex for thread-safe calls */
std::mutex mutex_;
bool weight_load_from_file_;
bool query_load_from_file_;
bool init_score_load_from_file_;
};
/*! \brief Interface for Parser */
class Parser {
public:
/*! \brief virtual destructor */
virtual ~Parser() {}
/*!
* \brief Parse one line with label
* \param str One line record, string format, should end with '\0'
* \param out_features Output columns, store in (column_idx, values)
  * \param out_label Label will be stored here if it exists
*/
virtual void ParseOneLine(const char* str,
std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0;
/*!
  * \brief Create an object of parser; will automatically choose the format depending on the file
  * \param filename Filename of the data
  * \param has_header True if the file contains a header row
  * \param num_features Pass num_features of this data file if known; <=0 means unknown
  * \param label_idx Index of the label column
* \return Object of parser
*/
static Parser* CreateParser(const char* filename, bool has_header, int num_features, int label_idx);
};
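/*
 * Minimal usage sketch (added for illustration; the filename and the record
 * contents are hypothetical). CreateParser inspects the file to pick the
 * concrete format, and ParseOneLine fills sparse (column_idx, value) pairs
 * plus the label:
 *
 *   std::unique_ptr<Parser> parser(
 *       Parser::CreateParser("train.csv", false, 0, 0));
 *   std::vector<std::pair<int, double>> features;
 *   double label = 0.0;
 *   parser->ParseOneLine("1,0.5,0,3.2", &features, &label);
 *   // label now holds the value of the label column; features holds the
 *   // remaining columns as (column_idx, value) pairs.
 */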
/*! \brief The main class of data set,
*          which is used for training or validation
*/
class Dataset {
public:
friend DatasetLoader;
LIGHTGBM_EXPORT Dataset();
LIGHTGBM_EXPORT Dataset(data_size_t num_data);
void Construct(
std::vector<std::unique_ptr<BinMapper>>& bin_mappers,
int** sample_non_zero_indices,
const int* num_per_col,
size_t total_sample_cnt,
const IOConfig& io_config);
/*! \brief Destructor */
LIGHTGBM_EXPORT ~Dataset();
LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
if (num_features_ != other.num_features_) {
return false;
}
if (num_total_features_ != other.num_total_features_) {
return false;
}
if (label_idx_ != other.label_idx_) {
return false;
}
for (int i = 0; i < num_features_; ++i) {
if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
return false;
}
}
return true;
}
inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
if (is_finish_load_) { return; }
for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
int feature_idx = used_feature_map_[i];
if (feature_idx >= 0) {
const int group = feature2group_[feature_idx];
const int sub_feature = feature2subfeature_[feature_idx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
}
}
}
inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
if (is_finish_load_) { return; }
for (auto& inner_data : feature_values) {
if (inner_data.first >= num_total_features_) { continue; }
int feature_idx = used_feature_map_[inner_data.first];
if (feature_idx >= 0) {
const int group = feature2group_[feature_idx];
const int sub_feature = feature2subfeature_[feature_idx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
}
}
}
inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
}
inline int RealFeatureIndex(int fidx) const {
return real_feature_idx_[fidx];
}
inline int InnerFeatureIndex(int col_idx) const {
return used_feature_map_[col_idx];
}
inline int Feature2Group(int feature_idx) const {
return feature2group_[feature_idx];
}
inline int Feture2SubFeature(int feature_idx) const {
return feature2subfeature_[feature_idx];
}
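  /*
   * Illustrative note (added): a raw column index maps to a bin location in
   * two steps, via the arrays filled in Construct(). Assuming column 7 of
   * the input is a used feature that was bundled into group 1 as its 0th
   * sub-feature (the concrete numbers are hypothetical):
   *
   *   int inner = InnerFeatureIndex(7);      // e.g. 2, or -1 if unused
   *   int group = Feature2Group(inner);      // e.g. 1
   *   int sub   = Feture2SubFeature(inner);  // e.g. 0
   *   int col   = RealFeatureIndex(inner);   // maps back to 7
   */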
inline uint64_t GroupBinBoundary(int group_idx) const {
return group_bin_boundaries_[group_idx];
}
inline uint64_t NumTotalBin() const {
return group_bin_boundaries_.back();
}
void ReSize(data_size_t num_data);
void CopySubset(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);
LIGHTGBM_EXPORT void FinishLoad();
LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);
LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);
LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);
/*!
* \brief Save current dataset into binary file, will save to "filename.bin"
*/
LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);
LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);
LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);
void ConstructHistograms(const std::vector<int8_t>& is_feature_used,
const data_size_t* data_indices, data_size_t num_data,
int leaf_idx,
std::vector<std::unique_ptr<OrderedBin>>& ordered_bins,
const score_t* gradients, const score_t* hessians,
score_t* ordered_gradients, score_t* ordered_hessians,
bool is_constant_hessian,
HistogramBinEntry* histogram_data) const;
void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, data_size_t num_data,
HistogramBinEntry* data) const;
inline data_size_t Split(int feature,
const uint32_t* threshold, int num_threshold, bool default_left,
data_size_t* data_indices, data_size_t num_data,
data_size_t* lte_indices, data_size_t* gt_indices) const {
const int group = feature2group_[feature];
const int sub_feature = feature2subfeature_[feature];
return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left, data_indices, num_data, lte_indices, gt_indices);
}
inline int SubFeatureBinOffset(int i) const {
const int sub_feature = feature2subfeature_[i];
if (sub_feature == 0) {
return 1;
} else {
return 0;
}
}
inline int FeatureNumBin(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
}
inline int FeatureGroupNumBin(int group) const {
return feature_groups_[group]->num_total_bin_;
}
inline const BinMapper* FeatureBinMapper(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature].get();
}
inline const Bin* FeatureBin(int i) const {
const int group = feature2group_[i];
return feature_groups_[group]->bin_data_.get();
}
inline const Bin* FeatureGroupBin(int group) const {
return feature_groups_[group]->bin_data_.get();
}
inline bool FeatureGroupIsSparse(int group) const {
return feature_groups_[group]->is_sparse_;
}
inline BinIterator* FeatureIterator(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->SubFeatureIterator(sub_feature);
}
inline BinIterator* FeatureGroupIterator(int group) const {
return feature_groups_[group]->FeatureGroupIterator();
}
inline double RealThreshold(int i, uint32_t threshold) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
}
inline void CreateOrderedBins(std::vector<std::unique_ptr<OrderedBin>>* ordered_bins) const {
ordered_bins->resize(num_groups_);
OMP_INIT_EX();
#pragma omp parallel for schedule(guided)
for (int i = 0; i < num_groups_; ++i) {
OMP_LOOP_EX_BEGIN();
ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin());
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
}
/*!
* \brief Get meta data pointer
* \return Pointer of meta data
*/
inline const Metadata& metadata() const { return metadata_; }
/*! \brief Get Number of used features */
inline int num_features() const { return num_features_; }
/*! \brief Get Number of feature groups */
inline int num_feature_groups() const { return num_groups_;}
/*! \brief Get Number of total features */
inline int num_total_features() const { return num_total_features_; }
/*! \brief Get the index of label column */
inline int label_idx() const { return label_idx_; }
/*! \brief Get names of current data set */
inline const std::vector<std::string>& feature_names() const { return feature_names_; }
inline void set_feature_names(const std::vector<std::string>& feature_names) {
if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
Log::Fatal("Size of feature_names error, should equal with total number of features");
}
feature_names_ = std::vector<std::string>(feature_names);
// replace ' ' in feature_names with '_'
bool spaceInFeatureName = false;
for (auto& feature_name: feature_names_){
if (feature_name.find(' ') != std::string::npos){
spaceInFeatureName = true;
std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
}
}
if (spaceInFeatureName){
Log::Warning("Find whitespaces in feature_names, replace with underlines");
}
}
inline std::vector<std::string> feature_infos() const {
std::vector<std::string> bufs;
for (int i = 0; i < num_total_features_; i++) {
int fidx = used_feature_map_[i];
if (fidx == -1) {
bufs.push_back("none");
} else {
const auto bin_mapper = FeatureBinMapper(fidx);
bufs.push_back(bin_mapper->bin_info());
}
}
return bufs;
}
/*! \brief Get Number of data */
inline data_size_t num_data() const { return num_data_; }
/*! \brief Disable copy */
Dataset& operator=(const Dataset&) = delete;
/*! \brief Disable copy */
Dataset(const Dataset&) = delete;
private:
const char* data_filename_;
/*! \brief Store used features */
std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
/*! \brief Mapper from real feature index to used index*/
std::vector<int> used_feature_map_;
/*! \brief Number of used features*/
int num_features_;
/*! \brief Number of total features*/
int num_total_features_;
/*! \brief Number of total data*/
data_size_t num_data_;
/*! \brief Store some label level data*/
Metadata metadata_;
/*! \brief index of label column */
int label_idx_ = 0;
/*! \brief Threshold for treating a feature as a sparse feature */
double sparse_threshold_;
/*! \brief store feature names */
std::vector<std::string> feature_names_;
  /*! \brief Token used to identify the binary dataset file */
static const char* binary_file_token;
int num_groups_;
std::vector<int> real_feature_idx_;
std::vector<int> feature2group_;
std::vector<int> feature2subfeature_;
std::vector<uint64_t> group_bin_boundaries_;
std::vector<int> group_feature_start_;
std::vector<int> group_feature_cnt_;
bool is_finish_load_;
};
} // namespace LightGBM
#endif // LightGBM_DATA_H_
|
target_data-4.c
|
/* { dg-do run } */
#include <stdlib.h>
#define EPS 0.000001
const int MAX = 1800;
void check (double *a, double *b, int N)
{
int i;
for (i = 0; i < N; i++)
if (a[i] - b[i] > EPS || b[i] - a[i] > EPS)
abort ();
}
void init (double *a1, double *a2, int N)
{
double s = -1;
int i;
for (i = 0; i < N; i++)
{
a1[i] = s;
a2[i] = i;
s = -s;
}
}
void vec_mult_ref (double *p1, double *v3, double *v4, int N)
{
int i;
for (i = 0; i < N; i++)
p1[i] = v3[i] * v4[i];
}
void foo_ref (double *p0, double *v1, double *v2, int N)
{
init (v1, v2, N);
vec_mult_ref (p0, v1, v2, N);
}
void vec_mult (double *p1, double *v3, double *v4, int N)
{
int i;
#pragma omp target map(to: v3[0:N], v4[:N]) map(from: p1[0:N])
#pragma omp parallel for
for (i = 0; i < N; i++)
p1[i] = v3[i] * v4[i];
}
void foo (double *p0, double *v1, double *v2, int N)
{
init (v1, v2, N);
#pragma omp target data map(to: v1[0:N], v2[:N]) map(from: p0[0:N])
vec_mult (p0, v1, v2, N);
}
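/* Note added for clarity: the enclosing "target data" region in foo maps v1,
   v2 and p0 for the duration of the call, so the "target" construct inside
   vec_mult finds those arrays already present on the device and reuses the
   existing device copies instead of transferring them again (standard OpenMP
   reference-counted mapping behaviour). */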
int main ()
{
double *p1 = (double *) malloc (MAX * sizeof (double));
double *p2 = (double *) malloc (MAX * sizeof (double));
double *v1 = (double *) malloc (MAX * sizeof (double));
double *v2 = (double *) malloc (MAX * sizeof (double));
foo_ref (p1, v1, v2, MAX);
foo (p2, v1, v2, MAX);
check (p1, p2, MAX);
free (p1);
free (p2);
free (v1);
free (v2);
return 0;
}
|
heatx.c
|
# include <stdlib.h>
# include <stdio.h>
# include <math.h>
# include <float.h>
# include <time.h>
# include <omp.h>
#define a(i,j,k) a[(k)*mxy+(j)*mx+(i)]
#define anew(i,j,k) anew[(k)*mxy+(j)*mx+(i)]
int main ( int argc, char *argv[] ) {
double error ;
double epsilon ;
int i,j,k,istep;
int nx,ny,nz,nstep,nprint;
int mx,my,mz,mxy,mxyz ;
int i1,j1,k1,in,jn,kn ;
int i0,j0,k0,im,jm,km ;
double asum,amax,amin ;
double txm,txp,tym,typ,tzm,tzp ;
double *a,*anew;
// clock_t t0,t1,ts,te ;
double t0,t1,ts,te ;
FILE *INPUT ;
istep = 0 ;
error = 999.9 ;
printf("%%heatx, SOP\n") ;
INPUT = fopen("heatx.inp","r");
fscanf(INPUT,"%d %d %d ",&nx,&ny,&nz );
fscanf(INPUT,"%d %d ",&nstep,&nprint );
fscanf(INPUT,"%lf %lf %lf %lf %lf %lf ",&txp,&txm,&typ,&tym,&tzp,&tzm);
fscanf(INPUT,"%le ",&epsilon );
fclose(INPUT) ;
#ifdef _OPENMP
printf("%%heatx, Pure OpenMP Offloading\n") ;
#else
printf("%%heatx, _NO_ OpenMP Offloading\n") ;
#endif
printf("%%heatx, nx ny nz : %d %d %d \n",nx,ny,nz ) ;
printf("%%heatx, nstep nprint : %d %d \n",nstep,nprint ) ;
printf("%%heatx, txp,txm,typ,tym,tzp,tzm: %f %f %f %f %f %f\n",txp,txm,typ,tym,tzp,tzm) ;
printf("%%heatx, epsilon : %e \n",epsilon ) ;
printf("%%heatx, \n" ) ;
i1 = 1 ; j1 = 1 ; k1 = 1 ;
in = nx ; jn = ny ; kn = nz ;
i0 = i1-1 ; j0 = j1-1 ; k0 = k1-1 ;
im = in+1 ; jm = jn+1 ; km = kn+1 ;
mx = im - i0 + 1 ;
my = jm - j0 + 1 ;
mz = km - k0 + 1 ;
mxy = mx*my ;
mxyz = mxy *mz ;
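/* Note (added): the arrays carry a one-cell halo on every face, so for the
   interior range i1..in = 1..nx the allocated extent is i0..im = 0..nx+1,
   i.e. mx = nx + 2. For example nx = ny = nz = 4 gives mx = my = mz = 6 and
   mxyz = 216 doubles per array; the halo cells hold the fixed boundary
   temperatures txm/txp, tym/typ, tzm/tzp set in the loop below. */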
a = (double *) malloc(sizeof(double)*mxyz) ;
anew = (double *) malloc(sizeof(double)*mxyz) ;
for ( k = k1 ; k <= kn ; k++) {
for ( j = j1 ; j <= jn ; j++) {
for ( i = i1 ; i <= in ; i++) {
a(i ,j ,k ) = 0.0 ;
if (k == 1) a(i ,j ,k-1) = tzm ;
if (k == nz) a(i ,j ,k+1) = tzp ;
if (j == 1) a(i ,j-1,k ) = tym ;
if (j == ny) a(i ,j+1,k ) = typ ;
if (i == 1) a(i-1,j ,k ) = txm ;
if (i == nx) a(i+1,j ,k ) = txp ;
}}}
//t0 = clock();
//ts = clock();
t0 = omp_get_wtime();
ts = omp_get_wtime();
#pragma omp target data map(a[:mxyz],anew[:mxyz])
while ( error >= epsilon && istep <= nstep ) {
istep = istep + 1 ;
error = 0.0 ;
#pragma omp target teams distribute parallel for \
reduction(max:error) collapse(3) schedule(static,1)
for( k = k1; k <= kn; k++) {
for( j = j1; j <= jn; j++) {
for( i = i1; i <= in; i++) {
anew(i,j,k) = ( a(i+1,j,k) + a(i,j+1,k) + a(i,j,k+1)
+ a(i-1,j,k) + a(i,j-1,k) + a(i,j,k-1) ) / 6.0 ;
error = fmax( error , fabs(anew(i,j,k)-a(i,j,k)) ) ;
}}}
#pragma omp target teams distribute parallel for \
collapse(3) schedule(static,1)
for( k = k1; k <= kn; k++) {
for( j = j1; j <= jn; j++) {
for( i = i1; i <= in; i++) {
a(i,j,k) = anew(i,j,k) ;
}}}
if ( istep%nprint == 0 || error <= epsilon) {
#pragma omp target update from (a[:mxyz])
// te = clock() ;
te = omp_get_wtime();
asum = 0.0 ;
amax = -2.0 ;
amin = 2.0 ;
for ( k = k1 ; k <= kn ; k++ ) {
for ( j = j1 ; j <= jn ; j++ ) {
for ( i = i1 ; i <= in ; i++ ) {
amin = fmin(amin,a(i,j,k)) ;
amax = fmax(amax,a(i,j,k)) ;
asum = asum+a(i,j,k) ;
}}}
asum = asum /(nx*ny*nz) ;
// printf ("%%heatx, istep %5d error %15.7f asum %15.7f amin %15.7f amax %15.7f time %10.3f \n",istep,error,asum,amin,amax,(te-ts) / (double) CLOCKS_PER_SEC ) ;
printf ("%%heatx, istep %5d error %15.7f asum %15.7f amin %15.7f amax %15.7f time %10.3f \n",istep,error,asum,amin,amax,(te-ts) ) ;
ts = te ;
}
}
//t1 = clock();
t1 = omp_get_wtime();
printf("%%heatx, \n" ) ;
//printf("%%heatx, total time %10.3f\n",(t1-t0) / (double) CLOCKS_PER_SEC ) ;
printf("%%heatx, total time %10.3f\n",(t1-t0) ) ;
printf("%%heatx, EOP\n") ;
return 0;
}
|
pr34694.c
|
/* PR middle-end/34694 */
/* { dg-do compile } */
/* { dg-options "-O -fopenmp -Wall" } */
int i;
void
foo ()
{
#pragma omp parallel
{
int j; /* { dg-message "note: 'j' was declared here" } */
i = j; /* { dg-warning "is used uninitialized" } */
}
}
|
atomic_write_codegen.c
|
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
// REQUIRES: x86-registered-target
#ifndef HEADER
#define HEADER
_Bool bv, bx;
char cv, cx;
unsigned char ucv, ucx;
short sv, sx;
unsigned short usv, usx;
int iv, ix;
unsigned int uiv, uix;
long lv, lx;
unsigned long ulv, ulx;
long long llv, llx;
unsigned long long ullv, ullx;
float fv, fx;
double dv, dx;
long double ldv, ldx;
_Complex int civ, cix;
_Complex float cfv, cfx;
_Complex double cdv, cdx;
typedef int int4 __attribute__((__vector_size__(16)));
int4 int4x;
struct BitFields {
int : 32;
int a : 31;
} bfx;
struct BitFields_packed {
int : 32;
int a : 31;
} __attribute__ ((__packed__)) bfx_packed;
struct BitFields2 {
int : 31;
int a : 1;
} bfx2;
struct BitFields2_packed {
int : 31;
int a : 1;
} __attribute__ ((__packed__)) bfx2_packed;
struct BitFields3 {
int : 11;
int a : 14;
} bfx3;
struct BitFields3_packed {
int : 11;
int a : 14;
} __attribute__ ((__packed__)) bfx3_packed;
struct BitFields4 {
short : 16;
int a: 1;
long b : 7;
} bfx4;
struct BitFields4_packed {
short : 16;
int a: 1;
long b : 7;
} __attribute__ ((__packed__)) bfx4_packed;
typedef float float2 __attribute__((ext_vector_type(2)));
float2 float2x;
// Register "0" is currently an invalid register for global register variables.
// Use "esp" instead of "0".
// register int rix __asm__("0");
register int rix __asm__("esp");
int main(void) {
// CHECK: store atomic i32 1, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @civ, i32 0, i32 1) monotonic, align 4
#pragma omp atomic write
__imag(civ) = 1;
// CHECK: load i8, i8*
// CHECK: store atomic i8 {{.*}} monotonic, align 1
#pragma omp atomic write
bx = bv;
// CHECK: load i8, i8*
// CHECK: store atomic i8 {{.*}} release, align 1
#pragma omp atomic write release
cx = cv;
// CHECK: load i8, i8*
// CHECK: store atomic i8 {{.*}} monotonic, align 1
#pragma omp atomic write
ucx = ucv;
// CHECK: load i16, i16*
// CHECK: store atomic i16 {{.*}} monotonic, align 2
#pragma omp atomic write
sx = sv;
// CHECK: load i16, i16*
// CHECK: store atomic i16 {{.*}} monotonic, align 2
#pragma omp atomic write
usx = usv;
// CHECK: load i32, i32*
// CHECK: store atomic i32 {{.*}} monotonic, align 4
#pragma omp atomic write
ix = iv;
// CHECK: load i32, i32*
// CHECK: store atomic i32 {{.*}} monotonic, align 4
#pragma omp atomic write
uix = uiv;
// CHECK: load i64, i64*
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
lx = lv;
// CHECK: load i64, i64*
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
ulx = ulv;
// CHECK: load i64, i64*
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
llx = llv;
// CHECK: load i64, i64*
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
ullx = ullv;
// CHECK: load float, float*
// CHECK: bitcast float {{.*}} to i32
// CHECK: store atomic i32 {{.*}}, i32* bitcast (float* {{.*}} monotonic, align 4
#pragma omp atomic write
fx = fv;
// CHECK: load double, double*
// CHECK: bitcast double {{.*}} to i64
// CHECK: store atomic i64 {{.*}}, i64* bitcast (double* {{.*}} monotonic, align 8
#pragma omp atomic write
dx = dv;
// CHECK: [[LD:%.+]] = load x86_fp80, x86_fp80*
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i8*
// CHECK: call void @llvm.memset.p0i8.i64(i8* align 16 [[BITCAST]], i8 0, i64 16, i1 false)
// CHECK: store x86_fp80 [[LD]], x86_fp80* [[LDTEMP]]
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i128*
// CHECK: [[LD:%.+]] = load i128, i128* [[BITCAST]]
// CHECK: store atomic i128 [[LD]], i128* bitcast (x86_fp80* {{.*}} monotonic, align 16
#pragma omp atomic write
ldx = ldv;
// CHECK: [[REAL_VAL:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.*}}, i32 0, i32 0)
// CHECK: [[IMG_VAL:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.*}}, i32 0, i32 1)
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
// CHECK: store i32 [[REAL_VAL]], i32* [[TEMP_REAL_REF]]
// CHECK: store i32 [[IMG_VAL]], i32* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 noundef 8, i8* noundef bitcast ({ i32, i32 }* @{{.*}} to i8*), i8* noundef [[BITCAST]], i32 noundef 0)
#pragma omp atomic write
cix = civ;
// CHECK: [[REAL_VAL:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.*}}, i32 0, i32 0)
// CHECK: [[IMG_VAL:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.*}}, i32 0, i32 1)
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 1
// CHECK: store float [[REAL_VAL]], float* [[TEMP_REAL_REF]]
// CHECK: store float [[IMG_VAL]], float* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { float, float }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 noundef 8, i8* noundef bitcast ({ float, float }* @{{.*}} to i8*), i8* noundef [[BITCAST]], i32 noundef 0)
#pragma omp atomic write
cfx = cfv;
// CHECK: [[REAL_VAL:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.*}}, i32 0, i32 0)
// CHECK: [[IMG_VAL:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.*}}, i32 0, i32 1)
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 1
// CHECK: store double [[REAL_VAL]], double* [[TEMP_REAL_REF]]
// CHECK: store double [[IMG_VAL]], double* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { double, double }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 noundef 16, i8* noundef bitcast ({ double, double }* @{{.*}} to i8*), i8* noundef [[BITCAST]], i32 noundef 5)
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic seq_cst write
cdx = cdv;
// CHECK: load i8, i8*
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
ulx = bv;
// CHECK: load i8, i8*
// CHECK: store atomic i8 {{.*}} monotonic, align 1
#pragma omp atomic write
bx = cv;
// CHECK: load i8, i8*
// CHECK: store atomic i8 {{.*}} seq_cst, align 1
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic write, seq_cst
cx = ucv;
// CHECK: load i16, i16*
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
ulx = sv;
// CHECK: load i16, i16*
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
lx = usv;
// CHECK: load i32, i32*
// CHECK: store atomic i32 {{.*}} seq_cst, align 4
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic seq_cst, write
uix = iv;
// CHECK: load i32, i32*
// CHECK: store atomic i32 {{.*}} monotonic, align 4
#pragma omp atomic write
ix = uiv;
// CHECK: load i64, i64*
// CHECK: [[VAL:%.+]] = trunc i64 %{{.*}} to i32
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
// CHECK: store i32 [[VAL]], i32* [[TEMP_REAL_REF]]
// CHECK: store i32 0, i32* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 noundef 8, i8* noundef bitcast ({ i32, i32 }* @{{.+}} to i8*), i8* noundef [[BITCAST]], i32 noundef 0)
#pragma omp atomic write
cix = lv;
// CHECK: load i64, i64*
// CHECK: store atomic i32 %{{.+}}, i32* bitcast (float* {{.*}} monotonic, align 4
#pragma omp atomic write
fx = ulv;
// CHECK: load i64, i64*
// CHECK: store atomic i64 %{{.+}}, i64* bitcast (double* {{.*}} monotonic, align 8
#pragma omp atomic write
dx = llv;
// CHECK: load i64, i64*
// CHECK: [[VAL:%.+]] = uitofp i64 %{{.+}} to x86_fp80
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i8*
// CHECK: call void @llvm.memset.p0i8.i64(i8* align 16 [[BITCAST]], i8 0, i64 16, i1 false)
// CHECK: store x86_fp80 [[VAL]], x86_fp80* [[TEMP]]
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP]] to i128*
// CHECK: [[VAL:%.+]] = load i128, i128* [[BITCAST]]
// CHECK: store atomic i128 [[VAL]], i128* bitcast (x86_fp80* {{.*}} monotonic, align 16
#pragma omp atomic write
ldx = ullv;
// CHECK: load float, float*
// CHECK: [[VAL:%.+]] = fptosi float %{{.*}} to i32
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
// CHECK: store i32 [[VAL]], i32* [[TEMP_REAL_REF]]
// CHECK: store i32 0, i32* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 noundef 8, i8* noundef bitcast ({ i32, i32 }* @{{.+}} to i8*), i8* noundef [[BITCAST]], i32 noundef 0)
#pragma omp atomic write
cix = fv;
// CHECK: load double, double*
// CHECK: store atomic i16 {{.*}} monotonic, align 2
#pragma omp atomic write
sx = dv;
// CHECK: load x86_fp80, x86_fp80*
// CHECK: store atomic i8 {{.*}} monotonic, align 1
#pragma omp atomic write
bx = ldv;
// CHECK: load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 0)
// CHECK: load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 1)
// CHECK: icmp ne i32 %{{.+}}, 0
// CHECK: icmp ne i32 %{{.+}}, 0
// CHECK: or i1
// CHECK: store atomic i8 {{.*}} monotonic, align 1
#pragma omp atomic write
bx = civ;
// CHECK: load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.*}}, i32 0, i32 0)
// CHECK: store atomic i16 {{.*}} monotonic, align 2
#pragma omp atomic write
usx = cfv;
// CHECK: load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.+}}, i32 0, i32 0)
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
llx = cdv;
// CHECK-DAG: [[IDX:%.+]] = load i16, i16* @{{.+}}
// CHECK-DAG: load i8, i8*
// CHECK-DAG: [[VEC_ITEM_VAL:%.+]] = zext i1 %{{.+}} to i32
// CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* [[DEST:@.+]] to i128*) monotonic, align 16
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_I128:%.+]] = phi i128 [ [[I128VAL]], %{{.+}} ], [ [[FAILED_I128_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BITCAST:%.+]] = bitcast <4 x i32>* [[LDTEMP:%.+]] to i128*
// CHECK: store i128 [[OLD_I128]], i128* [[BITCAST]],
// CHECK: [[VEC_VAL:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]]
// CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <4 x i32> [[VEC_VAL]], i32 [[VEC_ITEM_VAL]], i16 [[IDX]]
// CHECK: store <4 x i32> [[NEW_VEC_VAL]], <4 x i32>* [[LDTEMP]]
// CHECK: [[NEW_I128:%.+]] = load i128, i128* [[BITCAST]]
// CHECK: [[RES:%.+]] = cmpxchg i128* bitcast (<4 x i32>* [[DEST]] to i128*), i128 [[OLD_I128]], i128 [[NEW_I128]] monotonic monotonic, align 16
// CHECK: [[FAILED_I128_OLD_VAL:%.+]] = extractvalue { i128, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i128, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
int4x[sv] = bv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*) monotonic, align 4
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_VALUE:%.+]] = and i32 [[NEW_VAL]], 2147483647
// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -2147483648
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic, align 4
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[BITCAST:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8*
// CHECK: call void @__atomic_load(i64 noundef 4, i8* noundef getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* noundef [[BITCAST]], i32 noundef 0)
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]],
// CHECK: store i32 [[OLD_BF_VALUE]], i32* [[LDTEMP1:%.+]],
// CHECK: [[OLD_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP1]],
// CHECK: [[BF_VALUE:%.+]] = and i32 [[NEW_VAL]], 2147483647
// CHECK: [[BF_CLEAR:%.+]] = and i32 [[OLD_BF_VALUE]], -2147483648
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP1]]
// CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i32* [[LDTEMP]] to i8*
// CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i32* [[LDTEMP1]] to i8*
// CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 noundef 4, i8* noundef getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* noundef [[BITCAST_TEMP_OLD_BF_ADDR]], i8* noundef [[BITCAST_TEMP_NEW_BF_ADDR]], i32 noundef 0, i32 noundef 0)
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @{{.+}}, i32 0, i32 0) monotonic, align 4
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_AND:%.+]] = and i32 [[NEW_VAL]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 31
// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, 2147483647
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic, align 4
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx2.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3) monotonic, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i8
// CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 7
// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 127
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx2_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @{{.+}}, i32 0, i32 0) monotonic, align 4
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_AND:%.+]] = and i32 [[NEW_VAL]], 16383
// CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 11
// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -33552385
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic, align 4
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx3.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[LDTEMP:%.+]] = bitcast i32* %{{.+}} to i24*
// CHECK: [[BITCAST:%.+]] = bitcast i24* %{{.+}} to i8*
// CHECK: call void @__atomic_load(i64 noundef 3, i8* noundef getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* noundef [[BITCAST]], i32 noundef 0)
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_VAL:%.+]] = load i24, i24* %{{.+}},
// CHECK: store i24 [[OLD_VAL]], i24* [[TEMP:%.+]],
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i24
// CHECK: [[BF_AND:%.+]] = and i24 [[TRUNC]], 16383
// CHECK: [[BF_VALUE:%.+]] = shl i24 [[BF_AND]], 3
// CHECK: [[BF_CLEAR:%.+]] = and i24 %{{.+}}, -131065
// CHECK: or i24 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i24 %{{.+}}, i24* [[TEMP]]
// CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i24* [[LDTEMP]] to i8*
// CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i24* [[TEMP]] to i8*
// CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 noundef 3, i8* noundef getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* noundef [[BITCAST_TEMP_OLD_BF_ADDR]], i8* noundef [[BITCAST_TEMP_NEW_BF_ADDR]], i32 noundef 0, i32 noundef 0)
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx3_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic, align 8
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[ZEXT:%.+]] = zext i32 [[NEW_VAL]] to i64
// CHECK: [[BF_AND:%.+]] = and i64 [[ZEXT]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND]], 16
// CHECK: [[BF_CLEAR:%.+]] = and i64 %{{.+}}, -65537
// CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i64 %{{.+}}, i64* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic, align 8
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx4.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i8
// CHECK: [[BF_VALUE:%.+]] = and i8 [[TRUNC]], 1
// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, -2
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx4_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i64
// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic, align 8
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_AND:%.+]] = and i64 [[NEW_VAL]], 127
// CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND]], 17
// CHECK: [[BF_CLEAR:%.+]] = and i64 %{{.+}}, -16646145
// CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i64 %{{.+}}, i64* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic, align 8
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx4.b = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i64
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[TRUNC:%.+]] = trunc i64 [[NEW_VAL]] to i8
// CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 127
// CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 1
// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 1
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic relaxed write
bfx4_packed.b = ldv;
// CHECK: load i64, i64*
// CHECK: [[VEC_ITEM_VAL:%.+]] = uitofp i64 %{{.+}} to float
// CHECK: [[I64VAL:%.+]] = load atomic i64, i64* bitcast (<2 x float>* [[DEST:@.+]] to i64*) monotonic, align 8
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_I64:%.+]] = phi i64 [ [[I64VAL]], %{{.+}} ], [ [[FAILED_I64_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BITCAST:%.+]] = bitcast <2 x float>* [[LDTEMP:%.+]] to i64*
// CHECK: store i64 [[OLD_I64]], i64* [[BITCAST]],
// CHECK: [[VEC_VAL:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]]
// CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <2 x float> [[VEC_VAL]], float [[VEC_ITEM_VAL]], i64 0
// CHECK: store <2 x float> [[NEW_VEC_VAL]], <2 x float>* [[LDTEMP]]
// CHECK: [[NEW_I64:%.+]] = load i64, i64* [[BITCAST]]
// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (<2 x float>* [[DEST]] to i64*), i64 [[OLD_I64]], i64 [[NEW_I64]] monotonic monotonic, align 8
// CHECK: [[FAILED_I64_OLD_VAL:%.+]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write relaxed
float2x.x = ulv;
// CHECK: call i32 @llvm.read_register.i32(
// CHECK: sitofp i32 %{{.+}} to double
// CHECK: bitcast double %{{.+}} to i64
// CHECK: store atomic i64 %{{.+}}, i64* bitcast (double* @{{.+}} to i64*) seq_cst, align 8
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic write seq_cst
dv = rix;
return 0;
}
#endif
|
Parallelizer.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
namespace Eigen {
namespace internal {
/** \internal */
inline void manage_multi_threading(Action action, int* v)
{
static EIGEN_UNUSED int m_maxThreads = -1;
if(action==SetAction)
{
eigen_internal_assert(v!=0);
m_maxThreads = *v;
}
else if(action==GetAction)
{
eigen_internal_assert(v!=0);
#ifdef EIGEN_HAS_OPENMP
if(m_maxThreads>0)
*v = m_maxThreads;
else
*v = omp_get_max_threads();
#else
*v = 1;
#endif
}
else
{
eigen_internal_assert(false);
}
}
}
/** Must be called first when calling Eigen from multiple threads */
inline void initParallel()
{
int nbt;
internal::manage_multi_threading(GetAction, &nbt);
std::ptrdiff_t l1, l2;
internal::manage_caching_sizes(GetAction, &l1, &l2);
}
/** \returns the max number of threads reserved for Eigen
* \sa setNbThreads */
inline int nbThreads()
{
int ret;
internal::manage_multi_threading(GetAction, &ret);
return ret;
}
/** Sets the max number of threads reserved for Eigen
* \sa nbThreads */
inline void setNbThreads(int v)
{
internal::manage_multi_threading(SetAction, &v);
}
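/* Usage sketch (added; the thread count is arbitrary): setNbThreads() merely
 * records an upper bound that manage_multi_threading() hands back later.
 *
 *   Eigen::initParallel();      // once, before spawning user threads
 *   Eigen::setNbThreads(4);
 *   int n = Eigen::nbThreads(); // 4; omp_get_max_threads() if never set
 */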
namespace internal {
template<typename Index> struct GemmParallelInfo
{
GemmParallelInfo() : sync(-1), users(0), rhs_start(0), rhs_length(0) {}
int volatile sync;
int volatile users;
Index rhs_start;
Index rhs_length;
};
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpose)
{
// TODO when EIGEN_USE_BLAS is defined,
// we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
// FIXME the transpose variable is only needed to properly split
// the matrix product when multithreading is enabled. This is a temporary
// fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redesigned anyway.
EIGEN_UNUSED_VARIABLE(transpose);
func(0,rows, 0,cols);
#else
// Dynamically check whether we should enable or disable OpenMP.
// The conditions are:
// - the max number of threads we can create is greater than 1
// - we are not already in a parallel code
// - the sizes are large enough
// 1- are we already in a parallel session?
// FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
if((!Condition) || (omp_get_num_threads()>1))
return func(0,rows, 0,cols);
Index size = transpose ? cols : rows;
// 2- compute the maximal number of threads from the size of the product:
// FIXME this has to be fine tuned
Index max_threads = std::max<Index>(1,size / 32);
// 3 - compute the number of threads we are going to use
Index threads = std::min<Index>(nbThreads(), max_threads);
if(threads==1)
return func(0,rows, 0,cols);
Eigen::initParallel();
func.initParallelSession();
if(transpose)
std::swap(rows,cols);
GemmParallelInfo<Index>* info = new GemmParallelInfo<Index>[threads];
#pragma omp parallel num_threads(threads)
{
Index i = omp_get_thread_num();
    // Note that the actual number of threads might be lower than the number of requested ones.
Index actual_threads = omp_get_num_threads();
Index blockCols = (cols / actual_threads) & ~Index(0x3);
Index blockRows = (rows / actual_threads) & ~Index(0x7);
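    // Note (added): the bit masks round the per-thread block sizes down to
    // multiples of 4 columns / 8 rows. For example, with cols = 1000 and
    // 4 threads, 1000/4 = 250 and 250 & ~3 = 248, so the last thread picks
    // up the remaining 1000 - 3*248 = 256 columns via actualBlockCols below.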
Index r0 = i*blockRows;
Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;
Index c0 = i*blockCols;
Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
info[i].rhs_start = c0;
info[i].rhs_length = actualBlockCols;
if(transpose)
func(0, cols, r0, actualBlockRows, info);
else
func(r0, actualBlockRows, 0,cols, info);
}
delete[] info;
#endif
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PARALLELIZER_H
|
mypaint-tiled-surface.c
|
/* brushlib - The MyPaint Brush Library
* Copyright (C) 2007-2014 Martin Renold <[email protected]> et. al.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "mypaint-tiled-surface.h"
#include "tiled-surface-private.h"
#include "helpers.h"
#include "brushmodes.h"
#include "operationqueue.h"
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
void process_tile(MyPaintTiledSurface *self, int tx, int ty);
static void
begin_atomic_default(MyPaintSurface *surface)
{
mypaint_tiled_surface_begin_atomic((MyPaintTiledSurface *)surface);
}
static void
end_atomic_default(MyPaintSurface *surface, MyPaintRectangle *roi)
{
mypaint_tiled_surface_end_atomic((MyPaintTiledSurface *)surface, roi);
}
/**
* mypaint_tiled_surface_begin_atomic: (skip)
*
 * Implementation of #MyPaintSurface::begin_atomic vfunc
* Note: Only intended to be used from #MyPaintTiledSurface subclasses, which should chain up to this
* if implementing their own #MyPaintSurface::begin_atomic vfunc.
 * Application code should only use mypaint_surface_begin_atomic()
*/
void
mypaint_tiled_surface_begin_atomic(MyPaintTiledSurface *self)
{
self->dirty_bbox.height = 0;
self->dirty_bbox.width = 0;
self->dirty_bbox.y = 0;
self->dirty_bbox.x = 0;
}
/**
* mypaint_tiled_surface_end_atomic: (skip)
*
* Implementation of #MyPaintSurface::end_atomic vfunc
* Note: Only intended to be used from #MyPaintTiledSurface subclasses, which should chain up to this
* if implementing their own #MyPaintSurface::end_atomic vfunc.
* Application code should only use mypaint_surface_end_atomic().
*/
void
mypaint_tiled_surface_end_atomic(MyPaintTiledSurface *self, MyPaintRectangle *roi)
{
// Process tiles
TileIndex *tiles;
int tiles_n = operation_queue_get_dirty_tiles(self->operation_queue, &tiles);
#pragma omp parallel for schedule(static) if(self->threadsafe_tile_requests && tiles_n > 3)
for (int i = 0; i < tiles_n; i++) {
process_tile(self, tiles[i].x, tiles[i].y);
}
operation_queue_clear_dirty_tiles(self->operation_queue);
if (roi) {
*roi = self->dirty_bbox;
}
}
/**
* mypaint_tiled_surface_tile_request_start:
*
* Fetch a tile out from the underlying tile store.
 * When successful, request->buffer will be set to point to the fetched tile.
* Consumers must *always* call mypaint_tiled_surface_tile_request_end() with the same
* request to complete the transaction.
*/
void mypaint_tiled_surface_tile_request_start(MyPaintTiledSurface *self, MyPaintTileRequest *request)
{
assert(self->tile_request_start);
self->tile_request_start(self, request);
}
/**
* mypaint_tiled_surface_tile_request_end:
*
* Put a (potentially modified) tile back into the underlying tile store.
*
* Consumers must *always* call mypaint_tiled_surface_tile_request_start() with the same
* request to start the transaction before calling this function.
*/
void mypaint_tiled_surface_tile_request_end(MyPaintTiledSurface *self, MyPaintTileRequest *request)
{
assert(self->tile_request_end);
self->tile_request_end(self, request);
}
/* FIXME: either expose this through MyPaintSurface, or move it into the brush engine */
/**
* mypaint_tiled_surface_set_symmetry_state:
*
* @active: TRUE to enable, FALSE to disable.
* @center_x: X axis to mirror events across.
*
* Enable/Disable symmetric brush painting across an X axis.
*/
void
mypaint_tiled_surface_set_symmetry_state(MyPaintTiledSurface *self, gboolean active, float center_x)
{
self->surface_do_symmetry = active;
self->surface_center_x = center_x;
}
/**
* mypaint_tile_request_init:
*
* Initialize a request for use with mypaint_tiled_surface_tile_request_start()
* and mypaint_tiled_surface_tile_request_end()
*/
void
mypaint_tile_request_init(MyPaintTileRequest *data, int level,
int tx, int ty, gboolean readonly)
{
data->tx = tx;
data->ty = ty;
data->readonly = readonly;
data->buffer = NULL;
data->context = NULL;
#ifdef _OPENMP
data->thread_id = omp_get_thread_num();
#else
data->thread_id = -1;
#endif
data->mipmap_level = level;
}
// Must be threadsafe
static inline float
calculate_r_sample(float x, float y, float aspect_ratio,
float sn, float cs)
{
const float yyr=(y*cs-x*sn)*aspect_ratio;
const float xxr=y*sn+x*cs;
const float r = (yyr*yyr + xxr*xxr);
return r;
}
static inline float
calculate_rr(int xp, int yp, float x, float y, float aspect_ratio,
float sn, float cs, float one_over_radius2)
{
// code duplication, see brush::count_dabs_to()
const float yy = (yp + 0.5f - y);
const float xx = (xp + 0.5f - x);
const float yyr=(yy*cs-xx*sn)*aspect_ratio;
const float xxr=yy*sn+xx*cs;
const float rr = (yyr*yyr + xxr*xxr) * one_over_radius2;
// rr is in range 0.0..1.0*sqrt(2)
return rr;
}
static inline float
sign_point_in_line( float px, float py, float vx, float vy )
{
return (px - vx) * (-vy) - (vx) * (py - vy);
}
static inline void
closest_point_to_line( float lx, float ly, float px, float py, float *ox, float *oy )
{
const float l2 = lx*lx + ly*ly;
const float ltp_dot = px*lx + py*ly;
const float t = ltp_dot / l2;
*ox = lx * t;
*oy = ly * t;
}
// Must be threadsafe
//
// This works by taking the visibility at the nearest point
// and dividing by 1.0 + delta.
//
// - nearest point: point where the dab has more influence
// - farthest point: point at a fixed distance away from
// the nearest point
// - delta: how much occluded is the farthest point relative
// to the nearest point
static inline float
calculate_rr_antialiased(int xp, int yp, float x, float y, float aspect_ratio,
float sn, float cs, float one_over_radius2,
float r_aa_start)
{
// calculate pixel position and borders in a way
// that the dab's center is always at zero
float pixel_right = x - (float)xp;
float pixel_bottom = y - (float)yp;
float pixel_center_x = pixel_right - 0.5f;
float pixel_center_y = pixel_bottom - 0.5f;
float pixel_left = pixel_right - 1.0f;
float pixel_top = pixel_bottom - 1.0f;
float nearest_x, nearest_y; // nearest to origin, but still inside pixel
float farthest_x, farthest_y; // farthest from origin, but still inside pixel
float r_near, r_far, rr_near, rr_far;
// Dab's center is inside pixel?
if( pixel_left<0 && pixel_right>0 &&
pixel_top<0 && pixel_bottom>0 )
{
nearest_x = 0;
nearest_y = 0;
r_near = rr_near = 0;
}
else
{
closest_point_to_line( cs, sn, pixel_center_x, pixel_center_y, &nearest_x, &nearest_y );
nearest_x = CLAMP( nearest_x, pixel_left, pixel_right );
nearest_y = CLAMP( nearest_y, pixel_top, pixel_bottom );
// XXX: precision of "nearest" values could be improved
// by intersecting the line that goes from nearest_x/Y to 0
// with the pixel's borders here, however the improvements
    // would probably not justify the performance cost.
r_near = calculate_r_sample( nearest_x, nearest_y, aspect_ratio, sn, cs );
rr_near = r_near * one_over_radius2;
}
// out of dab's reach?
if( rr_near > 1.0f )
return rr_near;
// check on which side of the dab's line is the pixel center
float center_sign = sign_point_in_line( pixel_center_x, pixel_center_y, cs, -sn );
// radius of a circle with area=1
// A = pi * r * r
// r = sqrt(1/pi)
const float rad_area_1 = sqrtf( 1.0f / M_PI );
// center is below dab
if( center_sign < 0 )
{
farthest_x = nearest_x - sn*rad_area_1;
farthest_y = nearest_y + cs*rad_area_1;
}
// above dab
else
{
farthest_x = nearest_x + sn*rad_area_1;
farthest_y = nearest_y - cs*rad_area_1;
}
r_far = calculate_r_sample( farthest_x, farthest_y, aspect_ratio, sn, cs );
rr_far = r_far * one_over_radius2;
// check if we can skip heavier AA
if( r_far < r_aa_start )
return (rr_far+rr_near) * 0.5f;
// calculate AA approximate
float visibilityNear = 1.0f - rr_near;
float delta = rr_far - rr_near;
float delta2 = 1.0f + delta;
visibilityNear /= delta2;
return 1.0f - visibilityNear;
}
static inline float
calculate_opa(float rr, float hardness,
float segment1_offset, float segment1_slope,
float segment2_offset, float segment2_slope) {
const float fac = rr <= hardness ? segment1_slope : segment2_slope;
float opa = rr <= hardness ? segment1_offset : segment2_offset;
opa += rr*fac;
if (rr > 1.0f) {
opa = 0.0f;
}
#ifdef HEAVY_DEBUG
assert(isfinite(opa));
assert(opa >= 0.0f && opa <= 1.0f);
#endif
return opa;
}
// Must be threadsafe
void render_dab_mask (uint16_t * mask,
float x, float y,
float radius,
float hardness,
float aspect_ratio, float angle
)
{
hardness = CLAMP(hardness, 0.0, 1.0);
if (aspect_ratio<1.0) aspect_ratio=1.0;
assert(hardness != 0.0); // assured by caller
// For a graphical explanation, see:
// http://wiki.mypaint.info/Development/Documentation/Brushlib
//
// The hardness calculation is explained below:
//
// Dab opacity gradually fades out from the center (rr=0) to
// fringe (rr=1) of the dab. How exactly depends on the hardness.
// We use two linear segments, for which we pre-calculate slope
// and offset here.
//
// opa
// ^
// * .
// | *
// | .
// +-----------*> rr = (distance_from_center/radius)^2
// 0 1
//
float segment1_offset = 1.0f;
float segment1_slope = -(1.0f/hardness - 1.0f);
float segment2_offset = hardness/(1.0f-hardness);
float segment2_slope = -hardness/(1.0f-hardness);
// for hardness == 1.0, segment2 will never be used
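  // Worked example (added): for hardness = 0.25 the two segments evaluate to
  // opa = 1 - 3*rr on rr in [0, 0.25] and opa = 1/3 - rr/3 on (0.25, 1];
  // they meet continuously at rr = hardness (opa = 0.25) and the second
  // segment reaches opa = 0 exactly at the fringe rr = 1.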
float angle_rad=angle/360*2*M_PI;
float cs=cos(angle_rad);
float sn=sin(angle_rad);
const float r_fringe = radius + 1.0f; // +1.0 should not be required, only to be sure
int x0 = floor (x - r_fringe);
int y0 = floor (y - r_fringe);
int x1 = floor (x + r_fringe);
int y1 = floor (y + r_fringe);
if (x0 < 0) x0 = 0;
if (y0 < 0) y0 = 0;
if (x1 > MYPAINT_TILE_SIZE-1) x1 = MYPAINT_TILE_SIZE-1;
if (y1 > MYPAINT_TILE_SIZE-1) y1 = MYPAINT_TILE_SIZE-1;
const float one_over_radius2 = 1.0f/(radius*radius);
// Pre-calculate rr and put it in the mask.
    // This is an optimization that makes use of auto-vectorization
// OPTIMIZE: if using floats for the brush engine, store these directly in the mask
float rr_mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];
if (radius < 3.0f)
{
const float aa_border = 1.0f;
float r_aa_start = ((radius>aa_border) ? (radius-aa_border) : 0);
r_aa_start *= r_aa_start / aspect_ratio;
for (int yp = y0; yp <= y1; yp++) {
for (int xp = x0; xp <= x1; xp++) {
const float rr = calculate_rr_antialiased(xp, yp,
x, y, aspect_ratio,
sn, cs, one_over_radius2,
r_aa_start);
rr_mask[(yp*MYPAINT_TILE_SIZE)+xp] = rr;
}
}
}
else
{
for (int yp = y0; yp <= y1; yp++) {
for (int xp = x0; xp <= x1; xp++) {
const float rr = calculate_rr(xp, yp,
x, y, aspect_ratio,
sn, cs, one_over_radius2);
rr_mask[(yp*MYPAINT_TILE_SIZE)+xp] = rr;
}
}
}
    // we do run length encoding: if opacity is zero, the next value in the
    // mask is the number of rgba16 components (pixels*4) that can be
    // skipped; the mask is terminated by two zeros.
uint16_t * mask_p = mask;
int skip=0;
skip += y0*MYPAINT_TILE_SIZE;
for (int yp = y0; yp <= y1; yp++) {
skip += x0;
int xp;
for (xp = x0; xp <= x1; xp++) {
const float rr = rr_mask[(yp*MYPAINT_TILE_SIZE)+xp];
const float opa = calculate_opa(rr, hardness,
segment1_offset, segment1_slope,
segment2_offset, segment2_slope);
const uint16_t opa_ = opa * (1<<15);
if (!opa_) {
skip++;
} else {
if (skip) {
*mask_p++ = 0;
*mask_p++ = skip*4;
skip = 0;
}
*mask_p++ = opa_;
}
}
skip += MYPAINT_TILE_SIZE-xp;
}
*mask_p++ = 0;
*mask_p++ = 0;
}
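// Illustrative sketch (not part of the original file): how a consumer can
// decode the run-length encoded mask produced by render_dab_mask() above.
// It assumes an rgba16 tile buffer with 4 uint16_t components per pixel,
// which is why the skip count was stored pre-multiplied by 4.
static inline void
example_read_dab_mask (const uint16_t * mask, uint16_t * rgba)
{
    while (1) {
        const uint16_t opa = *mask++;
        if (opa) {
            // opa is the dab opacity for this pixel in 15-bit fixed point
            // (0 .. 1<<15); a blend mode would combine it with rgba here
            rgba += 4;
        } else {
            const uint16_t skip = *mask++;
            if (!skip)
                break;        // two consecutive zeros terminate the mask
            rgba += skip;     // skip is already in components (pixels*4)
        }
    }
}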
// Must be threadsafe
void
process_op(uint16_t *rgba_p, uint16_t *mask,
int tx, int ty, OperationDataDrawDab *op)
{
// first, we calculate the mask (opacity for each pixel)
render_dab_mask(mask,
op->x - tx*MYPAINT_TILE_SIZE,
op->y - ty*MYPAINT_TILE_SIZE,
op->radius,
op->hardness,
op->aspect_ratio, op->angle
);
// second, we use the mask to stamp a dab for each activated blend mode
if (op->normal) {
if (op->color_a == 1.0) {
draw_dab_pixels_BlendMode_Normal(mask, rgba_p,
op->color_r, op->color_g, op->color_b, op->normal*op->opaque*(1<<15));
} else {
// normal case for brushes that use smudging (eg. watercolor)
draw_dab_pixels_BlendMode_Normal_and_Eraser(mask, rgba_p,
op->color_r, op->color_g, op->color_b, op->color_a*(1<<15), op->normal*op->opaque*(1<<15));
}
}
if (op->lock_alpha) {
draw_dab_pixels_BlendMode_LockAlpha(mask, rgba_p,
op->color_r, op->color_g, op->color_b, op->lock_alpha*op->opaque*(1<<15));
}
if (op->colorize) {
draw_dab_pixels_BlendMode_Color(mask, rgba_p,
op->color_r, op->color_g, op->color_b,
op->colorize*op->opaque*(1<<15));
}
}
// Must be threadsafe
void
process_tile(MyPaintTiledSurface *self, int tx, int ty)
{
TileIndex tile_index = {tx, ty};
OperationDataDrawDab *op = operation_queue_pop(self->operation_queue, tile_index);
if (!op) {
return;
}
MyPaintTileRequest request_data;
const int mipmap_level = 0;
mypaint_tile_request_init(&request_data, mipmap_level, tx, ty, FALSE);
mypaint_tiled_surface_tile_request_start(self, &request_data);
uint16_t * rgba_p = request_data.buffer;
if (!rgba_p) {
printf("Warning: Unable to get tile!\n");
return;
}
uint16_t mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];
while (op) {
process_op(rgba_p, mask, tile_index.x, tile_index.y, op);
free(op);
op = operation_queue_pop(self->operation_queue, tile_index);
}
mypaint_tiled_surface_tile_request_end(self, &request_data);
}
// OPTIMIZE: send a list of the exact changed rects instead of a bounding box
// to minimize the area being composited? Profile to see the effect first.
void
update_dirty_bbox(MyPaintTiledSurface *self, OperationDataDrawDab *op)
{
int bb_x, bb_y, bb_w, bb_h;
float r_fringe = op->radius + 1.0f; // +1.0 should not be required, only to be sure
bb_x = floor (op->x - r_fringe);
bb_y = floor (op->y - r_fringe);
bb_w = floor (op->x + r_fringe) - bb_x + 1;
bb_h = floor (op->y + r_fringe) - bb_y + 1;
mypaint_rectangle_expand_to_include_point(&self->dirty_bbox, bb_x, bb_y);
mypaint_rectangle_expand_to_include_point(&self->dirty_bbox, bb_x+bb_w-1, bb_y+bb_h-1);
}
// returns TRUE if the surface was modified
gboolean draw_dab_internal (MyPaintTiledSurface *self, float x, float y,
float radius,
float color_r, float color_g, float color_b,
float opaque, float hardness,
float color_a,
float aspect_ratio, float angle,
float lock_alpha,
float colorize
)
{
OperationDataDrawDab op_struct;
OperationDataDrawDab *op = &op_struct;
op->x = x;
op->y = y;
op->radius = radius;
op->aspect_ratio = aspect_ratio;
op->angle = angle;
op->opaque = CLAMP(opaque, 0.0f, 1.0f);
op->hardness = CLAMP(hardness, 0.0f, 1.0f);
op->lock_alpha = CLAMP(lock_alpha, 0.0f, 1.0f);
op->colorize = CLAMP(colorize, 0.0f, 1.0f);
if (op->radius < 0.1f) return FALSE; // don't bother with dabs smaller than 0.1 pixel
    if (op->hardness == 0.0f) return FALSE; // infinitely small center point, fully transparent outside
if (op->opaque == 0.0f) return FALSE;
color_r = CLAMP(color_r, 0.0f, 1.0f);
color_g = CLAMP(color_g, 0.0f, 1.0f);
color_b = CLAMP(color_b, 0.0f, 1.0f);
color_a = CLAMP(color_a, 0.0f, 1.0f);
op->color_r = color_r * (1<<15);
op->color_g = color_g * (1<<15);
op->color_b = color_b * (1<<15);
op->color_a = color_a;
// blending mode preparation
op->normal = 1.0f;
op->normal *= 1.0f-op->lock_alpha;
op->normal *= 1.0f-op->colorize;
if (op->aspect_ratio<1.0f) op->aspect_ratio=1.0f;
// Determine the tiles influenced by operation, and queue it for processing for each tile
float r_fringe = radius + 1.0f; // +1.0 should not be required, only to be sure
int tx1 = floor(floor(x - r_fringe) / MYPAINT_TILE_SIZE);
int tx2 = floor(floor(x + r_fringe) / MYPAINT_TILE_SIZE);
int ty1 = floor(floor(y - r_fringe) / MYPAINT_TILE_SIZE);
int ty2 = floor(floor(y + r_fringe) / MYPAINT_TILE_SIZE);
for (int ty = ty1; ty <= ty2; ty++) {
for (int tx = tx1; tx <= tx2; tx++) {
const TileIndex tile_index = {tx, ty};
OperationDataDrawDab *op_copy = (OperationDataDrawDab *)malloc(sizeof(OperationDataDrawDab));
*op_copy = *op;
operation_queue_add(self->operation_queue, tile_index, op_copy);
}
}
update_dirty_bbox(self, op);
return TRUE;
}
// returns TRUE if the surface was modified
int draw_dab (MyPaintSurface *surface, float x, float y,
float radius,
float color_r, float color_g, float color_b,
float opaque, float hardness,
float color_a,
float aspect_ratio, float angle,
float lock_alpha,
float colorize)
{
MyPaintTiledSurface *self = (MyPaintTiledSurface *)surface;
gboolean surface_modified = FALSE;
// Normal pass
if (draw_dab_internal(self, x, y, radius, color_r, color_g, color_b,
opaque, hardness, color_a, aspect_ratio, angle,
lock_alpha, colorize)) {
surface_modified = TRUE;
}
// Symmetry pass
if(self->surface_do_symmetry) {
const float symm_x = self->surface_center_x + (self->surface_center_x - x);
if (draw_dab_internal(self, symm_x, y, radius, color_r, color_g, color_b,
opaque, hardness, color_a, aspect_ratio, -angle,
lock_alpha, colorize)) {
surface_modified = TRUE;
}
}
return surface_modified;
}
void get_color (MyPaintSurface *surface, float x, float y,
float radius,
float * color_r, float * color_g, float * color_b, float * color_a
)
{
MyPaintTiledSurface *self = (MyPaintTiledSurface *)surface;
if (radius < 1.0f) radius = 1.0f;
const float hardness = 0.5f;
const float aspect_ratio = 1.0f;
const float angle = 0.0f;
float sum_weight, sum_r, sum_g, sum_b, sum_a;
sum_weight = sum_r = sum_g = sum_b = sum_a = 0.0f;
// in case we return with an error
*color_r = 0.0f;
*color_g = 1.0f;
*color_b = 0.0f;
// WARNING: some code duplication with draw_dab
float r_fringe = radius + 1.0f; // +1 should not be required, only to be sure
int tx1 = floor(floor(x - r_fringe) / MYPAINT_TILE_SIZE);
int tx2 = floor(floor(x + r_fringe) / MYPAINT_TILE_SIZE);
int ty1 = floor(floor(y - r_fringe) / MYPAINT_TILE_SIZE);
int ty2 = floor(floor(y + r_fringe) / MYPAINT_TILE_SIZE);
int tiles_n = (tx2 - tx1) * (ty2 - ty1);
#pragma omp parallel for schedule(static) if(self->threadsafe_tile_requests && tiles_n > 3)
for (int ty = ty1; ty <= ty2; ty++) {
for (int tx = tx1; tx <= tx2; tx++) {
// Flush queued draw_dab operations
process_tile(self, tx, ty);
MyPaintTileRequest request_data;
const int mipmap_level = 0;
mypaint_tile_request_init(&request_data, mipmap_level, tx, ty, TRUE);
mypaint_tiled_surface_tile_request_start(self, &request_data);
uint16_t * rgba_p = request_data.buffer;
if (!rgba_p) {
printf("Warning: Unable to get tile!\n");
break;
}
// first, we calculate the mask (opacity for each pixel)
uint16_t mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];
render_dab_mask(mask,
x - tx*MYPAINT_TILE_SIZE,
y - ty*MYPAINT_TILE_SIZE,
radius,
hardness,
aspect_ratio, angle
);
// TODO: try atomic operations instead
#pragma omp critical
{
get_color_pixels_accumulate (mask, rgba_p,
&sum_weight, &sum_r, &sum_g, &sum_b, &sum_a);
}
mypaint_tiled_surface_tile_request_end(self, &request_data);
}
}
assert(sum_weight > 0.0f);
sum_a /= sum_weight;
sum_r /= sum_weight;
sum_g /= sum_weight;
sum_b /= sum_weight;
*color_a = sum_a;
// now un-premultiply the alpha
if (sum_a > 0.0f) {
*color_r = sum_r / sum_a;
*color_g = sum_g / sum_a;
*color_b = sum_b / sum_a;
} else {
// it is all transparent, so don't care about the colors
// (let's make them ugly so bugs will be visible)
*color_r = 0.0f;
*color_g = 1.0f;
*color_b = 0.0f;
}
// fix rounding problems that do happen due to floating point math
*color_r = CLAMP(*color_r, 0.0f, 1.0f);
*color_g = CLAMP(*color_g, 0.0f, 1.0f);
*color_b = CLAMP(*color_b, 0.0f, 1.0f);
*color_a = CLAMP(*color_a, 0.0f, 1.0f);
}
/**
* mypaint_tiled_surface_init: (skip)
*
* Initialize the surface, passing in implementations of the tile backend.
* Note: Only intended to be called from subclasses of #MyPaintTiledSurface
**/
void
mypaint_tiled_surface_init(MyPaintTiledSurface *self,
MyPaintTileRequestStartFunction tile_request_start,
MyPaintTileRequestEndFunction tile_request_end)
{
mypaint_surface_init(&self->parent);
self->parent.draw_dab = draw_dab;
self->parent.get_color = get_color;
self->parent.begin_atomic = begin_atomic_default;
self->parent.end_atomic = end_atomic_default;
self->tile_request_end = tile_request_end;
self->tile_request_start = tile_request_start;
self->tile_size = MYPAINT_TILE_SIZE;
self->threadsafe_tile_requests = FALSE;
self->dirty_bbox.x = 0;
self->dirty_bbox.y = 0;
self->dirty_bbox.width = 0;
self->dirty_bbox.height = 0;
self->surface_do_symmetry = FALSE;
self->surface_center_x = 0.0f;
self->operation_queue = operation_queue_new();
}
/**
* mypaint_tiled_surface_destroy: (skip)
*
* Deallocate resources set up by mypaint_tiled_surface_init()
* Does not free the #MyPaintTiledSurface itself.
* Note: Only intended to be called from subclasses of #MyPaintTiledSurface
*/
void
mypaint_tiled_surface_destroy(MyPaintTiledSurface *self)
{
operation_queue_free(self->operation_queue);
}
|
GB_assign_zombie1.c
|
//------------------------------------------------------------------------------
// GB_assign_zombie1: delete all entries in C(:,j) for GB_assign
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C(:,j)<!> = anything: GrB_Row_assign or GrB_Col_assign with an empty
// complemented mask requires all entries in the C(:,j) vector to be deleted.
// C must be sparse or hypersparse.
// C->iso is not affected.
#include "GB_assign.h"
#include "GB_assign_zombie.h"
void GB_assign_zombie1
(
GrB_Matrix C,
const int64_t j,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (!GB_IS_FULL (C)) ;
ASSERT (!GB_IS_BITMAP (C)) ;
ASSERT (GB_ZOMBIES_OK (C)) ;
ASSERT (GB_JUMBLED_OK (C)) ;
ASSERT (!GB_PENDING (C)) ;
//--------------------------------------------------------------------------
// get C(:,j)
//--------------------------------------------------------------------------
int64_t *restrict Ci = C->i ;
int64_t pC_start, pC_end, pleft = 0, pright = C->nvec-1 ;
GB_lookup (C->h != NULL, C->h, C->p, C->vlen, &pleft, pright, j,
&pC_start, &pC_end) ;
int64_t cjnz = pC_end - pC_start ;
int64_t nzombies = C->nzombies ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (cjnz, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// C(:,j) = empty
//--------------------------------------------------------------------------
int64_t pC ;
#pragma omp parallel for num_threads(nthreads) schedule(static) \
reduction(+:nzombies)
for (pC = pC_start ; pC < pC_end ; pC++)
{
int64_t i = Ci [pC] ;
if (!GB_IS_ZOMBIE (i))
{
// delete C(i,j) by marking it as a zombie
nzombies++ ;
Ci [pC] = GB_FLIP (i) ;
}
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
C->nzombies = nzombies ;
}
|
agent_uid_map.h
|
// -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & Newcastle University for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef CORE_CONTAINER_AGENT_UID_MAP_H_
#define CORE_CONTAINER_AGENT_UID_MAP_H_
#include <limits>
#include <vector>
#include "core/agent/agent_uid.h"
namespace bdm {
/// AgentUidMap is an associative container that exploits the properties of
/// AgentUid to store data in contiguous arrays. Inserting elements and reading
/// elements at the same time is thread-safe as long as the keys are different.
/// These operations with distinct keys are lock-free and atomic-free, and thus
/// offer high performance.
template <typename TValue>
class AgentUidMap {
struct Iterator {
AgentUidMap* map_;
uint64_t idx_;
};
public:
AgentUidMap() {}
AgentUidMap(const AgentUidMap& other)
: data_(other.data_), agent_uid_reused_(other.agent_uid_reused_) {}
explicit AgentUidMap(uint64_t initial_size) {
data_.resize(initial_size);
agent_uid_reused_.resize(initial_size, AgentUid::kReusedMax);
}
void resize(uint64_t new_size) { // NOLINT
data_.resize(new_size);
agent_uid_reused_.resize(new_size, AgentUid::kReusedMax);
}
void clear() { // NOLINT
for (auto& el : agent_uid_reused_) {
el = AgentUid::kReusedMax;
}
}
void ParallelClear() {
#pragma omp parallel for
for (uint64_t i = 0; i < data_.size(); ++i) {
agent_uid_reused_[i] = AgentUid::kReusedMax;
}
}
uint64_t size() const { // NOLINT
return data_.size();
}
void Remove(const AgentUid& key) {
if (key.GetIndex() >= data_.size()) {
return;
}
agent_uid_reused_[key.GetIndex()] = AgentUid::kReusedMax;
}
bool Contains(const AgentUid& uid) const {
auto idx = uid.GetIndex();
if (idx >= data_.size()) {
return false;
}
return uid.GetReused() == agent_uid_reused_[idx];
}
void Insert(const AgentUid& uid, const TValue& value) {
auto idx = uid.GetIndex();
data_[idx] = value;
agent_uid_reused_[idx] = uid.GetReused();
}
const TValue& operator[](const AgentUid& key) const {
return data_[key.GetIndex()];
}
typename AgentUid::Reused_t GetReused(uint64_t index) const {
return agent_uid_reused_[index];
}
private:
std::vector<TValue> data_;
std::vector<typename AgentUid::Reused_t> agent_uid_reused_;
};
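// Example usage (illustrative sketch only; it assumes an AgentUid can be
// constructed from an (index, reused) pair as declared in
// core/agent/agent_uid.h):
//
//   AgentUidMap<double> map(100);   // room for indices 0..99
//   AgentUid uid(42, 0);            // hypothetical uid
//   map.Insert(uid, 3.14);
//   if (map.Contains(uid)) {
//     double value = map[uid];      // reads the slot for index 42
//   }
//   map.Remove(uid);                // Contains(uid) is now false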
} // namespace bdm
#endif // CORE_CONTAINER_AGENT_UID_MAP_H_
|
wots.c
|
#include <openssl/evp.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
#include <gmp.h>
typedef struct{
unsigned char value[EVP_MAX_MD_SIZE];
unsigned int len;
} KeyBlock; /* Hash output */
typedef struct{
int w, n, t, t1, t2;
KeyBlock* X;
KeyBlock* Y;
} WKP; /* WOTS Key Pair */
/**
 * Randomize the bytes in a KeyBlock using rand().
 * NOTE: rand() is not a cryptographically secure randomness source; it is
 * used here for illustration only.
 */
void randomize_block(KeyBlock* block, unsigned int len) {
for (unsigned int i = 0; i < len; i++) {
block->value[i] = rand();
}
block->len = len;
}
/**
 * Hash chain: apply the digest to `in` iteratively, writing the result to
 * `out`. With argument n the digest is applied n-1 times (out = H^(n-1)(in));
 * n = 1 simply copies `in` into `out`.
 */
void hash(char* hashname, KeyBlock* in, KeyBlock* out, unsigned int n) {
const EVP_MD* md = EVP_get_digestbyname(hashname);
if(!md) {
printf("Unknown message digest %s\n", hashname);
exit(1);
}
EVP_MD_CTX* mdctx = EVP_MD_CTX_new();
    memcpy(out->value, in->value, in->len);
    out->len = in->len;
for( unsigned int i = 1; i < n; i++) {
EVP_DigestInit_ex(mdctx, md, NULL);
EVP_DigestUpdate(mdctx, out->value, in->len);
EVP_DigestFinal_ex(mdctx, out->value, &out->len);
if(!EVP_MD_CTX_reset(mdctx)) {
printf("Could not reset md context.\n");
exit(1);
}
}
EVP_MD_CTX_free(mdctx);
}
void print_bytes(unsigned char* bytes, int len) {
for (int i = 0; i < len; i++) {
printf("%.2X", bytes[i]);
}
}
void get_wots_exponents(unsigned char* bytes, int blen, unsigned int* exp, int explen) {
//create bigint with bytes
mpz_t bi;
mpz_init(bi);
char strbytes[(2*blen)+1];
char * aux = strbytes;
for(int i = 0; i < blen; i++){
aux += sprintf(aux, "%.2X", bytes[i]);
}
mpz_set_str(bi, strbytes, 16);
//create bitmask of size explen
mpz_t mask;
mpz_init(mask);
mpz_set_ui(mask, 1);
mpz_mul_2exp(mask, mask, explen);
mpz_sub_ui(mask, mask, 1);
//assign exponents
mpz_t ret;
mpz_init(ret);
int t = ((blen*8) + explen -1)/explen;
for(int i = 0; i < t; i++) {
mpz_and(ret, bi, mask);
exp[i] = (unsigned int) mpz_get_ui(ret);
mpz_tdiv_q_2exp(bi,bi,explen);
}
    mpz_clear(bi);
    mpz_clear(mask);
    mpz_clear(ret);
}
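/**
 * Worked example (illustration only): bytes = {0xAB, 0xCD} (blen = 2) with
 * explen = 4 builds the bigint 0xABCD and a 4-bit mask, so
 * t = (16 + 4 - 1) / 4 = 4 exponents are extracted, least significant
 * nibble first: exp[0]=0xD, exp[1]=0xC, exp[2]=0xB, exp[3]=0xA.
 */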
/**
* Generates the signature key X using parameters in WKP
*/
void generate_sig_key(WKP* kp) {
if(!kp->t){
printf("use setup(WKP*)");
exit(1);
}
kp->X = malloc(kp->t * sizeof(KeyBlock));
#ifdef PARALLEL
#pragma omp parallel for
#endif
for(int i = 0; i < kp->t; i++) {
randomize_block(&kp->X[i], kp->n/8);
}
}
/**
 * Generates the verification key Y using parameters in WKP
 */
void generate_ver_key(WKP* kp) {
if(!kp->t){
printf("use setup(WKP*)");
exit(1);
}
kp->Y = malloc(kp->t * sizeof(KeyBlock));
int exp = 1 << kp->w;
double time = omp_get_wtime();
#ifdef PARALLEL
#pragma omp parallel for
#endif
for(int i = 0; i < kp->t; i++) {
hash("sha256", &kp->X[i], &kp->Y[i], exp);
}
    time = omp_get_wtime() - time;
    printf(">>Benchmark: %f secs in %s\n", time, __func__);
}
/**
* Setup the WKP struct with default parameters.
* Initializes Signature key and Verification key
*/
void setup(WKP* kp) {
kp->w = 16;
kp->n = 256;
kp->t = 18;
kp->t1 = 16;
kp->t2 = 2;
generate_sig_key(kp);
generate_ver_key(kp);
}
void WOTS_SIGN(unsigned char* bytes, WKP* kp) {
}
void WOTS_VERIFY(){}
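/**
 * Illustrative sketch only (hypothetical helpers, not the full scheme): for
 * a plain Winternitz chain without the checksum blocks implied by t1/t2,
 * the signature block for message exponent e is H^e(X_i), and the verifier
 * advances the chain the remaining (2^w - 1 - e) steps and compares the
 * result against Y_i. The helpers reuse the hash() convention of this file,
 * where hash(.., n) applies the digest n-1 times.
 */
static void wots_sign_block(WKP* kp, int i, unsigned int e, KeyBlock* sig) {
    /* sig = H^e(X_i); pass e+1 because hash() applies the digest n-1 times */
    hash("sha256", &kp->X[i], sig, e + 1);
}
static int wots_verify_block(WKP* kp, int i, unsigned int e, KeyBlock* sig) {
    KeyBlock probe;
    /* probe = H^(2^w - 1 - e)(sig); it should equal the chain end Y_i */
    hash("sha256", sig, &probe, (1u << kp->w) - e);
    return memcmp(probe.value, kp->Y[i].value, kp->Y[i].len) == 0;
}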
/**
* Print util function
**/
void print_wkp(WKP* kp) {
printf("WKP Params:\nw=%d n=%d t=%d, t1=%d t2=%d\n",
kp->w, kp->n, kp->t, kp->t1, kp->t2);
printf("--- Signature Key ---\n");
for(int i = 0; i < kp->t; i++){
printf("X%d:\t",i);
for(int h = 0; h < kp->X[i].len; h++) {
printf("%.2X", kp->X[i].value[h]);
}
printf("\t len: %d Bytes\n", kp->X[i].len);
}
printf("--- Verification Key ---\n");
for(int i = 0; i < kp->t; i++){
printf("Y%d:\t",i);
for(int h = 0; h < kp->Y[i].len; h++) {
printf("%.2X", kp->Y[i].value[h]);
}
printf("\t len: %d Bytes\n", kp->Y[i].len);
}
}
|
enhance.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE N N H H AAA N N CCCC EEEEE %
% E NN N H H A A NN N C E %
% EEE N N N HHHHH AAAAA N N N C EEE %
% E N NN H H A A N NN C E %
% EEEEE N N H H A A N N CCCC EEEEE %
% %
% %
% MagickCore Image Enhancement Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoGammaImage() extracts the 'mean' from the image and adjusts the image
% to try to set its gamma appropriately.
%
% The format of the AutoGammaImage method is:
%
% MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to auto-gamma.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image,
ExceptionInfo *exception)
{
double
gamma,
log_mean,
mean,
sans;
MagickStatusType
status;
register ssize_t
i;
log_mean=log(0.5);
if (image->channel_mask == DefaultChannels)
{
/*
Apply gamma correction equally across all given channels.
*/
(void) GetImageMean(image,&mean,&sans,exception);
gamma=log(mean*QuantumScale)/log_mean;
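      /*
        Worked example: a mean of QuantumRange/4 gives
        gamma=log(0.25)/log(0.5), i.e. 2.0, while a mean of exactly
        QuantumRange/2 gives gamma=1.0 (no change).
      */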
return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception));
}
/*
Auto-gamma each channel separately.
*/
status=MagickTrue;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
ChannelType
channel_mask;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
channel_mask=SetImageChannelMask(image,(ChannelType) (1UL << i));
status=GetImageMean(image,&mean,&sans,exception);
gamma=log(mean*QuantumScale)/log_mean;
status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception);
(void) SetImageChannelMask(image,channel_mask);
if (status == MagickFalse)
break;
}
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoLevelImage() adjusts the levels of a particular image channel by
% scaling the minimum and maximum values to the full quantum range.
%
% The format of the LevelImage method is:
%
% MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image,
ExceptionInfo *exception)
{
return(MinMaxStretchImage(image,0.0,0.0,1.0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B r i g h t n e s s C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BrightnessContrastImage() changes the brightness and/or contrast of an
% image. It converts the brightness and contrast parameters into slope and
% intercept and calls a polynomial function to apply to the image.
%
% The format of the BrightnessContrastImage method is:
%
% MagickBooleanType BrightnessContrastImage(Image *image,
% const double brightness,const double contrast,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o brightness: the brightness percent (-100 .. 100).
%
% o contrast: the contrast percent (-100 .. 100).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
const double brightness,const double contrast,ExceptionInfo *exception)
{
#define BrightnessContastImageTag "BrightnessContast/Image"
double
alpha,
coefficients[2],
intercept,
slope;
MagickBooleanType
status;
/*
Compute slope and intercept.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
alpha=contrast;
slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0));
if (slope < 0.0)
slope=0.0;
intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
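  /*
    With brightness=0 and contrast=0 this reduces to slope=tan(MagickPI/4)=1.0
    and intercept=0.0, i.e. the identity transform.
  */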
coefficients[0]=slope;
coefficients[1]=intercept;
status=FunctionImage(image,PolynomialFunction,2,coefficients,exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C L A H E I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CLAHEImage() is a variant of adaptive histogram equalization in which the
% contrast amplification is limited, so as to reduce this problem of noise
% amplification.
%
% Adapted from implementation by Karel Zuiderveld, [email protected] in
% "Graphics Gems IV", Academic Press, 1994.
%
% The format of the CLAHEImage method is:
%
% MagickBooleanType CLAHEImage(Image *image,const size_t width,
% const size_t height,const size_t number_bins,const double clip_limit,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the tile divisions to use in horizontal direction.
%
% o height: the height of the tile divisions to use in vertical direction.
%
% o number_bins: number of bins for histogram ("dynamic range").
%
% o clip_limit: contrast limit for localised changes in contrast. A limit
% less than 1 results in standard non-contrast limited AHE.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef struct _RangeInfo
{
unsigned short
min,
max;
} RangeInfo;
static void ClipCLAHEHistogram(const double clip_limit,const size_t number_bins,
size_t *histogram)
{
#define NumberCLAHEGrays (65536)
register ssize_t
i;
size_t
cumulative_excess,
previous_excess,
step;
ssize_t
excess;
/*
Compute total number of excess pixels.
*/
cumulative_excess=0;
for (i=0; i < (ssize_t) number_bins; i++)
{
excess=(ssize_t) histogram[i]-(ssize_t) clip_limit;
if (excess > 0)
cumulative_excess+=excess;
}
/*
Clip histogram and redistribute excess pixels across all bins.
*/
step=cumulative_excess/number_bins;
excess=(ssize_t) (clip_limit-step);
for (i=0; i < (ssize_t) number_bins; i++)
{
if ((double) histogram[i] > clip_limit)
histogram[i]=(size_t) clip_limit;
else
if ((ssize_t) histogram[i] > excess)
{
cumulative_excess-=histogram[i]-excess;
histogram[i]=(size_t) clip_limit;
}
else
{
cumulative_excess-=step;
histogram[i]+=step;
}
}
/*
Redistribute remaining excess.
*/
do
{
register size_t
*p;
size_t
*q;
previous_excess=cumulative_excess;
p=histogram;
q=histogram+number_bins;
while ((cumulative_excess != 0) && (p < q))
{
step=number_bins/cumulative_excess;
if (step < 1)
step=1;
for (p=histogram; (p < q) && (cumulative_excess != 0); p+=step)
if ((double) *p < clip_limit)
{
(*p)++;
cumulative_excess--;
}
p++;
}
} while ((cumulative_excess != 0) && (cumulative_excess < previous_excess));
}
static void GenerateCLAHEHistogram(const RectangleInfo *clahe_info,
const RectangleInfo *tile_info,const size_t number_bins,
const unsigned short *lut,const unsigned short *pixels,size_t *histogram)
{
register const unsigned short
*p;
register ssize_t
i;
/*
Classify the pixels into a gray histogram.
*/
for (i=0; i < (ssize_t) number_bins; i++)
histogram[i]=0L;
p=pixels;
for (i=0; i < (ssize_t) tile_info->height; i++)
{
const unsigned short
*q;
q=p+tile_info->width;
while (p < q)
histogram[lut[*p++]]++;
q+=clahe_info->width;
p=q-tile_info->width;
}
}
static void InterpolateCLAHE(const RectangleInfo *clahe_info,const size_t *Q12,
const size_t *Q22,const size_t *Q11,const size_t *Q21,
const RectangleInfo *tile,const unsigned short *lut,unsigned short *pixels)
{
ssize_t
y;
unsigned short
intensity;
/*
Bilinear interpolate four tiles to eliminate boundary artifacts.
*/
for (y=(ssize_t) tile->height; y > 0; y--)
{
register ssize_t
x;
for (x=(ssize_t) tile->width; x > 0; x--)
{
intensity=lut[*pixels];
*pixels++=(unsigned short ) (PerceptibleReciprocal((double) tile->width*
tile->height)*(y*(x*Q12[intensity]+(tile->width-x)*Q22[intensity])+
(tile->height-y)*(x*Q11[intensity]+(tile->width-x)*Q21[intensity])));
}
pixels+=(clahe_info->width-tile->width);
}
}
static void GenerateCLAHELut(const RangeInfo *range_info,
const size_t number_bins,unsigned short *lut)
{
ssize_t
i;
unsigned short
delta;
/*
Scale input image [intensity min,max] to [0,number_bins-1].
*/
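  /*
    For example, with min=0, max=65535 and number_bins=256 this gives
    delta=256, so lut[i]=i/256 buckets the full 16-bit range into 256 bins.
  */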
delta=(unsigned short) ((range_info->max-range_info->min)/number_bins+1);
for (i=(ssize_t) range_info->min; i <= (ssize_t) range_info->max; i++)
lut[i]=(unsigned short) ((i-range_info->min)/delta);
}
static void MapCLAHEHistogram(const RangeInfo *range_info,
const size_t number_bins,const size_t number_pixels,size_t *histogram)
{
double
scale,
sum;
register ssize_t
i;
/*
Rescale histogram to range [min-intensity .. max-intensity].
*/
scale=(double) (range_info->max-range_info->min)/number_pixels;
sum=0.0;
for (i=0; i < (ssize_t) number_bins; i++)
{
sum+=histogram[i];
histogram[i]=(size_t) (range_info->min+scale*sum);
if (histogram[i] > range_info->max)
histogram[i]=range_info->max;
}
}
static MagickBooleanType CLAHE(const RectangleInfo *clahe_info,
const RectangleInfo *tile_info,const RangeInfo *range_info,
const size_t number_bins,const double clip_limit,unsigned short *pixels)
{
MemoryInfo
*tile_cache;
register unsigned short
*p;
size_t
limit,
*tiles;
ssize_t
y;
unsigned short
lut[NumberCLAHEGrays];
/*
    Contrast limited adaptive histogram equalization.
*/
if (clip_limit == 1.0)
return(MagickTrue);
tile_cache=AcquireVirtualMemory((size_t) clahe_info->x*clahe_info->y,
number_bins*sizeof(*tiles));
if (tile_cache == (MemoryInfo *) NULL)
return(MagickFalse);
tiles=(size_t *) GetVirtualMemoryBlob(tile_cache);
limit=(size_t) (clip_limit*(tile_info->width*tile_info->height)/number_bins);
if (limit < 1UL)
limit=1UL;
/*
Generate greylevel mappings for each tile.
*/
GenerateCLAHELut(range_info,number_bins,lut);
p=pixels;
for (y=0; y < (ssize_t) clahe_info->y; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) clahe_info->x; x++)
{
size_t
*histogram;
histogram=tiles+(number_bins*(y*clahe_info->x+x));
GenerateCLAHEHistogram(clahe_info,tile_info,number_bins,lut,p,histogram);
ClipCLAHEHistogram((double) limit,number_bins,histogram);
MapCLAHEHistogram(range_info,number_bins,tile_info->width*
tile_info->height,histogram);
p+=tile_info->width;
}
p+=clahe_info->width*(tile_info->height-1);
}
/*
Interpolate greylevel mappings to get CLAHE image.
*/
p=pixels;
for (y=0; y <= (ssize_t) clahe_info->y; y++)
{
OffsetInfo
offset;
RectangleInfo
tile;
register ssize_t
x;
tile.height=tile_info->height;
tile.y=y-1;
offset.y=tile.y+1;
if (y == 0)
{
/*
Top row.
*/
tile.height=tile_info->height >> 1;
tile.y=0;
offset.y=0;
}
else
if (y == (ssize_t) clahe_info->y)
{
/*
Bottom row.
*/
tile.height=(tile_info->height+1) >> 1;
tile.y=clahe_info->y-1;
offset.y=tile.y;
}
for (x=0; x <= (ssize_t) clahe_info->x; x++)
{
tile.width=tile_info->width;
tile.x=x-1;
offset.x=tile.x+1;
if (x == 0)
{
/*
Left column.
*/
tile.width=tile_info->width >> 1;
tile.x=0;
offset.x=0;
}
else
if (x == (ssize_t) clahe_info->x)
{
/*
Right column.
*/
tile.width=(tile_info->width+1) >> 1;
tile.x=clahe_info->x-1;
offset.x=tile.x;
}
InterpolateCLAHE(clahe_info,
tiles+(number_bins*(tile.y*clahe_info->x+tile.x)), /* Q12 */
tiles+(number_bins*(tile.y*clahe_info->x+offset.x)), /* Q22 */
tiles+(number_bins*(offset.y*clahe_info->x+tile.x)), /* Q11 */
tiles+(number_bins*(offset.y*clahe_info->x+offset.x)), /* Q21 */
&tile,lut,p);
p+=tile.width;
}
p+=clahe_info->width*(tile.height-1);
}
tile_cache=RelinquishVirtualMemory(tile_cache);
return(MagickTrue);
}
MagickExport MagickBooleanType CLAHEImage(Image *image,const size_t width,
const size_t height,const size_t number_bins,const double clip_limit,
ExceptionInfo *exception)
{
#define CLAHEImageTag "CLAHE/Image"
CacheView
*image_view;
ColorspaceType
colorspace;
MagickBooleanType
status;
MagickOffsetType
progress;
MemoryInfo
*pixel_cache;
RangeInfo
range_info;
RectangleInfo
clahe_info,
tile_info;
size_t
n;
ssize_t
y;
unsigned short
*pixels;
/*
Configure CLAHE parameters.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
range_info.min=0;
range_info.max=NumberCLAHEGrays-1;
tile_info.width=width;
if (tile_info.width == 0)
tile_info.width=image->columns >> 3;
tile_info.height=height;
if (tile_info.height == 0)
tile_info.height=image->rows >> 3;
tile_info.x=0;
if ((image->columns % tile_info.width) != 0)
tile_info.x=(ssize_t) tile_info.width-(image->columns % tile_info.width);
tile_info.y=0;
if ((image->rows % tile_info.height) != 0)
tile_info.y=(ssize_t) tile_info.height-(image->rows % tile_info.height);
clahe_info.width=image->columns+tile_info.x;
clahe_info.height=image->rows+tile_info.y;
clahe_info.x=(ssize_t) clahe_info.width/tile_info.width;
clahe_info.y=(ssize_t) clahe_info.height/tile_info.height;
pixel_cache=AcquireVirtualMemory(clahe_info.width,clahe_info.height*
sizeof(*pixels));
if (pixel_cache == (MemoryInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
pixels=(unsigned short *) GetVirtualMemoryBlob(pixel_cache);
colorspace=image->colorspace;
if (TransformImageColorspace(image,LabColorspace,exception) == MagickFalse)
{
pixel_cache=RelinquishVirtualMemory(pixel_cache);
return(MagickFalse);
}
/*
Initialize CLAHE pixels.
*/
image_view=AcquireVirtualCacheView(image,exception);
progress=0;
status=MagickTrue;
n=0;
for (y=0; y < (ssize_t) clahe_info.height; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-(tile_info.x >> 1),y-
(tile_info.y >> 1),clahe_info.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) clahe_info.width; x++)
{
pixels[n++]=ScaleQuantumToShort(p[0]);
p+=GetPixelChannels(image);
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CLAHEImageTag,progress,2*
GetPixelChannels(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
status=CLAHE(&clahe_info,&tile_info,&range_info,number_bins == 0 ?
(size_t) 128 : MagickMin(number_bins,256),clip_limit,pixels);
if (status == MagickFalse)
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
/*
Push CLAHE pixels to CLAHE image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
n=clahe_info.width*(tile_info.y >> 1);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
n+=tile_info.x >> 1;
for (x=0; x < (ssize_t) image->columns; x++)
{
q[0]=ScaleShortToQuantum(pixels[n++]);
q+=GetPixelChannels(image);
}
n+=(clahe_info.width-image->columns-(tile_info.x >> 1));
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CLAHEImageTag,progress,2*
GetPixelChannels(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
pixel_cache=RelinquishVirtualMemory(pixel_cache);
if (TransformImageColorspace(image,colorspace,exception) == MagickFalse)
status=MagickFalse;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClutImage() replaces each color value in the given image by using it as an
% index to look up a replacement color value in a Color Look-Up Table in the
% form of an image. The values are extracted along a diagonal of the CLUT
% image so either a horizontal or vertical gradient image can be used.
%
% Typically this is used to either re-color a gray-scale image according to a
% color gradient in the CLUT image, or to perform a freeform histogram
% (level) adjustment according to the (typically gray-scale) gradient in the
% CLUT image.
%
% When the 'channel' mask includes the matte/alpha transparency channel but
% one image has no such channel, it is assumed that that image is a simple
% gray-scale image that will affect the alpha channel values, either for
% gray-scale coloring (with transparent or semi-transparent colors), or for
% a histogram adjustment of existing alpha channel values. If both images
% have matte channels, direct and normal indexing is applied, which is rarely
% used.
%
% The format of the ClutImage method is:
%
% MagickBooleanType ClutImage(Image *image,Image *clut_image,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o clut_image: the color lookup table image for replacement color values.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image,
const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ClutImageTag "Clut/Image"
CacheView
*clut_view,
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
*clut_map;
register ssize_t
i;
ssize_t adjust,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(clut_image != (Image *) NULL);
assert(clut_image->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
(IsGrayColorspace(clut_image->colorspace) == MagickFalse))
(void) SetImageColorspace(image,sRGBColorspace,exception);
clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map));
if (clut_map == (PixelInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Clut image.
*/
status=MagickTrue;
progress=0;
adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
clut_view=AcquireVirtualCacheView(clut_image,exception);
for (i=0; i <= (ssize_t) MaxMap; i++)
{
GetPixelInfo(clut_image,clut_map+i);
status=InterpolatePixelInfo(clut_image,clut_view,method,
(double) i*(clut_image->columns-adjust)/MaxMap,(double) i*
(clut_image->rows-adjust)/MaxMap,clut_map+i,exception);
if (status == MagickFalse)
break;
}
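  /*
    Each of the MaxMap+1 entries of clut_map is interpolated from the CLUT
    image along its diagonal, at fraction i/MaxMap of the way from the
    top-left to the bottom-right corner.
  */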
clut_view=DestroyCacheView(clut_view);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
PixelTrait
traits;
GetPixelInfoPixel(image,q,&pixel);
traits=GetPixelChannelTraits(image,RedPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.red))].red;
traits=GetPixelChannelTraits(image,GreenPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.green))].green;
traits=GetPixelChannelTraits(image,BluePixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.blue))].blue;
traits=GetPixelChannelTraits(image,BlackPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.black))].black;
traits=GetPixelChannelTraits(image,AlphaPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.alpha))].alpha;
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ClutImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map);
if ((clut_image->alpha_trait != UndefinedPixelTrait) &&
((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0))
(void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r D e c i s i o n L i s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorDecisionListImage() accepts a lightweight Color Correction Collection
% (CCC) file which solely contains one or more color corrections and applies
% the correction to the image. Here is a sample CCC file:
%
% <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
% <ColorCorrection id="cc03345">
% <SOPNode>
% <Slope> 0.9 1.2 0.5 </Slope>
% <Offset> 0.4 -0.5 0.6 </Offset>
% <Power> 1.0 0.8 1.5 </Power>
% </SOPNode>
% <SATNode>
% <Saturation> 0.85 </Saturation>
% </SATNode>
% </ColorCorrection>
% </ColorCorrectionCollection>
%
% which includes the slope, offset, and power for each of the RGB channels
% as well as the saturation.
%
% The format of the ColorDecisionListImage method is:
%
% MagickBooleanType ColorDecisionListImage(Image *image,
% const char *color_correction_collection,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_correction_collection: the color correction collection in XML.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ColorDecisionListImage(Image *image,
const char *color_correction_collection,ExceptionInfo *exception)
{
#define ColorDecisionListCorrectImageTag "ColorDecisionList/Image"
typedef struct _Correction
{
double
slope,
offset,
power;
} Correction;
typedef struct _ColorCorrection
{
Correction
red,
green,
blue;
double
saturation;
} ColorCorrection;
CacheView
*image_view;
char
token[MagickPathExtent];
ColorCorrection
color_correction;
const char
*content,
*p;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
*cdl_map;
register ssize_t
i;
ssize_t
y;
XMLTreeInfo
*cc,
*ccc,
*sat,
*sop;
/*
Allocate and initialize cdl maps.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (color_correction_collection == (const char *) NULL)
return(MagickFalse);
ccc=NewXMLTree((const char *) color_correction_collection,exception);
if (ccc == (XMLTreeInfo *) NULL)
return(MagickFalse);
cc=GetXMLTreeChild(ccc,"ColorCorrection");
if (cc == (XMLTreeInfo *) NULL)
{
ccc=DestroyXMLTree(ccc);
return(MagickFalse);
}
color_correction.red.slope=1.0;
color_correction.red.offset=0.0;
color_correction.red.power=1.0;
color_correction.green.slope=1.0;
color_correction.green.offset=0.0;
color_correction.green.power=1.0;
color_correction.blue.slope=1.0;
color_correction.blue.offset=0.0;
color_correction.blue.power=1.0;
color_correction.saturation=0.0;
sop=GetXMLTreeChild(cc,"SOPNode");
if (sop != (XMLTreeInfo *) NULL)
{
XMLTreeInfo
*offset,
*power,
*slope;
slope=GetXMLTreeChild(sop,"Slope");
if (slope != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(slope);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
switch (i)
{
case 0:
{
color_correction.red.slope=StringToDouble(token,(char **) NULL);
break;
}
case 1:
{
color_correction.green.slope=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.slope=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
offset=GetXMLTreeChild(sop,"Offset");
if (offset != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(offset);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
switch (i)
{
case 0:
{
color_correction.red.offset=StringToDouble(token,
(char **) NULL);
break;
}
case 1:
{
color_correction.green.offset=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.offset=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
power=GetXMLTreeChild(sop,"Power");
if (power != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(power);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
switch (i)
{
case 0:
{
color_correction.red.power=StringToDouble(token,(char **) NULL);
break;
}
case 1:
{
color_correction.green.power=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.power=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
}
sat=GetXMLTreeChild(cc,"SATNode");
if (sat != (XMLTreeInfo *) NULL)
{
XMLTreeInfo
*saturation;
saturation=GetXMLTreeChild(sat,"Saturation");
if (saturation != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(saturation);
p=(const char *) content;
GetNextToken(p,&p,MagickPathExtent,token);
color_correction.saturation=StringToDouble(token,(char **) NULL);
}
}
ccc=DestroyXMLTree(ccc);
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" Color Correction Collection:");
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.red.slope: %g",color_correction.red.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.red.offset: %g",color_correction.red.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.red.power: %g",color_correction.red.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.green.slope: %g",color_correction.green.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.green.offset: %g",color_correction.green.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.green.power: %g",color_correction.green.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.blue.slope: %g",color_correction.blue.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.blue.offset: %g",color_correction.blue.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.blue.power: %g",color_correction.blue.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.saturation: %g",color_correction.saturation);
}
cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
if (cdl_map == (PixelInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
for (i=0; i <= (ssize_t) MaxMap; i++)
{
cdl_map[i].red=(double) ScaleMapToQuantum((double)
(MaxMap*(pow(color_correction.red.slope*i/MaxMap+
color_correction.red.offset,color_correction.red.power))));
cdl_map[i].green=(double) ScaleMapToQuantum((double)
(MaxMap*(pow(color_correction.green.slope*i/MaxMap+
color_correction.green.offset,color_correction.green.power))));
cdl_map[i].blue=(double) ScaleMapToQuantum((double)
(MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
color_correction.blue.offset,color_correction.blue.power))));
}
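  /*
    Each cdl_map entry holds the ASC CDL transfer function
    out=(slope*in+offset)^power evaluated at in=i/MaxMap; saturation is
    applied below by mixing the corrected value with the Rec. 709 luma.
  */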
if (image->storage_class == PseudoClass)
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Apply transfer function to colormap.
*/
double
luma;
luma=0.21267f*image->colormap[i].red+0.71526*image->colormap[i].green+
0.07217f*image->colormap[i].blue;
      image->colormap[i].red=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma);
      image->colormap[i].green=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-
        luma);
      image->colormap[i].blue=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-luma);
}
/*
Apply transfer function to image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
luma;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
luma=0.21267f*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+
0.07217f*GetPixelBlue(image,q);
SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q);
SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q);
SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastImage() enhances the intensity differences between the lighter and
% darker elements of the image. Set sharpen to MagickTrue to increase the
% image contrast; otherwise the contrast is reduced.
%
% The format of the ContrastImage method is:
%
% MagickBooleanType ContrastImage(Image *image,
% const MagickBooleanType sharpen,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void Contrast(const int sign,double *red,double *green,double *blue)
{
double
brightness,
hue,
saturation;
/*
    Enhance contrast: dark colors become darker, light colors become lighter.
*/
assert(red != (double *) NULL);
assert(green != (double *) NULL);
assert(blue != (double *) NULL);
hue=0.0;
saturation=0.0;
brightness=0.0;
ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)-
brightness);
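  /*
    For example, with sign=+1 a brightness of 0.25 moves to about 0.198 and
    0.75 moves to about 0.802: values are pushed away from mid-gray,
    increasing contrast; with sign=-1 they are pulled toward 0.5 instead.
  */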
if (brightness > 1.0)
brightness=1.0;
else
if (brightness < 0.0)
brightness=0.0;
ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
MagickExport MagickBooleanType ContrastImage(Image *image,
const MagickBooleanType sharpen,ExceptionInfo *exception)
{
#define ContrastImageTag "Contrast/Image"
CacheView
*image_view;
int
sign;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse)
return(MagickTrue);
#endif
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
sign=sharpen != MagickFalse ? 1 : -1;
if (image->storage_class == PseudoClass)
{
/*
Contrast enhance colormap.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
double
blue,
green,
red;
red=(double) image->colormap[i].red;
green=(double) image->colormap[i].green;
blue=(double) image->colormap[i].blue;
Contrast(sign,&red,&green,&blue);
image->colormap[i].red=(MagickRealType) red;
image->colormap[i].green=(MagickRealType) green;
image->colormap[i].blue=(MagickRealType) blue;
}
}
/*
Contrast enhance image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
blue,
green,
red;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
red=(double) GetPixelRed(image,q);
green=(double) GetPixelGreen(image,q);
blue=(double) GetPixelBlue(image,q);
Contrast(sign,&red,&green,&blue);
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ContrastImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastStretchImage() is a simple image enhancement technique that attempts
% to improve the contrast in an image by 'stretching' the range of intensity
% values it contains to span a desired range of values. It differs from the
% more sophisticated histogram equalization in that it can only apply a
% linear scaling function to the image pixel values. As a result the
% 'enhancement' is less harsh.
%
% The format of the ContrastStretchImage method is:
%
%      MagickBooleanType ContrastStretchImage(Image *image,
%        const double black_point,const double white_point,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o black_point: the black point, expressed as a cumulative pixel count
%      in the range 0 to number-of-pixels (typically derived from a
%      percentage such as 1% or 10x90%).
%
%    o white_point: the white point, expressed the same way; pixel values
%      above the level where the cumulative count reaches this value are
%      saturated to white.
%
% o exception: return any errors or warnings in this structure.
%
*/
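/*
  Illustrative usage (not part of this file): clip 2% of the darkest and 2%
  of the brightest pixels and stretch the rest to the full quantum range.
  The white point is a cumulative pixel count measured from black, so a 2%
  clip at the top becomes 98% of the total pixel count.

    double
      pixels;

    pixels=(double) image->columns*image->rows;
    (void) ContrastStretchImage(image,0.02*pixels,0.98*pixels,exception);
*/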
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
const double black_point,const double white_point,ExceptionInfo *exception)
{
#define MaxRange(color) ((double) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag "ContrastStretch/Image"
CacheView
*image_view;
double
*black,
*histogram,
*stretch_map,
*white;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
/*
Allocate histogram and stretch map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageGray(image,exception) != MagickFalse)
(void) SetImageColorspace(image,GRAYColorspace,exception);
black=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*black));
white=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*white));
histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
sizeof(*histogram));
stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
sizeof(*stretch_map));
if ((black == (double *) NULL) || (white == (double *) NULL) ||
(histogram == (double *) NULL) || (stretch_map == (double *) NULL))
{
if (stretch_map != (double *) NULL)
stretch_map=(double *) RelinquishMagickMemory(stretch_map);
if (histogram != (double *) NULL)
histogram=(double *) RelinquishMagickMemory(histogram);
if (white != (double *) NULL)
white=(double *) RelinquishMagickMemory(white);
if (black != (double *) NULL)
black=(double *) RelinquishMagickMemory(black);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
/*
Form histogram.
*/
status=MagickTrue;
(void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
sizeof(*histogram));
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
pixel;
pixel=GetPixelIntensity(image,p);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
if (image->channel_mask != DefaultChannels)
pixel=(double) p[i];
histogram[GetPixelChannels(image)*ScaleQuantumToMap(
ClampToQuantum(pixel))+i]++;
}
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
/*
Find the histogram boundaries by locating the black/white levels.
*/
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
intensity;
register ssize_t
j;
black[i]=0.0;
white[i]=MaxRange(QuantumRange);
intensity=0.0;
for (j=0; j <= (ssize_t) MaxMap; j++)
{
intensity+=histogram[GetPixelChannels(image)*j+i];
if (intensity > black_point)
break;
}
black[i]=(double) j;
intensity=0.0;
for (j=(ssize_t) MaxMap; j != 0; j--)
{
intensity+=histogram[GetPixelChannels(image)*j+i];
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white[i]=(double) j;
}
histogram=(double *) RelinquishMagickMemory(histogram);
/*
Stretch the histogram to create the stretched image mapping.
*/
(void) memset(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)*
sizeof(*stretch_map));
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
register ssize_t
j;
for (j=0; j <= (ssize_t) MaxMap; j++)
{
double
gamma;
gamma=PerceptibleReciprocal(white[i]-black[i]);
if (j < (ssize_t) black[i])
stretch_map[GetPixelChannels(image)*j+i]=0.0;
else
if (j > (ssize_t) white[i])
stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange;
else
if (black[i] != white[i])
stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum(
(double) (MaxMap*gamma*(j-black[i])));
}
}
if (image->storage_class == PseudoClass)
{
register ssize_t
j;
/*
Stretch-contrast colormap.
*/
for (j=0; j < (ssize_t) image->colors; j++)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
i=GetPixelChannelOffset(image,RedPixelChannel);
image->colormap[j].red=stretch_map[GetPixelChannels(image)*
ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i];
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
i=GetPixelChannelOffset(image,GreenPixelChannel);
image->colormap[j].green=stretch_map[GetPixelChannels(image)*
ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i];
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
i=GetPixelChannelOffset(image,BluePixelChannel);
image->colormap[j].blue=stretch_map[GetPixelChannels(image)*
ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i];
}
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
{
i=GetPixelChannelOffset(image,AlphaPixelChannel);
image->colormap[j].alpha=stretch_map[GetPixelChannels(image)*
ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i];
}
}
}
/*
Stretch-contrast image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
{
PixelChannel channel = GetPixelChannelChannel(image,j);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (black[j] == white[j])
continue;
q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)*
ScaleQuantumToMap(q[j])+j]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ContrastStretchImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
stretch_map=(double *) RelinquishMagickMemory(stretch_map);
white=(double *) RelinquishMagickMemory(white);
black=(double *) RelinquishMagickMemory(black);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n h a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EnhanceImage() applies a digital filter that improves the quality of a
% noisy image.
%
% The format of the EnhanceImage method is:
%
% Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
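/*
  Illustrative usage (not part of this file): EnhanceImage() returns a new
  image rather than modifying its argument, so the caller owns the result
  and must destroy it when finished.

    Image
      *denoised;

    denoised=EnhanceImage(image,exception);
    if (denoised != (Image *) NULL)
      denoised=DestroyImage(denoised);
*/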
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
#define EnhanceImageTag "Enhance/Image"
#define EnhancePixel(weight) \
mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \
distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \
distance_squared=(4.0+mean)*distance*distance; \
mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \
distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \
distance_squared+=(7.0-mean)*distance*distance; \
mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \
distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \
distance_squared+=(5.0-mean)*distance*distance; \
mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \
distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \
distance_squared+=(5.0-mean)*distance*distance; \
mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \
distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \
distance_squared+=(5.0-mean)*distance*distance; \
if (distance_squared < 0.069) \
{ \
aggregate.red+=(weight)*GetPixelRed(image,r); \
aggregate.green+=(weight)*GetPixelGreen(image,r); \
aggregate.blue+=(weight)*GetPixelBlue(image,r); \
aggregate.black+=(weight)*GetPixelBlack(image,r); \
aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \
total_weight+=(weight); \
} \
r+=GetPixelChannels(image);
CacheView
*enhance_view,
*image_view;
Image
*enhance_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Initialize enhanced image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
  enhance_image=CloneImage(image,0,0,MagickTrue,exception);
if (enhance_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse)
{
enhance_image=DestroyImage(enhance_image);
return((Image *) NULL);
}
/*
Enhance image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,enhance_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
center;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2);
GetPixelInfo(image,&pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
distance,
distance_squared,
mean,
total_weight;
PixelInfo
aggregate;
register const Quantum
*magick_restrict r;
GetPixelInfo(image,&aggregate);
total_weight=0.0;
GetPixelInfoPixel(image,p+center,&pixel);
r=p;
EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
EnhancePixel(8.0); EnhancePixel(5.0);
r=p+GetPixelChannels(image)*(image->columns+4);
EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
EnhancePixel(20.0); EnhancePixel(8.0);
r=p+2*GetPixelChannels(image)*(image->columns+4);
EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0);
EnhancePixel(40.0); EnhancePixel(10.0);
r=p+3*GetPixelChannels(image)*(image->columns+4);
EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
EnhancePixel(20.0); EnhancePixel(8.0);
r=p+4*GetPixelChannels(image)*(image->columns+4);
EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
EnhancePixel(8.0); EnhancePixel(5.0);
if (total_weight > MagickEpsilon)
{
pixel.red=((aggregate.red+total_weight/2.0)/total_weight);
pixel.green=((aggregate.green+total_weight/2.0)/total_weight);
pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight);
pixel.black=((aggregate.black+total_weight/2.0)/total_weight);
pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight);
}
SetPixelViaPixelInfo(image,&pixel,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(enhance_image);
}
if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,EnhanceImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
enhance_view=DestroyCacheView(enhance_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
enhance_image=DestroyImage(enhance_image);
return(enhance_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E q u a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EqualizeImage() applies a histogram equalization to the image.
%
% The format of the EqualizeImage method is:
%
% MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
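/*
  Illustrative usage (not part of this file): equalization takes no tuning
  parameters; the mapping is derived entirely from the image histogram.

    (void) EqualizeImage(image,exception);
*/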
MagickExport MagickBooleanType EqualizeImage(Image *image,
ExceptionInfo *exception)
{
#define EqualizeImageTag "Equalize/Image"
CacheView
*image_view;
double
black[CompositePixelChannel+1],
*equalize_map,
*histogram,
*map,
white[CompositePixelChannel+1];
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
/*
Allocate and initialize histogram arrays.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
if (AccelerateEqualizeImage(image,exception) != MagickFalse)
return(MagickTrue);
#endif
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
sizeof(*equalize_map));
histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
sizeof(*histogram));
map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*sizeof(*map));
if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) ||
(map == (double *) NULL))
{
if (map != (double *) NULL)
map=(double *) RelinquishMagickMemory(map);
if (histogram != (double *) NULL)
histogram=(double *) RelinquishMagickMemory(histogram);
if (equalize_map != (double *) NULL)
equalize_map=(double *) RelinquishMagickMemory(equalize_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
/*
Form histogram.
*/
status=MagickTrue;
(void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
sizeof(*histogram));
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
intensity;
intensity=(double) p[i];
if ((image->channel_mask & SyncChannels) != 0)
intensity=GetPixelIntensity(image,p);
histogram[GetPixelChannels(image)*ScaleQuantumToMap(
ClampToQuantum(intensity))+i]++;
}
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
/*
Integrate the histogram to get the equalization map.
*/
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
intensity;
register ssize_t
j;
intensity=0.0;
for (j=0; j <= (ssize_t) MaxMap; j++)
{
intensity+=histogram[GetPixelChannels(image)*j+i];
map[GetPixelChannels(image)*j+i]=intensity;
}
}
(void) memset(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)*
sizeof(*equalize_map));
(void) memset(black,0,sizeof(*black));
(void) memset(white,0,sizeof(*white));
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
register ssize_t
j;
black[i]=map[i];
white[i]=map[GetPixelChannels(image)*MaxMap+i];
if (black[i] != white[i])
for (j=0; j <= (ssize_t) MaxMap; j++)
equalize_map[GetPixelChannels(image)*j+i]=(double)
ScaleMapToQuantum((double) ((MaxMap*(map[
GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i])));
}
histogram=(double *) RelinquishMagickMemory(histogram);
map=(double *) RelinquishMagickMemory(map);
if (image->storage_class == PseudoClass)
{
register ssize_t
j;
/*
Equalize colormap.
*/
for (j=0; j < (ssize_t) image->colors; j++)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
PixelChannel channel = GetPixelChannelChannel(image,
RedPixelChannel);
if (black[channel] != white[channel])
image->colormap[j].red=equalize_map[GetPixelChannels(image)*
ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+
channel];
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
PixelChannel channel = GetPixelChannelChannel(image,
GreenPixelChannel);
if (black[channel] != white[channel])
image->colormap[j].green=equalize_map[GetPixelChannels(image)*
ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+
channel];
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
PixelChannel channel = GetPixelChannelChannel(image,
BluePixelChannel);
if (black[channel] != white[channel])
image->colormap[j].blue=equalize_map[GetPixelChannels(image)*
ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+
channel];
}
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
{
PixelChannel channel = GetPixelChannelChannel(image,
AlphaPixelChannel);
if (black[channel] != white[channel])
image->colormap[j].alpha=equalize_map[GetPixelChannels(image)*
ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+
channel];
}
}
}
/*
Equalize image.
*/
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
{
PixelChannel channel = GetPixelChannelChannel(image,j);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j]))
continue;
q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)*
ScaleQuantumToMap(q[j])+j]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,EqualizeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
equalize_map=(double *) RelinquishMagickMemory(equalize_map);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GammaImage() gamma-corrects a particular image channel. The same
% image viewed on different devices will have perceptual differences in the
% way the image's intensities are represented on the screen. Specify
% individual gamma levels for the red, green, and blue channels, or adjust
% all three with the gamma parameter. Values typically range from 0.8 to 2.3.
%
% You can also reduce the influence of a particular channel with a gamma
% value of 0.
%
% The format of the GammaImage method is:
%
% MagickBooleanType GammaImage(Image *image,const double gamma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o gamma: the image gamma; values above 1.0 brighten the image and
%      values below 1.0 darken it.
%
%    o exception: return any errors or warnings in this structure.
%
*/
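/*
  Illustrative usage (not part of this file): brighten the image with a
  gamma of 1.8, then apply an additional correction to the red channel only
  by restricting the channel mask, as LevelImageColors() does below.

    ChannelType
      channel_mask;

    (void) GammaImage(image,1.8,exception);
    channel_mask=SetImageChannelMask(image,RedChannel);
    (void) GammaImage(image,1.2,exception);
    (void) SetImageChannelMask(image,channel_mask);
*/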
static inline double gamma_pow(const double value,const double gamma)
{
return(value < 0.0 ? value : pow(value,gamma));
}
MagickExport MagickBooleanType GammaImage(Image *image,const double gamma,
ExceptionInfo *exception)
{
#define GammaCorrectImageTag "GammaCorrect/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
Quantum
*gamma_map;
register ssize_t
i;
ssize_t
y;
/*
Allocate and initialize gamma maps.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (gamma == 1.0)
return(MagickTrue);
gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
if (gamma_map == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
(void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
if (gamma != 0.0)
for (i=0; i <= (ssize_t) MaxMap; i++)
gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/
MaxMap,1.0/gamma)));
if (image->storage_class == PseudoClass)
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Gamma-correct colormap.
*/
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap(
ClampToQuantum(image->colormap[i].red))];
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap(
ClampToQuantum(image->colormap[i].green))];
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap(
ClampToQuantum(image->colormap[i].blue))];
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap(
ClampToQuantum(image->colormap[i].alpha))];
}
/*
Gamma-correct image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
{
PixelChannel channel = GetPixelChannelChannel(image,j);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[j]=gamma_map[ScaleQuantumToMap(ClampToQuantum((MagickRealType)
q[j]))];
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
        proceed=SetImageProgress(image,GammaCorrectImageTag,progress,
          image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
if (image->gamma != 0.0)
image->gamma*=gamma;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GrayscaleImage() converts the image to grayscale.
%
% The format of the GrayscaleImage method is:
%
% MagickBooleanType GrayscaleImage(Image *image,
% const PixelIntensityMethod method ,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the pixel intensity method.
%
% o exception: return any errors or warnings in this structure.
%
*/
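/*
  Illustrative usage (not part of this file): convert in place to grayscale
  with Rec. 709 luma weights.  The luminance variants instead operate on
  linear RGB and leave the image in a linear gray colorspace.

    (void) GrayscaleImage(image,Rec709LumaPixelIntensityMethod,exception);
*/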
MagickExport MagickBooleanType GrayscaleImage(Image *image,
const PixelIntensityMethod method,ExceptionInfo *exception)
{
#define GrayscaleImageTag "Grayscale/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse)
{
image->intensity=method;
image->type=GrayscaleType;
if ((method == Rec601LuminancePixelIntensityMethod) ||
(method == Rec709LuminancePixelIntensityMethod))
return(SetImageColorspace(image,LinearGRAYColorspace,exception));
return(SetImageColorspace(image,GRAYColorspace,exception));
}
#endif
/*
Grayscale image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
blue,
green,
red,
intensity;
red=(MagickRealType) GetPixelRed(image,q);
green=(MagickRealType) GetPixelGreen(image,q);
blue=(MagickRealType) GetPixelBlue(image,q);
intensity=0.0;
switch (method)
{
case AveragePixelIntensityMethod:
{
intensity=(red+green+blue)/3.0;
break;
}
case BrightnessPixelIntensityMethod:
{
intensity=MagickMax(MagickMax(red,green),blue);
break;
}
case LightnessPixelIntensityMethod:
{
intensity=(MagickMin(MagickMin(red,green),blue)+
MagickMax(MagickMax(red,green),blue))/2.0;
break;
}
case MSPixelIntensityMethod:
{
intensity=(MagickRealType) (((double) red*red+green*green+
blue*blue)/3.0);
break;
}
case Rec601LumaPixelIntensityMethod:
{
if (image->colorspace == RGBColorspace)
{
red=EncodePixelGamma(red);
green=EncodePixelGamma(green);
blue=EncodePixelGamma(blue);
}
intensity=0.298839*red+0.586811*green+0.114350*blue;
break;
}
case Rec601LuminancePixelIntensityMethod:
{
if (image->colorspace == sRGBColorspace)
{
red=DecodePixelGamma(red);
green=DecodePixelGamma(green);
blue=DecodePixelGamma(blue);
}
intensity=0.298839*red+0.586811*green+0.114350*blue;
break;
}
case Rec709LumaPixelIntensityMethod:
default:
{
if (image->colorspace == RGBColorspace)
{
red=EncodePixelGamma(red);
green=EncodePixelGamma(green);
blue=EncodePixelGamma(blue);
}
intensity=0.212656*red+0.715158*green+0.072186*blue;
break;
}
case Rec709LuminancePixelIntensityMethod:
{
if (image->colorspace == sRGBColorspace)
{
red=DecodePixelGamma(red);
green=DecodePixelGamma(green);
blue=DecodePixelGamma(blue);
}
intensity=0.212656*red+0.715158*green+0.072186*blue;
break;
}
case RMSPixelIntensityMethod:
{
intensity=(MagickRealType) (sqrt((double) red*red+green*green+
blue*blue)/sqrt(3.0));
break;
}
}
SetPixelGray(image,ClampToQuantum(intensity),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,GrayscaleImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
image->intensity=method;
image->type=GrayscaleType;
if ((method == Rec601LuminancePixelIntensityMethod) ||
(method == Rec709LuminancePixelIntensityMethod))
return(SetImageColorspace(image,LinearGRAYColorspace,exception));
return(SetImageColorspace(image,GRAYColorspace,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H a l d C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HaldClutImage() applies a Hald color lookup table to the image. A Hald
% color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
% Create it with the HALD coder. You can apply any color transformation to
% the Hald image and then use this method to apply the transform to the
% image.
%
% The format of the HaldClutImage method is:
%
% MagickBooleanType HaldClutImage(Image *image,Image *hald_image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o hald_image: the color lookup table image for replacement color values.
%
% o exception: return any errors or warnings in this structure.
%
*/
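/*
  Illustrative usage (a sketch, not part of this file): read a Hald CLUT
  image, for example one generated by the HALD coder as "hald:8", and apply
  its color transform to the target image.  The file name is hypothetical.

    Image
      *hald;

    ImageInfo
      *hald_info;

    hald_info=AcquireImageInfo();
    (void) CopyMagickString(hald_info->filename,"hald.png",MagickPathExtent);
    hald=ReadImage(hald_info,exception);
    if (hald != (Image *) NULL)
      {
        (void) HaldClutImage(image,hald,exception);
        hald=DestroyImage(hald);
      }
    hald_info=DestroyImageInfo(hald_info);
*/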
MagickExport MagickBooleanType HaldClutImage(Image *image,
const Image *hald_image,ExceptionInfo *exception)
{
#define HaldClutImageTag "Clut/Image"
typedef struct _HaldInfo
{
double
x,
y,
z;
} HaldInfo;
CacheView
*hald_view,
*image_view;
double
width;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
zero;
size_t
cube_size,
length,
level;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(hald_image != (Image *) NULL);
assert(hald_image->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
/*
Hald clut image.
*/
status=MagickTrue;
progress=0;
length=(size_t) MagickMin((MagickRealType) hald_image->columns,
(MagickRealType) hald_image->rows);
for (level=2; (level*level*level) < length; level++) ;
level*=level;
cube_size=level*level;
width=(double) hald_image->columns;
GetPixelInfo(hald_image,&zero);
hald_view=AcquireVirtualCacheView(hald_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
offset;
HaldInfo
point;
PixelInfo
pixel,
pixel1,
pixel2,
pixel3,
pixel4;
point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q);
point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q);
point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q);
offset=point.x+level*floor(point.y)+cube_size*floor(point.z);
point.x-=floor(point.x);
point.y-=floor(point.y);
point.z-=floor(point.z);
pixel1=zero;
status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
fmod(offset,width),floor(offset/width),&pixel1,exception);
if (status == MagickFalse)
break;
pixel2=zero;
status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
if (status == MagickFalse)
break;
pixel3=zero;
CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
point.y,&pixel3);
offset+=cube_size;
status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
fmod(offset,width),floor(offset/width),&pixel1,exception);
if (status == MagickFalse)
break;
status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
if (status == MagickFalse)
break;
pixel4=zero;
CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
point.y,&pixel4);
pixel=zero;
CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha,
point.z,&pixel);
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
SetPixelRed(image,ClampToQuantum(pixel.red),q);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
SetPixelGreen(image,ClampToQuantum(pixel.green),q);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelBlack(image,ClampToQuantum(pixel.black),q);
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,HaldClutImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
hald_view=DestroyCacheView(hald_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() adjusts the levels of a particular image channel by
% scaling the colors falling between specified white and black points to
% the full available quantum range.
%
% The parameters provided represent the black, and white points. The black
% point specifies the darkest color in the image. Colors darker than the
% black point are set to zero. White point specifies the lightest color in
% the image. Colors brighter than the white point are set to the maximum
% quantum value.
%
% If a '!' flag is given, map black and white colors to the given levels
% rather than mapping those levels to black and white. See
% LevelizeImage() below.
%
% Gamma specifies a gamma correction to apply to the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o black_point: The level which is to be mapped to zero (black).
%
%    o white_point: The level which is to be mapped to QuantumRange (white).
%
%    o gamma: adjust gamma by this factor before mapping values.
%
% o exception: return any errors or warnings in this structure.
%
*/
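/*
  Illustrative usage (not part of this file): map input values at 10% of
  QuantumRange to black and values at 90% to white, stretching everything
  in between, with a mild mid-tone gamma of 1.2.

    (void) LevelImage(image,0.10*QuantumRange,0.90*QuantumRange,1.2,
      exception);
*/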
static inline double LevelPixel(const double black_point,
const double white_point,const double gamma,const double pixel)
{
double
level_pixel,
scale;
scale=PerceptibleReciprocal(white_point-black_point);
level_pixel=QuantumRange*gamma_pow(scale*((double) pixel-black_point),
1.0/gamma);
return(level_pixel);
}
MagickExport MagickBooleanType LevelImage(Image *image,const double black_point,
const double white_point,const double gamma,ExceptionInfo *exception)
{
#define LevelImageTag "Level/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
/*
Allocate and initialize levels map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Level colormap.
*/
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point,
white_point,gamma,image->colormap[i].red));
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point,
white_point,gamma,image->colormap[i].green));
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point,
white_point,gamma,image->colormap[i].blue));
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(black_point,
white_point,gamma,image->colormap[i].alpha));
}
/*
Level image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
{
PixelChannel channel = GetPixelChannelChannel(image,j);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma,
(double) q[j]));
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,LevelImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
(void) ClampImage(image,exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImage() applies the reversed LevelImage() operation to just
% the specific channels specified. It compresses the full range of color
% values, so that they lie between the given black and white points. Gamma is
% applied before the values are mapped.
%
%  LevelizeImage() can be invoked by using the +level command-line option,
%  or by using a '!' flag on a -level or LevelImage() geometry string.
%
%  It can be used to de-contrast a grayscale image to the exact levels
%  specified.  Alternatively, by using specific levels for each channel of
%  an image, you can convert a grayscale image to any linear color gradient
%  according to those levels.
%
% The format of the LevelizeImage method is:
%
% MagickBooleanType LevelizeImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
% o exception: return any errors or warnings in this structure.
%
*/
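/*
  Illustrative usage (not part of this file): compress the full tonal range
  so that black maps to 10% and white to 90% of QuantumRange, reducing
  contrast; this is the reverse of the LevelImage() call sketched above.

    (void) LevelizeImage(image,0.10*QuantumRange,0.90*QuantumRange,1.0,
      exception);
*/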
MagickExport MagickBooleanType LevelizeImage(Image *image,
const double black_point,const double white_point,const double gamma,
ExceptionInfo *exception)
{
#define LevelizeImageTag "Levelize/Image"
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
(QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
/*
Allocate and initialize levels map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Level colormap.
*/
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=(double) LevelizeValue(
image->colormap[i].green);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=(double) LevelizeValue(image->colormap[i].blue);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].alpha=(double) LevelizeValue(
image->colormap[i].alpha);
}
/*
Level image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
{
PixelChannel channel = GetPixelChannelChannel(image,j);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[j]=LevelizeValue(q[j]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,LevelizeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImageColors() maps the given colors to "black" and "white" values,
%  linearly spreading out the colors, and level values on a channel by
%  channel basis, as per LevelImage().  The given colors allow you to
%  specify different level ranges for each of the color channels separately.
%
%  If the boolean 'invert' is set true, the image values are modified in the
%  reverse direction.  That is, any existing "black" and "white" colors in
%  the image become the color values given, with all other values compressed
%  appropriately.  This effectively maps a grayscale gradient into the given
%  color gradient.
%
% The format of the LevelImageColors method is:
%
% MagickBooleanType LevelImageColors(Image *image,
% const PixelInfo *black_color,const PixelInfo *white_color,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o black_color: The color to map black to/from.
%
%    o white_color: The color to map white to/from.
%
%    o invert: if true, map to the given colors (levelize) rather than from
%      them (level).
%
% o exception: return any errors or warnings in this structure.
%
*/
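/*
  Illustrative usage (a sketch, not part of this file): map a grayscale
  gradient onto a navy-to-gold gradient by levelizing each channel against
  the given colors; QueryColorCompliance() resolves the color names.

    PixelInfo
      black_color,
      white_color;

    (void) QueryColorCompliance("navy",AllCompliance,&black_color,exception);
    (void) QueryColorCompliance("gold",AllCompliance,&white_color,exception);
    (void) LevelImageColors(image,&black_color,&white_color,MagickTrue,
      exception);
*/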
MagickExport MagickBooleanType LevelImageColors(Image *image,
const PixelInfo *black_color,const PixelInfo *white_color,
const MagickBooleanType invert,ExceptionInfo *exception)
{
ChannelType
channel_mask;
MagickStatusType
status;
/*
Allocate and initialize levels map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
(IsGrayColorspace(white_color->colorspace) == MagickFalse)))
(void) SetImageColorspace(image,sRGBColorspace,exception);
status=MagickTrue;
if (invert == MagickFalse)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,RedChannel);
status&=LevelImage(image,black_color->red,white_color->red,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,GreenChannel);
status&=LevelImage(image,black_color->green,white_color->green,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,BlueChannel);
status&=LevelImage(image,black_color->blue,white_color->blue,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
{
channel_mask=SetImageChannelMask(image,BlackChannel);
status&=LevelImage(image,black_color->black,white_color->black,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
{
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=LevelImage(image,black_color->alpha,white_color->alpha,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
}
else
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,RedChannel);
status&=LevelizeImage(image,black_color->red,white_color->red,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,GreenChannel);
status&=LevelizeImage(image,black_color->green,white_color->green,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,BlueChannel);
status&=LevelizeImage(image,black_color->blue,white_color->blue,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
{
channel_mask=SetImageChannelMask(image,BlackChannel);
status&=LevelizeImage(image,black_color->black,white_color->black,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
{
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=LevelizeImage(image,black_color->alpha,white_color->alpha,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
}
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i n e a r S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LinearStretchImage() saturates any pixels below the black point to black
%  and any pixels above the white point to white, then levels the remaining
%  pixels.
%
% The format of the LinearStretchImage method is:
%
% MagickBooleanType LinearStretchImage(Image *image,
% const double black_point,const double white_point,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o black_point: the number of darkest pixels to saturate at black,
%      counted from the bottom of the intensity histogram.
%
%    o white_point: the number of brightest pixels to saturate at white,
%      counted from the top of the intensity histogram.
%
% o exception: return any errors or warnings in this structure.
%
*/
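/*
  Illustrative usage (not part of this file): both points are pixel counts
  measured from their respective ends of the histogram, so this saturates
  roughly 1% of the darkest and 1% of the brightest pixels before leveling.

    double
      pixels;

    pixels=(double) image->columns*image->rows;
    (void) LinearStretchImage(image,0.01*pixels,0.01*pixels,exception);
*/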
MagickExport MagickBooleanType LinearStretchImage(Image *image,
const double black_point,const double white_point,ExceptionInfo *exception)
{
#define LinearStretchImageTag "LinearStretch/Image"
CacheView
*image_view;
double
*histogram,
intensity;
MagickBooleanType
status;
ssize_t
black,
white,
y;
/*
Allocate histogram and linear map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram));
if (histogram == (double *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Form histogram.
*/
(void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
intensity=GetPixelIntensity(image,p);
histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++;
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
/*
Find the histogram boundaries by locating the black and white point levels.
*/
intensity=0.0;
for (black=0; black < (ssize_t) MaxMap; black++)
{
intensity+=histogram[black];
if (intensity >= black_point)
break;
}
intensity=0.0;
for (white=(ssize_t) MaxMap; white != 0; white--)
{
intensity+=histogram[white];
if (intensity >= white_point)
break;
}
histogram=(double *) RelinquishMagickMemory(histogram);
status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) black),
(double) ScaleMapToQuantum((MagickRealType) white),1.0,exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d u l a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModulateImage() lets you control the brightness, saturation, and hue
% of an image. Modulate represents the brightness, saturation, and hue
% as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the
% modulation is lightness, saturation, and hue. For HWB, use blackness,
%  whiteness, and hue.  And for HCL, use chroma, luma, and hue.
%
% The format of the ModulateImage method is:
%
% MagickBooleanType ModulateImage(Image *image,const char *modulate,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulate: Define the percent change in brightness, saturation, and hue.
%
% o exception: return any errors or warnings in this structure.
%
*/
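/*
  Illustrative usage (not part of this file): keep brightness unchanged,
  boost saturation by 50%, and rotate the hue slightly; a modulate hue of
  200 corresponds to a 180-degree rotation, so 110 is about 18 degrees.

    (void) ModulateImage(image,"100,150,110",exception);
*/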
static inline void ModulateHCL(const double percent_hue,
const double percent_chroma,const double percent_luma,double *red,
double *green,double *blue)
{
double
hue,
luma,
chroma;
/*
Increase or decrease color luma, chroma, or hue.
*/
ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma);
hue+=fmod((percent_hue-100.0),200.0)/200.0;
chroma*=0.01*percent_chroma;
luma*=0.01*percent_luma;
ConvertHCLToRGB(hue,chroma,luma,red,green,blue);
}
static inline void ModulateHCLp(const double percent_hue,
const double percent_chroma,const double percent_luma,double *red,
double *green,double *blue)
{
double
hue,
luma,
chroma;
/*
Increase or decrease color luma, chroma, or hue.
*/
ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma);
hue+=fmod((percent_hue-100.0),200.0)/200.0;
chroma*=0.01*percent_chroma;
luma*=0.01*percent_luma;
ConvertHCLpToRGB(hue,chroma,luma,red,green,blue);
}
static inline void ModulateHSB(const double percent_hue,
const double percent_saturation,const double percent_brightness,double *red,
double *green,double *blue)
{
double
brightness,
hue,
saturation;
/*
Increase or decrease color brightness, saturation, or hue.
*/
ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
hue+=fmod((percent_hue-100.0),200.0)/200.0;
saturation*=0.01*percent_saturation;
brightness*=0.01*percent_brightness;
ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
static inline void ModulateHSI(const double percent_hue,
const double percent_saturation,const double percent_intensity,double *red,
double *green,double *blue)
{
double
intensity,
hue,
saturation;
/*
Increase or decrease color intensity, saturation, or hue.
*/
ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity);
hue+=fmod((percent_hue-100.0),200.0)/200.0;
saturation*=0.01*percent_saturation;
intensity*=0.01*percent_intensity;
ConvertHSIToRGB(hue,saturation,intensity,red,green,blue);
}
static inline void ModulateHSL(const double percent_hue,
const double percent_saturation,const double percent_lightness,double *red,
double *green,double *blue)
{
double
hue,
lightness,
saturation;
/*
Increase or decrease color lightness, saturation, or hue.
*/
ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness);
hue+=fmod((percent_hue-100.0),200.0)/200.0;
saturation*=0.01*percent_saturation;
lightness*=0.01*percent_lightness;
ConvertHSLToRGB(hue,saturation,lightness,red,green,blue);
}
static inline void ModulateHSV(const double percent_hue,
const double percent_saturation,const double percent_value,double *red,
double *green,double *blue)
{
double
hue,
saturation,
value;
/*
Increase or decrease color value, saturation, or hue.
*/
ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value);
hue+=fmod((percent_hue-100.0),200.0)/200.0;
saturation*=0.01*percent_saturation;
value*=0.01*percent_value;
ConvertHSVToRGB(hue,saturation,value,red,green,blue);
}
static inline void ModulateHWB(const double percent_hue,
const double percent_whiteness,const double percent_blackness,double *red,
double *green,double *blue)
{
double
blackness,
hue,
whiteness;
/*
Increase or decrease color blackness, whiteness, or hue.
*/
ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness);
hue+=fmod((percent_hue-100.0),200.0)/200.0;
blackness*=0.01*percent_blackness;
whiteness*=0.01*percent_whiteness;
ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue);
}
static inline void ModulateLCHab(const double percent_luma,
const double percent_chroma,const double percent_hue,double *red,
double *green,double *blue)
{
double
hue,
luma,
chroma;
/*
Increase or decrease color luma, chroma, or hue.
*/
ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue);
luma*=0.01*percent_luma;
chroma*=0.01*percent_chroma;
hue+=fmod((percent_hue-100.0),200.0)/200.0;
ConvertLCHabToRGB(luma,chroma,hue,red,green,blue);
}
static inline void ModulateLCHuv(const double percent_luma,
const double percent_chroma,const double percent_hue,double *red,
double *green,double *blue)
{
double
hue,
luma,
chroma;
/*
Increase or decrease color luma, chroma, or hue.
*/
ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue);
luma*=0.01*percent_luma;
chroma*=0.01*percent_chroma;
hue+=fmod((percent_hue-100.0),200.0)/200.0;
ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue);
}
MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate,
ExceptionInfo *exception)
{
#define ModulateImageTag "Modulate/Image"
CacheView
*image_view;
ColorspaceType
colorspace;
const char
*artifact;
double
percent_brightness,
percent_hue,
percent_saturation;
GeometryInfo
geometry_info;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickStatusType
flags;
register ssize_t
i;
ssize_t
y;
/*
Initialize modulate table.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (modulate == (char *) NULL)
return(MagickFalse);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
flags=ParseGeometry(modulate,&geometry_info);
percent_brightness=geometry_info.rho;
percent_saturation=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
percent_saturation=100.0;
percent_hue=geometry_info.xi;
if ((flags & XiValue) == 0)
percent_hue=100.0;
colorspace=UndefinedColorspace;
artifact=GetImageArtifact(image,"modulate:colorspace");
if (artifact != (const char *) NULL)
colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions,
MagickFalse,artifact);
if (image->storage_class == PseudoClass)
for (i=0; i < (ssize_t) image->colors; i++)
{
double
blue,
green,
red;
/*
Modulate image colormap.
*/
red=(double) image->colormap[i].red;
green=(double) image->colormap[i].green;
blue=(double) image->colormap[i].blue;
switch (colorspace)
{
case HCLColorspace:
{
ModulateHCL(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HCLpColorspace:
{
ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSBColorspace:
{
ModulateHSB(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSIColorspace:
{
ModulateHSI(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSLColorspace:
default:
{
ModulateHSL(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSVColorspace:
{
ModulateHSV(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HWBColorspace:
{
ModulateHWB(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
&red,&green,&blue);
break;
}
case LCHuvColorspace:
{
ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
&red,&green,&blue);
break;
}
}
image->colormap[i].red=red;
image->colormap[i].green=green;
image->colormap[i].blue=blue;
}
/*
Modulate image.
*/
#if defined(MAGICKCORE_OPENCL_SUPPORT)
if (AccelerateModulateImage(image,percent_brightness,percent_hue,
percent_saturation,colorspace,exception) != MagickFalse)
return(MagickTrue);
#endif
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red;
red=(double) GetPixelRed(image,q);
green=(double) GetPixelGreen(image,q);
blue=(double) GetPixelBlue(image,q);
switch (colorspace)
{
case HCLColorspace:
{
ModulateHCL(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HCLpColorspace:
{
ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSBColorspace:
{
ModulateHSB(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSLColorspace:
default:
{
ModulateHSL(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSVColorspace:
{
ModulateHSV(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HWBColorspace:
{
ModulateHWB(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case LCHabColorspace:
{
ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
&red,&green,&blue);
break;
}
case LCHColorspace:
case LCHuvColorspace:
{
ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
&red,&green,&blue);
break;
}
}
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ModulateImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e g a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NegateImage() negates the colors in the reference image. The grayscale
% option means that only grayscale values within the image are negated.
%
% The format of the NegateImage method is:
%
% MagickBooleanType NegateImage(Image *image,
% const MagickBooleanType grayscale,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
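/*
  Informal usage sketch (assumes an `image' and `exception' acquired
  elsewhere, e.g. with ReadImage() and AcquireExceptionInfo()):

    (void) NegateImage(image,MagickFalse,exception);

  Pass MagickTrue as the grayscale argument to negate only pixels that are
  already gray; a MagickFalse return value indicates failure, with details
  reported in `exception'.
*/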
MagickExport MagickBooleanType NegateImage(Image *image,
const MagickBooleanType grayscale,ExceptionInfo *exception)
{
#define NegateImageTag "Negate/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Negate colormap.
*/
if( grayscale != MagickFalse )
if ((image->colormap[i].red != image->colormap[i].green) ||
(image->colormap[i].green != image->colormap[i].blue))
continue;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=QuantumRange-image->colormap[i].red;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=QuantumRange-image->colormap[i].green;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
}
/*
Negate image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
if( grayscale != MagickFalse )
{
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j;
        if (IsPixelGray(image,q) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
{
PixelChannel channel = GetPixelChannelChannel(image,j);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[j]=QuantumRange-q[j];
}
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,NegateImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
      return(status);
}
/*
Negate image.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
{
PixelChannel channel = GetPixelChannelChannel(image,j);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[j]=QuantumRange-q[j];
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,NegateImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N o r m a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The NormalizeImage() method enhances the contrast of a color image by
% mapping the darkest pixels to black and the brightest pixels to white.
% With the black and white points computed below, roughly the darkest 0.15
% percent and the brightest 0.05 percent of all pixels are saturated.
%
% The format of the NormalizeImage method is:
%
% MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NormalizeImage(Image *image,
ExceptionInfo *exception)
{
double
black_point,
white_point;
black_point=(double) image->columns*image->rows*0.0015;
white_point=(double) image->columns*image->rows*0.9995;
return(ContrastStretchImage(image,black_point,white_point,exception));
}
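/*
  Worked example (informal): for a 1000x1000 image, NormalizeImage() above
  passes black_point = 1000*1000*0.0015 = 1500 and white_point =
  1000*1000*0.9995 = 999500 to ContrastStretchImage(), i.e. roughly the 1500
  darkest and the 500 brightest pixels are saturated to black and white
  respectively before the remaining intensities are stretched over the full
  quantum range.
*/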
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i g m o i d a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
% sigmoidal contrast algorithm. Increase the contrast of the image using a
% sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is
% typical; 20 is pushing it); mid-point indicates where midtones fall in the
% resultant image (0 is white; 50% is middle-gray; 100% is black). Set
% sharpen to MagickTrue to increase the image contrast otherwise the contrast
% is reduced.
%
% The format of the SigmoidalContrastImage method is:
%
% MagickBooleanType SigmoidalContrastImage(Image *image,
% const MagickBooleanType sharpen,const double contrast,
% const double midpoint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o contrast: strength of the contrast, the larger the number the more
% 'threshold-like' it becomes.
%
% o midpoint: midpoint of the function as a color value 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
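/*
  Informal usage sketch (assumes an `image' and `exception' acquired
  elsewhere): a typical, fairly gentle contrast boost centered on middle
  gray is

    (void) SigmoidalContrastImage(image,MagickTrue,3.0,0.5*QuantumRange,
      exception);

  and the same call with MagickFalse as the sharpen argument applies the
  inverse transfer function, approximately undoing the boost.
*/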
/*
ImageMagick 6 has a version of this function which uses LUTs.
*/
/*
Sigmoidal function Sigmoidal with inflexion point moved to b and "slope
constant" set to a.
The first version, based on the hyperbolic tangent tanh, when combined with
  the scaling step, is an exact arithmetic clone of the sigmoid function
based on the logistic curve. The equivalence is based on the identity
1/(1+exp(-t)) = (1+tanh(t/2))/2
(http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the
scaled sigmoidal derivation is invariant under affine transformations of
the ordinate.
The tanh version is almost certainly more accurate and cheaper. The 0.5
factor in the argument is to clone the legacy ImageMagick behavior. The
reason for making the define depend on atanh even though it only uses tanh
has to do with the construction of the inverse of the scaled sigmoidal.
*/
#if defined(MAGICKCORE_HAVE_ATANH)
#define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) )
#else
#define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) )
#endif
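/*
  Worked equivalence (informal): substituting t=a*(x-b) into the identity
  1/(1+exp(-t)) = (1+tanh(t/2))/2 gives

    1/(1+exp(a*(b-x))) = (1+tanh(0.5*a*(x-b)))/2,

  so the two Sigmoidal() variants above differ only by the affine output map
  s -> (1+s)/2, which cancels in the ScaledSigmoidal() quotient defined
  below.
*/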
/*
Scaled sigmoidal function:
( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) /
( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) )
See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit
of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by
zero. This is fixed below by exiting immediately when contrast is small,
leaving the image (or colormap) unmodified. This appears to be safe because
the series expansion of the logistic sigmoidal function around x=b is
1/2-a*(b-x)/4+...
so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh).
*/
#define ScaledSigmoidal(a,b,x) ( \
(Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \
(Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) )
/*
Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b
  may be 0 or 1, the argument of the inverse hyperbolic tangent (resp. of
  the logit, the inverse of the logistic sigmoidal) may be outside of the
  interval (-1,1) (resp. (0,1)), even
when creating a LUT from in gamut values, hence the branching. In
addition, HDRI may have out of gamut values.
InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal:
It is only a right inverse. This is unavoidable.
*/
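/*
  Derivation sketch (informal), tanh branch: with s(x)=Sigmoidal(a,b,x) and
  y=ScaledSigmoidal(a,b,X)=(s(X)-s(0))/(s(1)-s(0)), solving for X gives

    s(X) = (s(1)-s(0))*y + s(0)   and   X = b + (2/a)*atanh(s(X)),

  since s(x)=tanh(0.5*a*(x-b)).  The function below computes exactly this
  after clamping the atanh argument to (-1,1); the logistic branch solves
  s(X)=1/(1+exp(a*(b-X))) for X, giving X = b - log(1/s(X)-1)/a, with the
  argument clamped to (0,1).
*/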
static inline double InverseScaledSigmoidal(const double a,const double b,
const double x)
{
const double sig0=Sigmoidal(a,b,0.0);
const double sig1=Sigmoidal(a,b,1.0);
const double argument=(sig1-sig0)*x+sig0;
const double clamped=
(
#if defined(MAGICKCORE_HAVE_ATANH)
argument < -1+MagickEpsilon
?
-1+MagickEpsilon
:
( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
);
return(b+(2.0/a)*atanh(clamped));
#else
argument < MagickEpsilon
?
MagickEpsilon
:
( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
);
return(b-log(1.0/clamped-1.0)/a);
#endif
}
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
const MagickBooleanType sharpen,const double contrast,const double midpoint,
ExceptionInfo *exception)
{
#define SigmoidalContrastImageTag "SigmoidalContrast/Image"
#define ScaledSig(x) ( ClampToQuantum(QuantumRange* \
ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )
#define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \
InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Convenience macros.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
Side effect: may clamp values unless contrast<MagickEpsilon, in which
case nothing is done.
*/
if (contrast < MagickEpsilon)
return(MagickTrue);
/*
Sigmoidal-contrast enhance colormap.
*/
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
if( sharpen != MagickFalse )
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=(MagickRealType) ScaledSig(
image->colormap[i].red);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=(MagickRealType) ScaledSig(
image->colormap[i].green);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=(MagickRealType) ScaledSig(
image->colormap[i].blue);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].alpha=(MagickRealType) ScaledSig(
image->colormap[i].alpha);
}
else
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=(MagickRealType) InverseScaledSig(
image->colormap[i].red);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=(MagickRealType) InverseScaledSig(
image->colormap[i].green);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=(MagickRealType) InverseScaledSig(
image->colormap[i].blue);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].alpha=(MagickRealType) InverseScaledSig(
image->colormap[i].alpha);
}
}
/*
Sigmoidal-contrast enhance image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if( sharpen != MagickFalse )
q[i]=ScaledSig(q[i]);
else
q[i]=InverseScaledSig(q[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
|
master_taskloop_misc_messages.c
|
// RUN: %clang_cc1 -fsyntax-only -fopenmp -triple x86_64-unknown-unknown -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -triple x86_64-unknown-unknown -verify %s -Wuninitialized
void xxx(int argc) {
int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp master taskloop
for (int i = 0; i < 10; ++i)
argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp master taskloop'}}
#pragma omp master taskloop
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp master taskloop'}}
#pragma omp master taskloop foo
void test_no_clause() {
int i;
#pragma omp master taskloop
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp master taskloop' must be a for loop}}
#pragma omp master taskloop
++i;
}
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp parallel
#pragma omp master taskloop
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause() {
int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
#pragma omp master taskloop foo bar
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{directive '#pragma omp master taskloop' cannot contain more than one 'nogroup' clause}}
#pragma omp master taskloop nogroup nogroup
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers() {
int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
#pragma omp master taskloop;
for (i = 0; i < 16; ++i)
;
// expected-warning@+3 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
// expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp master taskloop'}}
#pragma omp parallel
#pragma omp master taskloop linear(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
#pragma omp master taskloop private(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
#pragma omp master taskloop, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
void test_collapse() {
int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp master taskloop collapse
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop collapse(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop collapse()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop collapse(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop collapse(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp master taskloop collapse 4)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
#pragma omp master taskloop collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp master taskloop collapse(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp master taskloop collapse(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp master taskloop collapse(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp master taskloop collapse(0)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp master taskloop collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_private() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop private(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop private(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop private(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop private()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop private(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp master taskloop private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp master taskloop private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop lastprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop lastprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop lastprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop lastprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop lastprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp master taskloop lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop firstprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop firstprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop firstprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop firstprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop firstprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp master taskloop firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp master taskloop
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp master taskloop
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp master taskloop
for (__int128 ii = 0; ii < 10; ii++) {
c[ii] = a[ii] + b[ii];
}
}
|
GB_extract_vector_list.c
|
//------------------------------------------------------------------------------
// GB_extract_vector_list: extract vector indices for all entries in a matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Constructs a list of vector indices for each entry in a matrix. Creates
// the output J for GB_extractTuples, and I for GB_transpose when the qsort
// method is used.
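// Informal example: for a sparse CSC matrix with Ap = [0 2 3 5] (and no
// hypersparse Ah), vector 0 owns entries 0..1, vector 1 owns entry 2, and
// vector 2 owns entries 3..4, so this routine fills J = [0 0 1 2 2].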
// TODO: use #include "GB_positional_op_ijp.c" here
#include "GB_ek_slice.h"
#define GB_FREE_ALL \
{ \
GB_WERK_POP (A_ek_slicing, int64_t) ; \
}
GrB_Info GB_extract_vector_list // extract vector list from a matrix
(
// output:
int64_t *restrict J, // size nnz(A) or more
// input:
const GrB_Matrix A,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (J != NULL) ;
ASSERT (A != NULL) ;
ASSERT (GB_JUMBLED_OK (A)) ; // pattern not accessed
ASSERT (GB_ZOMBIES_OK (A)) ; // pattern not accessed
ASSERT (!GB_IS_BITMAP (A)) ;
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const int64_t avlen = A->vlen ;
//--------------------------------------------------------------------------
// determine the max number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// slice the entries for each task
//--------------------------------------------------------------------------
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
int A_ntasks, A_nthreads ;
GB_SLICE_MATRIX (A, 2, chunk) ;
//--------------------------------------------------------------------------
// extract the vector index for each entry
//--------------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < A_ntasks ; tid++)
{
// if kfirst > klast then task tid does no work at all
int64_t kfirst = kfirst_Aslice [tid] ;
int64_t klast = klast_Aslice [tid] ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// find the part of A(:,k) to be operated on by this task
//------------------------------------------------------------------
int64_t j = GBH (Ah, k) ;
int64_t pA_start, pA_end ;
GB_get_pA (&pA_start, &pA_end, tid, k,
kfirst, klast, pstart_Aslice, Ap, avlen) ;
//------------------------------------------------------------------
// extract vector indices of A(:,j)
//------------------------------------------------------------------
for (int64_t p = pA_start ; p < pA_end ; p++)
{
J [p] = j ;
}
}
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
}
|
GB_unaryop__ainv_uint8_uint8.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint8_uint8
// op(A') function: GB_tran__ainv_uint8_uint8
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_uint8_uint8
(
uint8_t *restrict Cx,
const uint8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_uint8_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__minus_fc64.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__minus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__minus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__minus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__minus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_fc64)
// A*D function (colscale): GB (_AxD__minus_fc64)
// D*A function (rowscale): GB (_DxB__minus_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__minus_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__minus_fc64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_fc64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_fc64)
// C=scalar+B GB (_bind1st__minus_fc64)
// C=scalar+B' GB (_bind1st_tran__minus_fc64)
// C=A+scalar GB (_bind2nd__minus_fc64)
// C=A'+scalar GB (_bind2nd_tran__minus_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// A pattern? 0
// B type: GxB_FC64_t
// B pattern? 0
// BinaryOp: cij = GB_FC64_minus (aij, bij)
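// For instance (informal summary): GB (_bind1st__minus_fc64) below computes
// Cx [p] = GB_FC64_minus (x, Bx [p]) over the entries of B, while
// GB (_bind2nd__minus_fc64) computes Cx [p] = GB_FC64_minus (Ax [p], y).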
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_minus (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_FC64 || GxB_NO_MINUS_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__minus_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__minus_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__minus_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__minus_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__minus_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__minus_fc64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__minus_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC64_t alpha_scalar ;
GxB_FC64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__minus_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__minus_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__minus_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__minus_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__minus_fc64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_FC64_minus (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__minus_fc64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_FC64_minus (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_minus (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__minus_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_minus (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__minus_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__ne_uint32.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__ne_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__ne_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_uint32)
// A*D function (colscale): GB (_AxD__ne_uint32)
// D*A function (rowscale): GB (_DxB__ne_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_uint32)
// C=scalar+B GB (_bind1st__ne_uint32)
// C=scalar+B' GB (_bind1st_tran__ne_uint32)
// C=A+scalar GB (_bind2nd__ne_uint32)
// C=A'+scalar GB (_bind2nd_tran__ne_uint32)
// C type: bool
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_UINT32 || GxB_NO_NE_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__ne_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ne_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ne_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ne_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ne_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ne_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__ne_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ne_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__ne_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ne_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ne_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
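// Editorial note (sketch, not part of the generated kernel): at the user level a
// bind1st kernel like the one above is what a GrB_apply call with the first
// operand bound to a scalar would dispatch to, assuming a GraphBLAS build where
// GrB_NE_UINT32 and the BinaryOp1st apply variant are available:
//
//      // uint32_t x = 42 ; GrB_Matrix C, B already created with uint32 entries
//      // GrB_Matrix_apply_BinaryOp1st_UINT32 (C, NULL, NULL, GrB_NE_UINT32, x, B, NULL) ;
//
// which computes C(i,j) = (x != B(i,j)) for every stored entry of B; the
// bind2nd kernel below is the mirror case, C(i,j) = (A(i,j) != y).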
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ne_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__ne_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__ne_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
c_print_results.c
|
/*****************************************************************/
/****** C _ P R I N T _ R E S U L T S ******/
/*****************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
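/* Editorial note (sketch): a typical NPB benchmark calls this routine once at the
 * end of a run, roughly as follows; all variable and macro names here are
 * illustrative only and are normally supplied by the benchmark and npbparams.h:
 *
 *     c_print_results("IS", 'C', n1, 0, 0, niter, elapsed, mops,
 *                     "keys ranked", passed, NPBVERSION, COMPILETIME,
 *                     CC, CLINK, C_LIB, C_INC, CFLAGS, CLINKFLAGS);
 */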
void c_print_results( char *name,
char class,
int n1,
int n2,
int n3,
int niter,
double t,
double mops,
char *optype,
int passed_verification,
char *npbversion,
char *compiletime,
char *cc,
char *clink,
char *c_lib,
char *c_inc,
char *cflags,
char *clinkflags )
{
int num_threads;
char *num_threads_set;
/* figure out number of threads used */
#pragma omp parallel shared(num_threads)
{
#pragma omp master
num_threads = omp_get_num_threads();
}
num_threads_set = getenv("OMP_NUM_THREADS");
if (!num_threads_set) num_threads_set = "unset";
printf( "\n\n %s Benchmark Completed\n", name );
printf( " Class = %c\n", class );
if( n2 == 0 && n3 == 0 )
printf( " Size = %12d\n", n1 ); /* as in IS */
else
printf( " Size = %3dx %3dx %3d\n", n1,n2,n3 );
printf( " Iterations = %12d\n", niter );
printf( " Time in seconds = %12.2f\n", t );
printf( " Total threads = %12d\n", num_threads);
printf( " Request threads = %12s\n", num_threads_set);
printf( " Mop/s total = %12.2f\n", mops );
printf( " Mop/s/thread = %12.2f\n",
mops/(double)num_threads );
printf( " Operation type = %24s\n", optype);
if( passed_verification )
printf( " Verification = SUCCESSFUL\n" );
else
printf( " Verification = UNSUCCESSFUL\n" );
printf( " Version = %12s\n", npbversion );
printf( " Compile date = %12s\n", compiletime );
printf( "\n Compile options:\n" );
printf( " CC = %s\n", cc );
printf( " CLINK = %s\n", clink );
printf( " C_LIB = %s\n", c_lib );
printf( " C_INC = %s\n", c_inc );
printf( " CFLAGS = %s\n", cflags );
printf( " CLINKFLAGS = %s\n", clinkflags );
printf( "\n\n" );
    printf( " Please send all errors/feedback to:\n\n" );
printf( " NPB Development Team\n" );
printf( " [email protected]\n\n" );
/* printf( " Please send the results of this run to:\n\n" );
printf( " NPB Development Team\n" );
printf( " Internet: [email protected]\n \n" );
printf( " If email is not available, send this to:\n\n" );
printf( " MS T27A-1\n" );
printf( " NASA Ames Research Center\n" );
printf( " Moffett Field, CA 94035-1000\n\n" );
printf( " Fax: 650-604-3957\n\n" ); */
}
|
3d7pt.c
|
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>   /* needed for omp_get_max_threads() used below */
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  /* Fallback defaults (an editorial assumption, not from the original benchmark)
   * so the sizes are never used uninitialized when arguments are missing. */
  Nx = Ny = Nz = 32 + 2;
  Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 4;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - per grid point: 6 additions and 2 multiplications
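// The update below is the classic order-1 7-point stencil: each interior point
// combines its own value with its six face neighbors,
//   A_next[i][j][k] = alpha*A[i][j][k]
//                   + beta*(A[i-1][j][k] + A[i][j-1][k] + A[i][j][k-1]
//                         + A[i+1][j][k] + A[i][j+1][k] + A[i][j][k+1])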
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays (left commented out: freeing was observed to cause performance degradation)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
sageInterface.h
|
#ifndef ROSE_SAGE_INTERFACE
#define ROSE_SAGE_INTERFACE
#include "sage3basic.hhh"
#include <stdint.h>
#include <utility>
#include "rosePublicConfig.h" // for ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
#include "OmpAttribute.h"
#if 0 // FMZ(07/07/2010): the argument "nextErrorCode" should be call-by-reference
SgFile* determineFileType ( std::vector<std::string> argv, int nextErrorCode, SgProject* project );
#else
SgFile* determineFileType ( std::vector<std::string> argv, int& nextErrorCode, SgProject* project );
#endif
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
#include "rewrite.h"
#endif
// DQ (7/20/2008): Added support for unparsing arbitrary strings in the unparser.
#include "astUnparseAttribute.h"
#include <set>
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
#include "LivenessAnalysis.h"
#include "abstract_handle.h"
#include "ClassHierarchyGraph.h"
#endif
// DQ (8/19/2004): Moved from ROSE/src/midend/astRewriteMechanism/rewrite.h
//! A global function for getting the string associated with an enum (which is defined in global scope)
ROSE_DLL_API std::string getVariantName (VariantT v);
// DQ (12/9/2004): Qing, Rich and Dan have decided to start this namespace within ROSE
// This namespace is specific to interface functions that operate on the Sage III AST.
// The name was chosen so as not to conflict with other classes within ROSE.
// This will become the future home of many interface functions which operate on
// the AST and which are generally useful to users. As a namespace multiple files can be used
// to represent the complete interface and different developers may contribute interface
// functions easily.
// Constructor handling: (We have sageBuilder.h now for this purpose, Liao 2/1/2008)
// We could add simpler layers of support for construction of IR nodes by
// hiding many details in "makeSg***()" functions. Such functions would
// return pointers to the associated Sg*** objects and would be able to hide
// many IR specific details, including:
// memory handling
// optional parameter settings not often required
// use of Sg_File_Info objects (and setting them as transformations)
//
// namespace AST_Interface (this name is taken already by some of Qing's work :-)
//! An alias for Sg_File_Info::generateDefaultFileInfoForTransformationNode()
#define TRANS_FILE Sg_File_Info::generateDefaultFileInfoForTransformationNode()
/** Functions that are useful when operating on the AST.
*
* The Sage III IR design attempts to be minimalist. Thus additional functionality is intended to be presented using separate
* higher level interfaces which work with the IR. This namespace collects functions that operate on the IR and support
* numerous types of operations that are common to general analysis and transformation of the AST. */
namespace SageInterface
{
// Liao 6/22/2016: keep records of loop init-stmt normalization, later help undo it to support autoPar.
struct Transformation_Record
{
// a lookup table to check if a for loop has been normalized for its c99-style init-stmt
std::map <SgForStatement* , bool > forLoopInitNormalizationTable;
// Detailed record about the original declaration (1st in the pair) and the normalization generated new declaration (2nd in the pair)
std::map <SgForStatement* , std::pair<SgVariableDeclaration*, SgVariableDeclaration*> > forLoopInitNormalizationRecord;
} ;
ROSE_DLL_API extern Transformation_Record trans_records;
// DQ (4/3/2014): Added general AST support separate from the AST.
// Container and API for analysis information that is outside of the AST and as a result
// prevents frequent modification of the IR.
class DeclarationSets
{
// DQ (4/3/2014): This stores all associated declarations as a map of sets.
// the key to the map is the first nondefining declaration and the elements of the set are
// all of the associated declarations (including the defining declaration).
private:
//! Map of first-nondefining declaration to all other associated declarations.
std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > declarationMap;
public:
void addDeclaration(SgDeclarationStatement* decl);
const std::set<SgDeclarationStatement*>* getDeclarations(SgDeclarationStatement* decl);
std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > & getDeclarationMap();
bool isLocatedInDefiningScope(SgDeclarationStatement* decl);
};
// DQ (4/3/2014): This constructs a data structure that holds analysis information about
// the AST that is separate from the AST. This is intended to be a general mechanism
// to support analysis information without constantly modifying the IR.
DeclarationSets* buildDeclarationSets(SgNode*);
//! An internal counter for generating unique SgName
ROSE_DLL_API extern int gensym_counter;
#ifdef ROSE_BUILD_BINARY_ANALYSIS_SUPPORT
//! Find the main interpretation.
SgAsmInterpretation* getMainInterpretation(SgAsmGenericFile* file);
//! Get the unsigned value of a disassembled constant.
uint64_t getAsmConstant(SgAsmValueExpression* e);
//! Get the signed value of a disassembled constant.
int64_t getAsmSignedConstant(SgAsmValueExpression *e);
#endif
//! Function to add "C" style comment to statement.
void addMessageStatement( SgStatement* stmt, std::string message );
//! A persistent attribute to represent a unique name for an expression
class UniqueNameAttribute : public AstAttribute
{
private:
std::string name;
public:
UniqueNameAttribute(std::string n="") {name =n; };
void set_name (std::string n) {name = n;};
std::string get_name () {return name;};
};
//------------------------------------------------------------------------
//@{
/*! @name Symbol tables
\brief utility functions for symbol tables
*/
// DQ (8/5/2020): the "using namespace" directive will not hide existing visibility of symbols in resolving visibility.
// So we need to test if a symbol is visible, excluding matching aliases due to using directives, before we can decide to
// pursue namespace qualification. This is best demonstrated by Cxx_tests/test2020_18.C, test2020_19.C, test2020_20.C,
// and test2020_21.C.
ROSE_DLL_API SgSymbol *lookupSymbolInParentScopesIgnoringAliasSymbols (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
// DQ (8/21/2013): Modified to make newest function parameters be default arguments.
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments.
//! Find a symbol in current and ancestor scopes for a given variable name, starting from top of ScopeStack if currentscope is not given or NULL.
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList);
ROSE_DLL_API SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
// Liao 1/22/2008, used for get symbols for generating variable reference nodes
// ! Find a variable symbol in current and ancestor scopes for a given name
ROSE_DLL_API SgVariableSymbol *lookupVariableSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);
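// Example (editorial sketch, assuming `scope` is a valid SgScopeStatement* taken from the AST):
//      SgVariableSymbol* sym = SageInterface::lookupVariableSymbolInParentScopes("i", scope);
//      // sym is NULL if no variable named "i" is visible from `scope` or its ancestor scopes.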
// DQ (11/24/2007): Functions moved from the Fortran support so that they could be called from within astPostProcessing.
//!look up the first matched function symbol in parent scopes given only a function name, starting from top of ScopeStack if currentscope is not given or NULL
ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName, SgScopeStatement *currentScope=NULL);
// Liao, 1/24/2008, find exact match for a function
//!look up function symbol in parent scopes given both name and function type, starting from top of ScopeStack if currentscope is not given or NULL
ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName,
const SgType* t,
SgScopeStatement *currentScope=NULL);
ROSE_DLL_API SgFunctionSymbol *lookupTemplateFunctionSymbolInParentScopes (const SgName & functionName, SgFunctionType * ftype, SgTemplateParameterPtrList * tplparams, SgScopeStatement *currentScope=NULL);
ROSE_DLL_API SgFunctionSymbol *lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & functionName, SgFunctionType * ftype, SgTemplateParameterPtrList * tplparams, SgScopeStatement *currentScope=NULL);
ROSE_DLL_API SgTemplateVariableSymbol * lookupTemplateVariableSymbolInParentScopes (const SgName & name, SgTemplateParameterPtrList * tplparams, SgTemplateArgumentPtrList* tplargs, SgScopeStatement *currentScope=NULL);
// DQ (8/21/2013): Modified to make newest function parameters be default arguments.
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments.
// DQ (5/7/2011): Added support for SgClassSymbol (used in name qualification support).
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
ROSE_DLL_API SgTypedefSymbol* lookupTypedefSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgNonrealSymbol* lookupNonrealSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
#if 0
// DQ (8/13/2013): This function does not make since any more, now that we have made the symbol
// table handling more precise and we have to provide template parameters for any template lookup.
// We also have to know if we want to lookup template classes, template functions, or template
// member functions (since each have specific requirements).
SgTemplateSymbol* lookupTemplateSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
#endif
#if 0
// DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes.
// Where these are called we might not know enough information about the template parameters or function
// types, for example.
SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
SgTemplateFunctionSymbol* lookupTemplateFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
SgTemplateMemberFunctionSymbol* lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
#endif
// DQ (8/21/2013): Modified to make some of the newest function parameters be default arguments.
// DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes.
ROSE_DLL_API SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList, SgScopeStatement *cscope = NULL);
ROSE_DLL_API SgEnumSymbol* lookupEnumSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgNamespaceSymbol* lookupNamespaceSymbolInParentScopes(const SgName & name, SgScopeStatement *currentScope = NULL);
// DQ (7/17/2011): Added function from cxx branch that I need here for the Java support.
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *cscope);
/*! \brief set_name of symbol in symbol table.
This function extracts the symbol from the relevant symbol table,
changes the name (at the declaration) and reinserts it into the
symbol table.
\internal I think this is what this function does, I need to double check.
*/
// DQ (12/9/2004): Moved this function (by Alin Jula) from being a member of SgInitializedName
// to this location where it can be a part of the interface for the Sage III AST.
ROSE_DLL_API int set_name (SgInitializedName * initializedNameNode, SgName new_name);
/*! \brief Output function type symbols in global function type symbol table.
*/
void outputGlobalFunctionTypeSymbolTable ();
// DQ (6/27/2005):
/*! \brief Output the local symbol tables.
\implementation Each symbol table is output with the file info indicating where it is located in the source code.
*/
ROSE_DLL_API void outputLocalSymbolTables (SgNode * node);
class OutputLocalSymbolTables:public AstSimpleProcessing
{
public:
void visit (SgNode * node);
};
/*! \brief Regenerate the symbol table.
\implementation current symbol table must be NULL pointer before calling this
function (for safety, but is this a good idea?)
*/
// DQ (9/28/2005):
void rebuildSymbolTable (SgScopeStatement * scope);
/*! \brief Clear those variable symbols with unknown type (together with initialized names) which are also not referenced by any variable references or declarations under root. If root is NULL, all symbols with unknown type will be deleted.
*/
void clearUnusedVariableSymbols (SgNode* root = NULL);
// DQ (3/1/2009):
//! All the symbol table references in the copied AST need to be reset after rebuilding the copied scope's symbol table.
void fixupReferencesToSymbols( const SgScopeStatement* this_scope, SgScopeStatement* copy_scope, SgCopyHelp & help );
//@}
//------------------------------------------------------------------------
//@{
/*! @name Stringify
\brief Generate a useful string (name) to describe a SgNode
*/
/*! \brief Generate a useful name to describe the SgNode
\internal default names are used for SgNode objects that can not be associated with a name.
*/
// DQ (9/21/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgNode * node);
/*! \brief Generate a useful name to describe the declaration
\internal default names are used for declarations that can not be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgStatement * stmt);
/*! \brief Generate a useful name to describe the expression
\internal default names are used for expressions that can not be associated with a name.
*/
std::string get_name (const SgExpression * expr);
/*! \brief Generate a useful name to describe the declaration
\internal default names are used for declarations that can not be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgDeclarationStatement * declaration);
/*! \brief Generate a useful name to describe the scope
\internal default names are used for scope that cannot be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgScopeStatement * scope);
/*! \brief Generate a useful name to describe the SgSymbol
\internal default names are used for SgSymbol objects that cannot be associated with a name.
*/
// DQ (2/11/2007): Added this function to make debugging support more complete (useful for symbol table debugging support).
std::string get_name (const SgSymbol * symbol);
/*! \brief Generate a useful name to describe the SgType
\internal default names are used for SgType objects that cannot be associated with a name.
*/
std::string get_name (const SgType * type);
/*! \brief Generate a useful name to describe the SgSupport IR node
*/
std::string get_name (const SgSupport * node);
/*! \brief Generate a useful name to describe the SgLocatedNodeSupport IR node
*/
std::string get_name (const SgLocatedNodeSupport * node);
/*! \brief Generate a useful name to describe the SgC_PreprocessorDirectiveStatement IR node
*/
std::string get_name ( const SgC_PreprocessorDirectiveStatement* directive );
/*! \brief Generate a useful name to describe the SgToken IR node
*/
std::string get_name ( const SgToken* token );
// DQ (3/20/2016): Added to refactor some of the DSL infrastructure support.
/*! \brief Generate a useful name to support construction of identifiers from declarations.
This function permits names to be generated that will be unique across translation units
(a specific requirement different from the context of the get_name() functions above).
\internal This supports only a restricted set of declarations presently.
*/
std::string generateUniqueNameForUseAsIdentifier ( SgDeclarationStatement* declaration );
std::string generateUniqueNameForUseAsIdentifier_support ( SgDeclarationStatement* declaration );
/*! \brief Global map of name collisions to support generateUniqueNameForUseAsIdentifier() function.
*/
extern std::map<std::string,int> local_name_collision_map;
extern std::map<std::string,SgNode*> local_name_to_node_map;
extern std::map<SgNode*,std::string> local_node_to_name_map;
/*! \brief Traversal to set the global maps of names to nodes and nodes to names, plus the name-collision map, to support the generateUniqueNameForUseAsIdentifier() function.
*/
void computeUniqueNameForUseAsIdentifier( SgNode* astNode );
/*! \brief Reset map variables used to support generateUniqueNameForUseAsIdentifier() function.
*/
void reset_name_collision_map();
//@}
//------------------------------------------------------------------------
//@{
/*! @name Class utilities
\brief
*/
/*! \brief Get the default destructor from the class declaration
*/
// DQ (6/21/2005): Get the default destructor from the class declaration
SgMemberFunctionDeclaration *getDefaultDestructor (SgClassDeclaration *
classDeclaration);
/*! \brief Get the default constructor from the class declaration
*/
// DQ (6/22/2005): Get the default constructor from the class declaration
ROSE_DLL_API SgMemberFunctionDeclaration *getDefaultConstructor (SgClassDeclaration *
classDeclaration);
/*! \brief Return true if template definition is in the class, false if outside of class.
*/
// DQ (8/27/2005):
bool templateDefinitionIsInClass (SgTemplateInstantiationMemberFunctionDecl
* memberFunctionDeclaration);
/*! \brief Generate a non-defining (forward) declaration from a defining function declaration.
\internal should put into sageBuilder ?
*/
// DQ (9/17/2005):
SgTemplateInstantiationMemberFunctionDecl*
buildForwardFunctionDeclaration
(SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation);
//! Check if a SgNode is a declaration for a structure
bool isStructDeclaration(SgNode * node);
//! Check if a SgNode is a declaration for a union
bool isUnionDeclaration(SgNode * node);
#if 0
// DQ (8/28/2005): This is already a member function of the SgFunctionDeclaration
// (so that it can handle template functions and member functions)
/*! \brief Return true if the member function is a template member function,
or false if it is a non-template member function in a templated class.
*/
// DQ (8/27/2005):
bool isTemplateMemberFunction (SgTemplateInstantiationMemberFunctionDecl *
memberFunctionDeclaration);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name Misc.
\brief Not sure of the classification right now
*/
//! Recursively print current and parent nodes. used within gdb to probe the context of a node.
void recursivePrintCurrentAndParent (SgNode* n) ;
//! Save AST into a pdf file. Start from a node to find its enclosing file node. The entire file's AST will be saved into a pdf.
void saveToPDF(SgNode* node, std::string filename);
void saveToPDF(SgNode* node); // enable calling from gdb
// DQ (2/12/2012): Added some diagnostic support.
//! Diagnostic function for tracing back through the parent list to understand at runtime where in the AST a failure happened.
void whereAmI(SgNode* node);
//! Extract a SgPragmaDeclaration's leading keyword. For example "#pragma omp parallel" has a keyword of "omp".
std::string extractPragmaKeyword(const SgPragmaDeclaration *);
//! Check if a node is SgOmp*Statement
ROSE_DLL_API bool isOmpStatement(SgNode* );
/*! \brief Return true if function is overloaded.
*/
// DQ (8/27/2005):
bool isOverloaded (SgFunctionDeclaration * functionDeclaration);
// DQ (2/14/2012): Added support function used for variable declarations in conditionals.
//! Support function used for variable declarations in conditionals
void initializeIfStmt(SgIfStmt *ifstmt, SgStatement* conditional, SgStatement * true_body, SgStatement * false_body);
//! Support function used for variable declarations in conditionals
void initializeSwitchStatement(SgSwitchStatement* switchStatement,SgStatement *item_selector,SgStatement *body);
//! Support function used for variable declarations in conditionals
void initializeWhileStatement(SgWhileStmt* whileStatement, SgStatement * condition, SgStatement *body, SgStatement *else_body);
//! Generate unique names for expressions and attach the names as persistent attributes ("UniqueNameAttribute")
void annotateExpressionsWithUniqueNames (SgProject* project);
//! Check if a SgNode is a main() function declaration
ROSE_DLL_API bool isMain (const SgNode* node);
// DQ (6/22/2005):
/*! \brief Generate unique name from C and C++ constructs. The name may contain spaces.
This is support for the AST merge, but is generally useful as a more general mechanism than
name mangling, which is more closely tied to the generation of names to support link-time function name
resolution. This is more general than common name mangling in that it resolves more relevant differences
between C and C++ declarations. (e.g. the type within the declaration: "struct { int:8; } foo;").
\implementation current work does not support expressions.
*/
std::string generateUniqueName ( const SgNode * node, bool ignoreDifferenceBetweenDefiningAndNondefiningDeclarations);
/** Generate a name like __temp#__ that is unique in the current scope and any parent and children scopes. # is a unique integer counter.
* @param baseName the word to be included in the variable names. */
std::string generateUniqueVariableName(SgScopeStatement* scope, std::string baseName = "temp");
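// Example (editorial sketch): generate a collision-free temporary name in `scope`:
//      std::string tmp = SageInterface::generateUniqueVariableName(scope, "sum");
//      // yields a name of the form __sum<N>__ , where <N> is a unique counter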
// DQ (8/10/2010): Added const to first parameter.
// DQ (3/10/2007):
//! Generate a unique string from the source file position information
std::string declarationPositionString (const SgDeclarationStatement * declaration);
// DQ (1/20/2007):
//! Added mechanism to generate project name from list of file names
ROSE_DLL_API std::string generateProjectName (const SgProject * project, bool supressSuffix = false );
//! Given a SgExpression that represents a named function (or bound member
//! function), return the mentioned function
SgFunctionDeclaration* getDeclarationOfNamedFunction(SgExpression* func);
//! Get the mask expression from the header of a SgForAllStatement
SgExpression* forallMaskExpression(SgForAllStatement* stmt);
//! Find all SgPntrArrRefExp under astNode, then add SgVarRefExp (if any) of SgPntrArrRefExp's dim_info into NodeList_t
void addVarRefExpFromArrayDimInfo(SgNode * astNode, Rose_STL_Container<SgNode *>& NodeList_t);
// DQ (10/6/2006): Added support for faster mangled name generation (caching avoids recomputation).
/*! \brief Support for faster mangled name generation (caching avoids recomputation).
*/
#ifndef SWIG
// DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time).
void clearMangledNameCache (SgGlobal * globalScope);
void resetMangledNameCache (SgGlobal * globalScope);
#endif
std::string getMangledNameFromCache (SgNode * astNode);
std::string addMangledNameToCache (SgNode * astNode, const std::string & mangledName);
SgDeclarationStatement * getNonInstantiatonDeclarationForClass (SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation);
//! A better version of SgVariableDeclaration::set_baseTypeDefiningDeclaration(), handling all side effects automatically
//! Used to have a struct declaration embedded into a variable declaration
void setBaseTypeDefiningDeclaration(SgVariableDeclaration* var_decl, SgDeclarationStatement *base_decl);
// DQ (10/14/2006): This function tests the AST to see if for a non-defining declaration, the
// bool declarationPreceedsDefinition ( SgClassDeclaration* classNonDefiningDeclaration, SgClassDeclaration* classDefiningDeclaration );
//! Check if a defining declaration comes before or after the non-defining declaration.
bool declarationPreceedsDefinition (SgDeclarationStatement *nonDefiningDeclaration, SgDeclarationStatement *definingDeclaration);
// DQ (10/19/2006): Function calls have interesting context dependent rules to determine if
// they are output with a global qualifier or not. Where this is true we have to avoid global
// qualifiers, since the function's scope has not been defined. This is an example of where
// qualification of function names in function calls is context dependent; an interesting
// example of where the C++ language is not friendly to source-to-source processing :-).
bool functionCallExpressionPreceedsDeclarationWhichAssociatesScope (SgFunctionCallExp * functionCall);
/*! \brief Compute the intersection set for two ASTs.
This is part of a test done by the copy function to compute those IR nodes in the copy that still reference the original AST.
*/
ROSE_DLL_API std::vector < SgNode * >astIntersection (SgNode * original, SgNode * copy, SgCopyHelp * help = NULL);
//! Deep copy an arbitrary subtree
ROSE_DLL_API SgNode* deepCopyNode (const SgNode* subtree);
//! A template function for deep copying a subtree. It is also used to create deepcopy functions with specialized parameter and return types. e.g SgExpression* copyExpression(SgExpression* e);
template <typename NodeType>
NodeType* deepCopy (const NodeType* subtree) {
return dynamic_cast<NodeType*>(deepCopyNode(subtree));
}
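// Example (editorial sketch): the template preserves the static type of the copy, so the two
// calls below are roughly equivalent in effect:
//      SgExpression* e2 = SageInterface::deepCopy<SgExpression>(e1);
//      SgExpression* e3 = SageInterface::copyExpression(e1);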
//! Deep copy an expression
ROSE_DLL_API SgExpression* copyExpression(SgExpression* e);
//!Deep copy a statement
ROSE_DLL_API SgStatement* copyStatement(SgStatement* s);
// from VarSym.cc in src/midend/astOutlining/src/ASTtools
//! Get the variable symbol for the first initialized name of a declaration stmt.
ROSE_DLL_API SgVariableSymbol* getFirstVarSym (SgVariableDeclaration* decl);
//! Get the first initialized name of a declaration statement
ROSE_DLL_API SgInitializedName* getFirstInitializedName (SgVariableDeclaration* decl);
//! A special purpose statement removal function, originally from inlinerSupport.h; needs Jeremiah's attention to refine it. Please don't use it for now.
ROSE_DLL_API void myRemoveStatement(SgStatement* stmt);
ROSE_DLL_API bool isConstantTrue(SgExpression* e);
ROSE_DLL_API bool isConstantFalse(SgExpression* e);
ROSE_DLL_API bool isCallToParticularFunction(SgFunctionDeclaration* decl, SgExpression* e);
ROSE_DLL_API bool isCallToParticularFunction(const std::string& qualifiedName, size_t arity, SgExpression* e);
//! Check if a declaration has a "static' modifier
bool ROSE_DLL_API isStatic(SgDeclarationStatement* stmt);
//! Set a declaration as static
ROSE_DLL_API void setStatic(SgDeclarationStatement* stmt);
//! Check if a declaration has an "extern" modifier
ROSE_DLL_API bool isExtern(SgDeclarationStatement* stmt);
//! Set a declaration as extern
ROSE_DLL_API void setExtern(SgDeclarationStatement* stmt);
//! Interface for creating a statement whose computation writes its answer into
//! a given variable.
class StatementGenerator {
public:
virtual ~StatementGenerator() {};
virtual SgStatement* generate(SgExpression* where_to_write_answer) = 0;
};
//! Check if a SgNode _s is an assignment statement (any of =,+=,-=,&=,/=, ^=, etc)
//!
//! Return the left hand, right hand expressions and if the left hand variable is also being read
bool isAssignmentStatement(SgNode* _s, SgExpression** lhs=NULL, SgExpression** rhs=NULL, bool* readlhs=NULL);
//! Variable references can be introduced by SgVarRef, SgPntrArrRefExp, SgInitializedName, SgMemberFunctionRef etc. For Dot and Arrow Expressions, their lhs is used to obtain SgInitializedName (coarse grain) by default. Otherwise, fine-grain rhs is used.
ROSE_DLL_API SgInitializedName* convertRefToInitializedName(SgNode* current, bool coarseGrain=true);
//! Build an abstract handle from an AST node, reuse previously built handle when possible
ROSE_DLL_API AbstractHandle::abstract_handle* buildAbstractHandle(SgNode*);
//! Obtain a matching SgNode from an abstract handle string
ROSE_DLL_API SgNode* getSgNodeFromAbstractHandleString(const std::string& input_string);
//! Dump information about a SgNode for debugging
ROSE_DLL_API void dumpInfo(SgNode* node, std::string desc="");
//! Reorder a list of declaration statements based on their appearance order in source files
ROSE_DLL_API std::vector<SgDeclarationStatement*>
sortSgNodeListBasedOnAppearanceOrderInSource(const std::vector<SgDeclarationStatement*>& nodevec);
// DQ (4/13/2013): We need these to support the unparing of operators defined by operator syntax or member function names.
//! Is an overloaded operator a prefix operator (e.g. address operator X * operator&(), dereference operator X & operator*(), unary plus operator X & operator+(), etc.
// bool isPrefixOperator( const SgMemberFunctionRefExp* memberFunctionRefExp );
bool isPrefixOperator( SgExpression* exp );
//! Check for proper names of possible prefix operators (used in isPrefixOperator()).
bool isPrefixOperatorName( const SgName & functionName );
//! Is an overloaded operator a postfix operator. (e.g. ).
bool isPostfixOperator( SgExpression* exp );
//! Is an overloaded operator an index operator (also referred to as call or subscript operators). (e.g. X & operator()() or X & operator[]()).
bool isIndexOperator( SgExpression* exp );
// DQ (1/10/2014): Adding more general support for token based unparsing.
//! Used to support token unparsing (when outputting the trailing token sequence).
SgStatement* lastStatementOfScopeWithTokenInfo (SgScopeStatement* scope, std::map<SgNode*,TokenStreamSequenceToNodeMapping*> & tokenStreamSequenceMap);
// DQ (8/12/2020): Check the access permissions of all defining and nondefining declarations.
void checkAccessPermissions ( SgNode* );
// DQ (8/14/2020): Check the symbol tables for specific scopes (debugging support).
void checkSymbolTables ( SgNode* );
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST properties
\brief version, language properties of current AST.
*/
// std::string version(); // utility_functions.h, version number
/*! \brief These traverse the memory pool of SgFile IR nodes and determine what languages are in use!
*/
ROSE_DLL_API bool is_Ada_language ();
ROSE_DLL_API bool is_C_language ();
ROSE_DLL_API bool is_Cobol_language ();
ROSE_DLL_API bool is_OpenMP_language ();
ROSE_DLL_API bool is_UPC_language ();
//! Check if dynamic threads compilation is used for UPC programs
ROSE_DLL_API bool is_UPC_dynamic_threads();
ROSE_DLL_API bool is_C99_language ();
ROSE_DLL_API bool is_Cxx_language ();
ROSE_DLL_API bool is_Java_language ();
ROSE_DLL_API bool is_Jovial_language ();
ROSE_DLL_API bool is_Fortran_language ();
ROSE_DLL_API bool is_CAF_language ();
ROSE_DLL_API bool is_PHP_language();
ROSE_DLL_API bool is_Python_language();
ROSE_DLL_API bool is_Cuda_language();
ROSE_DLL_API bool is_OpenCL_language();
ROSE_DLL_API bool is_X10_language();
ROSE_DLL_API bool is_binary_executable();
ROSE_DLL_API bool is_mixed_C_and_Cxx_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_C_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_Cxx_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_C_and_Cxx_language ();
ROSE_DLL_API bool is_language_case_insensitive ();
ROSE_DLL_API bool language_may_contain_nondeclarations_in_scope ();
//@}
//------------------------------------------------------------------------
//@{
/*! @name Scope
\brief
*/
// DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique
// labels for scopes in a function (as required for name mangling).
/*! \brief Assigns unique numbers to each SgScopeStatement of a function.
This is used to provide unique names for variables and types defined in
different nested scopes of a function (used in mangled name generation).
*/
void resetScopeNumbers (SgFunctionDefinition * functionDeclaration);
// DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique
// labels for scopes in a function (as required for name mangling).
/*! \brief Clears the cache of scope,integer pairs for the input function.
This is used to clear the cache of computed unique labels for scopes in a function.
This function should be called after any transformation on a function that might affect
the allocation of scopes and cause the existing unique numbers to be incorrect.
This is part of support to provide unique names for variables and types defined in
different nested scopes of a function (used in mangled name generation).
*/
void clearScopeNumbers (SgFunctionDefinition * functionDefinition);
//!Find the enclosing namespace of a declaration
SgNamespaceDefinitionStatement * enclosingNamespaceScope (SgDeclarationStatement * declaration);
// SgNamespaceDefinitionStatement * getEnclosingNamespaceScope (SgNode * node);
bool isPrototypeInScope (SgScopeStatement * scope,
SgFunctionDeclaration * functionDeclaration,
SgDeclarationStatement * startingAtDeclaration);
//! Check if node1 is a strict ancestor of node2 (a node is not considered its own ancestor).
bool ROSE_DLL_API isAncestor(SgNode* node1, SgNode* node2);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Preprocessing Information
\brief #if-#else-#end, comments, #include, etc
*/
//! Dumps a located node's preprocessing information.
void dumpPreprocInfo (SgLocatedNode* locatedNode);
//! Insert #include "filename" or #include <filename> (system header) into the global scope of a source file; by default it is added as the last #include among the existing headers, or as the first header if requested. Recommended for use.
PreprocessingInfo * insertHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader, bool asLastHeader);
//! Insert a new header right before stmt, if there are existing headers attached to stmt, insert it as the last or first header as specified by asLastHeader
void insertHeader (SgStatement* stmt, PreprocessingInfo* newheader, bool asLastHeader);
//! Insert #include "filename" or #include <filename> (system header) onto the global scope of a source file
PreprocessingInfo * insertHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader = false, PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before);
//! Insert #include "filename" or #include <filename> (system header) into the global scope containing the current scope, right after other #include XXX.
ROSE_DLL_API PreprocessingInfo* insertHeader(const std::string& filename, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::after, bool isSystemHeader=false, SgScopeStatement* scope=NULL);
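// Example (editorial sketch, assuming `scope` is a scope inside the target source file):
//      SageInterface::insertHeader("stdio.h", PreprocessingInfo::after, true /*system header*/, scope);
//      // adds #include <stdio.h> after the existing #include directives in that file's global scope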
//! Identical to movePreprocessingInfo(), except for the stale name and confusing order of parameters. It will be deprecated soon.
ROSE_DLL_API void moveUpPreprocessingInfo (SgStatement* stmt_dst, SgStatement* stmt_src, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef, PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false);
//! Move preprocessing information from stmt_src to stmt_dst. If a source-relative position is specified, only that information is moved to the specified target position; otherwise all preprocessing information is moved with its position information intact. The preprocessing information is appended to the existing preprocessing information list of the target node by default; prepending is used if usePrepend is set to true. Optionally, the relative position can be adjusted after the move using dst_position.
ROSE_DLL_API void movePreprocessingInfo (SgStatement* stmt_src, SgStatement* stmt_dst, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef,
PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false);
//!Cut preprocessing information from a source node and save it into a buffer. Used in combination of pastePreprocessingInfo(). The cut-paste operation is similar to moveUpPreprocessingInfo() but it is more flexible in that the destination node can be unknown during the cut operation.
ROSE_DLL_API void cutPreprocessingInfo (SgLocatedNode* src_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& save_buf);
//!Paste preprocessing information from a buffer to a destination node. Used in combination of cutPreprocessingInfo()
ROSE_DLL_API void pastePreprocessingInfo (SgLocatedNode* dst_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& saved_buf);
//! Attach an arbitrary string to a located node. A workaround to insert irregular statements or vendor-specific attributes.
ROSE_DLL_API PreprocessingInfo* attachArbitraryText(SgLocatedNode* target,
const std::string & text,
PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before);
//!Check if a pragma declaration node has macro calls attached, if yes, replace macro calls within the pragma string with expanded strings. This only works if -rose:wave is turned on.
ROSE_DLL_API void replaceMacroCallsWithExpandedStrings(SgPragmaDeclaration* target);
//@}
//! Build and attach comment onto the global scope of a source file
PreprocessingInfo* attachComment(
SgSourceFile * source_file,
const std::string & content,
PreprocessingInfo::DirectiveType directive_type = PreprocessingInfo::C_StyleComment,
PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before
);
//! Build and attach comment, comment style is inferred from the language type of the target node if not provided
ROSE_DLL_API PreprocessingInfo* attachComment(SgLocatedNode* target, const std::string & content,
PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before,
PreprocessingInfo::DirectiveType dtype= PreprocessingInfo::CpreprocessorUnknownDeclaration);
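// Example (editorial sketch): attach a comment in front of a statement; position and comment
// style fall back to their defaults ('before' and a style inferred from the target's language):
//      SageInterface::attachComment(stmt, "generated by my tool");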
// DQ (7/20/2008): I am not clear where I should put this function, candidates include: SgLocatedNode or SgInterface
//! Add a string to be unparsed to support code generation for back-end specific tools or compilers.
ROSE_DLL_API void addTextForUnparser ( SgNode* astNode, std::string s, AstUnparseAttribute::RelativePositionType inputlocation );
/**
 * Add preprocessor guard around a given node.
* It surrounds the node with "#if guard" and "#endif"
*/
void guardNode(SgLocatedNode * target, std::string guard);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Source File Position
\brief set Sg_File_Info for a SgNode
*/
// ************************************************************************
// Newer versions of now deprecated functions
// ************************************************************************
// DQ (5/1/2012): This function queries the SageBuilder::SourcePositionClassification mode (stored in the SageBuilder
// interface) and uses the specified mode to initialize the source position data (Sg_File_Info objects). This
// function is the only function that should be called directly (though in a namespace we can't define permissions).
//! Set the source code position for the current (input) node.
ROSE_DLL_API void setSourcePosition(SgNode* node);
// A better name might be "setSourcePositionForSubTree"
//! Set the source code position for the subtree (including the root).
ROSE_DLL_API void setSourcePositionAtRootAndAllChildren(SgNode *root);
//! DQ (5/1/2012): New function with improved name.
void setSourcePositionAsTransformation(SgNode *node);
// DQ (5/1/2012): Newly renamed function (previous name preserved for backward compatibility).
void setSourcePositionPointersToNull(SgNode *node);
// ************************************************************************
// ************************************************************************
// Older deprecated functions
// ************************************************************************
// Liao, 1/8/2007, set file info. for a whole subtree as transformation generated
//! Set current node's source position as transformation generated
ROSE_DLL_API void setOneSourcePositionForTransformation(SgNode *node);
//! Set current node's source position as NULL
ROSE_DLL_API void setOneSourcePositionNull(SgNode *node);
//! Recursively set source position info(Sg_File_Info) as transformation generated
ROSE_DLL_API void setSourcePositionForTransformation (SgNode * root);
//! Set source position info(Sg_File_Info) as transformation generated for all SgNodes in memory pool
// ROSE_DLL_API void setSourcePositionForTransformation_memoryPool();
//! Check if a node is from a system header file
ROSE_DLL_API bool insideSystemHeader (SgLocatedNode* node);
//! Set the source position of SgLocatedNode to Sg_File_Info::generateDefaultFileInfo(). These nodes WILL be unparsed. Not for transformation usage.
// ROSE_DLL_API void setSourcePosition (SgLocatedNode * locatedNode);
// ************************************************************************
//@}
//------------------------------------------------------------------------
//@{
/*! @name Data types
\brief
*/
// from src/midend/astInlining/typeTraits.h
// src/midend/astUtil/astInterface/AstInterface.h
//! Get the right bool type according to C or C++ language input
SgType* getBoolType(SgNode* n);
//! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long.
////!
////! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool to be treated as integer types
ROSE_DLL_API bool isStrictIntegerType(SgType* t);
//!Get the data type of the first initialized name of a declaration statement
ROSE_DLL_API SgType* getFirstVarType(SgVariableDeclaration* decl);
//! Is a type default constructible? This may not quite work properly.
ROSE_DLL_API bool isDefaultConstructible(SgType* type);
//! Is a type copy constructible? This may not quite work properly.
ROSE_DLL_API bool isCopyConstructible(SgType* type);
//! Is a type assignable? This may not quite work properly.
ROSE_DLL_API bool isAssignable(SgType* type);
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
//! Check if a class type is a pure virtual class. True means that there is at least
//! one pure virtual function that has not been overridden.
//! In the case of an incomplete class type (forward declaration), this function returns false.
ROSE_DLL_API bool isPureVirtualClass(SgType* type, const ClassHierarchyWrapper& classHierarchy);
#endif
//! Does a type have a trivial (built-in) destructor?
ROSE_DLL_API bool hasTrivialDestructor(SgType* t);
//! Is this type a non-constant reference type? (Handles typedefs correctly)
ROSE_DLL_API bool isNonconstReference(SgType* t);
//! Is this type a const or non-const reference type? (Handles typedefs correctly)
ROSE_DLL_API bool isReferenceType(SgType* t);
//! Is this type a pointer type? (Handles typedefs correctly)
ROSE_DLL_API bool isPointerType(SgType* t);
//! Is this a pointer to a non-const type? Note that this function will return true for const pointers pointing to
//! non-const types. For example, (int* const y) points to a modifiable int, so this function returns true. Meanwhile,
//! it returns false for (int const * x) and (int const * const x) because these types point to a const int.
//! Also, only the outer layer of nested pointers is unwrapped. So the function returns true for (const int ** y), but returns
//! false for const (int * const * x)
ROSE_DLL_API bool isPointerToNonConstType(SgType* type);
//! Is this a const type?
/* const char* p = "aa"; is not treated as having a const type. It is a pointer to const char.
* Similarly, neither for const int b[10]; or const int & c =10;
* The standard says, "A compound type is not cv-qualified by the cv-qualifiers (if any) of
the types from which it is compounded. Any cv-qualifiers applied to an array type affect the array element type, not the array type".
*/
ROSE_DLL_API bool isConstType(SgType* t);
//! Remove const (if present) from a type. stripType() cannot do this because it removes all modifiers.
SgType* removeConst(SgType* t);
//! Is this a volatile type?
ROSE_DLL_API bool isVolatileType(SgType* t);
//! Is this a restrict type?
ROSE_DLL_API bool isRestrictType(SgType* t);
//! Is this a scalar type?
/*! We define the following SgType as scalar types: char, short, int, long , void, Wchar, Float, double, long long, string, bool, complex, imaginary
*/
ROSE_DLL_API bool isScalarType(SgType* t);
//! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long.
//!
//! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool.
ROSE_DLL_API bool isStrictIntegerType(SgType* t);
//! Check if a type is a struct type (a special SgClassType in ROSE)
ROSE_DLL_API bool isStructType(SgType* t);
//! Generate a mangled string for a given type based on Itanium C++ ABI
ROSE_DLL_API std::string mangleType(SgType* type);
//! Generate mangled scalar type names according to Itanium C++ ABI, the input type should pass isScalarType() in ROSE
ROSE_DLL_API std::string mangleScalarType(SgType* type);
//! Generate mangled modifier types, including const and volatile, according to the Itanium C++ ABI, with an extension to handle UPC shared types.
ROSE_DLL_API std::string mangleModifierType(SgModifierType* type);
//! Calculate the number of elements of an array type: dim1* dim2*... , assume element count is 1 for int a[]; Strip off THREADS if it is a UPC array.
ROSE_DLL_API size_t getArrayElementCount(SgArrayType* t);
//! Get the number of dimensions of an array type
ROSE_DLL_API int getDimensionCount(SgType* t);
//! Get the element type of an array. It recursively finds the base type for multi-dimensional array types
ROSE_DLL_API SgType* getArrayElementType(SgType* t);
//! Get the element type of an array, pointer or string, or NULL if not applicable. This function only checks one level of base type. No recursion.
ROSE_DLL_API SgType* getElementType(SgType* t);
/// \brief returns the array dimensions in an array as defined for arrtype
/// \param arrtype the type of a C/C++ array
/// \return an array that contains an expression indicating each dimension's size.
/// OWNERSHIP of the expressions is TRANSFERRED TO the CALLER (which
/// becomes responsible for freeing the expressions).
/// Note, the first entry of the array is a SgNullExpression, iff the
/// first array dimension was not specified.
/// \code
/// int x[] = { 1, 2, 3 };
/// \endcode
/// note, the expression does not have to be a constant
/// \code
/// int x[i*5];
/// \endcode
/// \post return-value.empty() == false
/// \post return-value[*] != NULL (no nullptr in the returned vector)
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype);
/// \brief returns the array dimensions in an array as defined for arrtype
/// \param arrtype the type of a C/C++ array
/// \param varref a reference to an array variable (the variable of type arrtype)
/// \return an array that contains an expression indicating each dimension's size.
/// OWNERSHIP of the expressions is TRANSFERRED TO the CALLER (which
/// becomes responsible for freeing the expressions).
/// If the first array dimension was not specified an expression
/// that indicates that size is generated.
/// \code
/// int x[][3] = { 1, 2, 3, 4, 5, 6 };
/// \endcode
/// the entry for the first dimension will be:
/// \code
/// // 3 ... size of 2nd dimension
/// sizeof(x) / (sizeof(int) * 3)
/// \endcode
/// \pre arrtype is the array-type of varref
/// \post return-value.empty() == false
/// \post return-value[*] != NULL (no nullptr in the returned vector)
/// \post !isSgNullExpression(return-value[*])
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype, const SgVarRefExp& varref);
/// \overload
/// \note see get_C_array_dimensions for SgVarRefExp for details.
/// \todo make initname const
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype, SgInitializedName& initname);
//! Check if an expression is an array access (SgPntrArrRefExp). If so, return its name expression and subscripts if requested. Users can use convertRefToInitializedName() to get the possible name. It does not check if the expression is a top level SgPntrArrRefExp.
ROSE_DLL_API bool isArrayReference(SgExpression* ref, SgExpression** arrayNameExp=NULL, std::vector<SgExpression*>** subscripts=NULL);
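// Usage sketch for isArrayReference(): 'ref' is an assumed SgExpression* (e.g. obtained from a
// traversal) that may be an access such as a[i][j]; the output pointers are only meaningful when
// the function returns true.
//   SgExpression* arrayName = NULL;
//   std::vector<SgExpression*>* subscripts = NULL;
//   if (isArrayReference(ref, &arrayName, &subscripts))
//   {
//     // arrayName is the array name expression (convertRefToInitializedName() can map it to a name);
//     // subscripts contains one expression per dimension.
//   }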
//! Collect variable references in array types. The default NodeQuery::querySubTree() will miss variables referenced in array type's index list. e.g. double *buffer = new double[numItems] ;
ROSE_DLL_API int collectVariableReferencesInArrayTypes (SgLocatedNode* root, Rose_STL_Container<SgNode*> & currentVarRefList);
//! Has a UPC shared type of any kinds (shared-to-shared, private-to-shared, shared-to-private, shared scalar/array)? An optional parameter, mod_type_out, stores the first SgModifierType with UPC access information.
/*!
* Note: we classify private-to-shared as a 'has shared' type for convenience here. It is indeed a private type in a strict sense.
AST graph for some examples:
- shared scalar: SgModifierType -->base type
- shared array: SgArrayType --> SgModiferType --> base type
- shared to shared: SgModifierType --> SgPointerType --> SgModifierType ->SgTypeInt
- shared to private: SgModifierType --> SgPointerType --> base type
- private to shared: SgPointerType --> SgModifierType --> base type
*/
ROSE_DLL_API bool hasUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL );
//! Check if a type is a UPC shared type, including shared array, shared pointers etc. Exclude private pointers to shared types. Optionally return the modifier type with the UPC shared property.
/*!
* ROSE uses an SgArrayType of SgModifierType elements to represent shared arrays, not an SgModifierType pointing to an SgArrayType. Also, typedefs may introduce a chain of nodes before reaching the actual SgModifierType with the UPC shared property.
*/
ROSE_DLL_API bool isUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL);
//! Check if a modifier type is a UPC shared type.
ROSE_DLL_API bool isUpcSharedModifierType (SgModifierType* mod_type);
//! Check if an array type is a UPC shared type. The ROSE AST represents a UPC shared array as a regular array whose elements have a UPC shared modifier type, not as a UPC shared modifier type wrapping an array type.
ROSE_DLL_API bool isUpcSharedArrayType (SgArrayType* array_type);
//! Check if a shared UPC type is strict memory consistency or not. Return false if it is relaxed. (So isUpcRelaxedSharedModifierType() is not necessary.)
ROSE_DLL_API bool isUpcStrictSharedModifierType(SgModifierType* mode_type);
//! Get the block size of a UPC shared modifier type
ROSE_DLL_API size_t getUpcSharedBlockSize(SgModifierType* mod_type);
//! Get the block size of a UPC shared type, including Modifier types and array of modifier types (shared arrays)
ROSE_DLL_API size_t getUpcSharedBlockSize(SgType* t);
//! Is UPC phase-less shared type? Phase-less means block size of the first SgModifierType with UPC information is 1 or 0/unspecified. Also return false if the type is not a UPC shared type.
ROSE_DLL_API bool isUpcPhaseLessSharedType (SgType* t);
//! Is this a UPC private-to-shared pointer? The SgPointerType comes before the SgModifierType carrying the UPC information. The input type must first be one of the UPC shared types.
ROSE_DLL_API bool isUpcPrivateToSharedType(SgType* t);
//! Is a UPC array with dimension of X*THREADS
ROSE_DLL_API bool isUpcArrayWithThreads(SgArrayType* t);
//! Look up a named type based on its name, searching bottom-up from a specified scope. Note that name collisions between a typedef and an enum/struct may be allowed in C (not C++); only the first matching named type is returned in that case. A typedef is returned as-is, not the base type it actually refers to.
ROSE_DLL_API SgType* lookupNamedTypeInParentScopes(const std::string& type_name, SgScopeStatement* scope=NULL);
// DQ (7/22/2014): Added support for comparing expression types in actual arguments with those expected from the formal function parameter types.
//! Get the type of the associated argument expression from the function type.
ROSE_DLL_API SgType* getAssociatedTypeFromFunctionTypeList(SgExpression* actual_argument_expression);
//! Verify that 2 SgTemplateArgument are equivalent (same type, same expression, or same template declaration)
ROSE_DLL_API bool templateArgumentEquivalence(SgTemplateArgument * arg1, SgTemplateArgument * arg2);
//! Verify that 2 SgTemplateArgumentPtrList are equivalent.
ROSE_DLL_API bool templateArgumentListEquivalence(const SgTemplateArgumentPtrList & list1, const SgTemplateArgumentPtrList & list2);
//! Test for equivalence of types independent of access permissions (private or protected modes for members of classes).
ROSE_DLL_API bool isEquivalentType (const SgType* lhs, const SgType* rhs);
//! Test if two types are equivalent SgFunctionType nodes. This is necessary for template function types
//! They may differ in one SgTemplateType pointer but identical otherwise.
ROSE_DLL_API bool isEquivalentFunctionType (const SgFunctionType* lhs, const SgFunctionType* rhs);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Loop handling
\brief
*/
// by Jeremiah
//! Add a step statement to the end of a loop body
//! Add a new label to the end of the loop, with the step statement after
//! it; then change all continue statements in the old loop body into
//! jumps to the label
//!
//! For example:
//! while (a < 5) {if (a < -3) continue;} (adding "a++" to end) becomes
//! while (a < 5) {if (a < -3) goto label; label: a++;}
ROSE_DLL_API void addStepToLoopBody(SgScopeStatement* loopStmt, SgStatement* step);
ROSE_DLL_API void moveForStatementIncrementIntoBody(SgForStatement* f);
ROSE_DLL_API void convertForToWhile(SgForStatement* f);
ROSE_DLL_API void convertAllForsToWhiles(SgNode* top);
//! Change continue statements in a given block of code to gotos to a label
ROSE_DLL_API void changeContinuesToGotos(SgStatement* stmt, SgLabelStatement* label);
//!Return the loop index variable for a for loop
ROSE_DLL_API SgInitializedName* getLoopIndexVariable(SgNode* loop);
//!Check if a SgInitializedName is used as a loop index within an AST subtree
//! This function uses a bottom-up traversal starting from subtree_root to find all enclosing loops and checks if ivar is used as an index for any of them.
ROSE_DLL_API bool isLoopIndexVariable(SgInitializedName* ivar, SgNode* subtree_root);
//! Check if a for loop uses C99 style initialization statement with multiple expressions like for (int i=0, j=0; ..) or for (i=0,j=0;...)
/*!
for (int i=0, j=0; ..) is stored as two variable declarations under SgForInitStatement's init_stmt member
for (i=0,j=0;...) is stored as a single expression statement, with comma expression (i=0,j=0).
*/
ROSE_DLL_API bool hasMultipleInitStatmentsOrExpressions (SgForStatement* for_loop);
//! Routines to get and set the body of a loop
ROSE_DLL_API SgStatement* getLoopBody(SgScopeStatement* loop);
ROSE_DLL_API void setLoopBody(SgScopeStatement* loop, SgStatement* body);
//! Routines to get the condition of a loop. It recognizes While-loop, For-loop, and Do-While-loop
ROSE_DLL_API SgStatement* getLoopCondition(SgScopeStatement* loop);
//! Set the condition statement of a loop, including While-loop, For-loop, and Do-While-loop.
ROSE_DLL_API void setLoopCondition(SgScopeStatement* loop, SgStatement* cond);
//! Check if a for-loop has a canonical form, return loop index, bounds, step, and body if requested
//!
//! A canonical form is defined as: one initialization statement, a test expression, and an increment expression; the loop index variable should be of an integer type. isInclusiveUpperBound is true when <= or >= is used in the loop condition
ROSE_DLL_API bool isCanonicalForLoop(SgNode* loop, SgInitializedName** ivar=NULL, SgExpression** lb=NULL, SgExpression** ub=NULL, SgExpression** step=NULL, SgStatement** body=NULL, bool *hasIncrementalIterationSpace = NULL, bool* isInclusiveUpperBound = NULL);
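// Usage sketch for isCanonicalForLoop(): 'for_loop' is an assumed SgForStatement*; any output
// argument may be left NULL if the caller does not need it.
//   SgInitializedName* ivar = NULL;
//   SgExpression *lb = NULL, *ub = NULL, *step = NULL;
//   SgStatement* body = NULL;
//   bool isIncremental = false, isInclusiveUB = false;
//   if (isCanonicalForLoop(for_loop, &ivar, &lb, &ub, &step, &body, &isIncremental, &isInclusiveUB))
//   {
//     // The loop matches the canonical pattern and the extracted pieces can be used for analysis.
//   }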
//! Check if a Fortran Do loop has a complete canonical form: Do I=1, 10, 1
ROSE_DLL_API bool isCanonicalDoLoop(SgFortranDo* loop,SgInitializedName** ivar/*=NULL*/, SgExpression** lb/*=NULL*/, SgExpression** ub/*=NULL*/, SgExpression** step/*=NULL*/, SgStatement** body/*=NULL*/, bool *hasIncrementalIterationSpace/*= NULL*/, bool* isInclusiveUpperBound/*=NULL*/);
//! Set the lower bound of a loop header for (i=lb; ...)
ROSE_DLL_API void setLoopLowerBound(SgNode* loop, SgExpression* lb);
//! Set the upper bound of a loop header, regardless of the condition expression type: for (i=lb; i op ub; ...)
ROSE_DLL_API void setLoopUpperBound(SgNode* loop, SgExpression* ub);
//! Set the stride (step) of a loop's increment expression, regardless of the expression type (i+=s; i=i+s, etc.)
ROSE_DLL_API void setLoopStride(SgNode* loop, SgExpression* stride);
//! Normalize a loop's init statement by promoting the single variable declaration statement out of the for loop header's init statement, e.g. for (int i=0;...) becomes int i_x; for (i_x=0;...), and rewrite the loop with the new index variable if necessary
ROSE_DLL_API bool normalizeForLoopInitDeclaration(SgForStatement* loop);
//! Undo the normalization of for loop's C99 init declaration. Previous record of normalization is used to ease the reverse transformation.
ROSE_DLL_API bool unnormalizeForLoopInitDeclaration(SgForStatement* loop);
//! Normalize a for loop, return true if successful. Generated constants will be folded by default.
//!
//! Translations are :
//! For the init statement: for (int i=0;... ) becomes int i; for (i=0;..)
//! For test expression:
//! i<x is normalized to i<= (x-1) and
//! i>x is normalized to i>= (x+1)
//! For increment expression:
//! i++ is normalized to i+=1 and
//! i-- is normalized to i+=-1
//! i-=s is normalized to i+= -s
ROSE_DLL_API bool forLoopNormalization(SgForStatement* loop, bool foldConstant = true);
//! Normalize a for loop's test expression
//! i<x is normalized to i<= (x-1) and
//! i>x is normalized to i>= (x+1)
ROSE_DLL_API bool normalizeForLoopTest(SgForStatement* loop);
ROSE_DLL_API bool normalizeForLoopIncrement(SgForStatement* loop);
//!Normalize a Fortran Do loop. Make the default increment expression (1) explicit
ROSE_DLL_API bool doLoopNormalization(SgFortranDo* loop);
//! Unroll a target loop with a specified unrolling factor. It handles steps larger than 1 and adds a fringe loop if the iteration count is not evenly divisible by the unrolling factor.
ROSE_DLL_API bool loopUnrolling(SgForStatement* loop, size_t unrolling_factor);
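// Usage sketch for loopUnrolling(): 'target_loop' is an assumed SgForStatement*; the call returns
// false when the transformation cannot be applied.
//   if (loopUnrolling(target_loop, 4) == false)
//   {
//     // unrolling by a factor of 4 was not performed
//   }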
//! Interchange/permute an n-level perfectly nested loop rooted at 'loop' using a lexicographical order number within (0, depth!).
ROSE_DLL_API bool loopInterchange(SgForStatement* loop, size_t depth, size_t lexicoOrder);
//! Tile the n-level (starting from 1) loop of a perfectly nested loop nest using tiling size s
ROSE_DLL_API bool loopTiling(SgForStatement* loopNest, size_t targetLevel, size_t tileSize);
//Winnie Loop Collapsing
SgExprListExp * loopCollapsing(SgForStatement* target_loop, size_t collapsing_factor);
bool getForLoopInformations(
SgForStatement * for_loop,
SgVariableSymbol * & iterator,
SgExpression * & lower_bound,
SgExpression * & upper_bound,
SgExpression * & stride
);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Topdown search
\brief Top-down traversal from current node to find a node of a specified type
*/
//! Query a subtree to get all nodes of a given type, with an appropriate downcast.
template <typename NodeType>
std::vector<NodeType*> querySubTree(SgNode* top, VariantT variant = (VariantT)NodeType::static_variant)
{
#if 0
printf ("Top of SageInterface::querySubTree() \n");
#endif
Rose_STL_Container<SgNode*> nodes = NodeQuery::querySubTree(top,variant);
std::vector<NodeType*> result(nodes.size(), NULL);
int count = 0;
#if 0
printf ("In SageInterface::querySubTree(): before initialization loop \n");
#endif
for (Rose_STL_Container<SgNode*>::const_iterator i = nodes.begin(); i != nodes.end(); ++i, ++count)
{
#if 0
printf ("In SageInterface::querySubTree(): in loop: count = %d \n",count);
#endif
NodeType* node = dynamic_cast<NodeType*>(*i);
ROSE_ASSERT (node);
result[count] = node;
}
#if 0
printf ("Leaving SageInterface::querySubTree(): after initialization loop \n");
#endif
return result;
}
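// Usage sketch for querySubTree(): 'project' is an assumed SgProject* (any subtree root works);
// the default variant argument is derived from the requested node type.
//   std::vector<SgForStatement*> loops = querySubTree<SgForStatement>(project);
//   for (size_t i = 0; i < loops.size(); i++)
//   {
//     // each loops[i] is already downcast to SgForStatement*
//   }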
/*! \brief Returns STL vector of SgFile IR node pointers.
Demonstrates use of restricted traversal over just SgFile IR nodes.
*/
std::vector < SgFile * >generateFileList ();
/** Get the current SgProject IR Node.
*
* The library should never have more than one project and it asserts such. If no project has been created yet then this
* function returns the null pointer. */
ROSE_DLL_API SgProject * getProject();
//! \return the project associated with a node
SgProject * getProject(const SgNode * node);
//! Query memory pools to grab SgNode of a specified type
template <typename NodeType>
static std::vector<NodeType*> getSgNodeListFromMemoryPool()
{
// This function uses a memory pool traversal specific to the requested NodeType IR nodes
class MyTraversal : public ROSE_VisitTraversal
{
public:
std::vector<NodeType*> resultlist;
void visit ( SgNode* node)
{
NodeType* result = dynamic_cast<NodeType* > (node);
ROSE_ASSERT(result!= NULL);
if (result!= NULL)
{
resultlist.push_back(result);
}
};
virtual ~MyTraversal() {}
};
MyTraversal my_traversal;
NodeType::traverseMemoryPoolNodes(my_traversal);
return my_traversal.resultlist;
}
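// Usage sketch for getSgNodeListFromMemoryPool(): collects every node of the requested type
// currently allocated in the memory pools, independent of any particular file or subtree.
//   std::vector<SgFunctionDeclaration*> allFunctionDecls =
//       getSgNodeListFromMemoryPool<SgFunctionDeclaration>();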
/*! \brief top-down traversal from current node to find the main() function declaration
*/
ROSE_DLL_API SgFunctionDeclaration* findMain(SgNode* currentNode);
//! Find the last declaration statement within a scope (if any). This is often useful to decide where to insert another variable declaration statement. Pragma declarations are not treated as a declaration by default in this context.
SgStatement* findLastDeclarationStatement(SgScopeStatement * scope, bool includePragma = false);
//midend/programTransformation/partialRedundancyElimination/pre.h
//! Find referenced symbols within an expression
std::vector<SgVariableSymbol*> getSymbolsUsedInExpression(SgExpression* expr);
//! Find break statements inside a particular statement, stopping at nested loops or switches
/*! Loops and switch statements define their own contexts for break
statements. The function will stop immediately if run on a loop or switch
statement. If fortranLabel is non-empty, breaks (EXITs) to that label within
nested loops are included in the returned list.
*/
std::vector<SgBreakStmt*> findBreakStmts(SgStatement* code, const std::string& fortranLabel = "");
//! Find all continue statements inside a particular statement, stopping at nested loops
/*! Nested loops define their own contexts for continue statements. The
function will stop immediately if run on a loop
statement. If fortranLabel is non-empty, continues (CYCLEs) to that label
within nested loops are included in the returned list.
*/
std::vector<SgContinueStmt*> findContinueStmts(SgStatement* code, const std::string& fortranLabel = "");
std::vector<SgGotoStatement*> findGotoStmts(SgStatement* scope, SgLabelStatement* l);
std::vector<SgStatement*> getSwitchCases(SgSwitchStatement* sw);
//! Collect all variable references in a subtree
void collectVarRefs(SgLocatedNode* root, std::vector<SgVarRefExp* >& result);
//! Top-down traversal of a subtree from root to find the first declaration given its name, scope (optional, can be NULL), and defining or nondefining flag.
template <typename T>
T* findDeclarationStatement(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining)
{
bool found = false;
#if 0
printf ("In findDeclarationStatement(): root = %p \n",root);
printf ("In findDeclarationStatement(): name = %s \n",name.c_str());
printf ("In findDeclarationStatement(): scope = %p \n",scope);
printf ("In findDeclarationStatement(): isDefining = %s \n",isDefining ? "true" : "false");
#endif
// Do we really want a NULL pointer to be acceptable input to this function?
// Maybe we should have an assertion that it is non-null?
if (!root) return NULL;
T* decl = dynamic_cast<T*>(root);
#if 0
printf ("In findDeclarationStatement(): decl = %p \n",decl);
#endif
if (decl != NULL)
{
if (scope)
{
if ((decl->get_scope() == scope) && (decl->search_for_symbol_from_symbol_table()->get_name() == name))
{
found = true;
}
}
else // Liao 2/9/2010. We should allow NULL scope
{
#if 0
// DQ (12/6/2016): Include this in the debugging code to avoid a compiler warning about an unused variable.
SgSymbol* symbol = decl->search_for_symbol_from_symbol_table();
printf ("In findDeclarationStatement(): decl->search_for_symbol_from_symbol_table() = %p \n",symbol);
printf ("In findDeclarationStatement(): decl->search_for_symbol_from_symbol_table()->get_name() = %s \n",symbol->get_name().str());
#endif
if (decl->search_for_symbol_from_symbol_table()->get_name() == name)
{
found = true;
}
}
}
if (found)
{
if (isDefining)
{
#if 0
printf ("In findDeclarationStatement(): decl->get_firstNondefiningDeclaration() = %p \n",decl->get_firstNondefiningDeclaration());
printf ("In findDeclarationStatement(): decl->get_definingDeclaration() = %p \n",decl->get_definingDeclaration());
#endif
ROSE_ASSERT (decl->get_definingDeclaration() != NULL);
#if 0
printf ("In findDeclarationStatement(): returing decl->get_definingDeclaration() = %p \n",decl->get_definingDeclaration());
#endif
return dynamic_cast<T*> (decl->get_definingDeclaration());
}
else
{
#if 0
printf ("In findDeclarationStatement(): returing decl = %p \n",decl);
#endif
return decl;
}
}
std::vector<SgNode*> children = root->get_traversalSuccessorContainer();
#if 0
printf ("In findDeclarationStatement(): children.size() = %zu \n",children.size());
#endif
// DQ (4/10/2016): Note that if we are searching for a function member that has its defining
// declaration defined outside of the class then it will not be found in the child list.
for (std::vector<SgNode*>::const_iterator i = children.begin(); i != children.end(); ++i)
{
T* target = findDeclarationStatement<T> (*i,name,scope,isDefining);
if (target)
{
return target;
}
}
return NULL;
}
//! Top-down traversal of a subtree from root to find the first function declaration matching the given name, scope (optional, can be NULL), and defining or nondefining flag. This is an instantiation of findDeclarationStatement<T>.
SgFunctionDeclaration* findFunctionDeclaration(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining);
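// Usage sketch for findFunctionDeclaration(): 'root' is an assumed SgNode* (e.g. an SgProject*);
// passing a NULL scope skips the scope check and a 'true' flag asks for the defining declaration.
//   SgFunctionDeclaration* definingDecl = findFunctionDeclaration(root, "foo", NULL, true);
//   if (definingDecl == NULL)
//   {
//     // no defining declaration named "foo" was found under root
//   }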
#if 0 //TODO
// 1. preorder traversal from current SgNode till find next SgNode of type V_SgXXX
// until reach the end node
SgNode* getNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
// 2. return all nodes of type VariantT following the source node
std::vector<SgNode*> getAllNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name Bottom up search
\brief Backwards traverse through the AST to find a node, findEnclosingXXX()
*/
// remember to put const to all arguments.
/** Find a node by type using upward traversal.
*
* Traverse backward through a specified node's ancestors, starting with the node's parent and progressing to more distant
* ancestors, to find the first node matching the specified or derived type. If @p includingSelf is true then the
* starting node, @p astNode, is returned if its type matches, otherwise the search starts at the parent of @p astNode.
*
* For the purposes of this function, the parent (P) of an SgDeclarationStatement node (N) is considered to be the first
* non-defining declaration of N if N has both a defining declaration and a first non-defining declaration and the defining
* declaration is different than the first non-defining declaration.
*
* If no ancestor of the requisite type or its subtypes is found then this function returns a null pointer.
*
* If @p astNode is the null pointer, then the return value is a null pointer. That is, if there is no node, then there cannot
* be an enclosing node of the specified type. */
template <typename NodeType>
NodeType* getEnclosingNode(const SgNode* astNode, const bool includingSelf = false)
{
#define DEBUG_GET_ENCLOSING_NODE 0
#if 1
// DQ (12/31/2019): This version does not detect a cycle that Robb's version detects in processing Cxx11_tests/test2016_23.C.
// This will have to be investigated separately from the issue I am working on currently.
// DQ (10/20/2012): This is the older version of this implementation. Until I am sure that
// the newer version (below) is what we want to use I will resolve this conflict by keeping
// the previous version in place.
if (NULL == astNode)
{
return NULL;
}
if ( (includingSelf ) && (dynamic_cast<const NodeType*>(astNode)) )
{
return const_cast<NodeType*>(dynamic_cast<const NodeType*> (astNode));
}
// DQ (3/5/2012): Check for reference to self...
ROSE_ASSERT(astNode->get_parent() != astNode);
SgNode* parent = astNode->get_parent();
// DQ (3/5/2012): Check for loops that will cause infinite loops.
SgNode* previouslySeenParent = parent;
bool foundCycle = false;
int counter = 0;
#if DEBUG_GET_ENCLOSING_NODE
printf ("In getEnclosingNode(): previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
while ( (foundCycle == false) && (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
{
ROSE_ASSERT(parent->get_parent() != parent);
#if DEBUG_GET_ENCLOSING_NODE
printf (" --- parent = %p = %s \n",parent,parent->class_name().c_str());
printf (" --- --- parent->get_parent() = %p = %s \n",parent->get_parent(),parent->get_parent()->class_name().c_str());
#endif
#if 1
// DQ (1/8/2020): ROSE-82 (on RZ) This limit needs to be larger; increasing it to 500 was enough
// for a specific code with a long chain of if-then-else nesting. So to make this sufficient for more
// general code we have increased the limit to 100,000. Note that 50 was not enough for real code,
// but was enough for our regression tests.
// DQ (12/30/2019): This is added to support detection of infinite loops over parent pointers.
// if (counter >= 500)
if (counter >= 100000)
{
printf ("Exiting: In getEnclosingNode(): loop limit exceeded: counter = %d \n",counter);
ROSE_ASSERT(false);
}
#endif
parent = parent->get_parent();
// DQ (3/5/2012): Check for loops that will cause infinite loops.
// ROSE_ASSERT(parent != previouslySeenParent);
if (parent == previouslySeenParent)
{
foundCycle = true;
}
counter++;
}
#if DEBUG_GET_ENCLOSING_NODE
printf ("previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
parent = previouslySeenParent;
SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
if (declarationStatement != NULL)
{
#if 0
printf ("Found a SgDeclarationStatement \n");
#endif
SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
printf (" --- declarationStatement = %p \n",declarationStatement);
printf (" --- definingDeclaration = %p \n",definingDeclaration);
if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
{
#if 0
printf ("Found a nondefining declaration so use the non-defining declaration instead \n");
#endif
// DQ (10/19/2012): Use the defining declaration instead.
// parent = firstNondefiningDeclaration;
parent = definingDeclaration;
}
}
#if 0
printf ("reset: previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
// DQ (10/19/2012): This branch is just to document the cycle that was previously detected, it is for
// debugging only. Thus it only makes sense for it to be executed when "(foundCycle == true)". However,
// this will have to be revisited later since it appears clear that it is a problem for the binary analysis
// work when it is visited for this case. Since the cycle is detected, but there is no assertion on the
// cycle, we don't exit when a cycle is identified (which is the point of the code below).
// Note also that I have fixed the code (above and below) to only chase pointers through defining
// declarations (where they exist), this is important since non-defining declarations can be almost
// anywhere (and thus chasing them can make it appear that there are cycles where there are none
// (I think); test2012_234.C demonstrates an example of this.
// DQ (10/9/2012): Robb has suggested this change to fix the binary analysis work.
// if (foundCycle == true)
if (foundCycle == false)
{
while ( (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
{
ROSE_ASSERT(parent->get_parent() != parent);
#if 0
printf ("In getEnclosingNode() (2nd try): parent = %p = %s \n",parent,parent->class_name().c_str());
if (parent->get_file_info() != NULL)
parent->get_file_info()->display("In getEnclosingNode() (2nd try): debug");
#endif
SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
if (declarationStatement != NULL)
{
#if DEBUG_GET_ENCLOSING_NODE
printf ("Found a SgDeclarationStatement \n");
#endif
SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
printf (" --- declarationStatement = %p = %s \n",declarationStatement,(declarationStatement != NULL) ? declarationStatement->class_name().c_str() : "null");
printf (" --- definingDeclaration = %p \n",definingDeclaration);
if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
{
#if 0
printf ("Found a nondefining declaration so use the firstNondefining declaration instead \n");
#endif
// DQ (10/19/2012): Use the defining declaration instead.
// parent = firstNondefiningDeclaration;
parent = definingDeclaration;
}
}
parent = parent->get_parent();
#if 1
// DQ (3/5/2012): Check for loops that will cause infinite loops.
ROSE_ASSERT(parent != previouslySeenParent);
#else
printf ("WARNING::WARNING::WARNING commented out assertion for parent != previouslySeenParent \n");
if (parent == previouslySeenParent)
break;
#endif
}
}
return const_cast<NodeType*>(dynamic_cast<const NodeType*> (parent));
#else
// DQ (10/20/2012): Using Robb's newer version with my modification to use the definingDeclaration rather than firstNondefiningDeclaration (below).
// Find the parent of specified type, but watch out for cycles in the ancestry (which would cause an infinite loop).
// Cast away const because isSg* functions aren't defined for const node pointers; and our return is not const.
SgNode *node = const_cast<SgNode*>(!astNode || includingSelf ? astNode : astNode->get_parent());
std::set<const SgNode*> seen; // nodes we've seen, in order to detect cycles
while (node) {
if (NodeType *found = dynamic_cast<NodeType*>(node))
return found;
// FIXME: Cycle detection could be moved elsewhere so we don't need to do it on every call. [RPM 2012-10-09]
// DQ (12/30/2019): Provide more detail in error message.
if (seen.insert(node).second == false)
{
printf ("Error: node is already in set and defines a cycle: node = %p = %s \n",node,node->class_name().c_str());
std::set<const SgNode*>::const_iterator i = seen.begin();
while (i != seen.end())
{
const SgNode* element = *i;
printf (" --- seen element: element = %p = %s \n",element,element->class_name().c_str());
i++;
}
printf ("Exiting after error! \n");
ROSE_ASSERT(false);
}
// ROSE_ASSERT(seen.insert(node).second);
// Traverse to parent (declaration statements are a special case)
if (SgDeclarationStatement *declarationStatement = isSgDeclarationStatement(node)) {
SgDeclarationStatement *definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement *firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
if (definingDeclaration && firstNondefiningDeclaration && declarationStatement != firstNondefiningDeclaration) {
// DQ (10/19/2012): Use the defining declaration instead.
// node = firstNondefiningDeclaration;
node = definingDeclaration;
}
} else {
node = node->get_parent();
}
}
return NULL;
#endif
}
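// Usage sketch for getEnclosingNode(): 'varRef' is an assumed SgVarRefExp*; the call walks parent
// pointers until it reaches a node of the requested type, or returns NULL if none exists.
//   SgFunctionDefinition* enclosingDef = getEnclosingNode<SgFunctionDefinition>(varRef);
//   if (enclosingDef == NULL)
//   {
//     // varRef is not located inside any function definition
//   }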
//! Find enclosing source file node
ROSE_DLL_API SgSourceFile* getEnclosingSourceFile(SgNode* n, const bool includingSelf=false);
//! Get the closest scope from astNode. Return astNode if it is already a scope.
ROSE_DLL_API SgScopeStatement* getScope(const SgNode* astNode);
//! Get the enclosing scope from a node n
ROSE_DLL_API SgScopeStatement* getEnclosingScope(SgNode* n, const bool includingSelf=false);
//! Traverse back through a node's parents to find the enclosing global scope
ROSE_DLL_API SgGlobal* getGlobalScope( const SgNode* astNode);
//! Find the function definition
ROSE_DLL_API SgFunctionDefinition* getEnclosingProcedure(SgNode* n, const bool includingSelf=false);
ROSE_DLL_API SgFunctionDefinition* getEnclosingFunctionDefinition(SgNode* astNode, const bool includingSelf=false);
//! Find the closest enclosing statement, including the given node
ROSE_DLL_API SgStatement* getEnclosingStatement(SgNode* n);
//! Find the closest switch outside a given statement (normally used for case and default statements)
ROSE_DLL_API SgSwitchStatement* findEnclosingSwitch(SgStatement* s);
//! Find enclosing OpenMP clause body statement from s. If s is already one, return it directly.
ROSE_DLL_API SgOmpClauseBodyStatement* findEnclosingOmpClauseBodyStatement(SgStatement* s);
//! Find the closest loop outside the given statement; if fortranLabel is not empty, the Fortran label of the loop must be equal to it
ROSE_DLL_API SgScopeStatement* findEnclosingLoop(SgStatement* s, const std::string& fortranLabel = "", bool stopOnSwitches = false);
//! Find the enclosing function declaration, including its derived instances like isSgProcedureHeaderStatement, isSgProgramHeaderStatement, and isSgMemberFunctionDeclaration.
ROSE_DLL_API SgFunctionDeclaration * getEnclosingFunctionDeclaration (SgNode * astNode, const bool includingSelf=false);
//roseSupport/utility_functions.h
//! get the SgFile node from current node
ROSE_DLL_API SgFile* getEnclosingFileNode (SgNode* astNode );
//! Get the initializer containing an expression if it is within an initializer.
ROSE_DLL_API SgInitializer* getInitializerOfExpression(SgExpression* n);
//! Get the closest class definition enclosing the specified AST node,
ROSE_DLL_API SgClassDefinition* getEnclosingClassDefinition(SgNode* astnode, const bool includingSelf=false);
//! Get the closest class declaration enclosing the specified AST node,
ROSE_DLL_API SgClassDeclaration* getEnclosingClassDeclaration( SgNode* astNode );
// DQ (2/7/2019): Adding support for name qualification of variable references associated with SgPointerMemberType function parameters.
//! Get the enclosing SgExprListExp (used as part of function argument index evaluation in subexpressions).
ROSE_DLL_API SgExprListExp* getEnclosingExprListExp(SgNode* astNode, const bool includingSelf = false);
// DQ (2/7/2019): Need a function to return when an expression is in an expression subtree.
// This is part of index evaluation of expressions in function argument lists, but likely useful elsewhere as well.
ROSE_DLL_API bool isInSubTree(SgExpression* subtree, SgExpression* exp);
// DQ (2/7/2019): Need a function to return the SgFunctionDeclaration from a SgFunctionCallExp.
ROSE_DLL_API SgFunctionDeclaration* getFunctionDeclaration ( SgFunctionCallExp* functionCallExp );
// DQ (2/17/2019): Generalizing this support for SgVarRefExp and SgMemberFunctionRefExp nodes.
// DQ (2/8/2019): Adding support for detecting when to use added name qualification for pointer-to-member expressions.
ROSE_DLL_API bool isDataMemberReference(SgVarRefExp* varRefExp);
// ROSE_DLL_API bool isAddressTaken(SgVarRefExp* varRefExp);
ROSE_DLL_API bool isAddressTaken(SgExpression* refExp);
// DQ (2/17/2019): Adding support for detecting when to use added name qualification for membr function references.
ROSE_DLL_API bool isMemberFunctionMemberReference(SgMemberFunctionRefExp* memberFunctionRefExp);
// DQ (2/15/2019): Adding support for detecting which class a member reference is being made from.
// ROSE_DLL_API SgClassType* getClassTypeForDataMemberReference(SgVarRefExp* varRefExp);
// ROSE_DLL_API std::list<SgClassType*> getClassTypeChainForDataMemberReference(SgVarRefExp* varRefExp);
ROSE_DLL_API std::list<SgClassType*> getClassTypeChainForMemberReference(SgExpression* refExp);
ROSE_DLL_API std::set<SgNode*> getFrontendSpecificNodes();
// DQ (2/17/2019): Display the shared nodes in the AST for debugging.
ROSE_DLL_API void outputSharedNodes( SgNode* node );
// TODO
#if 0
SgNode * getEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
std::vector<SgNode *> getAllEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
SgVariableDeclaration* findVariableDeclaration( const string& varname)
SgClassDeclaration* getEnclosingClassDeclaration( const SgNode* astNode);
// e.g. for some expression, find its parent statement
SgStatement* getEnclosingStatement(const SgNode* astNode);
SgSwitchStatement* getEnclosingSwitch(SgStatement* s);
SgModuleStatement* getEnclosingModuleStatement( const SgNode* astNode);
// used to build a variable reference for compiler generated code in current scope
SgSymbol * findReachingDefinition (SgScopeStatement* startScope, SgName &name);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Walk and Traversal
\brief
*/
// Liao, 1/9/2008
/*!
\brief return the first global scope under current project
*/
ROSE_DLL_API SgGlobal * getFirstGlobalScope(SgProject *project);
/*!
\brief get the last statement within a scope, return NULL if it does not exist
*/
ROSE_DLL_API SgStatement* getLastStatement(SgScopeStatement *scope);
//! Get the first statement within a scope, return NULL if it does not exist. Skip compiler-generated statements by default. Transformation-generated statements are counted, excluding those that will not be output by the unparser.
ROSE_DLL_API SgStatement* getFirstStatement(SgScopeStatement *scope,bool includingCompilerGenerated=false);
//!Find the first defining function declaration statement in a scope
ROSE_DLL_API SgFunctionDeclaration* findFirstDefiningFunctionDecl(SgScopeStatement* scope);
//! Get next statement within the same scope of current statement
ROSE_DLL_API SgStatement* getNextStatement(SgStatement * currentStmt);
//! Get previous statement of the current statement. It may return a previous statement of a parent scope by default (climbOutScope is true), otherwise only a previous statement of the same scope is returned.
ROSE_DLL_API SgStatement* getPreviousStatement(SgStatement * currentStmt, bool climbOutScope = true);
#if 0 //TODO
// preorder traversal from current SgNode till find next SgNode of type V_SgXXX
SgNode* getNextSgNode( const SgNode* currentNode, VariantT=V_SgNode);
#endif
// DQ (11/15/2018): Adding support for traversals over the include file tree.
//! return path prefix for subtree of include files.
void listHeaderFiles ( SgIncludeFile* includeFile );
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Comparison
\brief Compare AST nodes, subtree, etc
*/
//! Check if a SgIntVal node has a given value
ROSE_DLL_API bool isEqualToIntConst(SgExpression* e, int value);
//! Check if two function declarations refer to the same one. Two function declarations are the same when they are a) identical, b) have the same name in C, or c) have the same qualified name and mangled name in C++. A nondefining (prototype) declaration and a defining declaration of the same function are treated as the same.
/*!
* There is a similar function bool compareFunctionDeclarations(SgFunctionDeclaration *f1, SgFunctionDeclaration *f2) from Classhierarchy.C
*/
ROSE_DLL_API bool isSameFunction(SgFunctionDeclaration* func1, SgFunctionDeclaration* func2);
//! Check if a statement is the last statement within its enclosing scope
ROSE_DLL_API bool isLastStatement(SgStatement* stmt);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST insert, removal, and replacement
\brief Add, remove,and replace AST
scope->append_statement(), exprListExp->append_expression(), etc. are not enough to handle the side effects on parent pointers, symbol tables, preprocessing info, defining/nondefining pointers, etc.
*/
// DQ (2/24/2009): Simple function to delete an AST subtree (used in outlining).
//! Function to delete AST subtree's nodes only, users must take care of any dangling pointers, symbols or types that result.
ROSE_DLL_API void deleteAST(SgNode* node);
//! Special purpose function for deleting AST expression trees containing valid original expression trees in constant folded expressions (for internal use only).
ROSE_DLL_API void deleteExpressionTreeWithOriginalExpressionSubtrees(SgNode* root);
// DQ (2/25/2009): Added new function to support outliner.
//! Move statements in first block to the second block (preserves order and rebuilds the symbol table).
ROSE_DLL_API void moveStatementsBetweenBlocks ( SgBasicBlock* sourceBlock, SgBasicBlock* targetBlock );
//! Move a variable declaration to a new scope, handle symbol, special scopes like For loop, etc.
ROSE_DLL_API void moveVariableDeclaration(SgVariableDeclaration* decl, SgScopeStatement* target_scope);
//! Append a statement to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);
//! Append a statement to the end of SgForInitStatement
ROSE_DLL_API void appendStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt);
//! Append a list of statements to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);
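// Usage sketch for appendStatement(): 'newStmt' is an assumed SgStatement* (typically built with
// the SageBuilder interface, which is not declared in this header) and 'funcDef' is an assumed
// SgFunctionDefinition*; the call also fixes up parent pointers, symbols, and preprocessing info.
//   SgBasicBlock* body = funcDef->get_body();
//   appendStatement(newStmt, body);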
// DQ (2/6/2009): Added function to support outlining into separate file.
//! Append a copy ('decl') of a function ('original_statement') into 'scope', including any referenced declarations required if the scope is within a compiler-generated file. If excludeHeaderFiles is set to true, all referenced declarations, including those from headers, are inserted directly (the new file will not include any headers).
ROSE_DLL_API void appendStatementWithDependentDeclaration( SgDeclarationStatement* decl, SgGlobal* scope, SgStatement* original_statement, bool excludeHeaderFiles );
//! Prepend a statement to the beginning of the current scope, handling side
//! effects as appropriate
ROSE_DLL_API void prependStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);
//! Prepend a statement to the beginning of SgForInitStatement
ROSE_DLL_API void prependStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt);
//! prepend a list of statements to the beginning of the current scope,
//! handling side effects as appropriate
ROSE_DLL_API void prependStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);
//! Check if a scope statement has a simple child statement list,
//! so that inserting additional statements under the scope is straightforward and unambiguous.
//! For example, SgBasicBlock has a simple statement list while SgIfStmt does not.
ROSE_DLL_API bool hasSimpleChildrenList (SgScopeStatement* scope);
//! Insert a statement before or after the target statement within the target's scope. Move around preprocessing info automatically
ROSE_DLL_API void insertStatement(SgStatement *targetStmt, SgStatement* newStmt, bool insertBefore= true, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements before or after the target statement within the
//target's scope
ROSE_DLL_API void insertStatementList(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts, bool insertBefore= true);
//! Insert a statement before a target statement
ROSE_DLL_API void insertStatementBefore(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements before a target statement
ROSE_DLL_API void insertStatementListBefore(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts);
//! Insert a statement after a target statement, Move around preprocessing info automatically by default
ROSE_DLL_API void insertStatementAfter(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements after a target statement
ROSE_DLL_API void insertStatementListAfter(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmt);
//! Insert a statement after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(SgStatement* stmt, SgScopeStatement* scope);
//! Insert a list of statements after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(std::vector<SgStatement*> stmt_list, SgScopeStatement* scope);
//! Insert a statement before the first non-declaration statement in a scope. If the scope has no non-declaration statements
// then the statement is inserted at the end of the scope.
ROSE_DLL_API void insertStatementBeforeFirstNonDeclaration(SgStatement *newStmt, SgScopeStatement *scope,
bool movePreprocessingInfo=true);
//! Insert statements before the first non-declaration statement in a scope. If the scope has no non-declaration statements
//then the new statements are inserted at the end of the scope.
ROSE_DLL_API void insertStatementListBeforeFirstNonDeclaration(const std::vector<SgStatement*> &newStmts, SgScopeStatement *scope);
// DQ (11/21/2018): We need to sometimes insert something after the last statement of the collection from rose_edg_required_macros_and_functions.h.
ROSE_DLL_API SgStatement* lastFrontEndSpecificStatement( SgGlobal* globalScope );
//! Remove a statement from its attach point of the AST. Automatically keep its associated preprocessing information at the original place after the removal. The statement is still in memory and it is up to the users to decide if the removed one will be inserted somewhere else or released from memory (deleteAST()).
ROSE_DLL_API void removeStatement(SgStatement* stmt, bool autoRelocatePreprocessingInfo = true);
//! Deep delete a sub AST tree. It uses postorder traversal to delete each child node. Users must take care of any dangling pointers, symbols or types that result. This is identical to deleteAST()
ROSE_DLL_API void deepDelete(SgNode* root);
//! Replace a statement with another. Move preprocessing information from oldStmt to newStmt if requested.
ROSE_DLL_API void replaceStatement(SgStatement* oldStmt, SgStatement* newStmt, bool movePreprocessinInfo = false);
//! Replace an anchor node with a specified pattern subtree with optional SgVariantExpression. All SgVariantExpression in the pattern will be replaced with copies of the anchor node.
ROSE_DLL_API SgNode* replaceWithPattern (SgNode * anchor, SgNode* new_pattern);
//! Replace all variable references to an old symbol in a scope with references to a new symbol.
// Essentially replace variable a with b.
ROSE_DLL_API void replaceVariableReferences(SgVariableSymbol* old_sym, SgVariableSymbol* new_sym, SgScopeStatement * scope );
// DQ (11/12/2018): Adding test to avoid issues that we can't test for in the unparsing of header files using the token based unparsing.
//! If header file unparsing and token-based unparsing are used, then some statements in header files
//! used with the same name and different include syntax can't be transformed. This is currently because
//! there is no way to generally test the resulting transformed code generated by ROSE.
ROSE_DLL_API bool statementCanBeTransformed(SgStatement* stmt);
/** Given an expression, generates a temporary variable whose initializer optionally evaluates
* that expression. Then, the var reference expression returned can be used instead of the original
* expression. The temporary variable created can be reassigned to the expression by the returned SgAssignOp;
* this can be used when the expression the variable represents needs to be evaluated. NOTE: This handles
* reference types correctly by using pointer types for the temporary.
* @param expression Expression which will be replaced by a variable
* @param scope scope in which the temporary variable will be generated
* @param reEvaluate an assignment op to reevaluate the expression. Leave NULL if not needed
* @return declaration of the temporary variable, and a variable reference expression to use instead of
* the original expression. */
std::pair<SgVariableDeclaration*, SgExpression* > createTempVariableForExpression(SgExpression* expression,
SgScopeStatement* scope, bool initializeInDeclaration, SgAssignOp** reEvaluate = NULL);
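// Usage sketch for createTempVariableForExpression(): 'exp' is an assumed SgExpression* and
// 'scope' an assumed SgScopeStatement*. The temporary is initialized in its declaration, the
// declaration is inserted before the enclosing statement, and the original expression is then
// replaced by the returned reference (conservatively kept alive here via keepOldExp=true).
//   std::pair<SgVariableDeclaration*, SgExpression*> temp =
//       createTempVariableForExpression(exp, scope, true);
//   insertStatementBefore(getEnclosingStatement(exp), temp.first);
//   replaceExpression(exp, temp.second, true /* keepOldExp */);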
/* This function creates a temporary variable for a given expression in the given scope
This is different from SageInterface::createTempVariableForExpression in that it does not
try to be smart about creating pointers to reference types and so on. The temp is initialized to the expression.
The caller is responsible for setting the parent of SgVariableDeclaration since buildVariableDeclaration
may not set_parent() when the scope stack is empty. See programTransformation/extractFunctionArgumentsNormalization/ExtractFunctionArguments.C for sample usage.
@param expression Expression which will be replaced by a variable
@param scope scope in which the temporary variable will be generated
*/
std::pair<SgVariableDeclaration*, SgExpression*> createTempVariableAndReferenceForExpression
(SgExpression* expression, SgScopeStatement* scope);
//! Append an argument to SgFunctionParameterList, transparently set parent,scope, and symbols for arguments when possible
/*! We recommend building the SgFunctionParameterList before building a function declaration.
However, it is still allowed to append new arguments to existing function declarations.
\todo The function type and function symbol also need attention.
*/
ROSE_DLL_API SgVariableSymbol* appendArg(SgFunctionParameterList *, SgInitializedName*);
//!Prepend an argument to SgFunctionParameterList
ROSE_DLL_API SgVariableSymbol* prependArg(SgFunctionParameterList *, SgInitializedName*);
//! Append an expression to a SgExprListExp, set the parent pointer also
ROSE_DLL_API void appendExpression(SgExprListExp *, SgExpression*);
//! Append an expression list to a SgExprListExp, set the parent pointers also
ROSE_DLL_API void appendExpressionList(SgExprListExp *, const std::vector<SgExpression*>&);
//! Set parameter list for a function declaration, considering existing parameter list etc.
template <class actualFunction>
void setParameterList(actualFunction *func,SgFunctionParameterList *paralist) {
// TODO consider the difference between C++ and Fortran
// fixup the scope of arguments,no symbols for nondefining function declaration's arguments
// DQ (11/25/2011): templated function so that we can handle both
// SgFunctionDeclaration and SgTemplateFunctionDeclaration (and their associated member
// function derived classes).
ROSE_ASSERT(func != NULL);
ROSE_ASSERT(paralist != NULL);
#if 0
// At this point we don't have cerr and endl defined, so comment this code out.
// Warn to users if a paralist is being shared
if (paralist->get_parent() !=NULL)
{
cerr << "Waring! Setting a used SgFunctionParameterList to function: "
<< (func->get_name()).getString()<<endl
<< " Sharing parameter lists can corrupt symbol tables!"<<endl
<< " Please use deepCopy() to get an exclusive parameter list for each function declaration!"<<endl;
// ROSE_ASSERT(false);
}
#endif
// Liao,2/5/2008 constructor of SgFunctionDeclaration will automatically generate SgFunctionParameterList, so be cautious when set new paralist!!
if (func->get_parameterList() != NULL)
{
if (func->get_parameterList() != paralist)
{
delete func->get_parameterList();
}
}
func->set_parameterList(paralist);
paralist->set_parent(func);
// DQ (5/15/2012): Need to set the declptr in each SgInitializedName IR node.
// This is needed to support the AST Copy mechanism (at least). The files: test2005_150.C,
// test2012_81.C and testcode2012_82.C demonstrate this problem.
SgInitializedNamePtrList & args = paralist->get_args();
for (SgInitializedNamePtrList::iterator i = args.begin(); i != args.end(); i++)
{
(*i)->set_declptr(func);
}
}
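// Usage sketch for setParameterList(): 'funcDecl' is an assumed SgFunctionDeclaration*;
// buildFunctionParameterList(), buildInitializedName(), and buildIntType() are assumed to come
// from the SageBuilder interface, which is not declared in this header.
//   SgFunctionParameterList* params = SageBuilder::buildFunctionParameterList();
//   appendArg(params, SageBuilder::buildInitializedName("n", SageBuilder::buildIntType()));
//   setParameterList(funcDecl, params);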
//! Set a pragma of a pragma declaration. Handles memory release for a preexisting pragma, and sets the parent pointer.
ROSE_DLL_API void setPragma(SgPragmaDeclaration* decl, SgPragma *pragma);
//! Replace an expression with another, used for variable reference substitution and similar transformations. The old expression can be deleted (the default) or kept.
ROSE_DLL_API void replaceExpression(SgExpression* oldExp, SgExpression* newExp, bool keepOldExp=false);
//! Replace a given expression with a list of statements produced by a generator
ROSE_DLL_API void replaceExpressionWithStatement(SgExpression* from,
SageInterface::StatementGenerator* to);
//! Similar to replaceExpressionWithStatement, but with more restrictions.
//! Assumptions: from is not within the test of a loop or ifStmt, not currently traversing from or the statement it is in
ROSE_DLL_API void replaceSubexpressionWithStatement(SgExpression* from,
SageInterface::StatementGenerator* to);
//! Set the operand for expressions with a single operand, such as unary expressions. Handles file info, lvalue, pointer downcasting, parent pointers, etc.
ROSE_DLL_API void setOperand(SgExpression* target, SgExpression* operand);
//!set left hand operand for binary expressions, transparently downcasting target expressions when necessary
ROSE_DLL_API void setLhsOperand(SgExpression* target, SgExpression* lhs);
//!Set the right hand operand for binary expressions
ROSE_DLL_API void setRhsOperand(SgExpression* target, SgExpression* rhs);
//! Set original expression trees to NULL for SgValueExp or SgCastExp expressions, so you can change the value and have it unparsed correctly.
ROSE_DLL_API void removeAllOriginalExpressionTrees(SgNode* top);
// DQ (1/25/2010): Added support for directories
//! Move file to be generated in a subdirectory (will be generated by the unparser).
ROSE_DLL_API void moveToSubdirectory ( std::string directoryName, SgFile* file );
//! Supporting function to comment relocation in insertStatement() and removeStatement().
ROSE_DLL_API SgStatement* findSurroundingStatementFromSameFile(SgStatement* targetStmt, bool & surroundingStatementPreceedsTargetStatement);
//! Relocate comments and CPP directives from one statement to another.
ROSE_DLL_API void moveCommentsToNewStatement(SgStatement* sourceStatement, const std::vector<int> & indexList, SgStatement* targetStatement, bool surroundingStatementPreceedsTargetStatement);
// DQ (7/19/2015): This is required to support general unparsing of template instantiations for the GNU g++
// compiler, which does not permit name qualification to be used to express the namespace
// where a template instantiation would be placed. Such name qualification would also sometimes require
// global qualification, which is also not allowed by the GNU g++ compiler. These issues appear to be
// specific to the GNU compiler versions, at least versions 4.4 through 4.8.
//! Relocate the declaration to be explicitly represented in its associated namespace (required for some backend compilers to process template instantiations).
ROSE_DLL_API void moveDeclarationToAssociatedNamespace ( SgDeclarationStatement* declarationStatement );
ROSE_DLL_API bool isTemplateInstantiationNode(SgNode* node);
ROSE_DLL_API void wrapAllTemplateInstantiationsInAssociatedNamespaces(SgProject* root);
// DQ (12/1/2015): Adding support for fixing up internal data structures that have references to statements (e.g. macro expansions).
ROSE_DLL_API void resetInternalMapsForTargetStatement(SgStatement* sourceStatement);
// DQ (6/7/2019): Add support for transforming function definitions to function prototypes in a subtree.
// We might have to make this specific to a file (only traversing the functions in that file).
/*!\brief XXX
* This function operates on the new file used to support outlined function definitions.
* We use a copy of the file where the code will be outlined FROM, so that if there are references to
* declarations in the outlined code we can support the outlined code with those references. This
* approach has the added advantage of also supporting the same include file tree as the original
* file where the outlined code is being taken from.
*/
ROSE_DLL_API void convertFunctionDefinitionsToFunctionPrototypes(SgNode* node);
// DQ (11/10/2019): Lower level support for convertFunctionDefinitionsToFunctionPrototypes().
ROSE_DLL_API void replaceDefiningFunctionDeclarationWithFunctionPrototype ( SgFunctionDeclaration* functionDeclaration );
ROSE_DLL_API std::vector<SgFunctionDeclaration*> generateFunctionDefinitionsList(SgNode* node);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST repair, fix, and postprocessing.
\brief Mostly used internally when some AST pieces are built without knowing their target
scope/parent, especially during bottom-up construction of AST. The associated symbols,
parent and scope pointers cannot be set on construction then.
A set of utility functions is provided to
patch up the scope, parent, and symbol information for them when the target scope/parent becomes known.
*/
//! Connect variable reference to the right variable symbols when feasible, return the number of references being fixed.
/*! In AST translation, it is possible to build a variable reference before the variable
is declared. buildVarRefExp() will use a fake initialized name and symbol as placeholders
to get the work done. Users should call fixVariableReferences() when the AST is complete and all
variable declarations are in place.
*/
ROSE_DLL_API int fixVariableReferences(SgNode* root, bool cleanUnusedSymbol=true);
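// Illustrative sketch (the SageBuilder/SageInterface calls and their signatures are assumed here,
// not guaranteed by this header): a typical bottom-up sequence builds the reference first,
// declares the variable later, and then repairs the links once the AST is complete.
//    SgVarRefExp* ref = SageBuilder::buildVarRefExp("i", scope);                 // placeholder symbol
//    // ... build and insert statements that use 'ref' ...
//    SgVariableDeclaration* decl = SageBuilder::buildVariableDeclaration("i", SageBuilder::buildIntType(), NULL, scope);
//    SageInterface::appendStatement(decl, scope);
//    int numFixed = SageInterface::fixVariableReferences(scope);                 // rebinds 'ref' to the real symbol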
//!Patch up symbol, scope, and parent information when a SgVariableDeclaration's scope is known.
/*!
It is possible to build a variable declaration without knowing its scope information during bottom-up construction of the AST, though top-down construction is recommended in general.
In this case, we have to patch up the symbol table, scope, and parent information when the scope is known. This function is usually used internally within appendStatement() and insertStatement().
*/
ROSE_DLL_API void fixVariableDeclaration(SgVariableDeclaration* varDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement(), etc. when a struct declaration was built without knowing its target scope.
ROSE_DLL_API void fixStructDeclaration(SgClassDeclaration* structDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement(), etc. when a class declaration was built without knowing its target scope.
ROSE_DLL_API void fixClassDeclaration(SgClassDeclaration* classDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement(), etc. when a namespace declaration was built without knowing its target scope.
ROSE_DLL_API void fixNamespaceDeclaration(SgNamespaceDeclarationStatement* structDecl, SgScopeStatement* scope);
//! Fix the symbol table for SgLabelStatement. Used internally when the label is built without knowing its target scope. Neither parameter may be NULL.
ROSE_DLL_API void fixLabelStatement(SgLabelStatement* label_stmt, SgScopeStatement* scope);
//! Set a numerical label for a Fortran statement. The statement should have an enclosing function definition already. SgLabelSymbol and SgLabelRefExp are created transparently as needed.
ROSE_DLL_API void setFortranNumericLabel(SgStatement* stmt, int label_value,
SgLabelSymbol::label_type_enum label_type=SgLabelSymbol::e_start_label_type,
SgScopeStatement* label_scope=NULL);
//! Suggest next usable (non-conflicting) numeric label value for a Fortran function definition scope
ROSE_DLL_API int suggestNextNumericLabel(SgFunctionDefinition* func_def);
//! Fix the symbol table and set scope (only if scope in declaration is not already set).
ROSE_DLL_API void fixFunctionDeclaration(SgFunctionDeclaration* stmt, SgScopeStatement* scope);
//! Fix the symbol table and set scope (only if scope in declaration is not already set).
ROSE_DLL_API void fixTemplateDeclaration(SgTemplateDeclaration* stmt, SgScopeStatement* scope);
//! A wrapper containing fixes (fixVariableDeclaration(), fixStructDeclaration(), fixLabelStatement(), etc.) for all kinds of statements. Should be used before attaching the statement into the AST.
ROSE_DLL_API void fixStatement(SgStatement* stmt, SgScopeStatement* scope);
// DQ (6/11/2015): This reports the statements that are marked as transformed (used to debug the token-based unparsing).
//! This collects the statements that are marked as transformed (useful in debugging).
ROSE_DLL_API std::set<SgStatement*> collectTransformedStatements( SgNode* node );
//! This collects the statements that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging).
ROSE_DLL_API std::set<SgStatement*> collectModifiedStatements( SgNode* node );
//! This collects the SgLocatedNodes that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging).
ROSE_DLL_API std::set<SgLocatedNode*> collectModifiedLocatedNodes( SgNode* node );
// DQ (6/5/2019): Use the previously constructed set (above) to reset the IR nodes to be marked as isModified.
//! Use the set of IR nodes and set the isModified flag in each IR node to true.
ROSE_DLL_API void resetModifiedLocatedNodes(const std::set<SgLocatedNode*> & modifiedNodeSet);
// DQ (10/23/2018): Report nodes that are marked as modified.
ROSE_DLL_API void reportModifiedStatements(const std::string & label, SgNode* node);
// DQ (3/22/2019): Translate CPP directives from attached preprocessor information to CPP Directive Declaration IR nodes.
ROSE_DLL_API void translateToUseCppDeclarations( SgNode* n );
ROSE_DLL_API void translateScopeToUseCppDeclarations( SgScopeStatement* scope );
ROSE_DLL_API std::vector<SgC_PreprocessorDirectiveStatement*> translateStatementToUseCppDeclarations( SgStatement* statement, SgScopeStatement* scope);
ROSE_DLL_API void printOutComments ( SgLocatedNode* locatedNode );
ROSE_DLL_API bool skipTranslateToUseCppDeclaration( PreprocessingInfo* currentPreprocessingInfo );
// DQ (12/2/2019): Debugging support.
ROSE_DLL_API void outputFileIds( SgNode* node );
//@}
//! Update defining and nondefining links due to a newly introduced function declaration. Should be used after inserting the function into a scope.
/*! This function not only sets the defining and nondefining links of the newly introduced
 * function declaration inside a scope, but also updates the links of other declarations of
 * the same function accordingly, if there are any.
* Assumption: The function has already inserted/appended/prepended into the scope before calling this function.
*/
ROSE_DLL_API void updateDefiningNondefiningLinks(SgFunctionDeclaration* func, SgScopeStatement* scope);
//------------------------------------------------------------------------
//@{
/*! @name Advanced AST transformations, analyses, and optimizations
\brief Some complex but commonly used AST transformations.
*/
//! Collect all read and write references within stmt, which can be a function, a scope statement, or a single statement. Note that a reference can be both read and written, like i++
ROSE_DLL_API bool
collectReadWriteRefs(SgStatement* stmt, std::vector<SgNode*>& readRefs, std::vector<SgNode*>& writeRefs, bool useCachedDefUse=false);
//!Collect unique variables which are read or written within a statement. Note that a variable can be both read and written. The statement can be a function, a scope, or a single statement. For accesses to members of aggregate data, we return the coarse-grained aggregate memory object by default.
ROSE_DLL_API bool collectReadWriteVariables(SgStatement* stmt, std::set<SgInitializedName*>& readVars, std::set<SgInitializedName*>& writeVars, bool coarseGrain=true);
//!Collect read-only variables within a statement. The statement can be a function, a scope, or a single statement. For accesses to members of aggregate data, we return the coarse-grained aggregate memory object by default.
ROSE_DLL_API void collectReadOnlyVariables(SgStatement* stmt, std::set<SgInitializedName*>& readOnlyVars, bool coarseGrain=true);
//!Collect read-only variable symbols within a statement. The statement can be a function, a scope, or a single statement. For accesses to members of aggregate data, we return the coarse-grained aggregate memory object by default.
ROSE_DLL_API void collectReadOnlySymbols(SgStatement* stmt, std::set<SgVariableSymbol*>& readOnlySymbols, bool coarseGrain=true);
//! Check if a variable reference is used by its address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++
ROSE_DLL_API bool isUseByAddressVariableRef(SgVarRefExp* ref);
//! Collect variable references involving use by address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++
ROSE_DLL_API void collectUseByAddressVariableRefs (const SgStatement* s, std::set<SgVarRefExp* >& varSetB);
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
//!Call liveness analysis on an entire project
ROSE_DLL_API LivenessAnalysis * call_liveness_analysis(SgProject* project, bool debug=false);
//!get liveIn and liveOut variables for a for loop from liveness analysis result liv.
ROSE_DLL_API void getLiveVariables(LivenessAnalysis * liv, SgForStatement* loop, std::set<SgInitializedName*>& liveIns, std::set<SgInitializedName*> & liveOuts);
#endif
//!Recognize and collect reduction variables and operations within a C/C++ loop, following OpenMP 3.0 specification for allowed reduction variable types and operation types.
ROSE_DLL_API void ReductionRecognition(SgForStatement* loop, std::set< std::pair <SgInitializedName*, OmpSupport::omp_construct_enum> > & results);
//! Constant folding an AST subtree rooted at 'r' (replacing its children with their constant values, if applicable). Please be advised that constant folding on floating point computation may decrease the accuracy of floating point computations!
/*! It is a wrapper function for ConstantFolding::constantFoldingOptimization(). Note that only r's children are replaced with their corresponding constant values, not the input SgNode r itself. You have to call this upon an expression's parent node if you want to fold the expression. */
ROSE_DLL_API void constantFolding(SgNode* r);
//!Instrument(Add a statement, often a function call) into a function right before the return points, handle multiple return statements (with duplicated statement s) and return expressions with side effects. Return the number of statements inserted.
/*! Useful when adding a runtime library call to terminate the runtime system right before the end of a program, especially for OpenMP and UPC runtime systems. Return statements with complex expressions that have side effects are rewritten using an additional assignment statement.
*/
ROSE_DLL_API int instrumentEndOfFunction(SgFunctionDeclaration * func, SgStatement* s);
//! Remove jumps whose label is immediately after the jump. Used to clean up inlined code fragments.
ROSE_DLL_API void removeJumpsToNextStatement(SgNode*);
//! Remove labels which are not targets of any goto statements
ROSE_DLL_API void removeUnusedLabels(SgNode* top);
//! Remove consecutive labels
ROSE_DLL_API void removeConsecutiveLabels(SgNode* top);
//! Merge a variable assignment statement into a matching variable declaration statement. Callers should make sure the merge is semantically correct (by not introducing compilation errors). This function simply does the merge transformation, without eligibility check.
/*!
* e.g. int i; i=10; becomes int i=10; the original i=10 will be deleted after the merge
* If successful, return true; otherwise return false (e.g. the variable declaration does not match or already has an initializer).
* The original assignment statement will be removed by default.
* This function is a bit ambiguous about the merge direction, to be phased out.
*/
ROSE_DLL_API bool mergeDeclarationAndAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt, bool removeAssignStmt = true);
//! Merge an assignment into its upstream declaration statement. Callers should make sure the merge is semantically correct.
ROSE_DLL_API bool mergeAssignmentWithDeclaration (SgExprStatement* assign_stmt, SgVariableDeclaration* decl, bool removeAssignStmt = true);
//! Merge a declaration statement into a matching followed variable assignment. Callers should make sure the merge is semantically correct (by not introducing compilation errors). This function simply does the merge transformation, without eligibility check.
/*!
* e.g. int i; i=10; becomes int i=10; the original int i; will be deleted after the merge
*/
ROSE_DLL_API bool mergeDeclarationWithAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt);
//! Split a variable declaration with an rhs assignment into two statements: a declaration and an assignment.
/*! Return the generated assignment statement, if any
* e.g. int i =10; becomes int i; i=10;
* This can be seen as a normalization of declarations
*/
ROSE_DLL_API SgExprStatement* splitVariableDeclaration (SgVariableDeclaration* decl);
//! Split declarations within a scope into declarations and assignment statements, by default only top level declarations are considered. Return the number of declarations split.
ROSE_DLL_API int splitVariableDeclaration (SgScopeStatement* scope, bool topLevelOnly = true);
//! Replace an expression with a temporary variable and an assignment statement
/*!
Add a new temporary variable to contain the value of 'from'.
Change reference to 'from' to use this new variable.
Assumptions: (1)'from' is not within the test of a loop or 'if';
(2) not currently traversing 'from' or the statement it is in.
Return value: the new temp variable declaration's assign initializer containing the from expression.
*/
ROSE_DLL_API SgAssignInitializer* splitExpression(SgExpression* from, std::string newName = "");
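// Illustrative sketch (the temporary name is hypothetical; a name is generated unless newName is given):
//    a = b + foo(c);
// becomes, after splitExpression(<the foo(c) expression>, "rose_temp"):
//    int rose_temp = foo(c);
//    a = b + rose_temp;
// The returned SgAssignInitializer is the "= foo(c)" part of the new declaration.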
//! Split long expressions into blocks of statements
ROSE_DLL_API void splitExpressionIntoBasicBlock(SgExpression* expr);
//! Remove labeled goto statements
ROSE_DLL_API void removeLabeledGotos(SgNode* top);
//! If the given statement contains any break statements in its body, add a new label below the statement and change the breaks into gotos to that new label.
ROSE_DLL_API void changeBreakStatementsToGotos(SgStatement* loopOrSwitch);
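// Illustrative sketch (the label name is hypothetical; the actual label is generated):
//    while (c) { if (d) break; work(); }
// becomes
//    while (c) { if (d) goto rose_break_label; work(); }
//    rose_break_label: ;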
//! Check if the body of a 'for' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfFor(SgForStatement* fs);
//! Check if the body of a 'upc_forall' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfUpcForAll(SgUpcForAllStatement* fs);
//! Check if the body of a 'while' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfWhile(SgWhileStmt* ws);
//! Check if the body of a 'do .. while' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfDoWhile(SgDoWhileStmt* ws);
//! Check if the body of a 'switch' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfSwitch(SgSwitchStatement* ws);
//! Check if the body of a 'case option' statement is a SgBasicBlock, create one if not.
SgBasicBlock* ensureBasicBlockAsBodyOfCaseOption(SgCaseOptionStmt* cs);
//! Check if the body of a 'default option' statement is a SgBasicBlock, create one if not.
SgBasicBlock* ensureBasicBlockAsBodyOfDefaultOption(SgDefaultOptionStmt * cs);
//! Check if the true body of a 'if' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsTrueBodyOfIf(SgIfStmt* ifs);
//! Check if the false body of a 'if' statement is a SgBasicBlock, create one if not when the flag is true.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsFalseBodyOfIf(SgIfStmt* ifs, bool createEmptyBody = true);
//! Check if the body of a 'catch' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfCatch(SgCatchOptionStmt* cos);
//! Check if the body of a SgOmpBodyStatement is a SgBasicBlock, create one if not
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfOmpBodyStmt(SgOmpBodyStatement* ompbodyStmt);
// DQ (1/18/2015): This is added to support better quality token-based unparsing.
//! Remove unused basic block IR nodes added as part of normalization.
ROSE_DLL_API void cleanupNontransformedBasicBlockNode();
// DQ (1/18/2015): This is added to support better quality token-based unparsing.
//! Record where normalizations have been done so that we can perform denormalizations as required for the token-based unparsing to generate minimal diffs.
ROSE_DLL_API void recordNormalizations(SgStatement* s);
//! Check if a statement is a (true or false) body of a container-like parent, such as For, Upc_forall, Do-while,
//! switch, If, Catch, OmpBodyStmt, etc
bool isBodyStatement (SgStatement* s);
//! Fix up ifs, loops, while, switch, Catch, OmpBodyStatement, etc. to have blocks as body components. It also adds an empty else body to if statements that don't have them.
void changeAllBodiesToBlocks(SgNode* top, bool createEmptyBody = true);
// The same as changeAllBodiesToBlocks(SgNode* top). Phased out.
//void changeAllLoopBodiesToBlocks(SgNode* top);
//! Make a single statement body to be a basic block. Its parent is if, while, catch, or upc_forall etc.
SgBasicBlock * makeSingleStatementBodyToBlock(SgStatement* singleStmt);
#if 0
/** If s is the body of a loop, catch, or if statement and is already a basic block,
* s is returned unmodified. Otherwise generate a SgBasicBlock between s and its parent
* (a loop, catch, or if statement, etc). */
SgLocatedNode* ensureBasicBlockAsParent(SgStatement* s);
#endif
//! Get the constant value from a constant integer expression; abort on
//! everything else. Note that signed long longs are converted to unsigned.
unsigned long long getIntegerConstantValue(SgValueExp* expr);
//! Get a statement's dependent declarations which declare the types used in the statement. The returned vector of declaration statements is sorted according to their appearance order in the original AST. Any reference to a class or template class from a namespace will be treated as a reference to the enclosing namespace.
std::vector<SgDeclarationStatement*> getDependentDeclarations (SgStatement* stmt );
//! Insert an expression (new_exp) before another expression (anchor_exp) that has possible side effects, without changing the original semantics. This is achieved by using a comma operator: (new_exp, anchor_exp). The comma operator is returned.
SgCommaOpExp *insertBeforeUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp);
//! Insert an expression (new_exp) after another expression (anchor_exp) that has possible side effects, without changing the original semantics. This is done by using two comma operators: type T1; ... ((T1 = anchor_exp, new_exp), T1) ..., where T1 is a temp variable saving the possible side effect of anchor_exp. The top-level comma op expression is returned. The reference to T1 in T1 = anchor_exp is saved in temp_ref.
SgCommaOpExp *insertAfterUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp, SgStatement** temp_decl = NULL, SgVarRefExp** temp_ref = NULL);
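// Illustrative sketch of the two comma-operator rewrites above (T1 stands for the generated
// temporary of anchor_exp's type):
//    y = anchor_exp + 1;
// insertBeforeUsingCommaOp(new_exp, anchor_exp) rewrites this as
//    y = (new_exp, anchor_exp) + 1;
// while insertAfterUsingCommaOp(new_exp, anchor_exp, ...) rewrites it as
//    T1_type T1;  ...  y = ((T1 = anchor_exp, new_exp), T1) + 1;
// so new_exp is evaluated after anchor_exp but before its value is consumed.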
/// \brief moves the body of a function f to a new function f`;
/// f's body is replaced with code that forwards the call to f`.
/// \return a pair indicating the statement containing the call of f`
/// and an initialized name referring to the temporary variable
/// holding the result of f`. In case f returns void
/// the initialized name is NULL.
/// \param definingDeclaration the defining function declaration of f
/// \param newName the name of function f`
/// \details f's new body becomes { f`(...); } and { int res = f`(...); return res; }
/// for functions returning void and a value, respectively.
/// two function declarations are inserted in f's enclosing scope
/// \code
/// result_type f`(...); <--- (1)
/// result_type f (...) { forward call to f` }
/// result_type f`(...) { original code } <--- (2)
/// \endcode
/// Calls to f are not updated, thus in the transformed code all
/// calls will continue calling f (this is also true for
/// recursive function calls from within the body of f`).
/// After the function has created the wrapper,
/// definingDeclaration becomes the wrapper function
/// The definition of f` is the next entry in the
/// statement list; the forward declaration of f` is the previous
/// entry in the statement list.
/// \pre definingDeclaration must be a defining declaration of a
/// free standing function.
/// typeid(SgFunctionDeclaration) == typeid(definingDeclaration)
/// i.e., this function is NOT implemented for class member functions,
/// template functions, procedures, etc.
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, SgName newName);
/// \overload
/// \tparam NameGen functor that generates a new name based on the old name.
/// interface: SgName nameGen(const SgName&)
/// \param nameGen name generator
/// \brief see wrapFunction for details
template <class NameGen>
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, NameGen nameGen)
{
return wrapFunction(definingDeclaration, nameGen(definingDeclaration.get_name()));
}
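// Illustrative usage sketch for the wrapFunction() overload above (AppendSuffix is a hypothetical
// functor, not part of this header; it only has to satisfy the NameGen interface):
//    struct AppendSuffix {
//      SgName operator()(const SgName& old) const { return SgName(old.getString() + "_impl"); }
//    };
//    std::pair<SgStatement*, SgInitializedName*> res = wrapFunction(*definingDecl, AppendSuffix());
//    // f's body now lives in f_impl, and f becomes a forwarding wrapper (see wrapFunction above).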
/// \brief convenience function that returns the first initialized name declared
/// in a variable declaration.
SgInitializedName& getFirstVariable(SgVariableDeclaration& vardecl);
//@}
// DQ (6/7/2012): Unclear where this function should go...
bool hasTemplateSyntax( const SgName & name );
#if 0
//------------------------AST dump, stringify-----------------------------
//------------------------------------------------------------------------
std::string buildOperatorString ( SgNode* astNode ); //transformationSupport.h
// do we need these?
std::string dump_node(const SgNode* astNode);
std::string dump_tree(const SgNode* astNode);
// or a friendly version of unparseToString(), as a member function
std::string SgNode::toString(bool asSubTree=true); // dump node or subtree
//----------------------------AST comparison------------------------------
//------------------------------------------------------------------------
// How to get generic functions for comparison?
bool isNodeEqual(SgNode* node1, SgNode* node2); //?
bool isTreeEqual(SgNode* tree1, SgNode* tree2);
//! Are two expressions equal (using a deep comparison)?
bool expressionTreeEqual(SgExpression*, SgExpression*);
//! Are corresponding expressions in two lists equal (using a deep comparison)?
bool expressionTreeEqualStar(const SgExpressionPtrList&,
const SgExpressionPtrList&);
//----------------------AST verification/repair---------------------------
//------------------------------------------------------------------------
// sanity check of AST subtree, any suggestions?
// TODO
verifySgNode(SgNode* node, bool subTree=true);
//src/midend/astDiagnostics/AstConsistencyTests.h
// AstTests::runAllTests(SgProject * )
//src/midend/astUtil/astInterface/AstInterface.h.C
//FixSgProject(SgProject &project)
//FixSgTree(SgNode* r)
//src/frontend/SageIII/astPostProcessing
//AstPostProcessing(SgNode * node)
//--------------------------AST modification------------------------------
//------------------------------------------------------------------------
// any operations changing AST tree, including
// insert, copy, delete(remove), replace
// insert before or after some point, argument list is consistent with LowLevelRewrite
void insertAst(SgNode* targetPosition, SgNode* newNode, bool insertBefore=true);
// previous examples
//void myStatementInsert(SgStatement* target,...)
// void AstInterfaceBase::InsertStmt(AstNodePtr const & orig, AstNodePtr const &n, bool insertbefore, bool extractfromBasicBlock)
// copy
// copy children of one basic block to another basic block
//void appendStatementCopy (const SgBasicBlock* a, SgBasicBlock* b);
void copyStatements (const SgBasicBlock* src, SgBasicBlock* dst);
// delete (remove) a node or a whole subtree
void removeSgNode(SgNode* targetNode); // need this?
void removeSgNodeTree(SgNode* subtree); // need this?
void removeStatement( SgStatement* targetStmt);
//Move = delete + insert
void moveAst (SgNode* src, SgNode* target); // need this?
// similar to
void moveStatements (SgBasicBlock* src, SgBasicBlock* target);
// replace= delete old + insert new (via building or copying)
// DQ (1/25/2010): This does not appear to exist as a definition anywhere in ROSE.
// void replaceAst(SgNode* oldNode, SgNode* newNode);
//void replaceChild(SgNode* parent, SgNode* from, SgNode* to);
//bool AstInterface::ReplaceAst( const AstNodePtr& orig, const AstNodePtr& n)
//--------------------------AST transformations---------------------------
//------------------------------------------------------------------------
// Advanced AST modifications through basic AST modifications
// Might not be included in the AST utility list, but listed here for the record.
// extract statements/content from a scope
void flattenBlocks(SgNode* n);
//src/midend/astInlining/inlinerSupport.h
void renameVariables(SgNode* n);
void renameLabels(SgNode* n, SgFunctionDefinition* enclosingFunctionDefinition);
void simpleCopyAndConstantPropagation(SgNode* top);
void changeAllMembersToPublic(SgNode* n);
void removeVariableDeclaration(SgInitializedName* initname);
//! Convert something like "int a = foo();" into "int a; a = foo();"
SgAssignOp* convertInitializerIntoAssignment(SgAssignInitializer* init);
//! Rewrites a while or for loop so that the official test is changed to
//! "true" and what had previously been the test is now an if-break
//! combination (with an inverted condition) at the beginning of the loop
//! body
void pushTestIntoBody(LoopStatement* loopStmt);
//programTransformation/finiteDifferencing/finiteDifferencing.h
//! Move variables declared in a for statement to just outside that statement.
void moveForDeclaredVariables(SgNode* root);
//------------------------ Is/Has functions ------------------------------
//------------------------------------------------------------------------
// misc. boolean functions
// some of them could moved to SgXXX class as a member function
bool isOverloaded (SgFunctionDeclaration * functionDeclaration);
bool isSwitchCond (const SgStatement* s);
bool isIfCond (const SgStatement* s);
bool isWhileCond (const SgStatement* s);
bool isStdNamespace (const SgScopeStatement* scope);
bool isTemplateInst (const SgDeclarationStatement* decl);
bool isCtor (const SgFunctionDeclaration* func);
bool isDtor (const SgFunctionDeclaration* func);
// src/midend/astInlining/typeTraits.h
bool hasTrivialDestructor(SgType* t);
ROSE_DLL_API bool isNonconstReference(SgType* t);
ROSE_DLL_API bool isReferenceType(SgType* t);
// generic ones, or move to the SgXXX class as a member function
bool isConst(SgNode* node); // const type, variable, function, etc.
// .... and more
bool isConstType (const SgType* type);
bool isConstFunction (const SgFunctionDeclaration* decl);
bool isMemberVariable(const SgInitializedName & var);
//bool isMemberVariable(const SgNode& in);
bool isPrototypeInScope (SgScopeStatement * scope,
SgFunctionDeclaration * functionDeclaration,
SgDeclarationStatement * startingAtDeclaration);
bool MayRedefined(SgExpression* expr, SgNode* root);
// bool isPotentiallyModified(SgExpression* expr, SgNode* root); // inlinerSupport.h
bool hasAddressTaken(SgExpression* expr, SgNode* root);
//src/midend/astInlining/inlinerSupport.C
// can also classified as topdown search
bool containsVariableReference(SgNode* root, SgInitializedName* var);
bool isDeclarationOf(SgVariableDeclaration* decl, SgInitializedName* var);
bool isPotentiallyModifiedDuringLifeOf(SgBasicBlock* sc,
SgInitializedName* toCheck,
SgInitializedName* lifetime)
//src/midend/programTransformation/partialRedundancyElimination/pre.h
bool anyOfListPotentiallyModifiedIn(const std::vector<SgVariableSymbol*>& syms, SgNode* n);
//------------------------ loop handling ---------------------------------
//------------------------------------------------------------------------
//get and set loop control expressions
// 0: init expr, 1: condition expr, 2: stride expr
SgExpression* getForLoopTripleValues(int valuetype,SgForStatement* forstmt );
int setForLoopTripleValues(int valuetype,SgForStatement* forstmt, SgExpression* exp);
bool isLoopIndexVarRef(SgForStatement* forstmt, SgVarRefExp *varref);
SgInitializedName * getLoopIndexVar(SgForStatement* forstmt);
//------------------------expressions-------------------------------------
//------------------------------------------------------------------------
//src/midend/programTransformation/partialRedundancyElimination/pre.h
int countComputationsOfExpressionIn(SgExpression* expr, SgNode* root);
//src/midend/astInlining/replaceExpressionWithStatement.h
void replaceAssignmentStmtWithStatement(SgExprStatement* from, StatementGenerator* to);
void replaceSubexpressionWithStatement(SgExpression* from,
StatementGenerator* to);
SgExpression* getRootOfExpression(SgExpression* n);
//--------------------------preprocessing info. -------------------------
//------------------------------------------------------------------------
//! Removes all preprocessing information at a given position.
void cutPreprocInfo (SgBasicBlock* b,
PreprocessingInfo::RelativePositionType pos,
AttachedPreprocessingInfoType& save_buf);
//! Pastes preprocessing information at the front of a statement.
void pastePreprocInfoFront (AttachedPreprocessingInfoType& save_buf,
SgStatement* s);
//! Pastes preprocessing information at the back of a statement.
void pastePreprocInfoBack (AttachedPreprocessingInfoType& save_buf,
SgStatement* s);
/*!
* \brief Moves 'before' preprocessing information.
* Moves all preprocessing information attached 'before' the source
* statement to the front of the destination statement.
*/
// a generic one for all
/// void movePreprocessingInfo(src, dest, RelativePositionType);
void moveBeforePreprocInfo (SgStatement* src, SgStatement* dest);
void moveInsidePreprocInfo (SgBasicBlock* src, SgBasicBlock* dest);
void moveAfterPreprocInfo (SgStatement* src, SgStatement* dest);
//--------------------------------operator--------------------------------
//------------------------------------------------------------------------
// from transformationSupport.h, not sure if they should be included here
/* return enum code for SAGE operators */
operatorCodeType classifyOverloadedOperator(); // transformationSupport.h
/*! \brief generates a source code string from operator name.
This function returns a string representing the elementwise operator (for primitive types)
that would match the one associated with the overloaded operator for a user-defined
abstraction (e.g. identifyOperator("operator+()") returns "+").
*/
std::string stringifyOperator (std::string name);
//--------------------------------macro ----------------------------------
//------------------------------------------------------------------------
std::string buildMacro ( std::string s ); //transformationSupport.h
//--------------------------------access functions---------------------------
//----------------------------------get/set sth.-----------------------------
// several categories:
* get/set a direct child/grandchild node or fields
* get/set a property flag value
* get a descendent child node using preorder searching
* get an ancestor node using bottomup/reverse searching
// SgName or string?
std::string getFunctionName (SgFunctionCallExp* functionCallExp);
std::string getFunctionTypeName ( SgFunctionCallExp* functionCallExpression );
// do we need them anymore? or are the existing member functions enough?
// a generic one:
std::string get_name (const SgNode* node);
std::string get_name (const SgDeclarationStatement * declaration);
// get/set some property: should be moved to SgXXX as an inherent member function?
// access modifier
void setExtern (SgFunctionDeclaration*)
void clearExtern()
// similarly for other declarations and other properties
void setExtern (SgVariableDeclaration*)
void setPublic()
void setPrivate()
#endif
// DQ (1/23/2013): Added support for generated a set of source sequence entries.
std::set<unsigned int> collectSourceSequenceNumbers( SgNode* astNode );
//--------------------------------Type Traits (C++)---------------------------
bool HasNoThrowAssign(const SgType * const inputType);
bool HasNoThrowCopy(const SgType * const inputType);
bool HasNoThrowConstructor(const SgType * const inputType);
bool HasTrivialAssign(const SgType * const inputType);
bool HasTrivialCopy(const SgType * const inputType);
bool HasTrivialConstructor(const SgType * const inputType);
bool HasTrivialDestructor(const SgType * const inputType);
bool HasVirtualDestructor(const SgType * const inputType);
bool IsBaseOf(const SgType * const inputBaseType, const SgType * const inputDerivedType);
bool IsAbstract(const SgType * const inputType);
bool IsClass(const SgType * const inputType);
bool IsEmpty(const SgType * const inputType);
bool IsEnum(const SgType * const inputType);
bool IsPod(const SgType * const inputType);
bool IsPolymorphic(const SgType * const inputType);
bool IsStandardLayout(const SgType * const inputType);
bool IsLiteralType(const SgType * const inputType);
bool IsTrivial(const SgType * const inputType);
bool IsUnion(const SgType * const inputType);
SgType * UnderlyingType(SgType *type);
// DQ (3/2/2014): Added a new interface function (used in the snippet insertion support).
// void supportForInitializedNameLists ( SgScopeStatement* scope, SgInitializedNamePtrList & variableList );
// DQ (3/4/2014): Added support for testing two trees for equivalents using the AST iterators.
bool isStructurallyEquivalentAST( SgNode* tree1, SgNode* tree2 );
// JP (10/14/24): Moved code to evaluate a const integer expression (like in array size definitions) to SageInterface
/*! The data structure is used as the return type for SageInterface::evaluateConstIntegerExpression(). One always needs to check whether hasValue_ is true before accessing value_ */
struct const_int_expr_t {
size_t value_;
bool hasValue_;
};
/*! \brief The function tries to evaluate const integer expressions (such as are used in array dimension sizes). It follows variable symbols, and requires constness. */
struct const_int_expr_t evaluateConstIntegerExpression(SgExpression *expr);
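/*! Illustrative usage sketch: always test hasValue_ before reading value_.
 *    const_int_expr_t r = evaluateConstIntegerExpression(arrayDimExpr);
 *    if (r.hasValue_) { size_t n = r.value_; ... use n as the array dimension ... }
 */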
// JP (9/17/14): Added function to test whether two SgType* are equivalent or not
bool checkTypesAreEqual(SgType *typeA, SgType *typeB);
//--------------------------------Java interface functions ---------------------
#ifdef ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
ROSE_DLL_API std::string getTempDirectory(SgProject *project);
ROSE_DLL_API void destroyTempDirectory(std::string);
ROSE_DLL_API SgFile *processFile(SgProject *, std::string, bool unparse = false);
ROSE_DLL_API std::string preprocessPackage(SgProject *, std::string);
ROSE_DLL_API std::string preprocessImport(SgProject *, std::string);
ROSE_DLL_API SgFile* preprocessCompilationUnit(SgProject *, std::string, std::string, bool unparse = true);
ROSE_DLL_API SgClassDefinition *findJavaPackage(SgScopeStatement *, std::string);
ROSE_DLL_API SgClassDefinition *findOrInsertJavaPackage(SgProject *, std::string, bool create_directory = false);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassDefinition *package_definition, std::string);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, std::string, std::string);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassType *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassDefinition *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassType *);
#endif // ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
// DQ (8/31/2016): Making this a template function so that we can have it work with user defined filters.
//! This function detects template instantiations that are relevant when filters are used.
/*!
EDG normalizes some in-class template functions and member functions to be redefined outside of a class. This causes the associated template instantiations
to be declared outside of the class, and to be marked as compiler generated (since the compiler generated form outside of the class declaration).
ROSE captures the function definitions, but in the new location (defined outside of the class declaration). This can confuse some simple tests
for template instantiations that are a part of definitions in a file, thus we have this function to detect this specific normalization.
*/
template < class T >
bool isTemplateInstantiationFromTemplateDeclarationSatisfyingFilter (SgFunctionDeclaration* function, T* filter )
{
// DQ (9/1/2016): This function is called in the Call graph generation to avoid filtering out EDG normalized
// function template instantiations (which come from normalized template functions and member functions).
// Note that because of the EDG normalization the member function is moved outside of the class, and
// thus marked as compiler generated. However, the template instantiations are always marked as compiler
// generated (if not specializations) and so we want to include a template instantiation that is marked
// as compiler generated, but is from a template declaration that satisfied a specific user-defined filter.
// The complexity of this detection is isolated here, but knowing that it must be called is more complex.
// This function is called in the CG.C file of tests/nonsmoke/functional/roseTests/programAnalysisTests/testCallGraphAnalysis.
bool retval = false;
#define DEBUG_TEMPLATE_NORMALIZATION_DETECTION 0
#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
printf ("In isTemplateInstantiationFromTemplateDeclarationSatisfyingFilter(): function = %p = %s = %s \n",function,function->class_name().c_str(),function->get_name().str());
#endif
// Test for this to be a template instantiation (in which case it was marked as
// compiler generated, but we may want to allow it to be used in the call graph
// if its template was defined in the current directory).
SgTemplateInstantiationFunctionDecl* templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(function);
SgTemplateInstantiationMemberFunctionDecl* templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(function);
if (templateInstantiationFunction != NULL)
{
// When the defining function has been normalized by EDG, only the non-defining declaration will have a source position.
templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(templateInstantiationFunction->get_firstNondefiningDeclaration());
SgTemplateFunctionDeclaration* templateFunctionDeclaration = templateInstantiationFunction->get_templateDeclaration();
if (templateFunctionDeclaration != NULL)
{
retval = filter->operator()(templateFunctionDeclaration);
}
else
{
// Assume false.
}
#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
printf (" --- case of templateInstantiationFunction: retval = %s \n",retval ? "true" : "false");
#endif
}
else
{
if (templateInstantiationMemberFunction != NULL)
{
// When the defining function has been normalized by EDG, only the non-defining declaration will have a source position.
templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(templateInstantiationMemberFunction->get_firstNondefiningDeclaration());
SgTemplateMemberFunctionDeclaration* templateMemberFunctionDeclaration = templateInstantiationMemberFunction->get_templateDeclaration();
if (templateMemberFunctionDeclaration != NULL)
{
retval = filter->operator()(templateMemberFunctionDeclaration);
}
else
{
// Assume false.
}
#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
printf (" --- case of templateInstantiationMemberFunction: retval = %s \n",retval ? "true" : "false");
#endif
}
}
return retval;
}
void detectCycleInType(SgType * type, const std::string & from);
// DQ (7/14/2020): Debugging support.
void checkForInitializers( SgNode* node );
}// end of namespace
#endif
|
rumi6r.c
|
/*
* Date: 11 December 2015
* Contact: Thomas Peyrin - [email protected]
*/
/*
* Simulation of boomerang analysis for Skinny
* Date: March 21, 2020
* Author: Hosein Hadipour
* Contact: [email protected]
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#include <stdbool.h>
// #define DEBUG 1
#define Nthreads 4 // Number of parallel threads utilized in this program
#define NumOfExperiments 8 // Number of independent experiments
// Table that encodes the parameters of the various Skinny versions:
// (block size, key size, number of rounds)
//Skinny-64-64: 32 rounds
//Skinny-64-128: 36 rounds
//Skinny-64-192: 40 rounds
//Skinny-128-128: 40 rounds
//Skinny-128-256: 48 rounds
//Skinny-128-384: 56 rounds
int versions[6][3] = {{64, 64, 32}, {64, 128, 36}, {64, 192, 40}, {128, 128, 40}, {128, 256, 48}, {128, 384, 56}};
// Packing of data is done as follows (state[i][j] stands for row i and column j):
// 0 1 2 3
// 4 5 6 7
// 8 9 10 11
//12 13 14 15
// 4-bit Sbox
const unsigned char sbox_4[16] = {12, 6, 9, 0, 1, 10, 2, 11, 3, 8, 5, 13, 4, 14, 7, 15};
const unsigned char sbox_4_inv[16] = {3, 4, 6, 8, 12, 10, 1, 14, 9, 2, 5, 7, 0, 11, 13, 15};
// 8-bit Sbox
const unsigned char sbox_8[256] = {0x65, 0x4c, 0x6a, 0x42, 0x4b, 0x63, 0x43, 0x6b, 0x55, 0x75, 0x5a, 0x7a, 0x53, 0x73, 0x5b, 0x7b, 0x35, 0x8c, 0x3a, 0x81, 0x89, 0x33, 0x80, 0x3b, 0x95, 0x25, 0x98, 0x2a, 0x90, 0x23, 0x99, 0x2b, 0xe5, 0xcc, 0xe8, 0xc1, 0xc9, 0xe0, 0xc0, 0xe9, 0xd5, 0xf5, 0xd8, 0xf8, 0xd0, 0xf0, 0xd9, 0xf9, 0xa5, 0x1c, 0xa8, 0x12, 0x1b, 0xa0, 0x13, 0xa9, 0x05, 0xb5, 0x0a, 0xb8, 0x03, 0xb0, 0x0b, 0xb9, 0x32, 0x88, 0x3c, 0x85, 0x8d, 0x34, 0x84, 0x3d, 0x91, 0x22, 0x9c, 0x2c, 0x94, 0x24, 0x9d, 0x2d, 0x62, 0x4a, 0x6c, 0x45, 0x4d, 0x64, 0x44, 0x6d, 0x52, 0x72, 0x5c, 0x7c, 0x54, 0x74, 0x5d, 0x7d, 0xa1, 0x1a, 0xac, 0x15, 0x1d, 0xa4, 0x14, 0xad, 0x02, 0xb1, 0x0c, 0xbc, 0x04, 0xb4, 0x0d, 0xbd, 0xe1, 0xc8, 0xec, 0xc5, 0xcd, 0xe4, 0xc4, 0xed, 0xd1, 0xf1, 0xdc, 0xfc, 0xd4, 0xf4, 0xdd, 0xfd, 0x36, 0x8e, 0x38, 0x82, 0x8b, 0x30, 0x83, 0x39, 0x96, 0x26, 0x9a, 0x28, 0x93, 0x20, 0x9b, 0x29, 0x66, 0x4e, 0x68, 0x41, 0x49, 0x60, 0x40, 0x69, 0x56, 0x76, 0x58, 0x78, 0x50, 0x70, 0x59, 0x79, 0xa6, 0x1e, 0xaa, 0x11, 0x19, 0xa3, 0x10, 0xab, 0x06, 0xb6, 0x08, 0xba, 0x00, 0xb3, 0x09, 0xbb, 0xe6, 0xce, 0xea, 0xc2, 0xcb, 0xe3, 0xc3, 0xeb, 0xd6, 0xf6, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0xfb, 0x31, 0x8a, 0x3e, 0x86, 0x8f, 0x37, 0x87, 0x3f, 0x92, 0x21, 0x9e, 0x2e, 0x97, 0x27, 0x9f, 0x2f, 0x61, 0x48, 0x6e, 0x46, 0x4f, 0x67, 0x47, 0x6f, 0x51, 0x71, 0x5e, 0x7e, 0x57, 0x77, 0x5f, 0x7f, 0xa2, 0x18, 0xae, 0x16, 0x1f, 0xa7, 0x17, 0xaf, 0x01, 0xb2, 0x0e, 0xbe, 0x07, 0xb7, 0x0f, 0xbf, 0xe2, 0xca, 0xee, 0xc6, 0xcf, 0xe7, 0xc7, 0xef, 0xd2, 0xf2, 0xde, 0xfe, 0xd7, 0xf7, 0xdf, 0xff};
const unsigned char sbox_8_inv[256] = {0xac, 0xe8, 0x68, 0x3c, 0x6c, 0x38, 0xa8, 0xec, 0xaa, 0xae, 0x3a, 0x3e, 0x6a, 0x6e, 0xea, 0xee, 0xa6, 0xa3, 0x33, 0x36, 0x66, 0x63, 0xe3, 0xe6, 0xe1, 0xa4, 0x61, 0x34, 0x31, 0x64, 0xa1, 0xe4, 0x8d, 0xc9, 0x49, 0x1d, 0x4d, 0x19, 0x89, 0xcd, 0x8b, 0x8f, 0x1b, 0x1f, 0x4b, 0x4f, 0xcb, 0xcf, 0x85, 0xc0, 0x40, 0x15, 0x45, 0x10, 0x80, 0xc5, 0x82, 0x87, 0x12, 0x17, 0x42, 0x47, 0xc2, 0xc7, 0x96, 0x93, 0x03, 0x06, 0x56, 0x53, 0xd3, 0xd6, 0xd1, 0x94, 0x51, 0x04, 0x01, 0x54, 0x91, 0xd4, 0x9c, 0xd8, 0x58, 0x0c, 0x5c, 0x08, 0x98, 0xdc, 0x9a, 0x9e, 0x0a, 0x0e, 0x5a, 0x5e, 0xda, 0xde, 0x95, 0xd0, 0x50, 0x05, 0x55, 0x00, 0x90, 0xd5, 0x92, 0x97, 0x02, 0x07, 0x52, 0x57, 0xd2, 0xd7, 0x9d, 0xd9, 0x59, 0x0d, 0x5d, 0x09, 0x99, 0xdd, 0x9b, 0x9f, 0x0b, 0x0f, 0x5b, 0x5f, 0xdb, 0xdf, 0x16, 0x13, 0x83, 0x86, 0x46, 0x43, 0xc3, 0xc6, 0x41, 0x14, 0xc1, 0x84, 0x11, 0x44, 0x81, 0xc4, 0x1c, 0x48, 0xc8, 0x8c, 0x4c, 0x18, 0x88, 0xcc, 0x1a, 0x1e, 0x8a, 0x8e, 0x4a, 0x4e, 0xca, 0xce, 0x35, 0x60, 0xe0, 0xa5, 0x65, 0x30, 0xa0, 0xe5, 0x32, 0x37, 0xa2, 0xa7, 0x62, 0x67, 0xe2, 0xe7, 0x3d, 0x69, 0xe9, 0xad, 0x6d, 0x39, 0xa9, 0xed, 0x3b, 0x3f, 0xab, 0xaf, 0x6b, 0x6f, 0xeb, 0xef, 0x26, 0x23, 0xb3, 0xb6, 0x76, 0x73, 0xf3, 0xf6, 0x71, 0x24, 0xf1, 0xb4, 0x21, 0x74, 0xb1, 0xf4, 0x2c, 0x78, 0xf8, 0xbc, 0x7c, 0x28, 0xb8, 0xfc, 0x2a, 0x2e, 0xba, 0xbe, 0x7a, 0x7e, 0xfa, 0xfe, 0x25, 0x70, 0xf0, 0xb5, 0x75, 0x20, 0xb0, 0xf5, 0x22, 0x27, 0xb2, 0xb7, 0x72, 0x77, 0xf2, 0xf7, 0x2d, 0x79, 0xf9, 0xbd, 0x7d, 0x29, 0xb9, 0xfd, 0x2b, 0x2f, 0xbb, 0xbf, 0x7b, 0x7f, 0xfb, 0xff};
// ShiftAndSwitchRows permutation
const unsigned char P[16] = {0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12};
const unsigned char P_inv[16] = {0, 1, 2, 3, 5, 6, 7, 4, 10, 11, 8, 9, 15, 12, 13, 14};
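// Note: P rotates row i of the state to the right by i cells (row 0 is unchanged),
// which is Skinny's ShiftRows; P_inv undoes that rotation.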
// Tweakey permutation
const unsigned char TWEAKEY_P[16] = {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7};
const unsigned char TWEAKEY_P_inv[16] = {8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1};
// round constants
const unsigned char RC[62] = {
0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F,
0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, 0x1D, 0x3A, 0x35, 0x2B,
0x16, 0x2C, 0x18, 0x30, 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E,
0x1C, 0x38, 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A,
0x34, 0x29, 0x12, 0x24, 0x08, 0x11, 0x22, 0x04, 0x09, 0x13,
0x26, 0x0c, 0x19, 0x32, 0x25, 0x0a, 0x15, 0x2a, 0x14, 0x28,
0x10, 0x20};
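// Minimal sketch (assumption: the 6-bit affine LFSR from the Skinny specification) showing how the
// RC[] table above can be regenerated; compare against the table before relying on it.
//    unsigned char rc = 0;
//    for (int r = 0; r < 62; r++) {
//        rc = ((rc << 1) & 0x3F) | (((rc >> 5) ^ (rc >> 4) ^ 1) & 0x1);
//        // rc now equals RC[r]; AddConstants() splits it as c0 = rc & 0xF and c1 = (rc >> 4) & 0x3.
//    }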
FILE *fic;
void init_prng(int offset) {
// unsigned int initial_seed = 0x5ED90662;
// unsigned int initial_seed = 0x30051991; My birthday!
unsigned int initial_seed = 10*time(NULL) + 11*offset;
srand(initial_seed); // Initialization; should only be called once.
printf("[+] PRNG initialized to 0x%08X\n", initial_seed);
}
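// Note: rand()/srand() share one global state across all threads, so the per-thread seeding done in
// send_boomerangs() below does not give the OpenMP threads fully independent streams. A minimal
// thread-local alternative (assumption: POSIX rand_r() is available on the target platform):
//    unsigned int seed = 10 * time(NULL) + 11 * omp_get_thread_num();
//    unsigned char byte = rand_r(&seed) & 0xff;   // per-thread stream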
void display_matrix(unsigned char state[4][4], int ver)
{
int i;
unsigned char input[16];
if (versions[ver][0] == 64)
{
for (i = 0; i < 8; i++)
input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
for (i = 0; i < 8; i++)
fprintf(fic, "%02x", input[i]);
}
else if (versions[ver][0] == 128)
{
for (i = 0; i < 16; i++)
input[i] = state[i >> 2][i & 0x3] & 0xFF;
for (i = 0; i < 16; i++)
fprintf(fic, "%02x", input[i]);
}
}
void display_cipher_state(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
int k;
fprintf(fic, "S = ");
display_matrix(state, ver);
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
fprintf(fic, " - TK%i = ", k + 1);
display_matrix(keyCells[k], ver);
}
}
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state
void AddKey(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
int i, j, k;
unsigned char pos;
unsigned char keyCells_tmp[3][4][4];
// apply the subtweakey to the internal state
for (i = 0; i <= 1; i++)
{
for (j = 0; j < 4; j++)
{
state[i][j] ^= keyCells[0][i][j];
if (2 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j];
else if (3 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
}
}
// update the subtweakey states with the permutation
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
//application of the TWEAKEY permutation
pos = TWEAKEY_P[j + 4 * i];
keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
}
}
}
// update the subtweakey states with the LFSRs
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i <= 1; i++)
{
for (j = 0; j < 4; j++)
{
//application of LFSRs for TK updates
if (k == 1)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
}
else if (k == 2)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j]) & 0x8) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
}
}
}
}
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
keyCells[k][i][j] = keyCells_tmp[k][i][j];
}
}
}
}
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state (inverse function)
void AddKey_inv(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
int i, j, k;
unsigned char pos;
unsigned char keyCells_tmp[3][4][4];
// update the subtweakey states with the permutation
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
//application of the inverse TWEAKEY permutation
pos = TWEAKEY_P_inv[j + 4 * i];
keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
}
}
}
// update the subtweakey states with the LFSRs
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 2; i <= 3; i++)
{
for (j = 0; j < 4; j++)
{
//application of inverse LFSRs for TK updates
if (k == 1)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8) ^ ((keyCells_tmp[k][i][j]) & 0x8);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
}
else if (k == 2)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
}
}
}
}
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
keyCells[k][i][j] = keyCells_tmp[k][i][j];
}
}
}
// apply the subtweakey to the internal state
for (i = 0; i <= 1; i++)
{
for (j = 0; j < 4; j++)
{
state[i][j] ^= keyCells[0][i][j];
if (2 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j];
else if (3 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
}
}
}
// Apply the constants: using an LFSR counter on 6 bits, we XOR the 6 bits into the first column of the internal state
void AddConstants(unsigned char state[4][4], int r)
{
state[0][0] ^= (RC[r] & 0xf);
state[1][0] ^= ((RC[r] >> 4) & 0x3);
state[2][0] ^= 0x2;
}
// apply the 4-bit Sbox
void SubCell4(unsigned char state[4][4])
{
int i, j;
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++)
state[i][j] = sbox_4[state[i][j]];
}
// apply the 4-bit inverse Sbox
void SubCell4_inv(unsigned char state[4][4])
{
int i, j;
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++)
state[i][j] = sbox_4_inv[state[i][j]];
}
// apply the 8-bit Sbox
void SubCell8(unsigned char state[4][4])
{
int i, j;
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++)
state[i][j] = sbox_8[state[i][j]];
}
// apply the 8-bit inverse Sbox
void SubCell8_inv(unsigned char state[4][4])
{
int i, j;
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++)
state[i][j] = sbox_8_inv[state[i][j]];
}
// Apply the ShiftRows function
void ShiftRows(unsigned char state[4][4])
{
int i, j, pos;
unsigned char state_tmp[4][4];
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
//application of the ShiftRows permutation
pos = P[j + 4 * i];
state_tmp[i][j] = state[pos >> 2][pos & 0x3];
}
}
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
state[i][j] = state_tmp[i][j];
}
}
}
// Apply the inverse ShiftRows function
void ShiftRows_inv(unsigned char state[4][4])
{
int i, j, pos;
unsigned char state_tmp[4][4];
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
//application of the inverse ShiftRows permutation
pos = P_inv[j + 4 * i];
state_tmp[i][j] = state[pos >> 2][pos & 0x3];
}
}
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
state[i][j] = state_tmp[i][j];
}
}
}
// Apply the linear diffusion matrix
//M =
//1 0 1 1
//1 0 0 0
//0 1 1 0
//1 0 1 0
void MixColumn(unsigned char state[4][4])
{
int j;
unsigned char temp;
for (j = 0; j < 4; j++)
{
state[1][j] ^= state[2][j];
state[2][j] ^= state[0][j];
state[3][j] ^= state[2][j];
temp = state[3][j];
state[3][j] = state[2][j];
state[2][j] = state[1][j];
state[1][j] = state[0][j];
state[0][j] = temp;
}
}
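// Equivalently, writing one column's entries top to bottom as (s0, s1, s2, s3), one application of M
// above gives (s0 ^ s2 ^ s3, s0, s1 ^ s2, s0 ^ s2), matching the matrix shown before MixColumn().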
// Apply the inverse linear diffusion matrix
void MixColumn_inv(unsigned char state[4][4])
{
int j;
unsigned char temp;
for (j = 0; j < 4; j++)
{
temp = state[3][j];
state[3][j] = state[0][j];
state[0][j] = state[1][j];
state[1][j] = state[2][j];
state[2][j] = temp;
state[3][j] ^= state[2][j];
state[2][j] ^= state[0][j];
state[1][j] ^= state[2][j];
}
}
// decryption function of Skinny
void dec(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
unsigned char state[4][4];
unsigned char dummy[4][4] = {{0}};
unsigned char keyCells[3][4][4];
int i;
memset(keyCells, 0, 48);
for (i = 0; i < 16; i++)
{
if (versions[ver][0] == 64)
{
if (i & 1)
{
state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
}
else
{
state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
}
}
else if (versions[ver][0] == 128)
{
state[i >> 2][i & 0x3] = input[i] & 0xFF;
keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
if (versions[ver][1] >= 256)
keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
if (versions[ver][1] >= 384)
keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
}
}
for (i = r - 1; i >= 0; i--)
{
AddKey(dummy, keyCells, ver);
}
#ifdef DEBUG
fprintf(fic, "DEC - initial state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
for (i = r - 1; i >= 0; i--)
{
MixColumn_inv(state);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after MixColumn_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
ShiftRows_inv(state);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after ShiftRows_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddKey_inv(state, keyCells, ver);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after AddKey_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddConstants(state, i);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after AddConstants_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
if (versions[ver][0] == 64)
SubCell4_inv(state);
else
SubCell8_inv(state);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after SubCell_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
}
#ifdef DEBUG
fprintf(fic, "DEC - final state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
if (versions[ver][0] == 64)
{
for (i = 0; i < 8; i++)
input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
}
else if (versions[ver][0] == 128)
{
for (i = 0; i < 16; i++)
input[i] = state[i >> 2][i & 0x3] & 0xFF;
}
}
// encryption function of Skinny
void enc(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
unsigned char state[4][4];
unsigned char keyCells[3][4][4];
int i;
memset(keyCells, 0, 48);
for (i = 0; i < 16; i++)
{
if (versions[ver][0] == 64)
{
if (i & 1)
{
state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
}
else
{
state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
}
}
else if (versions[ver][0] == 128)
{
state[i >> 2][i & 0x3] = input[i] & 0xFF;
keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
if (versions[ver][1] >= 256)
keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
if (versions[ver][1] >= 384)
keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
}
}
#ifdef DEBUG
fprintf(fic, "ENC - initial state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
for (i = 0; i < r; i++)
{
if (versions[ver][0] == 64)
SubCell4(state);
else
SubCell8(state);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after SubCell: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddConstants(state, i);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after AddConstants: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddKey(state, keyCells, ver);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after AddKey: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
ShiftRows(state);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after ShiftRows: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
MixColumn(state);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after MixColumn: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
} //The last subtweakey should not be added
#ifdef DEBUG
fprintf(fic, "ENC - final state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
if (versions[ver][0] == 64)
{
for (i = 0; i < 8; i++)
input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
}
else if (versions[ver][0] == 128)
{
for (i = 0; i < 16; i++)
input[i] = state[i >> 2][i & 0x3] & 0xFF;
}
}
// generate test vectors for all the versions of Skinny
void TestVectors(int ver)
{
unsigned char p[16];
unsigned char c[16];
unsigned char k[48];
int n;
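// nine random plaintext/tweakey pairs are generated per version; note that
// enc/dec are called with a fixed round count of 10 here, and the chosen
// plaintext is also echoed to stdout before being written to the file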
for (n = 1; n < 10; n++)
{
int i;
for (i = 0; i < (versions[ver][0] >> 3); i++)
c[i] = p[i] = rand() & 0xff;
for (i = 0; i < (versions[ver][0] >> 3); i++)
printf("%02x", p[i]);
printf("\n");
for (i = 0; i < (versions[ver][1] >> 3); i++)
k[i] = rand() & 0xff;
fprintf(fic, "TK = ");
for (i = 0; i < (versions[ver][1] >> 3); i++)
fprintf(fic, "%02x", k[i]);
fprintf(fic, "\n");
fprintf(fic, "P = ");
for (i = 0; i < (versions[ver][0] >> 3); i++)
fprintf(fic, "%02x", p[i]);
fprintf(fic, "\n");
enc(c, k, ver, 10);
fprintf(fic, "C = ");
for (i = 0; i < (versions[ver][0] >> 3); i++)
fprintf(fic, "%02x", c[i]);
fprintf(fic, "\n");
dec(c, k, ver, 10);
fprintf(fic, "P' = ");
for (i = 0; i < (versions[ver][0] >> 3); i++)
fprintf(fic, "%02x", c[i]);
fprintf(fic, "\n\n");
}
}
int boomerang(int r, int ver, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
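// One boomerang experiment: build quartets (p1, p2, c3, c4) where p2 = p1 ^ dp,
// p1/p2 are encrypted under the related keys k1/k2, the resulting ciphertexts
// are shifted by the output difference dc to obtain c3/c4, and those are
// decrypted under k3/k4 (derived from the tweakey differences dk1 and dk2).
// The function returns how many of the N3 quartets come back with plaintext
// difference dp.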
int i;
unsigned char p1[16], p2[16];
unsigned char c3[16], c4[16];
unsigned char k1[48], k2[48], k3[48], k4[48];
// randomly choose k1
for (i = 0; i < (versions[ver][1] >> 3); i++)
k1[i] = rand() & 0xff;
// derive k2
for (i = 0; i < (versions[ver][1] >> 3); i++)
k2[i] = k1[i] ^ dk1[i];
// derive k3
for (i = 0; i < (versions[ver][1] >> 3); i++)
k3[i] = k1[i] ^ dk2[i];
// derive k4
for (i = 0; i < (versions[ver][1] >> 3); i++)
k4[i] = k2[i] ^ dk2[i];
int num = 0;
for (int t = 0; t < N3; t++)
{
// randomly choose p1
for (i = 0; i < (versions[ver][0] >> 3); i++)
p1[i] = rand() & 0xff;
// derive p2
for (i = 0; i < (versions[ver][0] >> 3); i++)
p2[i] = p1[i] ^ dp[i];
enc(p1, k1, ver, r);
enc(p2, k2, ver, r);
// derive c3
for (i = 0; i < (versions[ver][0] >> 3); i++)
c3[i] = p1[i] ^ dc[i];
// derive c4
for (i = 0; i < (versions[ver][0] >> 3); i++)
c4[i] = p2[i] ^ dc[i];
dec(c3, k3, ver, r);
dec(c4, k4, ver, r);
bool flag = 1;
for (i = 0; i < (versions[ver][0] >> 3); i++)
if ((c3[i] ^ c4[i]) != dp[i])
flag = 0;
if (flag)
{
num++;
}
}
return num;
}
double send_boomerangs(int R, int ver, int N1, int N2, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
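// Distribute the boomerang experiments over N1 OpenMP threads: each thread
// seeds its own PRNG with its thread id and runs N2 bunches of N3 queries,
// so N1*N2*N3 quartets are tested in total. The printed value is the observed
// probability of a returning quartet; the raw count is returned to the caller.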
// Parallel execution
int NUM[N1];
int counter;
printf("#Rounds: %d rounds\n", R);
printf("#Total Queries = (#Parallel threads) * (#Bunches per thread) * (#Queries per bunch) = %d * %d * %d = 2^(%f)\n", N1, N2, N3, log(N1 * N2 * N3) / log(2));
clock_t clock_timer;
double wall_timer;
clock_timer = clock();
wall_timer = omp_get_wtime();
omp_set_num_threads(N1);
#pragma omp parallel for
for (counter = 0; counter < N1; counter++)
{
int num = 0;
int ID = omp_get_thread_num();
init_prng(ID);
for (int j = 0; j < N2; j++)
{
num += boomerang(R, ver, N3, dp, dc, dk1, dk2);
}
NUM[ID] = num;
}
printf("%s: %0.4f\n", "time on clock", (double)(clock() - clock_timer) / CLOCKS_PER_SEC);
printf("%s: %0.4f\n", "time on wall", omp_get_wtime() - wall_timer);
double sum = 0;
double sum_temp = 1;
for (int i = 0; i < N1; i++)
sum += NUM[i];
printf("sum = %f\n", sum);
sum_temp = (double)(N1 * N2 * N3) / sum;
printf("2^(-%f)\n\n", log(sum_temp) / log(2));
printf("##########################\n");
return sum;
}
void convert_hexstr_to_statearray(int ver, char hex_str[], unsigned char dx[16])
{
for (int i = 0; i < (versions[ver][0] >> 3); i++)
{
char hex[3];
hex[0] = hex_str[2 * i];
hex[1] = hex_str[2 * i + 1];
hex[2] = '\0'; // null-terminate so strtol reads exactly two hex digits
dx[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
}
}
void convert_hexstr_to_tweakarray(int ver, char hex_str[], unsigned char dt[48])
{
for (int i = 0; i < (versions[ver][1] >> 3); i++)
{
char hex[3];
hex[0] = hex_str[2 * i];
hex[1] = hex_str[2 * i + 1];
hex[2] = '\0'; // null-terminate so strtol reads exactly two hex digits
dt[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
}
}
int main()
{
// srand((unsigned)time(NULL)); // Initialization, should only be called once. int r = rand();
// init_prng(1);
// //test all versions of Skinny
// for (i = 0; i < (sizeof(versions) / sizeof(*versions)); i++)
// {
// sprintf(name, "test_vectors_%i_%i.txt", versions[i][0], versions[i][1]);
// fic = fopen(name, "w");
// fprintf(fic, "\n\nSkinny-%i/%i: \n", versions[i][0], versions[i][1]);
// TestVectors(i);
// fclose(fic);
// printf("Generating test vectors for Skinny-%i/%i - saved in file test_vectors_%i_%i.txt \n", versions[i][0], versions[i][1], versions[i][0], versions[i][1]);
// }
unsigned char dp[16];
unsigned char dc[16];
unsigned char dk1[48];
unsigned char dk2[48];
// #######################################################################################################
// #######################################################################################################
// ############################## User must change only the following lines ##############################
int R = 6; // Number of rounds
int ver = 5; // Determine the version:
// [0 = Skinny-64-64]
// [1 = Skinny-64-128]
// [2 = Skinny-64-192]
// [3 = Skinny-128-128]
// [4 = Skinny-128-256]
// [5 = Skinny-128-384]
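// dp/dc are the input and returning plaintext differences of the boomerang and
// dk1/dk2 are the two tweakey differences (presumably the upper- and lower-trail
// differences of the distinguisher), all given as hex strings whose lengths
// match the block and tweakey sizes of the selected version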
char dp_str[] = "00000000000000000000004000000000";
char dc_str[] = "00000000000000000000000000000000";
char dk1_str[] = "00000000000000000000000000002a00000000000000000000000000000099000000000000000000000000000000f300";
char dk2_str[] = "000000000000000000000054000000000000000000000000000000f30000000000000000000000000000007f00000000";
// #######################################################################################################
// #######################################################################################################
convert_hexstr_to_statearray(ver, dp_str, dp);
convert_hexstr_to_statearray(ver, dc_str, dc);
convert_hexstr_to_tweakarray(ver, dk1_str, dk1);
convert_hexstr_to_tweakarray(ver, dk2_str, dk2);
//########################## Number of queries #########################
int N1 = Nthreads; // Number of parallel threads : N1
int deg1 = 5;
int deg2 = 17;
int N2 = 1 << deg1; // Number of bunches per thread : N2 = 2^(deg1)
int N3 = 1 << deg2; // Number of queries per bunch : N3 = 2^(deg2)
//################### Number of total queries : N1*N2*N3 ###############
char all_results[NumOfExperiments][20];
double sum = 0;
double sum_temp = 0;
for (int i = 0; i < NumOfExperiments; i++)
{
printf("Experiment Number %d:\n", i);
sum_temp = send_boomerangs(R, ver, N1, N2, N3, dp, dc, dk1, dk2);
sum += sum_temp;
sum_temp = (double)(N1 * N2 * N3) / sum_temp;
sprintf(all_results[i], "2^(-%0.2f), ", log(sum_temp) / log(2));
}
printf("A summary of all results:\n");
for (int i = 0; i < NumOfExperiments; i++)
{
printf("%s", all_results[i]);
}
printf("\n##########################\nAverage = 2^(-%0.4f)\n",
(log(NumOfExperiments) + log(N1) + log(N2) + log(N3) - log(sum))/log(2));
return 0;
}
|
DenseMatrix.h
|
//=================================================================================================
/*!
// \file blaze/math/smp/openmp/DenseMatrix.h
// \brief Header file for the OpenMP-based dense matrix SMP implementation
//
// Copyright (C) 2013 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
#ifndef _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_
#define _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <omp.h>
#include <blaze/math/Aliases.h>
#include <blaze/math/AlignmentFlag.h>
#include <blaze/math/constraints/SMPAssignable.h>
#include <blaze/math/expressions/DenseMatrix.h>
#include <blaze/math/expressions/SparseMatrix.h>
#include <blaze/math/Functions.h>
#include <blaze/math/simd/SIMDTrait.h>
#include <blaze/math/smp/ParallelSection.h>
#include <blaze/math/smp/SerialSection.h>
#include <blaze/math/smp/ThreadMapping.h>
#include <blaze/math/StorageOrder.h>
#include <blaze/math/traits/SubmatrixExprTrait.h>
#include <blaze/math/typetraits/AreSIMDCombinable.h>
#include <blaze/math/typetraits/IsDenseMatrix.h>
#include <blaze/math/typetraits/IsSMPAssignable.h>
#include <blaze/math/views/Submatrix.h>
#include <blaze/system/SMP.h>
#include <blaze/util/Assert.h>
#include <blaze/util/EnableIf.h>
#include <blaze/util/logging/FunctionTrace.h>
#include <blaze/util/mpl/And.h>
#include <blaze/util/mpl/Not.h>
#include <blaze/util/mpl/Or.h>
#include <blaze/util/StaticAssert.h>
#include <blaze/util/Types.h>
#include <blaze/util/typetraits/IsSame.h>
namespace blaze {
//=================================================================================================
//
// PLAIN ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP assignment of a dense matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side dense matrix to be assigned.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a dense
// matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side dense matrix
, bool SO2 > // Storage order of the right-hand side dense matrix
void smpAssign_backend( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
typedef ElementType_<MT1> ET1;
typedef ElementType_<MT2> ET2;
typedef SubmatrixExprTrait_<MT1,aligned> AlignedTarget;
typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget;
enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<MT1> >::size };
const bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && AreSIMDCombinable<ET1,ET2>::value );
const bool lhsAligned ( (~lhs).isAligned() );
const bool rhsAligned ( (~rhs).isAligned() );
const int threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 );
const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 );
const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );
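   // Each thread i is mapped to one (rowsPerThread x colsPerThread) block of the
   // matrix via the 2D thread mapping; when SIMD is usable, the per-thread block
   // sizes are rounded up to a multiple of SIMDSIZE so that aligned submatrix
   // views remain properly aligned.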
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
const size_t m( min( rowsPerThread, (~rhs).rows() - row ) );
const size_t n( min( colsPerThread, (~rhs).columns() - column ) );
if( simdEnabled && lhsAligned && rhsAligned ) {
AlignedTarget target( submatrix<aligned>( ~lhs, row, column, m, n ) );
assign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && lhsAligned ) {
AlignedTarget target( submatrix<aligned>( ~lhs, row, column, m, n ) );
assign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && rhsAligned ) {
UnalignedTarget target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
assign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else {
UnalignedTarget target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
assign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP assignment of a sparse matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side sparse matrix to be assigned.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a sparse
// matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side sparse matrix
, bool SO2 > // Storage order of the right-hand side sparse matrix
void smpAssign_backend( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget;
const size_t threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 );
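   // Sparse right-hand sides are always assigned through unaligned submatrix
   // views, so no SIMD-based padding of the per-thread block sizes is needed here.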
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
const size_t m( min( rowsPerThread, (~lhs).rows() - row ) );
const size_t n( min( colsPerThread, (~lhs).columns() - column ) );
UnalignedTarget target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
assign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be assigned.
// \return void
//
// This function implements the default OpenMP-based SMP assignment to a dense matrix. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
, Or< Not< IsSMPAssignable<MT1> >
, Not< IsSMPAssignable<MT2> > > > >
smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
assign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be assigned.
// \return void
//
// This function implements the OpenMP-based SMP assignment to a dense matrix. Due to the
// explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_PARALLEL_SECTION
{
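      // Fall back to a serial assignment if a serial section is active or the
      // right-hand side cannot be SMP-assigned; otherwise open an OpenMP parallel
      // region in which every thread handles its submatrix block via
      // smpAssign_backend().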
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
assign( ~lhs, ~rhs );
}
else {
#pragma omp parallel shared( lhs, rhs )
smpAssign_backend( ~lhs, ~rhs );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// ADDITION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP addition assignment of a dense matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side dense matrix to be added.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP addition assignment of a
// dense matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side dense matrix
, bool SO2 > // Storage order of the right-hand side dense matrix
void smpAddAssign_backend( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
typedef ElementType_<MT1> ET1;
typedef ElementType_<MT2> ET2;
typedef SubmatrixExprTrait_<MT1,aligned> AlignedTarget;
typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget;
enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<MT1> >::size };
const bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSame_<ET1,ET2> );
const bool lhsAligned ( (~lhs).isAligned() );
const bool rhsAligned ( (~rhs).isAligned() );
const int threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 );
const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 );
const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
const size_t m( min( rowsPerThread, (~rhs).rows() - row ) );
const size_t n( min( colsPerThread, (~rhs).columns() - column ) );
if( simdEnabled && lhsAligned && rhsAligned ) {
AlignedTarget target( submatrix<aligned>( ~lhs, row, column, m, n ) );
addAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && lhsAligned ) {
AlignedTarget target( submatrix<aligned>( ~lhs, row, column, m, n ) );
addAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && rhsAligned ) {
UnalignedTarget target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
addAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else {
UnalignedTarget target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
addAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP addition assignment of a sparse matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side sparse matrix to be added.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP addition assignment of a
// sparse matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side sparse matrix
, bool SO2 > // Storage order of the right-hand side sparse matrix
void smpAddAssign_backend( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget;
const size_t threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
const size_t m( min( rowsPerThread, (~lhs).rows() - row ) );
const size_t n( min( colsPerThread, (~lhs).columns() - column ) );
UnalignedTarget target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
addAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be added.
// \return void
//
// This function implements the default OpenMP-based SMP addition assignment to a dense matrix.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
, Or< Not< IsSMPAssignable<MT1> >
, Not< IsSMPAssignable<MT2> > > > >
smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
addAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be added.
// \return void
//
// This function implements the OpenMP-based SMP addition assignment to a dense matrix. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_PARALLEL_SECTION
{
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
addAssign( ~lhs, ~rhs );
}
else {
#pragma omp parallel shared( lhs, rhs )
smpAddAssign_backend( ~lhs, ~rhs );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SUBTRACTION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP subtraction assignment of a dense matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side dense matrix to be subtracted.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP subtraction assignment
// of a dense matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side dense matrix
, bool SO2 > // Storage order of the right-hand side dense matrix
void smpSubAssign_backend( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
typedef ElementType_<MT1> ET1;
typedef ElementType_<MT2> ET2;
typedef SubmatrixExprTrait_<MT1,aligned> AlignedTarget;
typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget;
enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<MT1> >::size };
const bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSame_<ET1,ET2> );
const bool lhsAligned ( (~lhs).isAligned() );
const bool rhsAligned ( (~rhs).isAligned() );
const int threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 );
const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 );
const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
const size_t m( min( rowsPerThread, (~rhs).rows() - row ) );
const size_t n( min( colsPerThread, (~rhs).columns() - column ) );
if( simdEnabled && lhsAligned && rhsAligned ) {
AlignedTarget target( submatrix<aligned>( ~lhs, row, column, m, n ) );
subAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && lhsAligned ) {
AlignedTarget target( submatrix<aligned>( ~lhs, row, column, m, n ) );
subAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && rhsAligned ) {
UnalignedTarget target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
subAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else {
UnalignedTarget target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
subAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP subtraction assignment of a sparse matrix to a dense
// matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side sparse matrix to be subtracted.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP subtraction assignment
// of a sparse matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side sparse matrix
, bool SO2 > // Storage order of the right-hand side sparse matrix
void smpSubAssign_backend( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget;
const size_t threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
const size_t m( min( rowsPerThread, (~lhs).rows() - row ) );
const size_t n( min( colsPerThread, (~lhs).columns() - column ) );
UnalignedTarget target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
subAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be subtracted.
// \return void
//
// This function implements the default OpenMP-based SMP subtraction assignment to a dense matrix.
// Due to the explicit application of the SFINAE principle, this function can only be selected by
// the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
, Or< Not< IsSMPAssignable<MT1> >
, Not< IsSMPAssignable<MT2> > > > >
smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
subAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be subtracted.
// \return void
//
// This function implements the OpenMP-based SMP subtraction assignment to a dense matrix. Due
// to the explicit application of the SFINAE principle, this function can only
// be selected by the compiler in case both operands are SMP-assignable and the element types of
// both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_PARALLEL_SECTION
{
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
subAssign( ~lhs, ~rhs );
}
else {
#pragma omp parallel shared( lhs, rhs )
smpSubAssign_backend( ~lhs, ~rhs );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// MULTIPLICATION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be multiplied.
// \return void
//
// This function implements the default OpenMP-based SMP multiplication assignment to a dense
// matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< IsDenseMatrix<MT1> >
smpMultAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
multAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// COMPILE TIME CONSTRAINT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
namespace {
BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE );
}
/*! \endcond */
//*************************************************************************************************
} // namespace blaze
#endif
|
GB_binop__copysign_fp64.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__copysign_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__copysign_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__copysign_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__copysign_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__copysign_fp64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__copysign_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__copysign_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__copysign_fp64)
// C=scalar+B GB (_bind1st__copysign_fp64)
// C=scalar+B' GB (_bind1st_tran__copysign_fp64)
// C=A+scalar GB (_bind2nd__copysign_fp64)
// C=A'+scalar GB (_bind2nd_tran__copysign_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = copysign (aij, bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = copysign (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COPYSIGN || GxB_NO_FP64 || GxB_NO_COPYSIGN_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__copysign_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__copysign_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__copysign_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__copysign_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__copysign_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__copysign_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__copysign_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__copysign_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__copysign_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
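// GBB (Bb, p) tests whether entry p is present in the bitmap; a NULL Bb is
// treated as all-present, so full matrices are processed without skipping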
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
Cx [p] = copysign (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__copysign_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = Ax [p] ;
Cx [p] = copysign (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = copysign (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__copysign_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = copysign (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__copysign_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ctl_list.c
|
/********************************************************************[libaroma]*
* Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*______________________________________________________________________________
*
* Filename : ctl_list.c
* Description : list control
*
* + This is part of libaroma, an embedded ui toolkit.
* + 04/03/15 - Author(s): Ahmad Amarullah
*
*/
#ifndef __libaroma_ctl_list_c__
#define __libaroma_ctl_list_c__
#include <aroma_internal.h>
#include "../ui/ui_internal.h"
/* SCROLL CONTROL HANDLER */
dword _libaroma_ctl_list_message(
LIBAROMA_CONTROLP, LIBAROMA_CTL_SCROLL_CLIENTP,LIBAROMA_MSGP,int,int);
void _libaroma_ctl_list_destroy(
LIBAROMA_CONTROLP, LIBAROMA_CTL_SCROLL_CLIENTP);
byte _libaroma_ctl_list_thread(
LIBAROMA_CONTROLP, LIBAROMA_CTL_SCROLL_CLIENTP);
void _libaroma_ctl_list_draw(
LIBAROMA_CONTROLP, LIBAROMA_CTL_SCROLL_CLIENTP, LIBAROMA_CANVASP,
int, int, int, int);
static LIBAROMA_CTL_SCROLL_CLIENT_HANDLER _libaroma_ctl_list_handler={
message:_libaroma_ctl_list_message,
draw:_libaroma_ctl_list_draw,
destroy:_libaroma_ctl_list_destroy,
thread:_libaroma_ctl_list_thread
};
/* list structure */
typedef struct {
int h;
int hpad;
int vpad;
int itemn;
byte flags;
LIBAROMA_CTL_LIST_ITEMP first;
LIBAROMA_CTL_LIST_ITEMP last;
LIBAROMA_CTL_LIST_ITEMP touched;
LIBAROMA_CTL_LIST_ITEMP focused;
int threadn;
LIBAROMA_CTL_LIST_ITEMP * threads;
LIBAROMA_MUTEX mutex;
LIBAROMA_MUTEX imutex;
LIBAROMA_CTL_LIST_TOUCHPOS pos;
} LIBAROMA_CTL_LIST, * LIBAROMA_CTL_LISTP;
/*
* Function : __libaroma_ctl_list_item_reg_thread
* Return Value: byte
* Descriptions: register item thread
*/
byte __libaroma_ctl_list_item_reg_thread(
LIBAROMA_CONTROLP ctl, LIBAROMA_CTL_LIST_ITEMP item){
LIBAROMA_CTL_SCROLL_CLIENTP client = libaroma_ctl_scroll_get_client(ctl);
if (!client){
return 0;
}
if (client->handler!=&_libaroma_ctl_list_handler){
return 0;
}
/*
if (!item->handler->message){
ALOGW("item_reg_thread item doesn't have message handler");
return 0;
}
*/
LIBAROMA_CTL_LISTP mi = (LIBAROMA_CTL_LISTP) client->internal;
if (mi->threadn==0){
mi->threads = (LIBAROMA_CTL_LIST_ITEMP *)
malloc(sizeof(LIBAROMA_CTL_LIST_ITEMP));
mi->threads[0] = item;
mi->threadn=1;
}
else{
int i;
for (i=0;i<mi->threadn;i++){
if (mi->threads[i]==item){
/* already registered */
return 2;
}
}
LIBAROMA_CTL_LIST_ITEMP * new_threads = (LIBAROMA_CTL_LIST_ITEMP *)
realloc(mi->threads, sizeof(LIBAROMA_CTL_LIST_ITEMP)*(mi->threadn+1));
if (!new_threads){
if (mi->threads){
free(mi->threads);
}
mi->threads=NULL;
mi->threadn=0;
ALOGW("item_reg_thread cannot realloc threads");
return 0;
}
mi->threads=new_threads;
mi->threads[mi->threadn]=item;
mi->threadn++;
}
return 1;
} /* End of __libaroma_ctl_list_item_reg_thread */
/*
* Function : __libaroma_ctl_list_item_unreg_thread
* Return Value: byte
* Descriptions: unregister item thread
*/
byte __libaroma_ctl_list_item_unreg_thread(
LIBAROMA_CONTROLP ctl, LIBAROMA_CTL_LIST_ITEMP item){
LIBAROMA_CTL_SCROLL_CLIENTP client = libaroma_ctl_scroll_get_client(ctl);
if (!client){
return 0;
}
if (client->handler!=&_libaroma_ctl_list_handler){
return 0;
}
LIBAROMA_CTL_LISTP mi = (LIBAROMA_CTL_LISTP) client->internal;
if (mi->threadn<1){
return 0;
}
if (mi->threadn==1){
if (mi->threads[0]==item){
free(mi->threads);
mi->threads=NULL;
mi->threadn=0;
return 1;
}
return 0;
}
LIBAROMA_CTL_LIST_ITEMP * new_threads = (LIBAROMA_CTL_LIST_ITEMP *)
malloc(sizeof(LIBAROMA_CTL_LIST_ITEMP)*(mi->threadn-1));
if (!new_threads){
ALOGW("item_unreg_thread cannot allocate new threads");
if (mi->threads){
free(mi->threads);
}
mi->threads=NULL;
mi->threadn=0;
return 0;
}
int i;
int z=0;
int n=-1;
for (i=0;i<mi->threadn;i++){
if (mi->threads[i]==item){
n=i;
}
else{
new_threads[z++]=mi->threads[i];
}
}
if (n>-1){
free(mi->threads);
mi->threads=new_threads;
mi->threadn--;
return 1;
}
free(new_threads);
ALOGV("item_unreg_thread item is unregistered");
return 0;
} /* End of __libaroma_ctl_list_item_unreg_thread */
/*
* Function : _libaroma_ctl_list_draw_item_fresh
* Return Value: void
* Descriptions: fresh item drawing
*/
void _libaroma_ctl_list_draw_item_fresh(
LIBAROMA_CONTROLP ctl,
LIBAROMA_CTL_LIST_ITEMP item,
LIBAROMA_CANVASP canvas,
word bgcolor,
int hpad,
byte flag
){
if ((!canvas)||(!item)||(!ctl)) {
return;
}
/* cleanup */
if (!(flag&LIBAROMA_CTL_LIST_ITEM_DRAW_ADDONS)){
libaroma_canvas_setcolor(canvas,bgcolor,0xff);
}
LIBAROMA_CANVASP tcanvas=NULL;
LIBAROMA_CANVASP ccv = canvas;
/* have horizontal padding */
if (hpad>0){
tcanvas = libaroma_canvas_area(
canvas, hpad, 0, canvas->w-hpad*2, canvas->h
);
if (tcanvas){
ccv = tcanvas;
}
}
/* draw directly */
if (item->handler->draw!=NULL){
item->handler->draw(ctl,item,ccv,bgcolor,
flag);
}
if (tcanvas){
libaroma_canvas_free(tcanvas);
}
} /* End of _libaroma_ctl_list_draw_item_fresh */
/*
* Function : _libaroma_ctl_list_free_state
* Return Value: void
* Descriptions: free state
*/
void _libaroma_ctl_list_free_state(LIBAROMA_CTL_LIST_ITEMP item){
if (item->state!=NULL){
if (item->state->cache_rest){
libaroma_canvas_free(item->state->cache_rest);
}
if (item->state->cache_push){
libaroma_canvas_free(item->state->cache_push);
}
if (item->state->cache_client){
libaroma_canvas_free(item->state->cache_client);
}
free(item->state);
ALOGT("[X] State Freed %x",item->id);
}
item->state=NULL;
} /* End of _libaroma_ctl_list_free_state */
/*
 * Function : _libaroma_ctl_list_init_state
* Return Value: byte
* Descriptions: create new state
*/
byte _libaroma_ctl_list_init_state(
LIBAROMA_CTL_LIST_ITEMP item
){
if (item->state==NULL){
item->state =
(LIBAROMA_CTL_LIST_ITEM_STATEP) calloc(sizeof(
LIBAROMA_CTL_LIST_ITEM_STATE
),1);
if (!item->state){
ALOGW("list_new_state alloc memory failed");
return 0;
}
ALOGT("[0] State Created %x",item->id);
return 1;
}
return 2;
} /* End of _libaroma_ctl_list_init_state */
/*
* Function : _libaroma_ctl_list_init_state_cache
* Return Value: byte
* Descriptions: init cache canvases
*/
byte _libaroma_ctl_list_init_state_cache(
LIBAROMA_CONTROLP ctl,
LIBAROMA_CTL_LIST_ITEMP item
){
LIBAROMA_CTL_SCROLL_CLIENTP client = libaroma_ctl_scroll_get_client(ctl);
if (!client){
return 0;
}
if (client->handler!=&_libaroma_ctl_list_handler){
return 0;
}
LIBAROMA_CTL_LISTP mi = (LIBAROMA_CTL_LISTP) client->internal;
if (item->state){
if (!item->state->cache_rest){
item->state->cache_rest=libaroma_canvas(ctl->w,item->h);
}
if (!item->state->cache_push){
item->state->cache_push=libaroma_canvas(ctl->w,item->h);
}
word bgcolor = libaroma_ctl_scroll_get_bg_color(ctl);
if (item->state->cache_rest){
_libaroma_ctl_list_draw_item_fresh(
ctl,item,item->state->cache_rest,bgcolor,mi->hpad,
LIBAROMA_CTL_LIST_ITEM_DRAW_NORMAL|LIBAROMA_CTL_LIST_ITEM_DRAW_CACHE
);
}
if (item->state->cache_push){
_libaroma_ctl_list_draw_item_fresh(
ctl,item,item->state->cache_push,bgcolor,mi->hpad,
LIBAROMA_CTL_LIST_ITEM_DRAW_PUSHED|LIBAROMA_CTL_LIST_ITEM_DRAW_CACHE
);
}
return 1;
}
return 0;
} /* End of _libaroma_ctl_list_init_state_cache */
/*
* Function : _libaroma_ctl_list_draw_item
* Return Value: void
* Descriptions: draw item
*/
void _libaroma_ctl_list_draw_item(
LIBAROMA_CONTROLP ctl,
LIBAROMA_CTL_LIST_ITEMP item,
LIBAROMA_CANVASP canvas,
word bgcolor
){
if ((!item)||(!canvas)){
return;
}
LIBAROMA_CTL_SCROLL_CLIENTP client = libaroma_ctl_scroll_get_client(ctl);
if (!client){
return;
}
if (client->handler!=&_libaroma_ctl_list_handler){
return;
}
LIBAROMA_CTL_LISTP mi = (LIBAROMA_CTL_LISTP) client->internal;
/* normal animation handler */
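  /* When a press state exists, the frame is composed from two cached canvases:
   * the resting look (cache_rest) is drawn first, then the pressed look
   * (cache_push) is blended on top with a per-frame opacity and a growing
   * circular mask, which produces the ripple press effect. */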
if (item->state){
if (item->state->normal_handler){
libaroma_draw(canvas, item->state->cache_rest, 0, 0, 0);
int ripple_i = 0;
int ripple_p = 0;
while(libaroma_ripple_loop(&item->state->ripple,&ripple_i,&ripple_p)){
int x=0;
int y=(item->y+libaroma_dp(1));
int size=0;
byte push_opacity=0;
byte ripple_opacity=0;
if (libaroma_ripple_calculation(
&item->state->ripple, canvas->w, canvas->h,
&push_opacity, &ripple_opacity,
&x, &y, &size, ripple_p
)){
libaroma_draw_opacity(canvas,
item->state->cache_push, 0, 0, 2,
(byte) push_opacity
);
libaroma_draw_mask_circle(
canvas,
item->state->cache_push,
x, y,
x, y,
size,
ripple_opacity
);
}
}
if (item->state->normal_handler==2){
_libaroma_ctl_list_draw_item_fresh(
ctl, item,canvas,bgcolor,mi->hpad,
LIBAROMA_CTL_LIST_ITEM_DRAW_ADDONS
);
}
return;
}
}
/* draw fresh */
_libaroma_ctl_list_draw_item_fresh(
ctl, item,canvas,bgcolor,mi->hpad, LIBAROMA_CTL_LIST_ITEM_DRAW_NORMAL
);
} /* End of _libaroma_ctl_list_draw_item */
/*
* Function : _libaroma_ctl_list_dodraw_item
* Return Value: byte
* Descriptions: do draw item directly
*/
byte _libaroma_ctl_list_dodraw_item(
LIBAROMA_CONTROLP ctl,
LIBAROMA_CTL_LIST_ITEMP item
){
if (!item){
return 0;
}
byte res=0;
if (libaroma_ctl_scroll_is_visible(ctl,item->y,item->h)){
word bgcolor = libaroma_ctl_scroll_get_bg_color(ctl);
LIBAROMA_CANVASP canvas=libaroma_canvas(ctl->w,item->h);
if (canvas!=NULL){
_libaroma_ctl_list_draw_item(
ctl,item,canvas,bgcolor
);
res=libaroma_ctl_scroll_blit(
ctl,
canvas,
0, item->y, canvas->w, canvas->h,
0
);
libaroma_canvas_free(canvas);
}
}
return res;
} /* End of _libaroma_ctl_list_dodraw_item */
/*
* Function : _libaroma_ctl_list_thread
* Return Value: byte
* Descriptions: scroll list thread
*/
byte _libaroma_ctl_list_thread(
LIBAROMA_CONTROLP ctl, LIBAROMA_CTL_SCROLL_CLIENTP client){
if (client->handler!=&_libaroma_ctl_list_handler){
return 0;
}
LIBAROMA_CTL_LISTP mi = (LIBAROMA_CTL_LISTP) client->internal;
byte need_redraw=0;
int i;
libaroma_mutex_lock(mi->imutex);
libaroma_mutex_lock(mi->mutex);
int num_thread = mi->threadn;
if (num_thread>0){
    LIBAROMA_CTL_LIST_ITEMP * threads = (LIBAROMA_CTL_LIST_ITEMP *) malloc(
      sizeof(LIBAROMA_CTL_LIST_ITEMP)*num_thread);
for (i=0;i<num_thread;i++){
threads[i]=mi->threads[i];
}
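    /* Work on a snapshot of the registered items: the per-item handlers below
     * run (optionally via OpenMP) against this copy, and the shared
     * mi->threads list is only modified afterwards in the unregister pass. */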
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (i=0;i<num_thread;i++){
LIBAROMA_CTL_LIST_ITEMP item = threads[i];
if (item){
byte unreg_me=0;
byte is_draw=0;
if (item->state){
/* normal behaviour */
if (item->state->normal_handler){
byte res = libaroma_ripple_thread(&item->state->ripple, 0);
if (res&LIBAROMA_RIPPLE_REDRAW){
is_draw = 1;
}
if (res&LIBAROMA_RIPPLE_HOLDED){
if (item->handler->message){
int cx = 0; int cy=0;
LIBAROMA_CTL_LIST_TOUCHPOSP pos = libaroma_ctl_list_getpos(ctl);
if (pos){
cy = pos->last_y - item->y;
cx = pos->last_x;
}
byte msgret=item->handler->message(
ctl,
item,
LIBAROMA_CTL_LIST_ITEM_MSG_TOUCH_HOLDED,
LIBAROMA_CTL_LIST_ITEM_MSGPARAM_HOLDED,
cx, cy
);
if (msgret&LIBAROMA_CTL_LIST_ITEM_MSGRET_NEED_DRAW){
is_draw=1;
}
if (msgret&LIBAROMA_CTL_LIST_ITEM_MSGRET_UNREG_THREAD){
unreg_me=1;
}
}
}
if (res&LIBAROMA_RIPPLE_RELEASED){
_libaroma_ctl_list_free_state(item);
unreg_me=1;
}
}
}
if (item->handler->message){
byte msgret=item->handler->message(
ctl,
item,
LIBAROMA_CTL_LIST_ITEM_MSG_THREAD,
libaroma_ctl_scroll_is_visible(ctl,item->y,item->h),
0,
0
);
if (msgret&LIBAROMA_CTL_LIST_ITEM_MSGRET_NEED_DRAW){
is_draw=1;
}
if (msgret&LIBAROMA_CTL_LIST_ITEM_MSGRET_UNREG_THREAD){
unreg_me=1;
}
}
if (is_draw){
if (_libaroma_ctl_list_dodraw_item(ctl,item)){
need_redraw=1;
}
}
/* unreg thread */
if (!unreg_me){
threads[i] = NULL;
}
}
}
/* unreg threads */
for (i=0;i<num_thread;i++){
LIBAROMA_CTL_LIST_ITEMP item = threads[i];
if (item!=NULL){
if (!(item->flags&LIBAROMA_CTL_LIST_ITEM_REGISTER_THREAD)){
__libaroma_ctl_list_item_unreg_thread(ctl, item);
}
}
}
free(threads);
}
libaroma_mutex_unlock(mi->mutex);
libaroma_mutex_unlock(mi->imutex);
return need_redraw;
} /* End of _libaroma_ctl_list_thread */
/*
* Function : _libaroma_ctl_list_item_by_y
* Return Value: void
* Descriptions: get item by y position
*/
LIBAROMA_CTL_LIST_ITEMP _libaroma_ctl_list_item_by_y(
LIBAROMA_CONTROLP ctl,
LIBAROMA_CTL_SCROLL_CLIENTP client,
int y){
if (client->handler!=&_libaroma_ctl_list_handler){
return NULL;
}
LIBAROMA_CTL_LISTP mi = (LIBAROMA_CTL_LISTP) client->internal;
/* find first item */
LIBAROMA_CTL_LIST_ITEMP f = mi->first;
while(f){
if ((f->y<=y)&&(f->y+f->h>y)){
return f;
}
f = f->next;
}
return NULL;
} /* End of _libaroma_ctl_list_item_by_y */
/*
* Function : _libaroma_ctl_list_draw
* Return Value: void
* Descriptions: draw routine
*/
void _libaroma_ctl_list_draw(
LIBAROMA_CONTROLP ctl,
LIBAROMA_CTL_SCROLL_CLIENTP client,
LIBAROMA_CANVASP cv,
int x, int y, int w, int h){
if (client->handler!=&_libaroma_ctl_list_handler){
return;
}
LIBAROMA_CTL_LISTP mi = (LIBAROMA_CTL_LISTP) client->internal;
if (y<mi->vpad){
libaroma_draw_rect(
cv, 0, 0, w, mi->vpad-y,
libaroma_ctl_scroll_get_bg_color(ctl),
0xff
);
}
if (y+h>mi->h-mi->vpad){
int dh=(y+h)-(mi->h-mi->vpad);
libaroma_draw_rect(
cv, 0, h-dh, w, dh,
libaroma_ctl_scroll_get_bg_color(ctl),
0xff
);
}
libaroma_mutex_lock(mi->imutex);
/* find first item */
int current_index = 0;
LIBAROMA_CTL_LIST_ITEMP f = mi->first;
while(f){
if (f->y+f->h>y){
break;
}
f = f->next;
current_index++;
}
word bgcolor = libaroma_ctl_scroll_get_bg_color(ctl);
/* draw routine */
LIBAROMA_CTL_LIST_ITEMP item = f;
while(item){
if (item->y>=y+h){
break;
}
LIBAROMA_CANVASP canvas=NULL;
byte is_area=0;
if ((item->y>=y)&&(item->y+item->h<y+cv->h)){
canvas = libaroma_canvas_area(cv,0,item->y-y,w,item->h);
is_area=1;
}
else{
canvas = libaroma_canvas(w,item->h);
}
if (canvas!=NULL){
_libaroma_ctl_list_draw_item(
ctl, item, canvas, bgcolor
);
/* blit into working canvas */
if (!is_area){
libaroma_draw(cv,canvas,0,item->y-y,0);
}
libaroma_canvas_free(canvas);
}
item=item->next;
current_index++;
}
libaroma_mutex_unlock(mi->imutex);
} /* End of _libaroma_ctl_list_draw */
/*
* Function : _libaroma_ctl_list_scroll_message
* Return Value: dword
* Descriptions: handle scroll message
*/
dword _libaroma_ctl_list_scroll_message(
LIBAROMA_CONTROLP ctl,
LIBAROMA_CTL_SCROLL_CLIENTP client,
LIBAROMA_CTL_LISTP mi,
int msg,
int param,
int x,
int y){
switch(msg){
case LIBAROMA_CTL_SCROLL_MSG_ISNEED_TOUCH:{
if ((y<mi->vpad)||(y>mi->h-mi->vpad)){
ALOGT("list_scroll_message ISNEED(%i,%i) not needed",x,y);
/* no need touch handle */
return 0;
}
LIBAROMA_CTL_LIST_ITEMP item=
_libaroma_ctl_list_item_by_y(ctl, client, y);
if (item){
if (item->flags&LIBAROMA_CTL_LIST_ITEM_RECEIVE_TOUCH){
mi->touched = item;
ALOGT("list_scroll_message ISNEED(%i,%i) needed",x,y);
return LIBAROMA_CTL_SCROLL_MSG_HANDLED;
}
}
ALOGT("list_scroll_message ISNEED(%i,%i) item don't use touch",x,y);
return 0;
}
break;
case LIBAROMA_CTL_SCROLL_MSG_TOUCH_DOWN:{
ALOGT("list_scroll_message TOUCH_DOWN(%i,%i)",x,y);
mi->pos.start_x=x;
mi->pos.start_y=y;
mi->pos.last_x=x;
mi->pos.last_y=y;
byte retval = 0;
if (mi->touched!=NULL){
byte mres = 0;
if (mi->touched->handler->message){
int cy = y - mi->touched->y;
mres=mi->touched->handler->message(
ctl,
mi->touched,
LIBAROMA_CTL_LIST_ITEM_MSG_TOUCH_DOWN,
0,
x, cy
);
}
if (!(mres&LIBAROMA_CTL_LIST_ITEM_MSGRET_HANDLED)){
/* init state item */
if (_libaroma_ctl_list_init_state(mi->touched)){
_libaroma_ctl_list_init_state_cache(ctl,mi->touched);
if (mres&LIBAROMA_CTL_LIST_ITEM_MSGRET_HAVE_ADDONS_DRAW){
mi->touched->state->normal_handler = 2;
}
else{
mi->touched->state->normal_handler = 1;
}
libaroma_mutex_lock(mi->mutex);
libaroma_ripple_down(&mi->touched->state->ripple, x, y);
__libaroma_ctl_list_item_reg_thread(ctl, mi->touched);
libaroma_mutex_unlock(mi->mutex);
}
}
else if(mi->touched->state){
mi->touched->state->normal_handler = 0;
}
if (mres&LIBAROMA_CTL_LIST_ITEM_MSGRET_NEED_DRAW){
if (_libaroma_ctl_list_dodraw_item(ctl,mi->touched)){
retval=LIBAROMA_CTL_SCROLL_MSG_NEED_DRAW;
}
}
}
return retval;
}
break;
case LIBAROMA_CTL_SCROLL_MSG_TOUCH_MOVE:{
ALOGT("list_scroll_message TOUCH_MOVE(%i,%i)",x,y);
mi->pos.last_x=x;
mi->pos.last_y=y;
byte retval = 0;
if (mi->touched!=NULL){
byte mres = 0;
if (mi->touched->handler->message){
int cy = y - mi->touched->y;
mres=mi->touched->handler->message(
ctl,
mi->touched,
LIBAROMA_CTL_LIST_ITEM_MSG_TOUCH_MOVE,
0,
x, cy
);
}
if (mi->touched->state){
libaroma_ripple_move(&mi->touched->state->ripple,x,y);
}
if (mres&LIBAROMA_CTL_LIST_ITEM_MSGRET_NEED_DRAW){
if (_libaroma_ctl_list_dodraw_item(ctl,mi->touched)){
retval=LIBAROMA_CTL_SCROLL_MSG_NEED_DRAW;
}
}
}
return retval;
}
break;
case LIBAROMA_CTL_SCROLL_MSG_TOUCH_UP:
case LIBAROMA_CTL_SCROLL_MSG_TOUCH_CANCEL:{
ALOGT_IF(msg==LIBAROMA_CTL_SCROLL_MSG_TOUCH_UP,
"list_scroll_message TOUCH_UP(%i,%i)",x,y);
ALOGT_IF(msg==LIBAROMA_CTL_SCROLL_MSG_TOUCH_CANCEL,
"list_scroll_message TOUCH_CANCEL(%i,%i)",x,y);
mi->pos.last_x=x;
mi->pos.last_y=y;
byte retval = 0;
if (mi->touched!=NULL){
dword param_msg = 0;
if (mi->touched->state){
if (msg==LIBAROMA_CTL_SCROLL_MSG_TOUCH_CANCEL){
libaroma_ripple_cancel(&mi->touched->state->ripple);
}
else{
byte res = libaroma_ripple_up(&mi->touched->state->ripple,0);
if (res&LIBAROMA_RIPPLE_HOLDED){
param_msg = LIBAROMA_CTL_LIST_ITEM_MSGPARAM_HOLDED;
}
}
}
byte mres = 0;
if (mi->touched->handler->message){
int cy = y - mi->touched->y;
mres=mi->touched->handler->message(
ctl,
mi->touched,
(msg==LIBAROMA_CTL_SCROLL_MSG_TOUCH_UP)?
LIBAROMA_CTL_LIST_ITEM_MSG_TOUCH_UP:
LIBAROMA_CTL_LIST_ITEM_MSG_TOUCH_CANCEL,
param_msg,
x, cy
);
}
if (mres&LIBAROMA_CTL_LIST_ITEM_MSGRET_NEED_DRAW){
if (_libaroma_ctl_list_dodraw_item(ctl,mi->touched)){
retval=LIBAROMA_CTL_SCROLL_MSG_NEED_DRAW;
}
}
}
return retval;
}
break;
}
return 0;
} /* End of _libaroma_ctl_list_scroll_message */
/*
* Function : _libaroma_ctl_list_message
* Return Value: dword
* Descriptions: message handler
*/
dword _libaroma_ctl_list_message(
LIBAROMA_CONTROLP ctl,
LIBAROMA_CTL_SCROLL_CLIENTP client,
LIBAROMA_MSGP msg,
int x, int y){
if (client->handler!=&_libaroma_ctl_list_handler){
return 0;
}
LIBAROMA_CTL_LISTP mi = (LIBAROMA_CTL_LISTP) client->internal;
dword res=0;
/* handle the message */
libaroma_mutex_lock(mi->imutex);
switch(msg->msg){
case LIBAROMA_CTL_SCROLL_MSG:{
res=_libaroma_ctl_list_scroll_message(
ctl, client, mi, msg->x, msg->y, x, y
);
}
break;
}
libaroma_mutex_unlock(mi->imutex);
return res;
} /* End of _libaroma_ctl_list_message */
/*
* Function : _libaroma_ctl_list_destroy
* Return Value: void
* Descriptions: destroy scroll list client
*/
void _libaroma_ctl_list_destroy(
LIBAROMA_CONTROLP ctl, LIBAROMA_CTL_SCROLL_CLIENTP client){
if (client->handler!=&_libaroma_ctl_list_handler){
return;
}
LIBAROMA_CTL_LISTP mi = (LIBAROMA_CTL_LISTP) client->internal;
/* cleanup items */
LIBAROMA_CTL_LIST_ITEMP f = mi->first;
while(f){
LIBAROMA_CTL_LIST_ITEMP p = f;
f = p->next;
/* destroy */
if (p->handler->destroy!=NULL){
p->handler->destroy(ctl,p);
}
_libaroma_ctl_list_free_state(p);
free(p);
}
if (mi->threadn>0){
free(mi->threads);
}
/* free internal data */
libaroma_mutex_free(mi->imutex);
libaroma_mutex_free(mi->mutex);
free(mi);
client->internal=NULL;
client->handler=NULL;
} /* End of _libaroma_ctl_list_destroy */
/*
* Function : libaroma_ctl_list
* Return Value: LIBAROMA_CONTROLP
* Descriptions: create list scroll control
*/
LIBAROMA_CONTROLP libaroma_ctl_list(
LIBAROMA_WINDOWP win, word id,
int x, int y, int w, int h,
int horizontal_padding,
int vertical_padding,
word bg_color, byte flags
){
/* allocating internal data */
LIBAROMA_CTL_LISTP mi = (LIBAROMA_CTL_LISTP)
calloc(sizeof(LIBAROMA_CTL_LIST),1);
if (!mi){
ALOGW("libaroma_ctl_list cannot allocating memory for list control");
return NULL;
}
mi->vpad = libaroma_window_measure_point(vertical_padding);
mi->hpad = libaroma_window_measure_point(horizontal_padding);
mi->h = mi->vpad*2;
mi->flags = flags;
libaroma_mutex_init(mi->mutex);
libaroma_mutex_init(mi->imutex);
/* create scroll control */
LIBAROMA_CONTROLP ctl = libaroma_ctl_scroll(
win, id, x, y, w, h, bg_color, flags
);
/* set scroll client */
libaroma_ctl_scroll_set_client(
ctl,
(voidp) mi,
&_libaroma_ctl_list_handler
);
/* set initial height */
libaroma_ctl_scroll_set_height(ctl, mi->h);
return ctl;
} /* End of libaroma_ctl_list */
/*
* Function : __libaroma_ctl_list_repos_next_items
* Return Value: byte
* Descriptions: reposition next items
*/
byte __libaroma_ctl_list_repos_next_items(LIBAROMA_CTL_LIST_ITEMP first,
int y){
LIBAROMA_CTL_LIST_ITEMP f=first;
while(f){
f->y=y;
y+=f->h;
f = f->next;
}
return 1;
} /* End of __libaroma_ctl_list_repos_next_items */
/*
* Function : libaroma_ctl_list_getpos
* Return Value: LIBAROMA_CTL_LIST_TOUCHPOSP
* Descriptions: get touch positions
*/
LIBAROMA_CTL_LIST_TOUCHPOSP libaroma_ctl_list_getpos(LIBAROMA_CONTROLP ctl){
LIBAROMA_CTL_SCROLL_CLIENTP client = libaroma_ctl_scroll_get_client(ctl);
if (!client){
return NULL;
}
if (client->handler!=&_libaroma_ctl_list_handler){
return NULL;
}
LIBAROMA_CTL_LISTP mi = (LIBAROMA_CTL_LISTP) client->internal;
return &mi->pos;
} /* End of libaroma_ctl_list_getpos */
/*
* Function : libaroma_ctl_list_get_item_internal
* Return Value: LIBAROMA_CTL_LIST_ITEMP
* Descriptions: get item at index or id
*/
LIBAROMA_CTL_LIST_ITEMP libaroma_ctl_list_get_item_internal(
LIBAROMA_CONTROLP ctl, int index, byte find_id
){
LIBAROMA_CTL_SCROLL_CLIENTP client = libaroma_ctl_scroll_get_client(ctl);
if (!client){
return NULL;
}
if (client->handler!=&_libaroma_ctl_list_handler){
return NULL;
}
LIBAROMA_CTL_LISTP mi = (LIBAROMA_CTL_LISTP) client->internal;
if (!find_id){
if (index==-1){
return mi->last;
}
else if (index==0){
return mi->first;
}
}
int curr_index = 0;
libaroma_mutex_lock(mi->imutex);
LIBAROMA_CTL_LIST_ITEMP f = mi->first;
if (f){
while(f){
if (((!find_id)&&(curr_index==index))||((find_id)&&(f->id==index))) {
libaroma_mutex_unlock(mi->imutex);
return f;
}
f = f->next;
curr_index++;
}
}
libaroma_mutex_unlock(mi->imutex);
/* not found */
return NULL;
} /* End of libaroma_ctl_list_get_item_internal */
/*
* Function : libaroma_ctl_list_del_itemp_internal
* Return Value: byte
* Descriptions: del item from list
*/
byte libaroma_ctl_list_del_itemp_internal(
LIBAROMA_CONTROLP ctl, LIBAROMA_CTL_LIST_ITEMP f
){
LIBAROMA_CTL_SCROLL_CLIENTP client = libaroma_ctl_scroll_get_client(ctl);
if (!client){
return 0;
}
if (client->handler!=&_libaroma_ctl_list_handler){
return 0;
}
LIBAROMA_CTL_LISTP mi = (LIBAROMA_CTL_LISTP) client->internal;
libaroma_mutex_lock(mi->imutex);
libaroma_mutex_lock(mi->mutex);
if (f){
if ((f==mi->first)&&(f==mi->last)){
mi->first=mi->last=NULL;
}
else if (f==mi->first){
mi->first = f->next;
mi->first->prev=NULL;
__libaroma_ctl_list_repos_next_items(
mi->first,
f->y
);
}
else if (f==mi->last){
mi->last=f->prev;
mi->last->next=NULL;
}
else{
f->prev->next = f->next;
f->next->prev = f->prev;
__libaroma_ctl_list_repos_next_items(
f->next,
f->next->prev->y+f->next->prev->h
);
}
mi->itemn--;
mi->h-=f->h;
if (f->handler->destroy!=NULL){
f->handler->destroy(ctl,f);
}
_libaroma_ctl_list_free_state(f);
__libaroma_ctl_list_item_unreg_thread(ctl, f);
if (mi->touched==f){
mi->touched=NULL;
}
free(f);
libaroma_mutex_unlock(mi->mutex);
libaroma_mutex_unlock(mi->imutex);
libaroma_ctl_scroll_request_height(ctl, mi->h);
return 1;
}
libaroma_mutex_unlock(mi->mutex);
libaroma_mutex_unlock(mi->imutex);
return 0;
} /* End of libaroma_ctl_list_del_itemp_internal */
/*
* Function : libaroma_ctl_list_del_item_internal
* Return Value: byte
* Descriptions: del list item by id/index
*/
byte libaroma_ctl_list_del_item_internal(
LIBAROMA_CONTROLP ctl, int index, byte find_id
){
return
libaroma_ctl_list_del_itemp_internal(
ctl,
libaroma_ctl_list_get_item_internal(
ctl, index, find_id
)
);
} /* End of libaroma_ctl_list_del_item_internal */
/*
* Function : libaroma_ctl_list_is_valid
* Return Value: byte
* Descriptions: check control, if it was valid list control
*/
byte libaroma_ctl_list_is_valid(LIBAROMA_CONTROLP ctl){
LIBAROMA_CTL_SCROLL_CLIENTP client = libaroma_ctl_scroll_get_client(ctl);
if (!client){
return 0;
}
if (client->handler!=&_libaroma_ctl_list_handler){
return 0;
}
return 1;
} /* End of libaroma_ctl_list_is_valid */
/*
* Function : libaroma_ctl_list_item_setheight
* Return Value: byte
* Descriptions: update item height
*/
byte libaroma_ctl_list_item_setheight(
LIBAROMA_CONTROLP ctl,LIBAROMA_CTL_LIST_ITEMP item, int h){
if (!item){
return 0;
}
LIBAROMA_CTL_SCROLL_CLIENTP client = libaroma_ctl_scroll_get_client(ctl);
if (!client){
return 0;
}
if (client->handler!=&_libaroma_ctl_list_handler){
return 0;
}
LIBAROMA_CTL_LISTP mi = (LIBAROMA_CTL_LISTP) client->internal;
if (item->h!=h){
mi->h-=item->h;
item->h=h;
mi->h+=item->h;
__libaroma_ctl_list_repos_next_items(item,item->y);
libaroma_ctl_scroll_request_height(ctl, mi->h);
return 1;
}
return 2;
} /* End of libaroma_ctl_list_item_setheight */
/*
* Function : libaroma_ctl_list_item_position
* Return Value: byte
* Descriptions: get item position
*/
byte libaroma_ctl_list_item_position(
LIBAROMA_CONTROLP ctl,LIBAROMA_CTL_LIST_ITEMP item,
LIBAROMA_RECTP rect, byte absolute){
if (!rect){
return 0;
}
if (!item){
return 0;
}
LIBAROMA_CTL_SCROLL_CLIENTP client = libaroma_ctl_scroll_get_client(ctl);
if (!client){
return 0;
}
if (client->handler!=&_libaroma_ctl_list_handler){
return 0;
}
LIBAROMA_CTL_LISTP mi = (LIBAROMA_CTL_LISTP) client->internal;
int x=0;
int y=0;
if (absolute){
libaroma_window_calculate_pos_abs(NULL,ctl,&x,&y);
}
else{
libaroma_window_calculate_pos(NULL,ctl,&x,&y);
}
int ctl_y = libaroma_ctl_scroll_get_scroll(ctl,NULL);
rect->x=x;
rect->y=item->y-(y+ctl_y+mi->vpad);
rect->w=ctl->w-(mi->hpad*2);
rect->h=item->h;
return 1;
} /* End of libaroma_ctl_list_item_position */
/*
* Function : libaroma_ctl_list_scroll_to_item
* Return Value: byte
* Descriptions: focus item to scroll
*/
byte libaroma_ctl_list_scroll_to_item(
LIBAROMA_CONTROLP ctl,
LIBAROMA_CTL_LIST_ITEMP item,
byte smooth
){
if (!item){
return 0;
}
LIBAROMA_CTL_SCROLL_CLIENTP client = libaroma_ctl_scroll_get_client(ctl);
if (!client){
return 0;
}
if (client->handler!=&_libaroma_ctl_list_handler){
return 0;
}
/*LIBAROMA_CTL_LISTP mi = (LIBAROMA_CTL_LISTP) client->internal;*/
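  /* Center the item in the viewport: the requested scroll position is
   * max(0, item_center_y - ctl->h/2). */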
int sel_cy = item->y + (item->h>>1);
int draw_y = (ctl->h>>1) - sel_cy;
draw_y = (draw_y<0)?(0-draw_y):0;
if (smooth){
libaroma_ctl_scroll_request_pos(ctl,draw_y);
}
else{
libaroma_ctl_scroll_set_pos(ctl,draw_y);
}
return 1;
} /* End of libaroma_ctl_list_scroll_to_item */
/*
* Function : libaroma_ctl_list_add_item_internal
* Return Value: LIBAROMA_CTL_LIST_ITEMP
* Descriptions: add item internally
*/
LIBAROMA_CTL_LIST_ITEMP libaroma_ctl_list_add_item_internal(
LIBAROMA_CONTROLP ctl,
int id,
int height,
word flags,
voidp internal,
LIBAROMA_CTL_LIST_ITEM_HANDLERP handler,
int at_index){
if (!handler){
return NULL;
}
LIBAROMA_CTL_SCROLL_CLIENTP client = libaroma_ctl_scroll_get_client(ctl);
if (!client){
return NULL;
}
if (client->handler!=&_libaroma_ctl_list_handler){
return NULL;
}
LIBAROMA_CTL_LISTP mi = (LIBAROMA_CTL_LISTP) client->internal;
LIBAROMA_CTL_LIST_ITEMP item = (LIBAROMA_CTL_LIST_ITEMP)
calloc(sizeof(LIBAROMA_CTL_LIST_ITEM),1);
if (item==NULL){
ALOGW("list_add_item_internal cannot allocating memory for item");
return NULL;
}
libaroma_mutex_lock(mi->imutex);
libaroma_mutex_lock(mi->mutex);
item->y=0;
item->h=height;
item->id=id;
item->flags=flags;
item->handler=handler;
item->internal=internal;
if (mi->last==NULL){
mi->first=mi->last=item;
item->y=mi->vpad;
mi->h+=item->h;
}
else if (at_index<0){
/* at last */
item->prev=mi->last;
mi->last->next = item;
item->y = mi->last->y + mi->last->h;
mi->last = item;
mi->h+=item->h;
}
else if (at_index==0){
/* at first */
    item->next = mi->first;
    mi->first->prev = item;
item->y=mi->vpad;
mi->first = item;
mi->h+=item->h;
__libaroma_ctl_list_repos_next_items(item,mi->vpad);
}
else{
int curr_index = 0;
LIBAROMA_CTL_LIST_ITEMP f = mi->first;
if (f){
while(f){
if (curr_index==at_index){
item->prev = f;
          item->next = f->next;
          if (item->next!=NULL){
            item->next->prev = item;
          }
__libaroma_ctl_list_repos_next_items(item,f->y+f->h);
f->next = item;
if (item->next==NULL){
mi->last = item;
}
mi->h+=item->h;
break;
}
f = f->next;
if (!f){
curr_index=-1;
break;
}
curr_index++;
}
}
else{
curr_index=-1;
}
if (curr_index<0){
/* add in last */
item->prev=mi->last;
mi->last->next = item;
item->y = mi->last->y + mi->last->h;
mi->last = item;
mi->h+=item->h;
}
}
mi->itemn++;
if (flags&LIBAROMA_CTL_LIST_ITEM_REGISTER_THREAD){
__libaroma_ctl_list_item_reg_thread(ctl,item);
}
libaroma_mutex_unlock(mi->mutex);
libaroma_mutex_unlock(mi->imutex);
/* set current height */
libaroma_ctl_scroll_request_height(ctl, mi->h);
return item;
} /* End of libaroma_ctl_list_add_item_internal */
/*
* Function : libaroma_listitem_nonitem
* Return Value: byte
* Descriptions: is non item
*/
byte libaroma_listitem_nonitem(LIBAROMA_CTL_LIST_ITEMP item){
if (!item){
return 1;
}
if (libaroma_listitem_isdivider(item)){
return 1;
}
return libaroma_listitem_iscaption(item);
} /* End of libaroma_listitem_nonitem */
#endif /* __libaroma_ctl_list_c__ */
|
matmult.c
|
#include <stdio.h>
#include <stdlib.h>
#include "matmult_initialize.h"
#ifndef MATRIX_SIZE
#define MATRIX_SIZE 512
#endif
#define NRA MATRIX_SIZE /* number of rows in matrix A */
#define NCA MATRIX_SIZE /* number of columns in matrix A */
#define NCB MATRIX_SIZE /* number of columns in matrix B */
double** allocateMatrix(int rows, int cols) {
int i;
double **matrix = (double**)malloc((sizeof(double*)) * rows);
for (i=0; i<rows; i++) {
matrix[i] = (double*)malloc((sizeof(double)) * cols);
}
return matrix;
}
void freeMatrix(double** matrix, int rows, int cols) {
int i;
for (i=0; i<rows; i++) {
free(matrix[i]);
}
free(matrix);
}
__inline double multiply(double a, double b) {
return a * b;
}
// cols_a and rows_b are the same value
void compute(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) {
int i,j,k;
#pragma omp parallel private(i,j,k) shared(a,b,c)
{
/*** Do matrix multiply sharing iterations on outer loop ***/
/*** Display who does which iterations for demonstration purposes ***/
#pragma omp for nowait
for (i=0; i<rows_a; i++) {
for(j=0; j<cols_b; j++) {
for (k=0; k<cols_a; k++) {
c[i][j] += multiply(a[i][k], b[k][j]);
}
}
}
} /*** End of parallel region ***/
}
void compute_interchange(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) {
int i,j,k;
#pragma omp parallel private(i,j,k) shared(a,b,c)
{
/*** Do matrix multiply sharing iterations on outer loop ***/
/*** Display who does which iterations for demonstration purposes ***/
#pragma omp for nowait
for (i=0; i<rows_a; i++) {
for (k=0; k<cols_a; k++) {
for(j=0; j<cols_b; j++) {
c[i][j] += multiply(a[i][k], b[k][j]);
}
}
}
} /*** End of parallel region ***/
}
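/* Note on compute() vs compute_interchange(): with the classic i-j-k order the
 * innermost loop strides down a column of b (b[k][j] with k varying), which is
 * cache-unfriendly for row-major storage; interchanging the j and k loops
 * (i-k-j) makes the innermost loop walk b[k][j] and c[i][j] contiguously along
 * rows, which typically improves cache reuse. */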
double do_work(void) {
double **a, /* matrix A to be multiplied */
**b, /* matrix B to be multiplied */
**c; /* result matrix C */
a = allocateMatrix(NRA, NCA);
b = allocateMatrix(NCA, NCB);
c = allocateMatrix(NRA, NCB);
/*** Spawn a parallel region explicitly scoping all variables ***/
initialize(a, NRA, NCA);
initialize(b, NCA, NCB);
initialize(c, NRA, NCB);
compute(a, b, c, NRA, NCA, NCB);
compute_interchange(a, b, c, NRA, NCA, NCB);
double result = c[0][1];
freeMatrix(a, NRA, NCA);
freeMatrix(b, NCA, NCB);
  freeMatrix(c, NRA, NCB);
return result;
}
int main (int argc, char *argv[])
{
do_work();
printf("Done.\n");
return 0;
}
|
merge_when_user_omp_pragma.c
|
int main() {
int A[10][10];
int i,j;
  // This pragma used to lead to invalid OpenMP code because we didn't detect that an OMP pragma already exists
#pragma omp parallel for
for(i=0; i<10; i++) {
for(j=0; j<10; j++) {
A[i][j] =0;
}
}
}
|
permutation_gen.c
|
/* Copyright (C) 2010 The Trustees of Indiana University. */
/* */
/* Use, modification and distribution is subject to the Boost Software */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt) */
/* */
/* Authors: Jeremiah Willcock */
/* Andrew Lumsdaine */
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#include "splittable_mrg.h"
#include "graph_generator.h"
#include "permutation_gen.h"
#include "utils.h"
#include <stdint.h>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef __MTA__
#include <sys/mta_task.h>
#endif
#ifdef GRAPH_GENERATOR_MPI
#include <mpi.h>
#endif
#ifdef GRAPH_GENERATOR_OMP
#include <omp.h>
#endif
typedef struct slot_data {
int64_t index, value;
} slot_data;
/* This code defines a simple closed-indexing hash table. It is used to speed
* up the rand_sort algorithm given below. Elements with -1 as index are
* unused; others are used. */
#ifdef __MTA__
#pragma mta inline
#endif
static inline void hashtable_insert(slot_data* ht, int64_t ht_size, int64_t index, int64_t value, int64_t hashval) {
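  /* Linear probing: scan forward from hashval and wrap around to the start of
   * the table; int64_t_cas atomically claims an empty slot (index == -1), so
   * concurrent insertions from OpenMP threads or XMT streams do not collide. */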
int64_t i;
for (i = hashval; i < ht_size; ++i) {
if (int64_t_cas(&ht[i].index, (int64_t)(-1), index)) {
ht[i].value = value;
return;
}
}
for (i = 0; i < hashval; ++i) {
if (int64_t_cas(&ht[i].index, (int64_t)(-1), index)) {
ht[i].value = value;
return;
}
}
assert (!"Should not happen: overflow in hash table");
}
#ifdef __MTA__
#pragma mta inline
#endif
static inline int hashtable_count_key(const slot_data* ht, int64_t ht_size, int64_t index, int64_t hashval) {
int c = 0;
int64_t i;
for (i = hashval; i < ht_size && ht[i].index != (int64_t)(-1); ++i) {
if (ht[i].index == index) ++c;
}
if (i == ht_size) {
for (i = 0; i < hashval && ht[i].index != (int64_t)(-1); ++i) {
if (ht[i].index == index) ++c;
}
}
return c;
}
/* Return all values with the given index value into result array; return value
* of function is element count. */
#ifdef __MTA__
#pragma mta inline
#endif
static inline int hashtable_get_values(const slot_data* ht, int64_t ht_size, int64_t index, int64_t hashval, int64_t* result) {
int x = 0;
int64_t i;
for (i = hashval; i < ht_size && ht[i].index != (int64_t)(-1); ++i) {
if (ht[i].index == index) {
result[x++] = ht[i].value;
}
}
if (i == ht_size) {
for (i = 0; i < hashval && ht[i].index != (int64_t)(-1); ++i) {
if (ht[i].index == index) {
result[x++] = ht[i].value;
}
}
}
return x;
}
#ifdef __MTA__
#pragma mta inline
#endif
static inline void selection_sort(int64_t* a, int64_t n) {
int64_t i, j;
if (n <= 1) return;
for (i = 0; i + 1 < n; ++i) {
int64_t minpos = i;
for (j = i + 1; j < n; ++j) {
if (a[j] < a[minpos]) minpos = j;
}
if (minpos != i) {
int64_t t = a[minpos];
a[minpos] = a[i];
a[i] = t;
}
}
}
/* Fisher-Yates shuffle */
#ifdef __MTA__
#pragma mta inline
#endif
static inline void randomly_permute(int64_t* a, int64_t n, mrg_state* st) {
int64_t i, j;
if (n <= 1) return;
for (i = n - 1; i > 0; --i) {
j = random_up_to(st, i + 1);
if (i != j) {
int64_t t = a[i];
a[i] = a[j];
a[j] = t;
}
}
}
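/* Example: with n = 3 the loop draws j uniformly from [0,2] and then [0,1],
 * so all 3! = 6 orderings are produced with equal probability. */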
/* Exclusive prefix sum on ints; returns sum of overall input array */
static inline int int_prefix_sum(int* out, const int* in, size_t n) {
size_t i;
if (n == 0) return 0;
out[0] = 0;
for (i = 1; i < n; ++i) out[i] = out[i - 1] + in[i - 1];
return out[n - 1] + in[n - 1];
}
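/* Example: in = {2, 3, 5} gives out = {0, 2, 5} and a return value of 10. */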
/* A variant of the rand_sort algorithm from Cong and Bader ("An Empirical
* Analysis of Parallel Random Permutation Algorithms on SMPs", Georgia Tech TR
* GT-CSE-06-06.pdf,
* <URL:http://smartech.gatech.edu/bitstream/1853/14385/1/GT-CSE-06-06.pdf>).
* Sorting here is done using a hash table to effectively act as a bucket sort.
* The rand_sort algorithm was chosen instead of the other algorithms in order
* to get reproducibility across architectures and processor counts. That is
* also the reason for the extra sort immediately before scrambling all
* elements with the same key, as well as the expensive PRNG operations. */
/* This version is for sequential machines, OpenMP, and the XMT. */
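/* In outline, rand_sort_shared below: (1) assigns every element a random hash
 * table key, (2) counts how many elements landed on each key, (3) prefix-sums
 * those counts to find each bucket's start in the output, and (4) gathers each
 * bucket, selection-sorts it for determinism, then randomly permutes it.
 * The block under "#if 0" is an illustrative sketch only and is not used by
 * the generator: it shows the same sort-by-random-key idea with qsort() and
 * rand(); demo_pair, demo_cmp and demo_rand_sort are names invented for this
 * sketch, and it is not reproducible across runs or processor counts the way
 * the MRG-based code below is. */
#if 0
typedef struct { int key; int64_t value; } demo_pair;
static int demo_cmp(const void* a, const void* b) {
  const demo_pair* x = (const demo_pair*)a;
  const demo_pair* y = (const demo_pair*)b;
  return (x->key > y->key) - (x->key < y->key);
}
static void demo_rand_sort(int64_t* out, int64_t n) {
  demo_pair* p = (demo_pair*)xmalloc(n * sizeof(demo_pair));
  int64_t i;
  for (i = 0; i < n; ++i) { p[i].key = rand(); p[i].value = i; } /* random key per element */
  qsort(p, (size_t)n, sizeof(demo_pair), demo_cmp);              /* emit elements in key order */
  for (i = 0; i < n; ++i) out[i] = p[i].value;
  free(p);
}
#endif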
void rand_sort_shared(mrg_state* st, int64_t n, int64_t* result /* Array of size n */) {
int64_t hash_table_size = 2 * n + 128; /* Must be >n, preferably larger for performance */
slot_data* ht = (slot_data*)xmalloc(hash_table_size * sizeof(slot_data));
int64_t i;
int64_t index;
int64_t* bucket_counts;
int64_t* bucket_starts_in_result;
int64_t running_sum;
int64_t old_running_sum;
int64_t result_start_idx;
int64_t* temp;
int64_t bi;
mrg_state new_st;
#ifdef __MTA__
#pragma mta block schedule
#endif
#ifdef GRAPH_GENERATOR_OMP
#pragma omp parallel for
#endif
for (i = 0; i < hash_table_size; ++i) ht[i].index = (int64_t)(-1); /* Unused */
#ifdef __MTA__
#pragma mta assert parallel
#pragma mta block schedule
#endif
#ifdef GRAPH_GENERATOR_OMP
#pragma omp parallel for private(index)
#endif
/* Put elements into the hash table with random keys. */
for (i = 0; i < n; ++i) {
mrg_state new_st = *st;
mrg_skip(&new_st, 1, i, 0);
index = (int64_t)random_up_to(&new_st, hash_table_size);
hashtable_insert(ht, hash_table_size, index, i, index);
}
/* Count elements with each key in order to sort them by key. */
bucket_counts = (int64_t*)xcalloc(hash_table_size, sizeof(int64_t)); /* Uses zero-initialization */
#ifdef __MTA__
#pragma mta assert parallel
#pragma mta block schedule
#endif
#ifdef GRAPH_GENERATOR_OMP
#pragma omp parallel for
#endif
for (i = 0; i < hash_table_size; ++i) {
/* Count all elements with same index. */
bucket_counts[i] = hashtable_count_key(ht, hash_table_size, i, i);
}
/* bucket_counts replaced by its prefix sum (start of each bucket in output array) */
bucket_starts_in_result = bucket_counts;
running_sum = 0;
#ifdef __MTA__
#pragma mta block schedule
#endif
/* FIXME: parallelize this on OpenMP */
for (i = 0; i < hash_table_size; ++i) {
old_running_sum = running_sum;
running_sum += bucket_counts[i];
bucket_counts[i] = old_running_sum;
}
assert (running_sum == n);
bucket_counts = NULL;
#ifdef __MTA__
#pragma mta assert parallel
#pragma mta block schedule
#endif
#ifdef GRAPH_GENERATOR_OMP
#pragma omp parallel for private(result_start_idx, temp, bi, new_st)
#endif
for (i = 0; i < hash_table_size; ++i) {
result_start_idx = bucket_starts_in_result[i];
temp = result + result_start_idx;
/* Gather up all elements with same key. */
bi = (int64_t)hashtable_get_values(ht, hash_table_size, i, i, temp);
if (bi > 1) {
/* Selection sort them (for consistency in parallel implementations). */
selection_sort(temp, bi);
/* Randomly permute them. */
new_st = *st;
mrg_skip(&new_st, 1, i, 100);
randomly_permute(temp, bi, &new_st);
}
}
free(ht); ht = NULL;
free(bucket_starts_in_result); bucket_starts_in_result = NULL;
}
#ifdef GRAPH_GENERATOR_MPI
void rand_sort_mpi(MPI_Comm comm, mrg_state* st, int64_t n,
int64_t* result_size_ptr,
int64_t** result_ptr /* Allocated using xmalloc() by
rand_sort_mpi */) {
int size, rank;
MPI_Comm_size(comm, &size);
MPI_Comm_rank(comm, &rank);
/* Make MPI data type for slot_data. */
MPI_Datatype slot_data_type;
{
int blocklens[] = {1, 1};
MPI_Aint temp_base, indices[2];
slot_data temp;
MPI_Get_address(&temp, &temp_base);
MPI_Get_address(&temp.index, &indices[0]);
MPI_Get_address(&temp.value, &indices[1]);
indices[0] -= temp_base;
indices[1] -= temp_base;
MPI_Datatype old_types[] = {INT64_T_MPI_TYPE, INT64_T_MPI_TYPE};
MPI_Type_struct(2, blocklens, indices, old_types, &slot_data_type);
MPI_Type_commit(&slot_data_type);
}
int64_t total_hash_table_size = 2 * n + 128; /* Must be >n, preferably larger for performance */
/* Hash table is distributed by blocks: first (total_hash_table_size % size)
* are of size (total_hash_table_size / size + 1), rest are of size
* (total_hash_table_size / size). This distribution is necessary so that
* the permutation can easily be assembled at the end of the function. */
int64_t ht_base_block_size = total_hash_table_size / size;
int ht_block_size_cutoff_rank = total_hash_table_size % size;
int64_t ht_block_size_cutoff_index = ht_block_size_cutoff_rank * (ht_base_block_size + 1);
int64_t ht_my_size = ht_base_block_size + (rank < ht_block_size_cutoff_rank);
int64_t ht_my_start = (rank < ht_block_size_cutoff_rank) ?
rank * (ht_base_block_size + 1) :
ht_block_size_cutoff_index + (rank - ht_block_size_cutoff_rank) * ht_base_block_size;
int64_t ht_my_end = ht_my_start + ht_my_size;
#define HT_OWNER(e) \
(((e) < ht_block_size_cutoff_index) ? \
(e) / (ht_base_block_size + 1) : \
ht_block_size_cutoff_rank + ((e) - ht_block_size_cutoff_index) / ht_base_block_size)
#define HT_LOCAL(e) ((e) - ht_my_start)
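/* Worked example of the block distribution: with total_hash_table_size = 10
 * and size = 3, ht_base_block_size = 3, ht_block_size_cutoff_rank = 1 and
 * ht_block_size_cutoff_index = 4, so rank 0 owns slots 0..3 while ranks 1 and
 * 2 own 4..6 and 7..9; HT_OWNER(5) = 1 + (5-4)/3 = 1 and, on rank 1,
 * HT_LOCAL(5) = 5 - 4 = 1. */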
/* Input elements to scramble are distributed cyclically for simplicity;
* their distribution does not matter. */
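  /* With this cyclic layout, local element i on this rank corresponds to
   * global element i*size + rank, which is the value inserted into the
   * distributed hash table below. */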
int64_t elt_my_size = (n / size) + (rank < n % size);
int64_t i;
/* Cache the key-value pairs to avoid PRNG skip operations. Count the number
* of pairs going to each destination processor. */
slot_data* kv_pairs = (slot_data*)xmalloc(elt_my_size * sizeof(slot_data));
int* outcounts = (int*)xcalloc(size, sizeof(int)); /* Relies on zero-init */
for (i = 0; i < elt_my_size; ++i) {
mrg_state new_st = *st;
mrg_skip(&new_st, 1, i * size + rank, 0);
int64_t index = (int64_t)random_up_to(&new_st, total_hash_table_size);
int64_t owner = HT_OWNER(index);
assert (owner >= 0 && owner < size);
++outcounts[owner];
kv_pairs[i].index = index;
kv_pairs[i].value = i * size + rank;
}
int* outdispls = (int*)xmalloc(size * sizeof(int));
int total_outcount = int_prefix_sum(outdispls, outcounts, size);
slot_data* outdata = (slot_data*)xmalloc(total_outcount * sizeof(slot_data));
int* outoffsets = (int*)xmalloc(size * sizeof(int));
memcpy(outoffsets, outdispls, size * sizeof(int));
/* Put the key-value pairs into the output buffer, sorted by destination, to
* get ready for MPI_Alltoallv. */
for (i = 0; i < elt_my_size; ++i) {
int64_t index = kv_pairs[i].index;
int64_t owner = HT_OWNER(index);
outdata[outoffsets[owner]] = kv_pairs[i];
++outoffsets[owner];
}
free(kv_pairs); kv_pairs = NULL;
for (i = 0; i < size; ++i) {
assert (outoffsets[i] == outdispls[i] + outcounts[i]);
}
free(outoffsets); outoffsets = NULL;
int* incounts = (int*)xmalloc(size * sizeof(int));
/* Send data counts. */
MPI_Alltoall(outcounts, 1, MPI_INT, incounts, 1, MPI_INT, comm);
int* indispls = (int*)xmalloc(size * sizeof(int));
int total_incount = int_prefix_sum(indispls, incounts, size);
slot_data* indata = (slot_data*)xmalloc(total_incount * sizeof(slot_data));
/* Send data to put into hash table. */
MPI_Alltoallv(outdata, outcounts, outdispls, slot_data_type,
indata, incounts, indispls, slot_data_type,
comm);
free(outdata); outdata = NULL;
free(outcounts); outcounts = NULL;
free(outdispls); outdispls = NULL;
free(incounts); incounts = NULL;
free(indispls); indispls = NULL;
MPI_Type_free(&slot_data_type);
/* Create the local part of the hash table. */
slot_data* ht = (slot_data*)xmalloc(ht_my_size * sizeof(slot_data));
for (i = ht_my_start; i < ht_my_end; ++i) {
ht[HT_LOCAL(i)].index = (int64_t)(-1); /* Unused */
}
for (i = 0; i < total_incount; ++i) {
int64_t index = indata[i].index, value = indata[i].value;
assert (HT_OWNER(index) == rank);
hashtable_insert(ht, ht_my_size, index, value, HT_LOCAL(index));
}
free(indata); indata = NULL;
/* Make the local part of the result. Most of the rest of this code is
* similar to the shared-memory/XMT version above. */
int64_t* result = (int64_t*)xmalloc(total_incount * sizeof(int64_t));
*result_ptr = result;
*result_size_ptr = total_incount;
int64_t* bucket_counts = (int64_t*)xmalloc(ht_my_size * sizeof(int64_t));
for (i = ht_my_start; i < ht_my_end; ++i) {
/* Count all elements with same index. */
bucket_counts[HT_LOCAL(i)] = hashtable_count_key(ht, ht_my_size, i, HT_LOCAL(i));
}
/* bucket_counts replaced by its prefix sum (start of each bucket in output array) */
int64_t* bucket_starts_in_result = bucket_counts;
int64_t running_sum = 0;
for (i = 0; i < ht_my_size; ++i) {
int64_t old_running_sum = running_sum;
running_sum += bucket_counts[i];
bucket_counts[i] = old_running_sum;
}
assert (running_sum == total_incount);
bucket_counts = NULL;
for (i = ht_my_start; i < ht_my_end; ++i) {
int64_t result_start_idx = bucket_starts_in_result[HT_LOCAL(i)];
int64_t* temp = result + result_start_idx;
/* Gather up all elements with same key. */
int64_t bi = (int64_t)hashtable_get_values(ht, ht_my_size, i, HT_LOCAL(i), temp);
if (bi > 1) {
/* Selection sort them (for consistency in parallel implementations). */
selection_sort(temp, bi);
/* Randomly permute them. */
mrg_state new_st = *st;
mrg_skip(&new_st, 1, i, 100);
randomly_permute(temp, bi, &new_st);
}
}
free(ht); ht = NULL;
free(bucket_starts_in_result); bucket_starts_in_result = NULL;
}
#undef HT_OWNER
#undef HT_LOCAL
#endif /* GRAPH_GENERATOR_MPI */
/* Code below this is used for testing the permutation generators. */
#if 0
int main(int argc, char** argv) {
MPI_Init(&argc, &argv);
const int64_t n = 200000;
int64_t* result = NULL;
int64_t result_size;
mrg_state st;
uint_fast32_t seed[5] = {1, 2, 3, 4, 5};
mrg_seed(&st, seed);
MPI_Barrier(MPI_COMM_WORLD);
double start = MPI_Wtime();
rand_sort_mpi(MPI_COMM_WORLD, &st, n, &result_size, &result);
MPI_Barrier(MPI_COMM_WORLD);
double time = MPI_Wtime() - start;
#if 0
int64_t i;
printf("My count = %" PRId64 "\n", result_size);
for (i = 0; i < result_size; ++i) printf("%" PRId64 "\n", result[i]);
#endif
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank == 0) {
printf("Shuffle of %" PRId64 " element(s) took %f second(s).\n", n, time);
}
free(result); result = NULL;
MPI_Finalize();
return 0;
}
#endif
#if 0
int main(int argc, char** argv) {
const int64_t n = 5000000;
int64_t* result = (int64_t*)xmalloc(n * sizeof(int64_t));
mrg_state st;
uint_fast32_t seed[5] = {1, 2, 3, 4, 5};
mrg_seed(&st, seed);
unsigned long time;
#pragma mta fence
time = mta_get_clock(0);
rand_sort_shared(&st, n, result);
#pragma mta fence
time = mta_get_clock(time);
#if 0
int64_t i;
for (i = 0; i < n; ++i) printf("%" PRId64 "\n", result[i]);
#endif
printf("Shuffle of %" PRId64 " element(s) took %f second(s).\n", n, time * mta_clock_period());
free(result); result = NULL;
return 0;
}
#endif
#if 0
int main(int argc, char** argv) {
const int64_t n = 5000000;
int64_t* result = (int64_t*)xmalloc(n * sizeof(int64_t));
mrg_state st;
uint_fast32_t seed[5] = {1, 2, 3, 4, 5};
mrg_seed(&st, seed);
double time;
time = omp_get_wtime();
rand_sort_shared(&st, n, result);
time = omp_get_wtime() - time;
#if 0
int64_t i;
for (i = 0; i < n; ++i) printf("%" PRId64 "\n", result[i]);
#endif
printf("Shuffle of %" PRId64 " element(s) took %f second(s).\n", n, time);
free(result); result = NULL;
return 0;
}
#endif
|
GB_binop__lxor_fp32.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__lxor_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__lxor_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_fp32)
// A*D function (colscale): GB (_AxD__lxor_fp32)
// D*A function (rowscale): GB (_DxB__lxor_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_fp32)
// C=scalar+B GB (_bind1st__lxor_fp32)
// C=scalar+B' GB (_bind1st_tran__lxor_fp32)
// C=A+scalar GB (_bind2nd__lxor_fp32)
// C=A'+scalar GB (_bind2nd_tran__lxor_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = ((aij != 0) != (bij != 0))
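// (logical XOR: each float operand is treated as a boolean, and the 0-or-1
// result of the comparison is stored back into C as 0 or 1 in float)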
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_FP32 || GxB_NO_LXOR_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__lxor_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lxor_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lxor_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lxor_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lxor_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lxor_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__lxor_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lxor_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__lxor_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lxor_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lxor_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = ((x != 0) != (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lxor_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = ((aij != 0) != (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__lxor_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__lxor_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_subassign_02.c
|
//------------------------------------------------------------------------------
// GB_subassign_02: C(I,J) = A ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 02: C(I,J) = A ; using S
// M: NULL
// Mask_comp: false
// C_replace: false
// accum: NULL
// A: matrix
// S: constructed
// C: not bitmap or full: use GB_bitmap_assign instead
// A: any sparsity structure.
#include "GB_subassign_methods.h"
GrB_Info GB_subassign_02
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t ni,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nj,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_Matrix A,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_IS_FULL (C)) ;
ASSERT (!GB_aliased (C, A)) ; // NO ALIAS of C==A
//--------------------------------------------------------------------------
// S = C(I,J)
//--------------------------------------------------------------------------
GB_EMPTY_TASKLIST ;
GB_CLEAR_STATIC_HEADER (S, &S_header) ;
GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GB_MATRIX_WAIT_IF_JUMBLED (A) ;
GB_GET_C ; // C must not be bitmap
GB_GET_A ;
GB_GET_S ;
GrB_BinaryOp accum = NULL ;
//--------------------------------------------------------------------------
// Method 02: C(I,J) = A ; using S
//--------------------------------------------------------------------------
// Time: Optimal. All entries in A+S must be examined, so the work is
// Omega (nnz(A)+nnz(S)).
// Method 02 and Method 04 are somewhat similar. They differ on how C is
// modified when the entry is present in S but not A.
// TODO: phase2 of Method 02 and 04 are identical and could be
// done in a single function.
//--------------------------------------------------------------------------
// Parallel: A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
//--------------------------------------------------------------------------
if (A_is_bitmap)
{
// all of IxJ must be examined
GB_SUBASSIGN_IXJ_SLICE ;
}
else
{
// traverse all A+S
GB_SUBASSIGN_TWO_SLICE (A, S) ;
}
//--------------------------------------------------------------------------
// phase 1: create zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
if (A_is_bitmap)
{
//----------------------------------------------------------------------
// phase1: A is bitmap
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//--------------------------------------------------------------
// get S(iA_start:iA_end,j)
//--------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
int64_t pA_start = j * Avlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
//--------------------------------------------------------------
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
int64_t pA = pA_start + iA ;
bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
bool Afound = Ab [pA] ;
if (Sfound && !Afound)
{
// ----[C . 1] or [X . 1]-------------------------------
// S (i,j) is present but A (i,j) is not
// [C . 1]: action: ( delete ): becomes zombie
// [X . 1]: action: ( X ): still a zombie
GB_C_S_LOOKUP ;
GB_DELETE_ENTRY ;
GB_NEXT (S) ;
}
else if (!Sfound && Afound)
{
// ----[. A 1]------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
task_pending++ ;
}
else if (Sfound && Afound)
{
// ----[C A 1] or [X A 1]-------------------------------
// both S (i,j) and A (i,j) present
// [C A 1]: action: ( =A ): copy A into C, no accum
// [X A 1]: action: ( undelete ): zombie lives
GB_C_S_LOOKUP ;
GB_noaccum_C_A_1_matrix ;
GB_NEXT (S) ;
}
}
}
GB_PHASE1_TASK_WRAPUP ;
}
}
else
{
//----------------------------------------------------------------------
// phase1: A is hypersparse, sparse, or full
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE1 ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//--------------------------------------------------------------
// get A(:,j) and S(:,j)
//--------------------------------------------------------------
int64_t j = GBH (Zh, k) ;
GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);
//--------------------------------------------------------------
// do a 2-way merge of S(:,j) and A(:,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
// int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and A (:,j) have entries
while (pS < pS_end && pA < pA_end)
{
int64_t iS = GBI (Si, pS, Svlen) ;
int64_t iA = GBI (Ai, pA, Avlen) ;
if (iS < iA)
{
// ----[C . 1] or [X . 1]-------------------------------
// S (i,j) is present but A (i,j) is not
// [C . 1]: action: ( delete ): becomes zombie
// [X . 1]: action: ( X ): still a zombie
GB_C_S_LOOKUP ;
GB_DELETE_ENTRY ;
GB_NEXT (S) ;
}
else if (iA < iS)
{
// ----[. A 1]------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
task_pending++ ;
GB_NEXT (A) ;
}
else
{
// ----[C A 1] or [X A 1]-------------------------------
// both S (i,j) and A (i,j) present
// [C A 1]: action: ( =A ): copy A into C, no accum
// [X A 1]: action: ( undelete ): zombie lives
GB_C_S_LOOKUP ;
GB_noaccum_C_A_1_matrix ;
GB_NEXT (S) ;
GB_NEXT (A) ;
}
}
// while list S (:,j) has entries. List A (:,j) exhausted.
while (pS < pS_end)
{
// ----[C . 1] or [X . 1]-----------------------------------
// S (i,j) is present but A (i,j) is not
// [C . 1]: action: ( delete ): becomes zombie
// [X . 1]: action: ( X ): still a zombie
GB_C_S_LOOKUP ;
GB_DELETE_ENTRY ;
GB_NEXT (S) ;
}
// List A (:,j) has entries. List S (:,j) exhausted.
task_pending += (pA_end - pA) ;
}
GB_PHASE1_TASK_WRAPUP ;
}
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
GB_PENDING_CUMSUM ;
if (A_is_bitmap)
{
//----------------------------------------------------------------------
// phase2: A is bitmap
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//--------------------------------------------------------------
// get S(iA_start:iA_end,j)
//--------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
int64_t pA_start = j * Avlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
int64_t pA = pA_start + iA ;
bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
bool Afound = Ab [pA] ;
if (!Sfound && Afound)
{
// ----[. A 1]------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT_aij ;
GB_NEXT (A) ;
}
else if (Sfound)
{
// S (i,j) present
GB_NEXT (S) ;
}
}
}
GB_PHASE2_TASK_WRAPUP ;
}
}
else
{
//----------------------------------------------------------------------
// phase2: A is hypersparse, sparse, or full
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE2 ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//--------------------------------------------------------------
// get A(:,j) and S(:,j)
//--------------------------------------------------------------
int64_t j = GBH (Zh, k) ;
GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);
//--------------------------------------------------------------
// do a 2-way merge of S(:,j) and A(:,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and A (:,j) have entries
while (pS < pS_end && pA < pA_end)
{
int64_t iS = GBI (Si, pS, Svlen) ;
int64_t iA = GBI (Ai, pA, Avlen) ;
if (iS < iA)
{
GB_NEXT (S) ;
}
else if (iA < iS)
{
// ----[. A 1]------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT_aij ;
GB_NEXT (A) ;
}
else
{
GB_NEXT (S) ;
GB_NEXT (A) ;
}
}
// ignore the remainder of S (:,j)
// while list A (:,j) has entries. List S (:,j) exhausted.
while (pA < pA_end)
{
// ----[. A 1]----------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
int64_t iA = GBI (Ai, pA, Avlen) ;
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT_aij ;
GB_NEXT (A) ;
}
}
GB_PHASE2_TASK_WRAPUP ;
}
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
GB_SUBASSIGN_WRAPUP ;
}
|
initAtoms.c
|
/// \file
/// Initialize the atom configuration.
#include "initAtoms.h"
#include <math.h>
#include <assert.h>
#include "constants.h"
#include "decomposition.h"
#include "parallel.h"
#include "random.h"
#include "linkCells.h"
#include "timestep.h"
#include "memUtils.h"
#include "performanceTimers.h"
static void computeVcm(SimFlat* s, real_t vcm[3]);
/// \details
/// Call functions such as createFccLattice and setTemperature to set up
/// initial atom positions and momenta.
Atoms* initAtoms(LinkCell* boxes)
{
Atoms* atoms = comdMalloc(sizeof(Atoms));
int maxTotalAtoms = MAXATOMS*boxes->nTotalBoxes;
{
atoms->gid = (int*) comdMalloc(maxTotalAtoms*sizeof(int));
atoms->iSpecies = (int*) comdMalloc(maxTotalAtoms*sizeof(int));
atoms->r = (real3*) comdMalloc(maxTotalAtoms*sizeof(real3));
atoms->p = (real3*) comdMalloc(maxTotalAtoms*sizeof(real3));
atoms->f = (real3*) comdMalloc(maxTotalAtoms*sizeof(real3));
atoms->U = (real_t*)comdMalloc(maxTotalAtoms*sizeof(real_t));
}
atoms->nLocal = 0;
atoms->nGlobal = 0;
#pragma sst compute
for (int iOff = 0; iOff < maxTotalAtoms; iOff++)
{
atoms->gid[iOff] = 0;
atoms->iSpecies[iOff] = 0;
zeroReal3(atoms->r[iOff]);
zeroReal3(atoms->p[iOff]);
zeroReal3(atoms->f[iOff]);
atoms->U[iOff] = 0.;
}
return atoms;
}
void destroyAtoms(Atoms *atoms)
{
freeMe(atoms,gid);
freeMe(atoms,iSpecies);
freeMe(atoms,r);
freeMe(atoms,p);
freeMe(atoms,f);
freeMe(atoms,U);
comdFree(atoms);
}
/// Creates atom positions on a face centered cubic (FCC) lattice with
/// nx * ny * nz unit cells and lattice constant lat.
/// Set momenta to zero.
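///
/// As a rough size check: with the nb = 4 atom basis used below, a lattice of
/// nx = ny = nz = 10 unit cells contains 4 * 10 * 10 * 10 = 4000 atoms, which
/// is the value asserted against nGlobal at the end of this function.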
void createFccLattice(int nx, int ny, int nz, real_t lat, SimFlat* s)
{
const real_t* localMin = s->domain->localMin; // alias
const real_t* localMax = s->domain->localMax; // alias
int nb = 4; // number of atoms in the basis
real3 basis[4] = { {0.25, 0.25, 0.25},
{0.25, 0.75, 0.75},
{0.75, 0.25, 0.75},
{0.75, 0.75, 0.25} };
// create and place atoms
int begin[3];
int end[3];
for (int ii=0; ii<3; ++ii)
{
begin[ii] = floor(localMin[ii]/lat);
end[ii] = ceil (localMax[ii]/lat);
}
real_t px,py,pz;
px=py=pz=0.0;
#pragma sst compute
for (int ix=begin[0]; ix<end[0]; ++ix)
for (int iy=begin[1]; iy<end[1]; ++iy)
for (int iz=begin[2]; iz<end[2]; ++iz)
for (int ib=0; ib<nb; ++ib)
{
real_t rx = (ix+basis[ib][0]) * lat;
real_t ry = (iy+basis[ib][1]) * lat;
real_t rz = (iz+basis[ib][2]) * lat;
if (rx < localMin[0] || rx >= localMax[0]) continue;
if (ry < localMin[1] || ry >= localMax[1]) continue;
if (rz < localMin[2] || rz >= localMax[2]) continue;
int id = ib+nb*(iz+nz*(iy+ny*(ix)));
putAtomInBox(s->boxes, s->atoms, id, 0, rx, ry, rz, px, py, pz);
}
#pragma sst init ((int64_t)nb*nx)*((int64_t)(ny*nz))
s->atoms->nGlobal = 0;
if (getMyRank() == 0)
printf("nb=%d nx=%d ny=%d nz=%d nr=%d nglbl=%lld\n",
nb, nx, ny, nz, getNRanks(), s->atoms->nGlobal);
#pragma sst init s->atoms->nGlobal / getNRanks()
s->atoms->nLocal = s->atoms->nLocal;
s->boxes->nTotalAtoms = s->atoms->nLocal;
// set total atoms in simulation
startTimer(commReduceTimer);
addIntParallel(&s->atoms->nLocal, &s->atoms->nGlobal, 1);
stopTimer(commReduceTimer);
#pragma sst delete
assert(s->atoms->nGlobal == nb*nx*ny*nz);
}
/// Sets the center of mass velocity of the system.
/// \param [in] newVcm The desired center of mass velocity.
void setVcm(SimFlat* s, real_t newVcm[3])
{
real_t oldVcm[3];
computeVcm(s, oldVcm);
real_t vShift[3];
vShift[0] = (newVcm[0] - oldVcm[0]);
vShift[1] = (newVcm[1] - oldVcm[1]);
vShift[2] = (newVcm[2] - oldVcm[2]);
int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes;
#pragma omp parallel for
for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox)
{
#pragma sst loop_count avgAtomsPerBox
for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff)
{
int iSpecies = s->atoms->iSpecies[iOff];
real_t mass = s->species[iSpecies].mass;
s->atoms->p[iOff][0] += mass * vShift[0];
s->atoms->p[iOff][1] += mass * vShift[1];
s->atoms->p[iOff][2] += mass * vShift[2];
}
}
}
/// Sets the temperature of system.
///
/// Selects atom velocities randomly from a Boltzmann (equilibrium)
/// distribution that corresponds to the specified temperature. This
/// random process will typically result in a small, but non-zero, center
/// of mass velocity and a small difference from the specified
/// temperature. For typical MD runs these small differences are
/// unimportant. However, to avoid possible confusion, we set the center
/// of mass velocity to zero and scale the velocities to exactly match
/// the input temperature.
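///
/// The final rescaling follows from the kinetic definition of temperature
/// used below, eKinetic = (3/2) * N * kB * T: if the sampled configuration
/// has measured temperature temp, multiplying every momentum component by
/// sqrt(temperature/temp) scales the kinetic energy, and hence the
/// temperature, by exactly temperature/temp.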
void setTemperature(SimFlat* s, real_t temperature)
{
s->initialTemp = temperature;
int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes;
// set initial velocities for the distribution
#pragma omp parallel for
for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox)
{
#pragma sst loop_count avgAtomsPerBox
for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff)
{
int iType = s->atoms->iSpecies[iOff];
real_t mass = s->species[iType].mass;
real_t sigma = sqrt(kB_eV * temperature/mass);
uint64_t seed = mkSeed(s->atoms->gid[iOff], 123);
s->atoms->p[iOff][0] = mass * sigma * gasdev(&seed);
s->atoms->p[iOff][1] = mass * sigma * gasdev(&seed);
s->atoms->p[iOff][2] = mass * sigma * gasdev(&seed);
}
}
// compute the resulting temperature
// kinetic energy = 3/2 kB * Temperature
if (temperature == 0.0) return;
real_t vZero[3] = {0., 0., 0.};
setVcm(s, vZero);
kineticEnergy(s);
real_t temp = (s->eKinetic/s->atoms->nGlobal)/kB_eV/1.5;
// scale the velocities to achieve the target temperature
real_t scaleFactor = sqrt(temperature/temp);
#pragma omp parallel for
for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox)
{
#pragma sst loop_count avgAtomsPerBox
for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff)
{
s->atoms->p[iOff][0] *= scaleFactor;
s->atoms->p[iOff][1] *= scaleFactor;
s->atoms->p[iOff][2] *= scaleFactor;
}
}
kineticEnergy(s);
temp = s->eKinetic/s->atoms->nGlobal/kB_eV/1.5;
}
/// Add a random displacement to the atom positions.
/// Atoms are displaced by a random distance in the range
/// [-delta, +delta] along each axis.
/// \param [in] delta The maximum displacement (along each axis).
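///
/// Assuming lcg61() returns a uniform deviate in [0,1), the expression
/// (2.0*lcg61(&seed)-1.0) * delta used below maps it to a uniform
/// displacement in [-delta, +delta) along each axis.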
void randomDisplacements(SimFlat* s, real_t delta)
{
int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes;
#pragma omp parallel for
for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox)
{
#pragma sst loop_count avgAtomsPerBox
for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff)
{
uint64_t seed = mkSeed(s->atoms->gid[iOff], 457);
s->atoms->r[iOff][0] += (2.0*lcg61(&seed)-1.0) * delta;
s->atoms->r[iOff][1] += (2.0*lcg61(&seed)-1.0) * delta;
s->atoms->r[iOff][2] += (2.0*lcg61(&seed)-1.0) * delta;
}
}
}
/// Computes the center of mass velocity of the system.
void computeVcm(SimFlat* s, real_t vcm[3])
{
real_t vcmLocal[4] = {0., 0., 0., 0.};
real_t vcmSum[4] = {0., 0., 0., 0.};
real_t v0 = 0.0;
real_t v1 = 0.0;
real_t v2 = 0.0;
real_t v3 = 0.0;
int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes;
// sum the momenta and particle masses
#pragma omp parallel for reduction(+:v0) reduction(+:v1) reduction(+:v2) reduction(+:v3)
for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox)
{
#pragma sst loop_count avgAtomsPerBox
for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff)
{
v0 += s->atoms->p[iOff][0];
v1 += s->atoms->p[iOff][1];
v2 += s->atoms->p[iOff][2];
int iSpecies = s->atoms->iSpecies[iOff];
v3 += s->species[iSpecies].mass;
}
}
vcmLocal[0] = v0;
vcmLocal[1] = v1;
vcmLocal[2] = v2;
vcmLocal[3] = v3;
startTimer(commReduceTimer);
addRealParallel(vcmLocal, vcmSum, 4);
stopTimer(commReduceTimer);
real_t totalMass = vcmSum[3];
vcm[0] = vcmSum[0]/totalMass;
vcm[1] = vcmSum[1]/totalMass;
vcm[2] = vcmSum[2]/totalMass;
}
|
parallel_priority_queue.h
|
/***************************************************************************
* include/stxxl/bits/containers/parallel_priority_queue.h
*
* Part of the STXXL. See http://stxxl.org
*
* Copyright (C) 2014-2015 Thomas Keh <[email protected]>
* Copyright (C) 2014-2015 Timo Bingmann <[email protected]>
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
**************************************************************************/
#ifndef STXXL_CONTAINERS_PARALLEL_PRIORITY_QUEUE_HEADER
#define STXXL_CONTAINERS_PARALLEL_PRIORITY_QUEUE_HEADER
#if STXXL_PARALLEL
#include <omp.h>
#endif
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <ctime>
#include <functional>
#include <limits>
#include <list>
#include <mutex>
#include <numeric>
#include <random>
#include <string>
#include <utility>
#include <vector>
#include <tlx/define.hpp>
#include <tlx/die.hpp>
#include <tlx/logger.hpp>
#include <tlx/string.hpp>
#include <foxxll/common/timer.hpp>
#include <foxxll/common/types.hpp>
#include <foxxll/io/request_operations.hpp>
#include <foxxll/mng/block_alloc_strategy.hpp>
#include <foxxll/mng/block_manager.hpp>
#include <foxxll/mng/buf_ostream.hpp>
#include <foxxll/mng/prefetch_pool.hpp>
#include <foxxll/mng/read_write_pool.hpp>
#include <foxxll/mng/typed_block.hpp>
#include <stxxl/bits/common/custom_stats.h>
#include <stxxl/bits/common/is_heap.h>
#include <stxxl/bits/common/swap_vector.h>
#include <stxxl/bits/common/winner_tree.h>
#include <stxxl/bits/config.h>
#include <stxxl/bits/defines.h>
#include <stxxl/bits/parallel.h>
#include <stxxl/seed>
#include <stxxl/types>
namespace stxxl {
namespace ppq_local {
/*!
* A random-access iterator class for block oriented data. The iterator is
* intended to be provided by the internal_array and external_array classes
* and to be used by the multiway_merge algorithm as input iterators.
*
* \tparam ValueType the value type
*/
template <class ValueType>
class ppq_iterator
{
public:
using value_type = ValueType;
using reference = value_type &;
using pointer = value_type *;
using difference_type = ptrdiff_t;
using iterator_category = std::random_access_iterator_tag;
using block_pointers_type = std::vector<std::pair<pointer, pointer> >;
protected:
using self_type = ppq_iterator;
//! pointer to a vector of begin/end pointer pairs
//! They allow access to the data blocks.
const block_pointers_type* m_block_pointers;
//! pointer to the current element
pointer m_current;
//! index of the current element
size_t m_index;
//! index of the current element's block
size_t m_block_index;
//! size of each data block
size_t m_block_items;
public:
//! default constructor (should not be used directly)
ppq_iterator()
: m_block_pointers(nullptr)
{ }
//! constructor
//!
//! \param block_pointers A reference to the properly initialized vector of begin and end pointers.
//! One pair for each block. The pointers should be valid for all blocks that
//! are expected to be accessed with this iterator.
//! \param block_items The size of a single block. If there is only one block (e.g. if the iterator
//! belongs to an internal_array), use the total size here.
//! \param index The index of the current element (global - index 0 belongs to the first element
//! in the first block, no matter if the values are still valid)
ppq_iterator(const block_pointers_type* block_pointers, size_t block_items,
size_t index)
: m_block_pointers(block_pointers),
m_index(index),
m_block_items(block_items)
{
update();
}
//! returns the value's index in the internal or external array
size_t get_index() const
{
return m_index;
}
reference operator * () const
{
assert(m_current);
return *m_current;
}
pointer operator -> () const
{
return &(operator * ());
}
reference operator [] (difference_type relative_index) const
{
const difference_type index = m_index + relative_index;
const size_t block_index = index / m_block_items;
const size_t local_index = index % m_block_items;
assert(block_index < m_block_pointers->size());
assert((*m_block_pointers)[block_index].first + local_index
< (*m_block_pointers)[block_index].second);
return *((*m_block_pointers)[block_index].first + local_index);
}
//! prefix-increment operator
self_type& operator ++ ()
{
++m_index;
++m_current;
if (TLX_UNLIKELY(m_current == (*m_block_pointers)[m_block_index].second)) {
if (m_block_index + 1 < m_block_pointers->size()) {
m_current = (*m_block_pointers)[++m_block_index].first;
}
else {
// global end
assert(m_block_index + 1 == m_block_pointers->size());
m_current = (*m_block_pointers)[m_block_index++].second;
}
}
return *this;
}
//! prefix-decrement operator
self_type& operator -- ()
{
assert(m_index > 0);
--m_index;
if (m_block_index >= m_block_pointers->size()
|| m_current == (*m_block_pointers)[m_block_index].first) {
// begin of current block or global end
assert(m_block_index > 0);
assert(m_block_index <= m_block_pointers->size());
m_current = (*m_block_pointers)[--m_block_index].second - 1;
}
else {
--m_current;
}
return *this;
}
self_type operator + (difference_type addend) const
{
return self_type(m_block_pointers, m_block_items, m_index + addend);
}
self_type& operator += (difference_type addend)
{
m_index += addend;
update();
return *this;
}
self_type operator - (difference_type subtrahend) const
{
return self_type(m_block_pointers, m_block_items, m_index - subtrahend);
}
difference_type operator - (const self_type& o) const
{
return (m_index - o.m_index);
}
self_type& operator -= (difference_type subtrahend)
{
m_index -= subtrahend;
update();
return *this;
}
bool operator == (const self_type& o) const
{
return m_index == o.m_index;
}
bool operator != (const self_type& o) const
{
return m_index != o.m_index;
}
bool operator < (const self_type& o) const
{
return m_index < o.m_index;
}
bool operator <= (const self_type& o) const
{
return m_index <= o.m_index;
}
bool operator > (const self_type& o) const
{
return m_index > o.m_index;
}
bool operator >= (const self_type& o) const
{
return m_index >= o.m_index;
}
friend std::ostream& operator << (std::ostream& os, const ppq_iterator& i)
{
return os << "[" << i.m_index << "]";
}
private:
//! updates m_block_index and m_current based on m_index
inline void update()
{
m_block_index = m_index / m_block_items;
const size_t local_index = m_index % m_block_items;
if (m_block_index < m_block_pointers->size()) {
m_current = (*m_block_pointers)[m_block_index].first + local_index;
assert(m_current <= (*m_block_pointers)[m_block_index].second);
}
else {
// global end if end is beyond the last real block
assert(m_block_index == m_block_pointers->size());
assert(local_index == 0);
//-tb old: m_current = (*m_block_pointers)[m_block_index - 1].second;
m_current = nullptr;
}
}
};
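// A minimal usage sketch for ppq_iterator (illustrative only, not part of the
// library; the data and names below are made up, and <vector>/<iostream> are
// assumed to be included). The iterator is driven by a vector of (begin, end)
// pointer pairs, one per block, plus the number of items per block and a
// global start index:
//
//   std::vector<int> block0 = { 1, 3, 5 };
//   std::vector<int> block1 = { 7, 9 };
//   ppq_iterator<int>::block_pointers_type blocks = {
//       { block0.data(), block0.data() + block0.size() },
//       { block1.data(), block1.data() + block1.size() }
//   };
//   ppq_iterator<int> it(&blocks, /* block_items */ 3, /* index */ 0);
//   for (int i = 0; i < 5; ++i, ++it)
//       std::cout << *it << ' ';          // prints: 1 3 5 7 9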
/*!
* Internal arrays store a sorted sequence of values in RAM, which will be
* merged together into the deletion buffer when it needs to be
* refilled. Internal arrays are constructed from the insertions heaps when
* they overflow.
*/
template <class ValueType>
class internal_array
{
public:
using value_type = ValueType;
using iterator = ppq_iterator<value_type>;
protected:
using block_pointers_type = typename iterator::block_pointers_type;
//! Contains the items of the sorted sequence.
std::vector<value_type> m_values;
//! Index of the current head
size_t m_min_index;
//! Level of internal array (Sander's PQ: group number)
size_t m_level;
//! Begin and end pointers of the array
//! This is used by the iterator
block_pointers_type m_block_pointers;
public:
//! Default constructor. Don't use this directly. Needed for regrowing in
//! surrounding vector.
internal_array() : m_min_index(0) { }
//! Constructor which takes a value vector. The value vector is empty
//! afterwards.
explicit internal_array(std::vector<value_type>& values,
const size_t min_index = 0,
const size_t level = 0)
: m_values(), m_min_index(min_index), m_level(level),
m_block_pointers(1)
{
std::swap(m_values, values);
assert(m_values.size() > 0);
m_block_pointers[0] = std::make_pair(&(*m_values.begin()), &(*m_values.begin()) + m_values.size());
}
//! non-copyable: delete copy-constructor
internal_array(const internal_array&) = delete;
//! non-copyable: delete assignment operator
internal_array& operator = (const internal_array&) = delete;
//! Swap internal_array with another one.
void swap(internal_array& o)
{
using std::swap;
swap(m_values, o.m_values);
swap(m_min_index, o.m_min_index);
swap(m_level, o.m_level);
swap(m_block_pointers, o.m_block_pointers);
}
//! Swap internal_array with another one.
friend void swap(internal_array& a, internal_array& b)
{
a.swap(b);
}
//! Random access operator
inline value_type& operator [] (size_t i)
{
return m_values[i];
}
//! Use inc_min(diff) if multiple values have been extracted.
inline void inc_min(size_t diff = 1)
{
m_min_index += diff;
}
//! The currently smallest element in the array.
inline const value_type & get_min() const
{
return m_values[m_min_index];
}
//! The index of the currently smallest element in the array.
inline size_t get_min_index() const
{
return m_min_index;
}
//! The index of the largest element in the array.
inline size_t get_max_index() const
{
return (m_values.size() - 1);
}
//! Returns if the array has run empty.
inline bool empty() const
{
return (m_min_index >= m_values.size());
}
//! Make this array empty.
inline void make_empty()
{
m_min_index = m_values.size();
}
//! Returns the current size of the array.
inline size_t size() const
{
return (m_values.size() - m_min_index);
}
//! Returns the initial size of the array.
inline size_t capacity() const
{
return m_values.size();
}
//! Returns the level (group number) of the array.
inline size_t level() const
{
return m_level;
}
//! Return the amount of internal memory used by an array with the capacity
//! in number of items.
static size_t int_memory(size_t capacity)
{
return sizeof(internal_array) + capacity * sizeof(value_type);
}
//! Return the amount of internal memory used by the array
inline size_t int_memory() const
{
return int_memory(m_values.capacity());
}
//! Begin iterator
inline iterator begin() const
{
// not const, unfortunately.
return iterator(&m_block_pointers, capacity(), m_min_index);
}
//! End iterator
inline iterator end() const
{
// not const, unfortunately.
return iterator(&m_block_pointers, capacity(), capacity());
}
};
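// A minimal usage sketch for internal_array (illustrative only; the values
// are made up). The constructor takes ownership of an already sorted vector
// by swapping it out, and the contents are then consumed via
// get_min()/inc_min():
//
//   std::vector<int> sorted = { 2, 4, 8, 16 };
//   internal_array<int> ia(sorted);        // 'sorted' is empty afterwards
//   while (!ia.empty()) {
//       std::cout << ia.get_min() << ' ';  // prints: 2 4 8 16
//       ia.inc_min();
//   }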
template <class ExternalArrayType>
class external_array_writer;
/*!
* External array stores a sorted sequence of values on the hard disk and
* allows access to the first block (containing the smallest values). The
* class uses buffering and prefetching in order to improve the performance.
*
* \tparam ValueType Type of the contained objects (POD with no references to
* internal memory).
*
* \tparam BlockSize External block size. Default =
* STXXL_DEFAULT_BLOCK_SIZE(ValueType).
*
* \tparam AllocStrategy Allocation strategy for the external memory. Default =
* foxxll::default_alloc_strategy.
*/
template <
class ValueType,
size_t BlockSize = STXXL_DEFAULT_BLOCK_SIZE(ValueType),
class AllocStrategy = foxxll::default_alloc_strategy
>
class external_array
{
public:
using value_type = ValueType;
using iterator = ppq_iterator<value_type>;
using self_type = external_array<value_type, BlockSize, AllocStrategy>;
using block_type = foxxll::typed_block<BlockSize, value_type>;
using pool_type = foxxll::read_write_pool<block_type>;
using bid_vector = std::vector<foxxll::BID<BlockSize> >;
using bid_iterator = typename bid_vector::iterator;
using block_vector = std::vector<block_type*>;
using request_ptr = foxxll::request_ptr;
using request_vector = std::vector<request_ptr>;
using minima_vector = std::vector<value_type>;
using block_pointers_type = typename iterator::block_pointers_type;
using writer_type = external_array_writer<self_type>;
//! The number of elements fitting into one block
enum {
block_size = BlockSize,
block_items = BlockSize / sizeof(value_type)
};
static const bool debug = false;
protected:
//! The total size of the external array in items. Cannot be changed
//! after construction.
external_size_type m_capacity;
//! Number of blocks, again: calculated at construction time.
size_t m_num_blocks;
//! Level of external array (Sander's PQ: group number)
size_t m_level;
//! Common prefetch and write buffer pool
pool_type* m_pool;
//! The IDs of each block in external memory.
bid_vector m_bids;
//! A vector of size m_num_blocks with block_type pointers, some of them
//! will be filled while writing, but most are nullptr.
block_vector m_blocks;
//! Begin and end pointers for each block, used for merging with
//! ppq_iterator.
block_pointers_type m_block_pointers;
//! The read request pointers are used to wait until the block has been
//! completely fetched.
request_vector m_requests;
//! stores the minimum value of each block
minima_vector m_minima;
//! Is array in write phase? True = write phase, false = read phase.
bool m_write_phase;
//! The total number of elements minus the number of extracted values
external_size_type m_size;
//! The read position in the array.
external_size_type m_index;
//! The index behind the last element that is located in RAM (or is at
//! least requested to be so)
external_size_type m_end_index;
//! The first unhinted block index.
size_t m_unhinted_block;
//! The first unhinted block index as it was before the
//! rebuild_hints_prepare() call. Used for removal of hints which aren't
//! needed anymore.
size_t m_old_unhinted_block;
//! allow writer to access to all variables
friend class external_array_writer<self_type>;
public:
/*!
* Constructs an external array
*
* \param size The total number of elements. Cannot be changed after
* construction.
*
* \param pool A pool (read_write_pool<block_type>) of read and write buffer blocks
*
* \param level Level index in the merge hierarchy
*/
external_array(const external_size_type size, pool_type* pool, const size_t level = 0)
: // constants
m_capacity(size),
m_num_blocks(static_cast<size_t>(foxxll::div_ceil(m_capacity, block_items))),
m_level(level),
m_pool(pool),
// vectors
m_bids(m_num_blocks),
m_blocks(m_num_blocks, reinterpret_cast<block_type*>(1)),
m_block_pointers(m_num_blocks),
m_requests(m_num_blocks),
m_minima(m_num_blocks),
// state
m_write_phase(true),
// indices
m_size(0),
m_index(0),
m_end_index(0),
m_unhinted_block(0),
m_old_unhinted_block(0)
{
assert(m_capacity > 0);
// allocate blocks in EM.
foxxll::block_manager* bm = foxxll::block_manager::get_instance();
bm->new_blocks(AllocStrategy(), m_bids.begin(), m_bids.end());
}
//! Default constructor. Don't use this directly. Needed for regrowing in
//! surrounding vector.
external_array()
: // constants
m_capacity(0),
m_num_blocks(0),
m_level(0),
m_pool(nullptr),
// vectors
m_bids(0),
m_blocks(0),
m_block_pointers(0),
m_requests(0),
m_minima(0),
// state
m_write_phase(false),
// indices
m_size(0),
m_index(0),
m_end_index(0),
m_unhinted_block(0),
m_old_unhinted_block(0)
{ }
//! Swap external_array with another one.
void swap(external_array& o)
{
using std::swap;
// constants
swap(m_capacity, o.m_capacity);
swap(m_num_blocks, o.m_num_blocks);
swap(m_level, o.m_level);
swap(m_pool, o.m_pool);
// vectors
swap(m_bids, o.m_bids);
swap(m_requests, o.m_requests);
swap(m_blocks, o.m_blocks);
swap(m_block_pointers, o.m_block_pointers);
swap(m_minima, o.m_minima);
// state
swap(m_write_phase, o.m_write_phase);
// indices
swap(m_size, o.m_size);
swap(m_index, o.m_index);
swap(m_end_index, o.m_end_index);
swap(m_unhinted_block, o.m_unhinted_block);
swap(m_old_unhinted_block, o.m_old_unhinted_block);
}
//! Swap external_array with another one.
friend void swap(external_array& a, external_array& b)
{
a.swap(b);
}
//! non-copyable: delete copy-constructor
external_array(const external_array&) = delete;
//! non-copyable: delete assignment operator
external_array& operator = (const external_array&) = delete;
//! Destructor
~external_array()
{
if (m_size == 0) return;
// not all data has been read! this only happens when the PPQ is
// destroyed while containing data.
const size_t block_index = m_index / block_items;
const size_t end_block_index = get_end_block_index();
// release blocks currently held in RAM
for (size_t i = block_index; i < end_block_index; ++i) {
m_pool->add_prefetch(m_blocks[i]);
// cannot report the number of freed blocks to PPQ.
}
// cancel currently hinted blocks
for (size_t i = end_block_index; i < m_unhinted_block; ++i) {
LOG << "ea[" << this << "]: discarding prefetch hint on block " << i;
m_requests[i]->cancel();
m_requests[i]->wait();
// put block back into pool
m_pool->add_prefetch(m_blocks[i]);
// invalidate block entry
m_blocks[i] = nullptr;
m_requests[i] = request_ptr();
}
// figure out first block that is still allocated in EM.
bid_iterator i_begin = m_bids.begin() + block_index;
foxxll::block_manager::get_instance()->delete_blocks(i_begin, m_bids.end());
// check that all is empty
for (size_t i = block_index; i < end_block_index; ++i)
assert(m_blocks[i] == nullptr);
}
//! Returns the capacity in items.
size_t capacity() const
{
return m_capacity;
}
//! Returns the current size in items.
size_t size() const
{
return m_size;
}
//! Returns true if the array is empty.
bool empty() const
{
return (m_size == 0);
}
//! Returns the level (group number) of the array.
inline size_t level() const
{
return m_level;
}
//! Return the number of blocks.
size_t num_blocks() const
{
return m_num_blocks;
}
//! Returns memory usage of EA with given capacity, excluding blocks loaded
//! in RAM. Blocks belong to prefetch pool.
static size_t int_memory(size_t capacity)
{
size_t num_blocks = foxxll::div_ceil(capacity, block_items);
return sizeof(external_array)
+ num_blocks * sizeof(typename bid_vector::value_type)
+ num_blocks * sizeof(typename block_vector::value_type)
+ num_blocks * sizeof(typename block_pointers_type::value_type)
+ num_blocks * sizeof(typename request_vector::value_type)
+ num_blocks * sizeof(typename minima_vector::value_type);
}
//! Return the amount of internal memory used by the EA.
inline size_t int_memory() const
{
return int_memory(m_capacity);
}
//! Returns the number of elements available in internal memory
size_t buffer_size() const
{
return (m_end_index - m_index);
}
//! Returns the block beyond the block in which *(m_end_index-1) is located.
size_t get_end_block_index() const
{
size_t end_block_index = m_end_index / block_items;
// increase block index if inside the block
if (m_end_index % block_items != 0)
++end_block_index;
assert(end_block_index <= m_num_blocks);
return end_block_index;
}
//! Returns the block in which m_index is located.
inline size_t get_current_block_index() const
{
return (m_index / block_items);
}
//! Returns a random-access iterator to the begin of the data
//! in internal memory.
iterator begin() const
{
//-TODO?: assert(block_valid(m_index / block_items) || m_index == m_capacity);
return iterator(&m_block_pointers, block_items, m_index);
}
//! Returns a random-access iterator 1 behind the end of the data
//! in internal memory.
iterator end() const
{
//-TODO? assert(!block_valid(m_end_index / block_items) || m_end_index == m_capacity);
return iterator(&m_block_pointers, block_items, m_end_index);
}
//! Returns the smallest element in the array
const value_type & get_min()
{
return *begin();
}
//! Returns if there is data in EM that's not randomly accessible.
bool has_em_data() const
{
return (get_end_block_index() < m_num_blocks);
}
//! Returns the smallest element of the first block NOT in internal memory
//! (or at least requested to be in internal memory)
const value_type & get_next_block_min() const
{
assert(get_end_block_index() < m_num_blocks);
return m_minima[get_end_block_index()];
}
//! Returns if the data requested to be in internal memory is
//! completely fetched. True if wait() has been called before.
bool valid() const
{
bool result = true;
const size_t block_index = static_cast<size_t>(m_index / block_items);
const size_t end_block_index = get_end_block_index();
for (size_t i = block_index; i < end_block_index; ++i) {
result = result && block_valid(i);
}
return result;
}
//! Random access operator for data in internal memory
//! You should call wait() once after fetching data from EM.
value_type& operator [] (size_t i) const
{
assert(i < m_capacity);
const size_t block_index = i / block_items;
const size_t local_index = i % block_items;
assert(i < m_capacity);
assert(block_valid(block_index));
return m_blocks[block_index]->elem[local_index];
}
public:
//! prepare the pool for writing external arrays with given number of
//! threads
static void prepare_write_pool(pool_type& pool, const size_t num_threads)
{
size_t write_blocks = num_threads;
// need at least one
if (write_blocks == 0) write_blocks = 1;
// for holding boundary blocks
write_blocks *= 2;
// more disks than threads?
if (write_blocks < foxxll::config::get_instance()->disks_number())
write_blocks = foxxll::config::get_instance()->disks_number();
#if STXXL_EXPENSIVE_ASSERTIONS
// required for re-reading the external array
write_blocks = 2 * write_blocks;
#endif
if (pool.size_write() < write_blocks) {
LOG1 << "WARNING: enlarging PPQ write pool to " <<
write_blocks << " blocks = " <<
write_blocks * block_size / 1024 / 1024 << " MiB";
pool.resize_write(write_blocks);
}
}
protected:
//! prepare the external_array for writing using multiway_merge() with
//! num_threads. this method is called by the external_array_writer's
//! constructor.
void prepare_write(const size_t num_threads)
{
prepare_write_pool(*m_pool, num_threads);
}
//! finish the writing phase after multiway_merge() filled the vector. this
//! method is called by the external_array_writer's destructor.
void finish_write()
{
// check that all blocks were written
for (size_t i = 0; i < m_num_blocks; ++i)
assert(m_blocks[i] == nullptr);
// compatibility to the block write interface
m_size = m_capacity;
m_index = 0;
m_end_index = 0;
m_unhinted_block = 0;
m_write_phase = false;
}
//! Called by the external_array_writer to read a block from disk into
//! m_blocks[]. If the block is marked as uninitialized, then no read is
//! performed. This is the usual case, and in theory, no block ever has to be
//! re-read from disk, since all can be written fully. However, we do
//! support re-reading blocks for debugging purposes inside
//! multiway_merge(), in a full performance build re-reading never occurs.
void read_block(size_t block_index)
{
assert(block_index < m_num_blocks);
assert(m_blocks[block_index] == nullptr ||
m_blocks[block_index] == reinterpret_cast<block_type*>(1));
if (m_blocks[block_index] == reinterpret_cast<block_type*>(1))
{
// special marker: this block is uninitialized -> no need to read
// from disk.
m_blocks[block_index] = m_pool->steal();
}
else
{
// block was already written, have to read from EM.
LOG << "ea[" << this << "]: "
"read_block needs to re-read block index=" << block_index;
static bool s_warned = false;
if (!s_warned)
{
s_warned = true;
LOG1 << "ppq::external_array[" << this << "] "
"writer requested to re-read block from EM.\n"
"This should never occur in full-performance mode, "
"verify that you run in debug mode.";
}
// this re-reading is not necessary for full performance builds, so
// we immediately wait for the I/O to be completed.
m_blocks[block_index] = m_pool->steal();
request_ptr req = m_pool->read(m_blocks[block_index], m_bids[block_index]);
req->wait();
assert(req->poll());
assert(m_blocks[block_index]);
}
}
//! Called by the external_array_writer to write a block from m_blocks[] to
//! disk. Prior to writing and releasing the memory, extra information is
//! preserved.
void write_block(size_t block_index)
{
assert(block_index < m_num_blocks);
assert(m_blocks[block_index] != nullptr &&
m_blocks[block_index] != reinterpret_cast<block_type*>(1));
// record the minimum value of the block
const size_t this_block_items =
std::min<size_t>(block_items, m_capacity - block_index * static_cast<external_size_type>(block_items));
LOG << "ea[" << this << "]: write_block index=" << block_index <<
" this_block_items=" << this_block_items;
assert(this_block_items > 0);
block_type& this_block = *m_blocks[block_index];
m_minima[block_index] = this_block[0];
// write out block (in background)
m_pool->write(m_blocks[block_index], m_bids[block_index]);
m_blocks[block_index] = nullptr;
}
public:
//! \name Prefetching Hints
//! \{
//! Prefetch the next unhinted block, requires one free read block from the
//! global pool.
void hint_next_block()
{
assert(m_unhinted_block < m_num_blocks);
// will read (prefetch) block i
size_t i = m_unhinted_block++;
LOG << "ea[" << this << "]: prefetching block_index=" << i;
assert(m_pool->size_write() > 0);
assert(m_blocks[i] == nullptr);
// steal block from pool, but also perform read via pool, since this
// checks the associated write_pool.
m_blocks[i] = m_pool->steal_prefetch();
m_requests[i] = m_pool->read(m_blocks[i], m_bids[i]);
}
//! Returns if there is data in EM that's not already hinted
//! to the prefetcher.
bool has_unhinted_em_data() const
{
return (m_unhinted_block < m_num_blocks);
}
//! Returns the smallest element of the next hint candidate (the block
//! after the last hinted one).
const value_type & get_next_hintable_min() const
{
assert(m_unhinted_block < m_num_blocks);
return m_minima[m_unhinted_block];
}
//! Returns the number of hinted blocks.
size_t num_hinted_blocks() const
{
assert(get_end_block_index() <= m_unhinted_block);
return m_unhinted_block - get_end_block_index();
}
//! This method prepares rebuilding the hints (this is done after creating
//! a new EA in order to always have globally the n blocks hinted which
//! will be fetched first). Resets m_unhinted_block to the first block not
//! in RAM. Thereafter rebuild_hints_prehint_next_block() is used to advance
//! this index. rebuild_hints_cancel() and rebuild_hints_finish() should be
//! called after placing all hints in order to clean up the prefetch pool
//! (see the call-order sketch below).
void rebuild_hints_prepare()
{
m_old_unhinted_block = m_unhinted_block;
m_unhinted_block = get_end_block_index();
assert(get_end_block_index() <= m_old_unhinted_block);
}
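// Call-order sketch for the hint rebuilding protocol (illustrative only;
// 'ea' stands for one external_array, and the real caller in the PPQ loops
// over all arrays and bounds the number of hints by the prefetch pool size):
//
//   ea.rebuild_hints_prepare();                  // reset to first block not in RAM
//   while (ea.has_unhinted_em_data() && more_hints_wanted)
//       ea.rebuild_hints_prehint_next_block();   // only advances the index
//   ea.rebuild_hints_cancel();                   // drop hints that are no longer wanted
//   ea.rebuild_hints_finish();                   // issue the real prefetch reads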
//! Advance m_unhinted_block index without actually prefetching.
void rebuild_hints_prehint_next_block()
{
assert(m_unhinted_block < m_num_blocks);
// will read (prefetch) block after cancellations.
LOG << "ea[" << this << "]: pre-hint of" <<
" block_index=" << m_unhinted_block;
++m_unhinted_block;
}
//! Cancel hints which aren't needed anymore and return their blocks to the
//! prefetch pool. rebuild_hints_prepare() must be called before!
void rebuild_hints_cancel()
{
for (size_t i = m_unhinted_block; i < m_old_unhinted_block; ++i) {
LOG << "ea[" << this << "]: discarding prefetch hint on"
" block " << i;
m_requests[i]->cancel();
m_requests[i]->wait();
// put block back into pool
m_pool->add_prefetch(m_blocks[i]);
// invalidate block entry
m_blocks[i] = nullptr;
m_requests[i] = request_ptr();
}
}
//! Perform real-hinting of pre-hinted blocks, since now canceled blocks
//! are available.
void rebuild_hints_finish()
{
for (size_t i = m_old_unhinted_block; i < m_unhinted_block; ++i)
{
LOG << "ea[" << this << "]: perform real-hinting of"
" block " << i;
assert(m_pool->size_write() > 0);
assert(m_blocks[i] == nullptr);
m_blocks[i] = m_pool->steal_prefetch();
m_requests[i] = m_pool->read(m_blocks[i], m_bids[i]);
}
}
//! \}
public:
//! \name Waiting and Removal
//! \{
//! Waits until the next prefetched block is read into RAM, then polls for
//! any further blocks that are done as well. Returns how many blocks were
//! successfully read.
size_t wait_next_blocks()
{
size_t begin = get_end_block_index(), i = begin;
LOG << "ea[" << this << "]: waiting for" <<
" block index=" << i <<
" end_index=" << m_end_index;
assert(has_em_data());
assert(i < m_unhinted_block);
assert(m_bids[i].valid());
assert(m_requests[i].valid());
// wait for prefetched request to finish.
m_requests[i]->wait();
assert(m_requests[i]->poll());
assert(m_blocks[i]);
update_block_pointers(i);
++i;
// poll further hinted blocks if already done
while (i < m_unhinted_block && m_requests[i]->poll())
{
LOG << "ea[" << this << "]: poll-ok for" <<
" block index=" << i <<
" end_index=" << m_end_index;
m_requests[i]->wait();
assert(m_requests[i]->poll());
assert(m_blocks[i]);
update_block_pointers(i);
++i;
}
m_end_index = std::min(m_capacity, i * static_cast<external_size_type>(block_items));
return i - begin;
}
//! Waits until all hinted blocks are read into RAM. Returns how many
//! blocks were successfully read.
size_t wait_all_hinted_blocks()
{
size_t begin = get_end_block_index(), i = begin;
while (i < m_unhinted_block)
{
LOG << "wait_all_hinted_blocks(): ea[" << this << "]: waiting for" <<
" block index=" << i <<
" end_index=" << m_end_index;
m_requests[i]->wait();
assert(m_requests[i]->poll());
assert(m_blocks[i]);
update_block_pointers(i);
++i;
}
m_end_index = std::min(m_capacity, i * static_cast<external_size_type>(block_items));
return i - begin;
}
//! Returns the number of blocks loaded in RAM.
size_t num_used_blocks() const
{
return get_end_block_index() - (m_index / block_items);
}
//! Removes the first n elements from the array. Returns the number of
//! blocks released into the block pool.
size_t remove_items(const size_t n)
{
assert(m_index + n <= m_capacity);
assert(m_index + n <= m_end_index);
assert(m_size >= n);
LOG << "ea[" << this << "]: remove " << n << " items";
if (n == 0)
return 0;
const size_t block_index = m_index / block_items;
const size_t index_after = m_index + n;
size_t block_index_after = index_after / block_items;
size_t local_index_after = index_after % block_items;
if (m_size == n && local_index_after != 0) // end of EA
++block_index_after;
assert(block_index_after <= m_num_blocks);
bid_iterator i_begin = m_bids.begin() + block_index;
bid_iterator i_end = m_bids.begin() + block_index_after;
assert(i_begin <= i_end);
foxxll::block_manager::get_instance()->delete_blocks(i_begin, i_end);
for (size_t i = block_index; i < block_index_after; ++i) {
assert(block_valid(i));
// return block to pool
m_pool->add_prefetch(m_blocks[i]);
}
m_index = index_after;
m_size -= n;
size_t blocks_freed = block_index_after - block_index;
LOG << "ea[" << this << "]: after remove:" <<
" index_after=" << index_after <<
" block_index_after=" << block_index_after <<
" local_index_after=" << local_index_after <<
" blocks_freed=" << blocks_freed <<
" num_blocks=" << m_num_blocks <<
" capacity=" << m_capacity;
assert(block_index_after <= m_num_blocks);
// at most one block outside of the currently loaded range
assert(block_index_after <= get_end_block_index());
return blocks_freed;
}
//! \}
protected:
//! Returns if the block with the given index is completely fetched.
bool block_valid(size_t block_index) const
{
if (!m_write_phase) {
if (block_index >= m_num_blocks) return false;
return (m_requests[block_index] && m_requests[block_index]->poll());
}
else {
return (m_blocks[block_index] != 0);
}
}
//! Updates the m_block_pointers vector.
//! Should be called after any steal() or read() operation.
//! This is necessary for the iterators to work properly.
inline void update_block_pointers(size_t block_index)
{
LOG << "ea[" << this << "]: updating block pointers for " << block_index;
m_block_pointers[block_index].first = m_blocks[block_index]->begin();
if (block_index + 1 != m_num_blocks)
m_block_pointers[block_index].second = m_blocks[block_index]->end();
else
m_block_pointers[block_index].second =
m_block_pointers[block_index].first
+ (m_capacity - block_index * block_items);
assert(m_block_pointers[block_index].first != nullptr);
assert(m_block_pointers[block_index].second != nullptr);
}
inline size_t last_block_items()
{
size_t mod = m_capacity % block_items;
return (mod > 0) ? mod : static_cast<size_t>(block_items);
}
};
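// Read-phase sketch for external_array (illustrative only; 'ea' was filled
// beforehand via an external_array_writer, 'process' is a hypothetical
// consumer, and in the real PPQ the number of outstanding hints is limited by
// the size of the prefetch pool):
//
//   while (ea.has_unhinted_em_data())
//       ea.hint_next_block();              // schedule asynchronous prefetches
//   while (!ea.empty()) {
//       ea.wait_next_blocks();             // wait until the next block is in RAM
//       process(ea.begin(), ea.end());     // random access to the fetched range
//       ea.remove_items(ea.buffer_size()); // release the consumed blocks
//   }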
/**
* An external_array can only be written using an external_array_writer
* object. The writer object provides iterators which are designed to be used
* by stxxl::parallel::multiway_merge() to write the external memory blocks in
* parallel. Thus in the writer we coordinate thread-safe access to the blocks
* using reference counting.
*
* An external_array_writer::iterator has two states: normal and "live". In
* normal mode, the iterator only has a valid index into the external array's
* items. In normal mode, only index calculations are possible. Once
* operator*() is called, the iterators goes into "live" mode by requesting
* access to the corresponding block. Using reference counting the blocks is
* written once all iterators are finished with the corresponding block. Since
* with operator*() we cannot know if the value is going to be written or read,
* when going to live mode, the block must be read from EM. This read overhead,
* however, is optimized by marking blocks as uninitialized in external_array,
* and skipping reads for them. In a full performance build, no block needs to
* be read from disk. Reads only occur in debug mode, when the results are
* verified.
*
* The iterator's normal/live mode only stays active for the individual
* iterator object. When an iterator is copied, assigned, or used in arithmetic,
* the mode is NOT inherited! The exception is prefix operator ++, which is used by
* multiway_merge() to fill an array. Thus the implementation of the iterator
* heavily depends on the behavior of multiway_merge() and is optimized for it.
*/
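// Typical write-phase sketch (illustrative only; the writer's constructor and
// its begin()/end() iterator accessors are assumed here and appear further
// below in this file, and 'seqs', 'compare', 'pool' and 'total_size' are
// hypothetical):
//
//   external_array<value_type> ea(total_size, &pool);
//   {
//       external_array_writer<external_array<value_type> > writer(ea);
//       // multiway_merge() fills [writer.begin(), writer.begin() + total_size)
//       // in parallel; each thread's iterators go "live" only on the blocks
//       // they actually touch, and a block is written to disk as soon as its
//       // reference count drops to zero.
//       parallel::multiway_merge(seqs.begin(), seqs.end(),
//                                writer.begin(), total_size, compare);
//   }   // the writer's destructor finishes the write phase of 'ea'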
template <class ExternalArrayType>
class external_array_writer
{
public:
using ea_type = ExternalArrayType;
using self_type = external_array_writer;
using value_type = typename ea_type::value_type;
using block_type = typename ea_type::block_type;
//! prototype declaration of nested class.
class iterator;
//! scope based debug variable
static const bool debug = false;
protected:
//! reference to the external array to be written
ea_type& m_ea;
#ifndef NDEBUG
//! total number of iterators referencing this writer
unsigned int m_ref_total;
#endif
//! reference counters for the number of live iterators on the
//! corresponding block in external_array.
std::vector<unsigned int> m_ref_count;
//! mutex for reference counting array (this is actually nicer than
//! openmp's critical)
std::mutex m_mutex;
//! optimization: hold live iterators for the expected boundary blocks of
//! multiway_merge().
std::vector<iterator> m_live_boundary;
protected:
//! read block into memory and increase reference count (called when an
//! iterator goes live on the block).
block_type * get_block_ref(size_t block_index)
{
std::unique_lock<std::mutex> lock(m_mutex);
assert(block_index < m_ea.num_blocks());
unsigned int ref = m_ref_count[block_index]++;
#ifndef NDEBUG
++m_ref_total;
#endif
if (ref == 0) {
LOG << "get_block_ref block_index=" << block_index <<
" ref=" << ref << " reading.";
m_ea.read_block(block_index);
}
else {
LOG << "get_block_ref block_index=" << block_index <<
" ref=" << ref;
}
return m_ea.m_blocks[block_index];
}
//! decrease reference count on the block, and possibly write it to disk
//! (called when an iterator releases live mode).
void free_block_ref(size_t block_index)
{
std::unique_lock<std::mutex> lock(m_mutex);
assert(block_index < m_ea.num_blocks());
#ifndef NDEBUG
assert(m_ref_total > 0);
--m_ref_total;
#endif
unsigned int ref = --m_ref_count[block_index];
if (ref == 0) {
LOG << "free_block_ref block_index=" << block_index <<
" ref=" << ref << " written.";
m_ea.write_block(block_index);
}
else {
LOG << "free_block_ref block_index=" << block_index <<
" ref=" << ref;
}
}
//! allow access to the block_ref functions
friend class iterator;
public:
/**
* An iterator which can be used to write (and read) an external_array via
* an external_array_writer. See the documentation of external_array_writer.
*/
class iterator
{
public:
using writer_type = external_array_writer;
using ea_type = ExternalArrayType;
using value_type = typename ea_type::value_type;
using reference = value_type &;
using pointer = value_type *;
using difference_type = ptrdiff_t;
using iterator_category = std::random_access_iterator_tag;
using self_type = iterator;
static const size_t block_items = ea_type::block_items;
//! scope based debug variable
static const bool debug = false;
protected:
//! pointer to the external array containing the elements
writer_type* m_writer;
        //! when operator*() or operator->() is called, the iterator goes
        //! live and allocates a reference to the block's data (possibly
        //! reading it from EM).
bool m_live;
//! index of the current element, absolute in the external array
external_size_type m_index;
//! index of the current element's block in the external array's block
//! list. undefined while m_live is false.
size_t m_block_index;
//! pointer to the referenced block. undefined while m_live is false.
block_type* m_block;
        //! offset of the current element inside the referenced block.
//! undefined while m_live is false.
size_t m_current;
public:
//! default constructor (should not be used directly)
iterator()
: m_writer(nullptr), m_live(false), m_index(0)
{ }
//! construct a new iterator
iterator(writer_type* writer, external_size_type index)
: m_writer(writer),
m_live(false),
m_index(index)
{
LOG << "Construct iterator for index " << m_index;
}
//! copy an iterator, the new iterator is _not_ automatically live!
iterator(const iterator& other)
: m_writer(other.m_writer),
m_live(false),
m_index(other.m_index)
{
LOG << "Copy-Construct iterator for index " << m_index;
}
//! assign an iterator, the assigned iterator is not automatically live!
iterator& operator = (const iterator& other)
{
if (&other != this)
{
LOG << "Assign iterator to index " << other.m_index;
if (m_live)
m_writer->free_block_ref(m_block_index);
m_writer = other.m_writer;
m_live = false;
m_index = other.m_index;
}
return *this;
}
~iterator()
{
if (!m_live) return; // no need for cleanup
m_writer->free_block_ref(m_block_index);
LOG << "Destruction of iterator for index " << m_index <<
" in block " << m_index / block_items;
}
//! return the current absolute index inside the external array.
external_size_type get_index() const
{
return m_index;
}
//! allocates a reference to the block's data (possibly reading it from
//! EM).
void make_live()
{
assert(!m_live);
// calculate block and index inside
m_block_index = m_index / block_items;
m_current = m_index % block_items;
LOG << "operator*() live request for index=" << m_index <<
" block_index=" << m_block_index <<
" m_current=" << m_current;
// get block reference
m_block = m_writer->get_block_ref(m_block_index);
m_live = true;
}
//! access the current item
reference operator * ()
{
if (TLX_UNLIKELY(!m_live))
make_live();
return (*m_block)[m_current];
}
//! access the current item
pointer operator -> ()
{
return &(operator * ());
}
//! prefix-increment operator
self_type& operator ++ ()
{
++m_index;
if (TLX_UNLIKELY(!m_live)) return *this;
// if index stays in the same block, everything is fine
++m_current;
if (TLX_LIKELY(m_current != block_items)) return *this;
// release current block
m_writer->free_block_ref(m_block_index);
m_live = false;
return *this;
}
self_type operator + (difference_type addend) const
{
return self_type(m_writer, m_index + addend);
}
self_type operator - (difference_type subtrahend) const
{
return self_type(m_writer, m_index - subtrahend);
}
difference_type operator - (const self_type& o) const
{
return (m_index - o.m_index);
}
bool operator == (const self_type& o) const
{
return m_index == o.m_index;
}
bool operator != (const self_type& o) const
{
return m_index != o.m_index;
}
bool operator < (const self_type& o) const
{
return m_index < o.m_index;
}
bool operator <= (const self_type& o) const
{
return m_index <= o.m_index;
}
bool operator > (const self_type& o) const
{
return m_index > o.m_index;
}
bool operator >= (const self_type& o) const
{
return m_index >= o.m_index;
}
};
public:
explicit external_array_writer(ea_type& ea, unsigned int num_threads = 0)
: m_ea(ea),
m_ref_count(ea.num_blocks(), 0)
{
#ifndef NDEBUG
m_ref_total = 0;
#endif
#if STXXL_PARALLEL
if (num_threads == 0)
num_threads = omp_get_max_threads();
#else
if (num_threads == 0)
num_threads = 1;
#endif
m_ea.prepare_write(num_threads);
        // optimization: hold live iterators for the boundary blocks which two
        // threads write to. this prevents the blocks from being written to
        // disk and read again.
double step = static_cast<double>(m_ea.capacity()) / num_threads;
m_live_boundary.resize(num_threads - 1);
for (unsigned int i = 0; i < num_threads - 1; ++i)
{
auto index = static_cast<external_size_type>((i + 1) * step);
LOG << "hold index " << index <<
" in block " << index / ea_type::block_items;
m_live_boundary[i] = iterator(this, index);
m_live_boundary[i].make_live();
}
}
//! non-copyable: delete copy-constructor
external_array_writer(const external_array_writer&) = delete;
//! non-copyable: delete assignment operator
external_array_writer& operator = (const external_array_writer&) = delete;
~external_array_writer()
{
m_live_boundary.clear(); // release block boundaries
#ifndef NDEBUG
assert(m_ref_total == 0);
#endif
m_ea.finish_write();
}
iterator begin()
{
return iterator(this, 0);
}
iterator end()
{
return iterator(this, m_ea.capacity());
}
};
/*!
 * The minima_tree contains minima from all sources inside the PPQ. It
 * contains several substructures: winner trees for the insertion heaps and
 * the internal arrays, each containing the minima from all currently
 * allocated structures. These sources, plus the extract buffer, are combined
 * using a "head" inner tree containing only up to four items.
*/
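/* Usage sketch (illustrative only): consumers interpret the (type, index)
 * pair returned by minima_tree::top() as follows, mirroring the dispatch in
 * parallel_priority_queue::top() further below:
 *
 *   std::pair<unsigned, unsigned> ti = m_minima.top();
 *   switch (ti.first) {
 *   case minima_type::HEAP: // minimum is m_proc[ti.second]->insertion_heap[0]
 *       break;
 *   case minima_type::IA:   // minimum is m_internal_arrays[ti.second].get_min()
 *       break;
 *   case minima_type::EB:   // minimum is m_extract_buffer[m_extract_buffer_index]
 *       break;
 *   }
 */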
template <class ParentType>
class minima_tree
{
public:
using parent_type = ParentType;
using self_type = minima_tree<ParentType>;
using compare_type = typename parent_type::inv_compare_type;
using value_type = typename parent_type::value_type;
using proc_vector_type = typename parent_type::proc_vector_type;
using ias_type = typename parent_type::internal_arrays_type;
using eas_type = typename parent_type::external_arrays_type;
static const unsigned initial_ia_size = 2;
static const unsigned initial_ea_size = 2;
protected:
//! WinnerTree-Comparator for the head winner tree. It accesses all
//! relevant data structures from the priority queue.
struct head_comp
{
self_type& m_parent;
proc_vector_type& m_proc;
ias_type& m_ias;
const compare_type& m_compare;
head_comp(self_type& parent, proc_vector_type& proc,
ias_type& ias, const compare_type& compare)
: m_parent(parent),
m_proc(proc),
m_ias(ias),
m_compare(compare)
{ }
const value_type & get_value(size_t input) const
{
switch (input) {
case HEAP:
return m_proc[m_parent.m_heaps.top()]->insertion_heap[0];
case IA:
return m_ias[m_parent.m_ia.top()].get_min();
case EB:
return m_parent.m_parent.m_extract_buffer[
m_parent.m_parent.m_extract_buffer_index
];
default:
abort();
}
}
bool operator () (const size_t a, const size_t b) const
{
return m_compare(get_value(a), get_value(b));
}
};
//! Comparator for the insertion heaps winner tree.
struct heaps_comp
{
proc_vector_type& m_proc;
const compare_type& m_compare;
heaps_comp(proc_vector_type& proc, const compare_type& compare)
: m_proc(proc), m_compare(compare)
{ }
const value_type & get_value(size_t index) const
{
return m_proc[index]->insertion_heap[0];
}
bool operator () (const size_t a, const size_t b) const
{
return m_compare(get_value(a), get_value(b));
}
};
//! Comparator for the internal arrays winner tree.
struct ia_comp
{
ias_type& m_ias;
const compare_type& m_compare;
ia_comp(ias_type& ias, const compare_type& compare)
: m_ias(ias), m_compare(compare)
{ }
bool operator () (const size_t a, const size_t b) const
{
return m_compare(m_ias[a].get_min(), m_ias[b].get_min());
}
};
protected:
//! The priority queue
parent_type& m_parent;
//! value_type comparator
const compare_type& m_compare;
    //! Comparator instances
head_comp m_head_comp;
heaps_comp m_heaps_comp;
ia_comp m_ia_comp;
//! The winner trees
winner_tree<head_comp> m_head;
winner_tree<heaps_comp> m_heaps;
winner_tree<ia_comp> m_ia;
public:
//! Entries in the head winner tree.
enum Types : size_t {
HEAP = 0,
IA = 1,
EB = 2,
TYPE_ERROR = 3
};
//! Construct the tree of minima sources.
explicit minima_tree(parent_type& parent)
: m_parent(parent),
m_compare(parent.m_inv_compare),
// construct comparators
m_head_comp(*this, parent.m_proc,
parent.m_internal_arrays, m_compare),
m_heaps_comp(parent.m_proc, m_compare),
m_ia_comp(parent.m_internal_arrays, m_compare),
          // construct head winner tree
m_head(3, m_head_comp),
m_heaps(m_parent.m_num_insertion_heaps, m_heaps_comp),
m_ia(initial_ia_size, m_ia_comp)
{ }
    //! Return the (type, index) pair of the smallest item of the head winner tree.
std::pair<unsigned, unsigned> top()
{
const size_t type = m_head.top();
switch (type)
{
case HEAP:
return std::make_pair(HEAP, m_heaps.top());
case IA:
return std::make_pair(IA, m_ia.top());
case EB:
return std::make_pair(EB, 0);
default:
return std::make_pair(TYPE_ERROR, 0);
}
}
//! Update minima tree after an item from the heap index was removed.
void update_heap(const size_t index)
{
m_heaps.notify_change(index);
m_head.notify_change(HEAP);
}
//! Update minima tree after an item of the extract buffer was removed.
void update_extract_buffer()
{
m_head.notify_change(EB);
}
//! Update minima tree after an item from an internal array was removed.
void update_internal_array(const size_t index)
{
m_ia.notify_change(index);
m_head.notify_change(IA);
}
//! Add a newly created internal array to the minima tree.
void add_internal_array(const size_t index)
{
m_ia.activate_player(index);
m_head.notify_change(IA);
}
//! Remove an insertion heap from the minima tree.
void deactivate_heap(const size_t index)
{
m_heaps.deactivate_player(index);
if (!m_heaps.empty())
m_head.notify_change(HEAP);
else
m_head.deactivate_player(HEAP);
}
//! Remove the extract buffer from the minima tree.
void deactivate_extract_buffer()
{
m_head.deactivate_player(EB);
}
//! Remove an internal array from the minima tree.
void deactivate_internal_array(const size_t index)
{
m_ia.deactivate_player(index);
if (!m_ia.empty())
m_head.notify_change(IA);
else
m_head.deactivate_player(IA);
}
//! Remove all insertion heaps from the minima tree.
void clear_heaps()
{
m_heaps.clear();
m_head.deactivate_player(HEAP);
}
//! Remove all internal arrays from the minima tree.
void clear_internal_arrays()
{
m_ia.resize_and_clear(initial_ia_size);
m_head.deactivate_player(IA);
}
void rebuild_internal_arrays()
{
if (!m_parent.m_internal_arrays.empty())
{
m_ia.resize_and_rebuild(m_parent.m_internal_arrays.size());
m_head.notify_change(IA);
}
else
{
m_head.deactivate_player(IA);
}
}
//! Return size of internal arrays minima tree
size_t ia_slots() const
{
return m_ia.num_slots();
}
//! Returns a readable representation of the winner tree as string.
std::string to_string() const
{
std::ostringstream ss;
ss << "Head:" << std::endl << m_head.to_string() << std::endl;
ss << "Heaps:" << std::endl << m_heaps.to_string() << std::endl;
ss << "IA:" << std::endl << m_ia.to_string() << std::endl;
return ss.str();
}
//! Prints statistical data.
void print_stats() const
{
LOG1 << "Head winner tree stats:";
m_head.print_stats();
LOG1 << "Heaps winner tree stats:";
m_heaps.print_stats();
LOG1 << "IA winner tree stats:";
m_ia.print_stats();
}
};
} // namespace ppq_local
/*!
* Parallelized External Memory Priority Queue.
*
* \tparam ValueType Type of the contained objects (POD with no references to
* internal memory).
*
* \tparam CompareType The comparator type used to determine whether one
* element is smaller than another element.
*
* \tparam DefaultMemSize Maximum memory consumption by the queue. Can be
* overwritten by the constructor. Default = 1 GiB.
*
 * \tparam MaxItems Maximum number of elements the queue contains at one
 * time. Default = 0 = unlimited. This is not a hard limit and is only used
 * for optimization. Can be overwritten by the constructor.
*
* \tparam BlockSize External block size. Default =
* STXXL_DEFAULT_BLOCK_SIZE(ValueType).
*
* \tparam AllocStrategy Allocation strategy for the external memory. Default =
* foxxll::default_alloc_strategy.
*/
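/* Usage sketch (illustrative only): a minimal single-threaded round trip
 * using the operations defined below. The value type and the RAM limit are
 * assumptions made for this example.
 *
 *   using ppq_type = parallel_priority_queue<uint64_t, std::less<uint64_t> >;
 *   ppq_type ppq(std::less<uint64_t>(), 256 * 1024 * 1024);  // 256 MiB RAM
 *
 *   ppq.push(42);              // single insert into insertion heap 0
 *   ppq.push(7);
 *   uint64_t x = ppq.top();    // access the current top element
 *   ppq.pop();                 // remove it
 *
 * For large batches prefer the bulk interface (bulk_push_begin() /
 * bulk_push() / bulk_push_end()); see the sketch after bulk_push_end().
 */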
template <
class ValueType,
class CompareType = std::less<ValueType>,
class AllocStrategy = foxxll::default_alloc_strategy,
size_t BlockSize = STXXL_DEFAULT_BLOCK_SIZE(ValueType),
    size_t DefaultMemSize = 1L * 1024L * 1024L * 1024L,
external_size_type MaxItems = 0
>
class parallel_priority_queue
{
//! \name Types
//! \{
public:
using value_type = ValueType;
using compare_type = CompareType;
using alloc_strategy = AllocStrategy;
static const size_t block_size = BlockSize;
using size_type = external_size_type;
using block_type = foxxll::typed_block<block_size, value_type>;
using bid_vector = std::vector<foxxll::BID<block_size> >;
using bids_container_type = bid_vector;
using pool_type = foxxll::read_write_pool<block_type>;
using internal_array_type = ppq_local::internal_array<value_type>;
using external_array_type = ppq_local::external_array<value_type, block_size, AllocStrategy>;
using external_array_writer_type = typename external_array_type::writer_type;
using value_iterator = typename std::vector<value_type>::iterator;
using iterator = typename internal_array_type::iterator;
using iterator_pair_type = std::pair<iterator, iterator>;
static const bool debug = false;
//! currently global public tuning parameter:
size_t c_max_internal_level_size;
//! currently global public tuning parameter:
size_t c_max_external_level_size;
protected:
//! type of insertion heap itself
using heap_type = std::vector<value_type>;
//! type of internal arrays vector
using internal_arrays_type = typename stxxl::swap_vector<internal_array_type>;
//! type of external arrays vector
using external_arrays_type = typename stxxl::swap_vector<external_array_type>;
//! type of minima tree combining the structures
using minima_type = ppq_local::minima_tree<
parallel_priority_queue<value_type, compare_type, alloc_strategy,
block_size, DefaultMemSize, MaxItems> >;
//! allow minima tree access to internal data structures
friend class ppq_local::minima_tree<
parallel_priority_queue<value_type, compare_type, alloc_strategy,
block_size, DefaultMemSize, MaxItems> >;
//! Inverse comparison functor
struct inv_compare_type
{
const compare_type& compare;
explicit inv_compare_type(const compare_type& c)
: compare(c)
{ }
bool operator () (const value_type& x, const value_type& y) const
{
return compare(y, x);
}
};
//! <-Comparator for value_type
compare_type m_compare;
//! >-Comparator for value_type
inv_compare_type m_inv_compare;
//! Defines if statistics are gathered: dummy_custom_stats_counter or
//! custom_stats_counter
using stats_counter = dummy_custom_stats_counter<uint64_t>;
//! Defines if statistics are gathered: fake_timer or timer
using stats_timer = foxxll::fake_timer;
//! \}
//! \name Compile-Time Parameters
//! \{
//! Merge sorted heaps when flushing into an internal array.
//! Pro: Reduces the risk of a large winner tree
    //! Con: Flushing insertion heaps becomes slower.
static const bool c_merge_sorted_heaps = true;
    //! Default number of write buffer blocks for a new external array being
    //! filled.
static const unsigned c_num_write_buffer_blocks = 14;
    //! Defines for how many external arrays memory should be reserved in the
    //! constructor.
static const unsigned c_num_reserved_external_arrays = 10;
    //! Size of a single insertion heap in bytes, if not defined otherwise in
//! the constructor. Default: 1 MiB
static const size_type c_default_single_heap_ram = 1L * 1024L * 1024L;
//! Default limit of the extract buffer ram consumption as share of total
//! ram
static constexpr double c_default_extract_buffer_ram_part = 0.05;
/*!
* Limit the size of the extract buffer to an absolute value.
*
* The actual size can be set using the extract_buffer_ram parameter of the
* constructor. If this parameter is not set, the value is calculated by
* (total_ram*c_default_extract_buffer_ram_part)
*
* If c_limit_extract_buffer==false, the memory consumption of the extract
* buffer is only limited by the number of external and internal
* arrays. This is considered in memory management using the
* ram_per_external_array and ram_per_internal_array values. Attention:
* Each internal array reserves space for the extract buffer in the size of
* all heaps together.
*/
static const bool c_limit_extract_buffer = true;
//! For bulks of size up to c_single_insert_limit sequential single insert
//! is faster than bulk_push.
static const unsigned c_single_insert_limit = 100;
//! \}
    //! \name Parameters and Sizes for Memory Allocation Policy
    //! \{
    //! Number of insertion heaps. Usually equal to the number of CPUs.
size_t m_num_insertion_heaps;
    //! Capacity of one insertion heap
size_type m_insertion_heap_capacity;
//! Return size of insertion heap reservation in bytes
size_type insertion_heap_int_memory() const
{
return m_insertion_heap_capacity * sizeof(value_type);
}
//! Total amount of internal memory
size_type m_mem_total;
//! Maximum size of extract buffer in number of elements
//! Only relevant if c_limit_extract_buffer==true
size_type m_extract_buffer_limit;
//! Size of all insertion heaps together in bytes
size_type m_mem_for_heaps;
//! Number of read/prefetch blocks per external array.
float m_num_read_blocks_per_ea;
//! Total number of read/prefetch buffer blocks
size_t m_num_read_blocks;
//! number of currently hinted prefetch blocks
size_t m_num_hinted_blocks;
//! number of currently loaded blocks
size_t m_num_used_read_blocks;
//! Free memory in bytes
size_type m_mem_left;
//! \}
//! Flag if inside a bulk_push sequence.
bool m_in_bulk_push;
//! If the bulk currently being inserted is very large, this boolean is set
    //! and bulk_push just accumulates the elements for eventual sorting.
bool m_is_very_large_bulk;
//! First index in m_external_arrays that was not re-hinted during a
//! bulk_push sequence.
size_t m_bulk_first_delayed_external_array;
//! Index of the currently smallest element in the extract buffer
size_type m_extract_buffer_index;
//! \name Number of elements currently in the data structures
//! \{
    //! Number of elements in the insertion heaps
size_type m_heaps_size;
//! Number of elements in the extract buffer
size_type m_extract_buffer_size;
//! Number of elements in the internal arrays
size_type m_internal_size;
//! Number of elements in the external arrays
size_type m_external_size;
//! \}
//! \name Data Holding Structures
//! \{
//! A struct containing the local insertion heap and other information
//! _local_ to a processor.
struct ProcessorData
{
        //! The insertion heap where new elements are usually inserted
heap_type insertion_heap;
//! The number of items inserted into the insheap during bulk parallel
//! access.
size_type heap_add_size;
};
using proc_vector_type = std::vector<ProcessorData*>;
//! Array of processor local data structures, including the insertion heaps.
proc_vector_type m_proc;
//! Prefetch and write buffer pool for external arrays (has to be in front
//! of m_external_arrays)
pool_type m_pool;
//! The extract buffer where external (and internal) arrays are merged into
//! for extracting
std::vector<value_type> m_extract_buffer;
//! The sorted arrays in internal memory
internal_arrays_type m_internal_arrays;
//! The sorted arrays in external memory
external_arrays_type m_external_arrays;
//! The aggregated pushes. They cannot be extracted yet.
std::vector<value_type> m_aggregated_pushes;
//! The maximum number of internal array levels.
static constexpr size_t kMaxInternalLevels = 8;
//! The number of internal arrays on each level, we use plain array.
size_t m_internal_levels[kMaxInternalLevels];
//! The maximum number of external array levels.
static constexpr size_t kMaxExternalLevels = 8;
//! The number of external arrays on each level, we use plain array.
size_t m_external_levels[kMaxExternalLevels];
//! The winner tree containing the smallest values of all sources
//! where the globally smallest element could come from.
minima_type m_minima;
//! Compares the largest accessible value of two external arrays.
struct external_min_comparator {
const external_arrays_type& m_eas;
const inv_compare_type& m_compare;
external_min_comparator(const external_arrays_type& eas,
const inv_compare_type& compare)
: m_eas(eas), m_compare(compare) { }
bool operator () (const size_t& a, const size_t& b) const
{
return m_compare(m_eas[a].get_next_block_min(),
m_eas[b].get_next_block_min());
}
} m_external_min_comparator;
    //! Tracks the largest accessible values of the external arrays if there
    //! is inaccessible data in EM. The winning array is the first one that
    //! needs to fetch further data from EM. Used in calculate_merge_sequences.
winner_tree<external_min_comparator> m_external_min_tree;
    //! Compares the largest value of the most recently hinted block of two
    //! external arrays.
struct hint_comparator {
const external_arrays_type& m_eas;
const inv_compare_type& m_compare;
hint_comparator(const external_arrays_type& eas,
const inv_compare_type& compare)
: m_eas(eas), m_compare(compare) { }
bool operator () (const size_t& a, const size_t& b) const
{
return m_compare(m_eas[a].get_next_hintable_min(),
m_eas[b].get_next_hintable_min());
}
} m_hint_comparator;
    //! Tracks the largest value of the most recently hinted block of each
    //! external array if there is inaccessible data in EM. The winning
    //! array is the first one that needs to fetch further data from EM.
    //! Used for prefetch hints.
winner_tree<hint_comparator> m_hint_tree;
//! Random number generator for randomly selecting a heap in sequential
//! push()
std::default_random_engine m_rng;
//! \}
/*
     * Helper functors and functions to remove empty internal/external arrays.
*/
//! Unary operator which returns true if the external array has run empty.
struct empty_external_array_eraser {
bool operator () (external_array_type& a) const
{ return a.empty(); }
};
//! Unary operator which returns true if the internal array has run empty.
struct empty_internal_array_eraser {
bool operator () (internal_array_type& a) const
{ return a.empty(); }
};
//! Clean up empty internal arrays, free their memory and capacity
void cleanup_internal_arrays()
{
typename internal_arrays_type::iterator swap_end =
stxxl::swap_remove_if(m_internal_arrays.begin(),
m_internal_arrays.end(),
empty_internal_array_eraser());
for (typename internal_arrays_type::iterator ia = swap_end;
ia != m_internal_arrays.end(); ++ia)
{
m_mem_left += ia->int_memory();
--m_internal_levels[ia->level()];
}
if (swap_end != m_internal_arrays.end())
LOG0 << "cleanup_internal_arrays" <<
" cleaned=" << m_internal_arrays.end() - swap_end;
m_internal_arrays.erase(swap_end, m_internal_arrays.end());
m_minima.rebuild_internal_arrays();
}
//! Clean up empty external arrays, free their memory and capacity
void cleanup_external_arrays()
{
using ea_iterator = typename external_arrays_type::iterator;
empty_external_array_eraser pred;
// The following is a modified implementation of swap_remove_if().
// Updates m_external_min_tree accordingly.
ea_iterator first = m_external_arrays.begin();
ea_iterator last = m_external_arrays.end();
ea_iterator swap_end = first;
size_t size = m_external_arrays.end() - m_external_arrays.begin();
size_t first_removed = size;
while (first != last)
{
if (!pred(*first))
{
using std::swap;
swap(*first, *swap_end);
++swap_end;
}
else if (first_removed >= size)
{
first_removed = first - m_external_arrays.begin();
}
++first;
}
// subtract memory of EAs, which will be freed
for (ea_iterator ea = swap_end; ea != last; ++ea) {
m_mem_left += ea->int_memory();
--m_external_levels[ea->level()];
}
size_t swap_end_index = swap_end - m_external_arrays.begin();
// Deactivating all affected players first.
// Otherwise there might be outdated comparisons.
for (size_t i = size; i != first_removed; ) {
--i;
m_external_min_tree.deactivate_player_step(i);
// TODO delay if (m_in_bulk_push)?
m_hint_tree.deactivate_player_step(i);
}
// Replay moved arrays.
for (size_t i = first_removed; i < swap_end_index; ++i) {
update_external_min_tree(i);
// TODO delay if (m_in_bulk_push)?
update_hint_tree(i);
}
LOG << "Removed " << m_external_arrays.end() - swap_end <<
" empty external arrays.";
m_external_arrays.erase(swap_end, m_external_arrays.end());
resize_read_pool(); // shrinks read/prefetch pool
}
/*!
     * SiftUp a new element from the last position in the heap, reestablishing
     * the heap invariant. This is identical to std::push_heap, except that it
     * returns the final index of the element moved up by siftUp. Thus we can
     * identify whether the minimum may have changed.
*/
template <typename RandomAccessIterator, typename HeapCompareType>
static inline size_t
push_heap(RandomAccessIterator first, RandomAccessIterator last,
HeapCompareType comp)
{
using value_type =
typename std::iterator_traits<RandomAccessIterator>::value_type;
value_type value = std::move(*(last - 1));
size_t index = (last - first) - 1;
size_t parent = (index - 1) / 2;
while (index > 0 && comp(*(first + parent), value))
{
*(first + index) = std::move(*(first + parent));
index = parent;
parent = (index - 1) / 2;
}
*(first + index) = std::move(value);
return index;
}
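    /* Usage sketch (illustrative only): how the return value of push_heap()
     * above is meant to be used. If the sifted-up element ended at index 0,
     * the top of that insertion heap changed and the minima tree has to be
     * notified. This mirrors what push() below does.
     *
     *   insheap.push_back(element);
     *   size_t index = push_heap(insheap.begin(), insheap.end(), m_compare);
     *   if (insheap.size() == 1 || index == 0)
     *       m_minima.update_heap(p);   // top of insertion heap p changed
     */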
public:
//! \name Initialization
//! \{
/*!
* Constructor.
*
* \param compare Comparator for priority queue, which is a Max-PQ.
*
* \param total_ram Maximum RAM usage. 0 = Default = Use the template
* value DefaultMemSize.
*
* \param num_read_blocks_per_ea Number of read blocks per external
* array. Default = 1.5f
*
* \param num_write_buffer_blocks Number of write buffer blocks for a new
* external array being filled. 0 = Default = c_num_write_buffer_blocks
*
* \param num_insertion_heaps Number of insertion heaps. 0 = Default =
* Determine by omp_get_max_threads().
*
     * \param single_heap_ram Memory usage for a single insertion heap.
     * Default = c_default_single_heap_ram.
*
* \param extract_buffer_ram Memory usage for the extract buffer. Only
* relevant if c_limit_extract_buffer==true. 0 = Default = total_ram *
* c_default_extract_buffer_ram_part.
*/
explicit parallel_priority_queue(
const compare_type& compare = compare_type(),
const size_type total_ram = DefaultMemSize,
const float num_read_blocks_per_ea = 1.5f,
const size_t num_write_buffer_blocks = c_num_write_buffer_blocks,
unsigned num_insertion_heaps = 0,
const size_type single_heap_ram = c_default_single_heap_ram,
const size_type extract_buffer_ram = 0)
: c_max_internal_level_size(64),
c_max_external_level_size(64),
m_compare(compare),
m_inv_compare(m_compare),
// Parameters and Sizes for Memory Allocation Policy
#if STXXL_PARALLEL
m_num_insertion_heaps(num_insertion_heaps > 0 ? num_insertion_heaps : omp_get_max_threads()),
#else
m_num_insertion_heaps(num_insertion_heaps > 0 ? num_insertion_heaps : 1),
#endif
m_insertion_heap_capacity(single_heap_ram / sizeof(value_type)),
m_mem_total(total_ram),
m_mem_for_heaps(m_num_insertion_heaps * single_heap_ram),
m_num_read_blocks_per_ea(num_read_blocks_per_ea),
m_num_read_blocks(0),
m_num_hinted_blocks(0),
m_num_used_read_blocks(0),
// (unnamed)
m_in_bulk_push(false),
m_is_very_large_bulk(false),
m_extract_buffer_index(0),
// Number of elements currently in the data structures
m_heaps_size(0),
m_extract_buffer_size(0),
m_internal_size(0),
m_external_size(0),
// Data Holding Structures
m_proc(m_num_insertion_heaps),
m_pool(0, num_write_buffer_blocks),
m_external_arrays(),
m_minima(*this),
m_external_min_comparator(m_external_arrays, m_inv_compare),
m_external_min_tree(4, m_external_min_comparator),
m_hint_comparator(m_external_arrays, m_inv_compare),
m_hint_tree(4, m_hint_comparator),
#ifndef STXXL_PARALLEL
m_rng(stxxl::seed_sequence::get_ref().get_next_seed()),
#endif
// flags
m_limit_extract(false)
{
#if STXXL_PARALLEL
if (!omp_get_nested()) {
omp_set_nested(1);
if (!omp_get_nested()) {
die("Could not enable OpenMP's nested parallelism, "
"however, the PPQ requires this OpenMP feature.");
}
}
#else
LOG1 << "You are using stxxl::parallel_priority_queue without "
"support for OpenMP parallelism.\n"
"This is probably not what you want, so check the "
"compilation settings.";
#endif
if (m_num_read_blocks_per_ea < 1.0) {
die("PPQ: requires num_read_blocks_per_ea >= 1.0, however,"
" it is " << m_num_read_blocks_per_ea);
}
if (c_limit_extract_buffer) {
m_extract_buffer_limit =
(extract_buffer_ram > 0)
? extract_buffer_ram / sizeof(value_type)
: static_cast<size_type>(
static_cast<double>(m_mem_total) *
c_default_extract_buffer_ram_part / sizeof(value_type));
}
for (size_t i = 0; i < kMaxInternalLevels; ++i)
m_internal_levels[i] = 0;
for (size_t i = 0; i < kMaxExternalLevels; ++i)
m_external_levels[i] = 0;
// TODO: Do we still need this line? Insertion heap memory is
// registered below. And merge buffer is equal to the new IA...
// total_ram - ram for the heaps - ram for the heap merger
m_mem_left = m_mem_total - 2 * m_mem_for_heaps;
        // reserve insertion heap memory on processor-local memory
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
for (long p = 0; p < static_cast<long>(m_num_insertion_heaps); ++p)
{
m_proc[p] = new ProcessorData;
m_proc[p]->insertion_heap.reserve(m_insertion_heap_capacity);
assert(m_proc[p]->insertion_heap.capacity() * sizeof(value_type)
== insertion_heap_int_memory());
}
m_mem_left -= m_num_insertion_heaps * insertion_heap_int_memory();
// prepare prefetch buffer pool (already done in initializer),
// initially zero.
// prepare write buffer pool: calculate size and subtract from mem_left
external_array_type::prepare_write_pool(m_pool, m_num_insertion_heaps);
m_mem_left -= m_pool.size_write() * block_size;
// prepare internal arrays
if (c_merge_sorted_heaps) {
m_internal_arrays.reserve(m_mem_total / m_mem_for_heaps);
}
else {
m_internal_arrays.reserve(m_mem_total * m_num_insertion_heaps / m_mem_for_heaps);
}
// prepare external arrays
m_external_arrays.reserve(c_num_reserved_external_arrays);
if (m_mem_total < m_mem_left) // checks if unsigned type wrapped.
{
die("Minimum memory requirement insufficient, "
"increase PPQ's memory limit or decrease buffers.");
}
check_invariants();
}
//! non-copyable: delete copy-constructor
parallel_priority_queue(const parallel_priority_queue&) = delete;
//! non-copyable: delete assignment operator
parallel_priority_queue& operator = (const parallel_priority_queue&) = delete;
//! Destructor.
~parallel_priority_queue()
{
// clean up data structures
for (unsigned p = 0; p < m_num_insertion_heaps; ++p)
{
delete m_proc[p];
}
}
protected:
//! Assert many invariants of the data structures.
void check_invariants() const
{
#ifdef NDEBUG
// disable in Release builds
return;
#endif
size_type mem_used = 0;
mem_used += 2 * m_mem_for_heaps
+ m_pool.size_write() * block_size
+ m_pool.free_size_prefetch() * block_size
+ m_num_hinted_blocks * block_size
+ m_num_used_read_blocks * block_size;
// count number of blocks hinted in prefetcher
size_t num_hinted = 0, num_used_read = 0;
for (size_t i = 0; i < m_external_arrays.size(); ++i) {
num_hinted += m_external_arrays[i].num_hinted_blocks();
num_used_read += m_external_arrays[i].num_used_blocks();
}
die_unless(num_hinted == m_num_hinted_blocks);
die_unless(num_used_read == m_num_used_read_blocks);
die_unequal(m_num_used_read_blocks,
m_num_read_blocks
- m_pool.free_size_prefetch()
- m_num_hinted_blocks);
// test the processor local data structures
size_type heaps_size = 0;
for (size_t p = 0; p < m_num_insertion_heaps; ++p)
{
// check that each insertion heap is a heap
// TODO: remove soon, because this is very expensive
die_unless(1 || stxxl::is_heap(m_proc[p]->insertion_heap.begin(),
m_proc[p]->insertion_heap.end(),
m_compare));
die_unless(m_proc[p]->insertion_heap.capacity() <= m_insertion_heap_capacity);
heaps_size += m_proc[p]->insertion_heap.size();
mem_used += m_proc[p]->insertion_heap.capacity() * sizeof(value_type);
}
if (!m_in_bulk_push)
die_unequal(m_heaps_size, heaps_size);
// count number of items and memory size of internal arrays
size_type ia_size = 0;
size_type ia_memory = 0;
std::vector<size_t> ia_levels(kMaxInternalLevels, 0);
for (typename internal_arrays_type::const_iterator ia =
m_internal_arrays.begin(); ia != m_internal_arrays.end(); ++ia)
{
ia_size += ia->size();
ia_memory += ia->int_memory();
++ia_levels[ia->level()];
}
die_unequal(m_internal_size, ia_size);
mem_used += ia_memory;
for (size_t i = 0; i < kMaxInternalLevels; ++i)
die_unequal(m_internal_levels[i], ia_levels[i]);
// count number of items in external arrays
size_type ea_size = 0;
size_type ea_memory = 0;
std::vector<size_t> ea_levels(kMaxExternalLevels, 0);
for (typename external_arrays_type::const_iterator ea =
m_external_arrays.begin(); ea != m_external_arrays.end(); ++ea)
{
ea_size += ea->size();
ea_memory += ea->int_memory();
++ea_levels[ea->level()];
}
die_unequal(m_external_size, ea_size);
mem_used += ea_memory;
for (size_t i = 0; i < kMaxExternalLevels; ++i)
die_unequal(m_external_levels[i], ea_levels[i]);
        // check that mem_used == mem_total - mem_left
die_unequal(memory_consumption(), mem_used);
}
//! \}
//! \name Properties
//! \{
public:
//! The number of elements in the queue.
inline size_type size() const
{
return m_heaps_size + m_internal_size + m_external_size + m_extract_buffer_size;
}
//! Returns if the queue is empty.
inline bool empty() const
{
return (size() == 0);
}
//! The memory consumption in Bytes.
inline size_type memory_consumption() const
{
assert(m_mem_total >= m_mem_left);
return (m_mem_total - m_mem_left);
}
protected:
//! Returns if the extract buffer is empty.
inline bool extract_buffer_empty() const
{
return (m_extract_buffer_size == 0);
}
//! \}
public:
//! \name Bulk Operations
//! \{
/*!
* Start a sequence of push operations.
* \param bulk_size Exact number of elements to push before the next pop.
*/
void bulk_push_begin(size_type bulk_size)
{
assert(!m_in_bulk_push);
m_in_bulk_push = true;
m_bulk_first_delayed_external_array = m_external_arrays.size();
size_type heap_capacity = m_num_insertion_heaps * m_insertion_heap_capacity;
// if bulk_size is large: use simple aggregation instead of keeping the
// heap property and sort everything afterwards.
if (bulk_size > heap_capacity && 0) {
m_is_very_large_bulk = true;
}
else {
m_is_very_large_bulk = false;
if (bulk_size + m_heaps_size > heap_capacity) {
if (m_heaps_size > 0) {
//flush_insertion_heaps();
}
}
}
// zero bulk insertion counters
for (size_t p = 0; p < m_num_insertion_heaps; ++p)
m_proc[p]->heap_add_size = 0;
}
/*!
* Push an element inside a sequence of pushes.
* Run bulk_push_begin() before using this method.
*
* \param element The element to push.
* \param p The id of the insertion heap to use (usually the thread id).
*/
void bulk_push(const value_type& element, const size_t p)
{
assert(m_in_bulk_push);
heap_type& insheap = m_proc[p]->insertion_heap;
if (!m_is_very_large_bulk && 0)
{
// if small bulk: if heap is full -> sort locally and put into
// internal array list. insert items and keep heap invariant.
if (TLX_UNLIKELY(insheap.size() >= m_insertion_heap_capacity)) {
#if STXXL_PARALLEL
#pragma omp atomic
#endif
m_heaps_size += m_proc[p]->heap_add_size;
m_proc[p]->heap_add_size = 0;
flush_insertion_heap(p);
}
assert(insheap.size() < insheap.capacity());
// put item onto heap and siftUp
insheap.push_back(element);
std::push_heap(insheap.begin(), insheap.end(), m_compare);
}
else if (!m_is_very_large_bulk && 1)
{
// if small bulk: if heap is full -> sort locally and put into
// internal array list. insert items but DO NOT keep heap
// invariant.
if (TLX_UNLIKELY(insheap.size() >= m_insertion_heap_capacity)) {
#if STXXL_PARALLEL
#pragma omp atomic
#endif
m_heaps_size += m_proc[p]->heap_add_size;
m_proc[p]->heap_add_size = 0;
flush_insertion_heap(p);
}
assert(insheap.size() < insheap.capacity());
// put item onto heap and DO NOT siftUp
insheap.push_back(element);
}
else // m_is_very_large_bulk
{
if (TLX_UNLIKELY(insheap.size() >= 2 * 1024 * 1024)) {
#if STXXL_PARALLEL
#pragma omp atomic
#endif
m_heaps_size += m_proc[p]->heap_add_size;
m_proc[p]->heap_add_size = 0;
flush_insertion_heap(p);
}
assert(insheap.size() < insheap.capacity());
// put onto insertion heap but do not keep heap property
insheap.push_back(element);
}
m_proc[p]->heap_add_size++;
}
/*!
* Push an element inside a bulk sequence of pushes. Run bulk_push_begin()
* before using this method. This function uses the insertion heap id =
* omp_get_thread_num().
*
* \param element The element to push.
*/
void bulk_push(const value_type& element)
{
#if STXXL_PARALLEL
return bulk_push(element, static_cast<size_t>(omp_get_thread_num()));
#else
std::uniform_int_distribution<size_t> distr(0, m_num_insertion_heaps - 1);
return bulk_push(element, distr(m_rng));
#endif
}
/*!
* Ends a sequence of push operations. Run bulk_push_begin() and some
* bulk_push() before this.
*/
void bulk_push_end()
{
assert(m_in_bulk_push);
m_in_bulk_push = false;
if (!m_is_very_large_bulk && 0)
{
for (size_t p = 0; p < m_num_insertion_heaps; ++p)
{
m_heaps_size += m_proc[p]->heap_add_size;
if (!m_proc[p]->insertion_heap.empty())
m_minima.update_heap(p);
}
}
else if (!m_is_very_large_bulk && 1)
{
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
for (long p = 0; p < static_cast<long>(m_num_insertion_heaps); ++p)
{
// reestablish heap property: siftUp only those items pushed
for (size_t index = m_proc[p]->heap_add_size; index != 0; ) {
std::push_heap(m_proc[p]->insertion_heap.begin(),
m_proc[p]->insertion_heap.end() - (--index),
m_compare);
}
#if STXXL_PARALLEL
#pragma omp atomic
#endif
m_heaps_size += m_proc[p]->heap_add_size;
}
for (size_t p = 0; p < m_num_insertion_heaps; ++p)
{
if (!m_proc[p]->insertion_heap.empty())
m_minima.update_heap(p);
}
}
else // m_is_very_large_bulk
{
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
for (size_t p = 0; p < m_num_insertion_heaps; ++p)
{
if (m_proc[p]->insertion_heap.size() >= m_insertion_heap_capacity) {
// flush out overfull insertion heap arrays
#if STXXL_PARALLEL
#pragma omp atomic
#endif
m_heaps_size += m_proc[p]->heap_add_size;
m_proc[p]->heap_add_size = 0;
flush_insertion_heap(p);
}
else {
// reestablish heap property: siftUp only those items pushed
for (size_t index = m_proc[p]->heap_add_size; index != 0; ) {
std::push_heap(m_proc[p]->insertion_heap.begin(),
m_proc[p]->insertion_heap.end() - (--index),
m_compare);
}
#if STXXL_PARALLEL
#pragma omp atomic
#endif
m_heaps_size += m_proc[p]->heap_add_size;
m_proc[p]->heap_add_size = 0;
}
}
for (size_t p = 0; p < m_num_insertion_heaps; ++p)
{
if (!m_proc[p]->insertion_heap.empty())
m_minima.update_heap(p);
}
}
if (m_bulk_first_delayed_external_array != m_external_arrays.size()) {
LOG << "bulk_push_end: run delayed re-hinting of EAs";
rebuild_hint_tree();
}
check_invariants();
}
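    /* Usage sketch (illustrative only): the intended bulk insertion protocol
     * using the three functions above. The element count, make_value() and
     * the OpenMP loop are assumptions made for this example.
     *
     *   const size_t n = 1000000;
     *   ppq.bulk_push_begin(n);
     *   #if STXXL_PARALLEL
     *   #pragma omp parallel for
     *   #endif
     *   for (long i = 0; i < static_cast<long>(n); ++i) {
     *       // each thread pushes into its own insertion heap
     *       ppq.bulk_push(make_value(i));
     *   }
     *   ppq.bulk_push_end();   // reestablishes heap properties, re-hints EAs
     */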
//! Extract up to max_size values at once.
void bulk_pop(std::vector<value_type>& out, size_t max_size)
{
LOG << "bulk_pop() max_size=" << max_size;
const size_t n_elements = std::min<size_t>(max_size, size());
assert(n_elements < m_extract_buffer_limit);
if (m_heaps_size > 0)
flush_insertion_heaps();
convert_eb_into_ia();
refill_extract_buffer(n_elements, n_elements);
out.resize(0);
using std::swap;
swap(m_extract_buffer, out);
m_extract_buffer_index = 0;
m_extract_buffer_size = 0;
m_minima.deactivate_extract_buffer();
check_invariants();
}
//! Extracts all elements which are greater or equal to a given limit.
//! \param out result vector
//! \param limit limit value
//! \param max_size maximum number of items to extract
    //! \return true if the buffer contains all items < limit, false if it was
    //! too small.
bool bulk_pop_limit(std::vector<value_type>& out, const value_type& limit,
size_t max_size = std::numeric_limits<size_t>::max())
{
LOG << "bulk_pop_limit with limit=" << limit;
convert_eb_into_ia();
if (m_heaps_size > 0) {
if (0)
flush_insertion_heaps();
else if (1)
flush_insertion_heaps_with_limit(limit);
}
size_type ias = m_internal_arrays.size();
size_type eas = m_external_arrays.size();
std::vector<size_type> sizes(eas + ias);
std::vector<iterator_pair_type> sequences(eas + ias);
size_type output_size = 0;
size_t limiting_ea_index = m_external_min_tree.top();
// pop limit may have to change due to memory limit
value_type this_limit = limit;
bool has_full_range = true;
// get all relevant blocks
while (limiting_ea_index != m_external_min_tree.invalid_key)
{
const value_type& ea_limit =
m_external_arrays[limiting_ea_index].get_next_block_min();
if (m_compare(ea_limit, this_limit)) {
// No more EM data smaller or equal to limit
break;
}
if (m_external_arrays[limiting_ea_index].num_hinted_blocks() == 0) {
// No more read/prefetch blocks available for EA
this_limit = ea_limit;
has_full_range = false;
break;
}
wait_next_ea_blocks(limiting_ea_index);
// consider next limiting EA
limiting_ea_index = m_external_min_tree.top();
assert(limiting_ea_index < eas);
tlx::unused(limiting_ea_index);
}
// build sequences
for (size_type i = 0; i < eas + ias; ++i) {
iterator begin, end;
if (i < eas) {
assert(!m_external_arrays[i].empty());
assert(m_external_arrays[i].valid());
begin = m_external_arrays[i].begin();
end = m_external_arrays[i].end();
}
else {
size_type j = i - eas;
assert(!(m_internal_arrays[j].empty()));
begin = m_internal_arrays[j].begin();
end = m_internal_arrays[j].end();
}
end = std::upper_bound(begin, end, this_limit, m_inv_compare);
sizes[i] = std::distance(begin, end);
sequences[i] = std::make_pair(begin, end);
}
output_size = std::accumulate(sizes.begin(), sizes.end(), 0ull);
if (output_size > max_size) {
output_size = max_size;
has_full_range = false;
}
out.resize(output_size);
LOG << "bulk_pop_limit with" <<
" sequences=" << sequences.size() <<
" output_size=" << output_size <<
" has_full_range=" << has_full_range;
potentially_parallel::multiway_merge(
sequences.begin(), sequences.end(),
out.begin(), output_size, m_inv_compare);
advance_arrays(sequences, sizes, eas, ias);
check_invariants();
return has_full_range;
}
#if TODO_MAYBE_FIXUP_LATER
/*!
* Insert a vector of elements at one time.
* \param elements Vector containing the elements to push.
* Attention: elements vector may be owned by the PQ afterwards.
*/
void bulk_push_vector(std::vector<value_type>& elements)
{
size_type heap_capacity = m_num_insertion_heaps * m_insertion_heap_capacity;
if (elements.size() > heap_capacity / 2) {
flush_array(elements);
return;
}
bulk_push_begin(elements.size());
#if STXXL_PARALLEL
#pragma omp parallel
{
const unsigned thread_num = omp_get_thread_num();
#pragma omp parallel for
for (size_type i = 0; i < elements.size(); ++i) {
bulk_push(elements[i], thread_num);
}
}
#else
std::uniform_int_distribution<size_t> distr(0, m_num_insertion_heaps - 1);
for (size_type i = 0; i < elements.size(); ++i) {
bulk_push(elements[i], distr(m_rng));
}
#endif
bulk_push_end();
}
#endif
//! \}
//! \name Aggregation Operations
//! \{
/*!
     * Aggregate pushes. Use flush_aggregated_pushes() to finally push
     * them. extract_min is allowed in between the aggregation of pushes if
     * you can ensure that the extracted value is smaller than all of the
     * aggregated values.
* \param element The element to push.
*/
void aggregate_push(const value_type& element)
{
m_aggregated_pushes.push_back(element);
}
#if TODO_MAYBE_FIXUP_LATER
/*!
* Insert the aggregated values into the queue using push(), bulk insert,
* or sorting, depending on the number of aggregated values.
*/
void flush_aggregated_pushes()
{
size_type size = m_aggregated_pushes.size();
size_type ram_internal = 2 * size * sizeof(value_type); // ram for the sorted array + part of the ram for the merge buffer
size_type heap_capacity = m_num_insertion_heaps * m_insertion_heap_capacity;
if (ram_internal > m_mem_for_heaps / 2) {
flush_array(m_aggregated_pushes);
}
else if ((m_aggregated_pushes.size() > c_single_insert_limit) && (m_aggregated_pushes.size() < heap_capacity)) {
bulk_push_vector(m_aggregated_pushes);
}
else {
for (value_iterator i = m_aggregated_pushes.begin(); i != m_aggregated_pushes.end(); ++i) {
push(*i);
}
}
m_aggregated_pushes.clear();
}
#endif
//! \}
//! \name std::priority_queue compliant operations
//! \{
/*!
* Insert new element
* \param element the element to insert.
* \param p number of insertion heap to insert item into
*/
void push(const value_type& element, const size_t p = 0)
{
assert(!m_in_bulk_push && !m_limit_extract);
heap_type& insheap = m_proc[p]->insertion_heap;
if (insheap.size() >= m_insertion_heap_capacity) {
flush_insertion_heap(p);
}
// push item to end of heap and siftUp
insheap.push_back(element);
size_t index = push_heap(insheap.begin(), insheap.end(),
m_compare);
++m_heaps_size;
if (insheap.size() == 1 || index == 0)
m_minima.update_heap(p);
}
//! Access the minimum element.
const value_type & top()
{
assert(!m_in_bulk_push && !m_limit_extract);
assert(!empty());
if (extract_buffer_empty()) {
refill_extract_buffer(std::min(m_extract_buffer_limit,
m_internal_size + m_external_size));
}
static const bool debug = false;
std::pair<unsigned, unsigned> type_and_index = m_minima.top();
const unsigned& type = type_and_index.first;
const unsigned& index = type_and_index.second;
assert(type < 4);
switch (type) {
case minima_type::HEAP:
LOG << "heap " << index <<
": " << m_proc[index]->insertion_heap[0];
return m_proc[index]->insertion_heap[0];
case minima_type::IA:
LOG << "ia " << index <<
": " << m_internal_arrays[index].get_min();
return m_internal_arrays[index].get_min();
case minima_type::EB:
LOG << "eb " << m_extract_buffer_index <<
": " << m_extract_buffer[m_extract_buffer_index];
return m_extract_buffer[m_extract_buffer_index];
default:
die("Unknown extract type: " << type);
}
}
//! Remove the minimum element.
void pop()
{
assert(!m_in_bulk_push && !m_limit_extract);
m_stats.num_extracts++;
if (extract_buffer_empty()) {
refill_extract_buffer(std::min(m_extract_buffer_limit,
m_internal_size + m_external_size));
}
m_stats.extract_min_time.start();
std::pair<unsigned, unsigned> type_and_index = m_minima.top();
unsigned type = type_and_index.first;
unsigned index = type_and_index.second;
assert(type < 4);
switch (type) {
case minima_type::HEAP:
{
heap_type& insheap = m_proc[index]->insertion_heap;
m_stats.pop_heap_time.start();
std::pop_heap(insheap.begin(), insheap.end(), m_compare);
insheap.pop_back();
m_stats.pop_heap_time.stop();
m_heaps_size--;
if (!insheap.empty())
m_minima.update_heap(index);
else
m_minima.deactivate_heap(index);
break;
}
case minima_type::IA:
{
m_internal_arrays[index].inc_min();
m_internal_size--;
if (!(m_internal_arrays[index].empty()))
m_minima.update_internal_array(index);
else
// internal array has run empty
m_minima.deactivate_internal_array(index);
break;
}
case minima_type::EB:
{
++m_extract_buffer_index;
assert(m_extract_buffer_size > 0);
--m_extract_buffer_size;
if (!extract_buffer_empty())
m_minima.update_extract_buffer();
else
m_minima.deactivate_extract_buffer();
break;
}
default:
die("Unknown extract type: " << type);
}
m_stats.extract_min_time.stop();
check_invariants();
}
//! \}
//! \name Bulk-Limit Operations
//! \{
protected:
//! current limit element
value_type m_limit_element;
//! flag if inside a bulk limit extract session
bool m_limit_extract;
//! flag if the extract buffer contains the full limit range
bool m_limit_has_full_range;
public:
//! Begin bulk-limit extraction session with limit element.
void limit_begin(const value_type& limit, size_type bulk_size)
{
m_limit_extract = true;
m_limit_element = limit;
std::vector<value_type> new_extract_buffer;
m_limit_has_full_range =
bulk_pop_limit(new_extract_buffer, limit, m_extract_buffer_limit);
std::swap(new_extract_buffer, m_extract_buffer);
m_extract_buffer_index = 0;
m_extract_buffer_size = m_extract_buffer.size();
if (m_extract_buffer_size)
m_minima.update_extract_buffer();
else
m_minima.deactivate_extract_buffer();
bulk_push_begin(bulk_size);
}
//! Push new item >= bulk-limit element into insertion heap p.
void limit_push(const value_type& element, const size_t p = 0)
{
assert(m_limit_extract);
assert(!m_compare(m_limit_element, element));
return bulk_push(element, p);
}
//! Access the minimum element, which can only be in the extract buffer.
const value_type & limit_top()
{
assert(m_limit_extract);
        // if buffer is empty and we extracted the full range last time, return
        // the limit element as a sentinel.
if (m_extract_buffer_size == 0 && m_limit_has_full_range)
return m_limit_element;
if (extract_buffer_empty())
{
// extract more items
std::vector<value_type> new_extract_buffer;
m_limit_has_full_range =
bulk_pop_limit(new_extract_buffer, m_limit_element,
m_extract_buffer_limit);
std::swap(new_extract_buffer, m_extract_buffer);
m_extract_buffer_index = 0;
m_extract_buffer_size = m_extract_buffer.size();
if (m_extract_buffer_size)
m_minima.update_extract_buffer();
else
m_minima.deactivate_extract_buffer();
}
return m_extract_buffer[m_extract_buffer_index];
}
//! Remove the minimum element, only works correctly while elements < L.
void limit_pop()
{
assert(m_limit_extract);
++m_extract_buffer_index;
assert(m_extract_buffer_size > 0);
--m_extract_buffer_size;
if (extract_buffer_empty() && !m_limit_has_full_range)
{
// extract more items
std::vector<value_type> new_extract_buffer;
m_limit_has_full_range =
bulk_pop_limit(new_extract_buffer, m_limit_element,
m_extract_buffer_limit);
std::swap(new_extract_buffer, m_extract_buffer);
m_extract_buffer_index = 0;
m_extract_buffer_size = m_extract_buffer.size();
if (m_extract_buffer_size)
m_minima.update_extract_buffer();
else
m_minima.deactivate_extract_buffer();
}
}
//! Finish bulk-limit extraction session.
void limit_end()
{
assert(m_limit_extract);
bulk_push_end();
m_limit_extract = false;
}
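    /* Usage sketch (illustrative only): a bulk-limit session combining
     * extraction of all items up to a limit with pushes of new items that are
     * not smaller than the limit. `limit`, `expected_bulk_size` and process()
     * are placeholders; value_type is assumed to be equality-comparable so
     * the limit element can serve as sentinel.
     *
     *   ppq.limit_begin(limit, expected_bulk_size);
     *   while (!(ppq.limit_top() == limit))   // limit element is the sentinel
     *   {
     *       value_type v = ppq.limit_top();
     *       ppq.limit_pop();
     *       process(v);                       // may call ppq.limit_push(w)
     *   }
     *   ppq.limit_end();
     */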
//! \}
protected:
//! Flushes all elements of the insertion heaps which are greater
//! or equal to a given limit.
//! \param limit limit value
void flush_insertion_heaps_with_limit(const value_type& limit)
{
// perform extract for all items < L into back of insertion_heap
std::vector<size_t> back_size(m_num_insertion_heaps);
//#if STXXL_PARALLEL
//#pragma omp parallel for
//#endif
for (long p = 0; p < static_cast<long>(m_num_insertion_heaps); ++p)
{
heap_type& insheap = m_proc[p]->insertion_heap;
typename heap_type::iterator back = insheap.end();
while (back != insheap.begin() &&
m_compare(limit, insheap[0]))
{
// while top < L, perform pop_heap: put top to back and
// siftDown new items (shortens heap by one)
std::pop_heap(insheap.begin(), back, m_compare);
--back;
}
            // the range [back, insheap.end()) is < L, the rest is >= L.
for (typename heap_type::const_iterator it = insheap.begin();
it != insheap.end(); ++it)
{
if (it < back)
assert(!m_compare(limit, *it));
else
assert(m_compare(limit, *it));
}
back_size[p] = insheap.end() - back;
}
// put items from insertion heaps into an internal array
const size_t back_sum = std::accumulate(
back_size.begin(), back_size.end(), 0u);
LOG << "flush_insertion_heaps_with_limit(): back_sum = " << back_sum;
if (back_sum)
{
// test that enough RAM is available for remaining items
flush_ia_ea_until_memory_free(back_sum * sizeof(value_type));
std::vector<value_type> values(back_sum);
// copy items into values vector
typename std::vector<value_type>::iterator vi = values.begin();
for (unsigned p = 0; p < m_num_insertion_heaps; ++p)
{
heap_type& insheap = m_proc[p]->insertion_heap;
std::copy(insheap.end() - back_size[p], insheap.end(), vi);
vi += back_size[p];
insheap.resize(insheap.size() - back_size[p]);
if (insheap.empty())
m_minima.deactivate_heap(p);
else
m_minima.update_heap(p);
}
potentially_parallel::sort(values.begin(), values.end(), m_inv_compare);
add_as_internal_array(values);
m_heaps_size -= back_sum;
}
}
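    /* Technique sketch (illustrative only): the limit flush above partitions
     * a binary heap by repeatedly applying std::pop_heap, which moves the
     * current top behind the shrinking heap range. A self-contained
     * illustration with a min-heap of ints (values chosen arbitrarily):
     *
     *   std::vector<int> h = { 5, 9, 3, 7 };
     *   std::make_heap(h.begin(), h.end(), std::greater<int>()); // h[0] == 3
     *   auto back = h.end();
     *   while (back != h.begin() && h[0] < 6) {   // pop everything below 6
     *       std::pop_heap(h.begin(), back, std::greater<int>());
     *       --back;
     *   }
     *   // [h.begin(), back) is still a heap holding 7 and 9,
     *   // [back, h.end()) holds the popped items 5 and 3.
     */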
public:
/*!
* Merges all external arrays and all internal arrays into one external array.
* Public for benchmark purposes.
*/
void merge_external_arrays()
{
LOG1 << "Merging external arrays. This should not happen."
<< " You should adjust memory assignment and/or external array level size.";
check_external_level(0, true);
LOG << "Merging all external arrays done.";
resize_read_pool();
// Rebuild hint tree completely as the hint sequence may have changed.
if (!m_in_bulk_push)
rebuild_hint_tree();
else
assert(m_external_arrays.size() - 1 >= m_bulk_first_delayed_external_array);
check_invariants();
}
//! Free up memory by flushing internal arrays and combining external
//! arrays until enough bytes are free.
void flush_ia_ea_until_memory_free(size_t mem_free)
{
if (m_mem_left >= mem_free) return;
if (m_internal_size > 0) {
flush_internal_arrays();
}
else {
merge_external_arrays();
}
assert(m_mem_left >= mem_free);
}
//! Automatically resize the read/prefetch buffer pool depending on number
//! of external arrays.
void resize_read_pool()
{
size_t new_num_read_blocks = static_cast<size_t>(
(m_num_read_blocks_per_ea * static_cast<float>(m_external_arrays.size())));
LOG << "resize_read_pool:" <<
" m_num_read_blocks=" << m_num_read_blocks <<
" ea_size=" << m_external_arrays.size() <<
" m_num_read_blocks_per_ea=" << m_num_read_blocks_per_ea <<
" new_num_read_blocks=" << new_num_read_blocks <<
" free_size_prefetch=" << m_pool.free_size_prefetch() <<
" m_num_hinted_blocks=" << m_num_hinted_blocks <<
" m_num_used_read_blocks=" << m_num_used_read_blocks;
// add new blocks
if (new_num_read_blocks > m_num_read_blocks)
{
const size_t mem_needed =
(new_num_read_blocks - m_num_read_blocks) * block_size;
// -tb: this may recursively call this function!
//flush_ia_ea_until_memory_free(mem_needed);
assert(m_mem_left >= mem_needed);
while (new_num_read_blocks > m_num_read_blocks) {
block_type* new_block = new block_type();
m_pool.add_prefetch(new_block);
++m_num_read_blocks;
}
m_mem_left -= mem_needed;
}
// steal extra blocks (as many as possible)
if (new_num_read_blocks < m_num_read_blocks)
{
while (new_num_read_blocks < m_num_read_blocks &&
m_pool.free_size_prefetch() > 0)
{
block_type* del_block = m_pool.steal_prefetch();
delete del_block;
--m_num_read_blocks;
m_mem_left += block_size;
}
if (new_num_read_blocks < m_num_read_blocks)
LOG1 << "WARNING: could not immediately reduce read/prefetch pool!";
}
}
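    /* Worked example (illustrative only): with m_num_read_blocks_per_ea = 1.5
     * and 4 external arrays, the target pool size above is
     * static_cast<size_t>(1.5f * 4) = 6 read/prefetch blocks. Growing from,
     * say, 3 blocks allocates 3 new blocks and charges 3 * block_size bytes
     * against m_mem_left; shrinking only returns blocks (and memory) while
     * the prefetch pool still has free blocks to steal. */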
//! Rebuild hint tree completely as the hint sequence may have changed, and
//! re-hint the correct block sequence.
void rebuild_hint_tree()
{
m_stats.hint_time.start();
// prepare rehinting sequence: reset hint begin pointer
for (size_t i = 0; i < m_external_arrays.size(); ++i)
m_external_arrays[i].rebuild_hints_prepare();
// rebuild hint tree with first elements
for (size_t i = 0; i < m_external_arrays.size(); ++i)
{
if (m_external_arrays[i].has_unhinted_em_data()) {
m_hint_tree.activate_without_replay(i);
}
else {
m_hint_tree.deactivate_without_replay(i);
}
}
m_hint_tree.rebuild();
// virtually release all hints
size_t free_prefetch_blocks =
m_pool.free_size_prefetch() + m_num_hinted_blocks;
m_num_hinted_blocks = 0;
size_t gmin_index;
while (free_prefetch_blocks > 0 &&
(gmin_index = m_hint_tree.top()) != m_hint_tree.invalid_key)
{
assert(gmin_index < m_external_arrays.size());
LOG << "Give pre-hint in EA[" << gmin_index << "] min " <<
m_external_arrays[gmin_index].get_next_hintable_min();
m_external_arrays[gmin_index].rebuild_hints_prehint_next_block();
--free_prefetch_blocks;
++m_num_hinted_blocks;
if (m_external_arrays[gmin_index].has_unhinted_em_data()) {
m_hint_tree.replay_on_change(gmin_index);
}
else {
m_hint_tree.deactivate_player(gmin_index);
}
}
// invalidate all hinted blocks no longer needed
for (size_t i = 0; i < m_external_arrays.size(); ++i)
m_external_arrays[i].rebuild_hints_cancel();
// perform real hinting on pre-hinted blocks
for (size_t i = 0; i < m_external_arrays.size(); ++i)
m_external_arrays[i].rebuild_hints_finish();
assert(free_prefetch_blocks == m_pool.free_size_prefetch());
m_stats.hint_time.stop();
}
//! Updates the prefetch prediction tree after a remove_items(), which frees
//! up blocks.
//! \param ea_index index of the external array in question
inline void update_hint_tree(size_t ea_index)
{
m_stats.hint_time.start();
if (m_external_arrays[ea_index].has_unhinted_em_data()) {
m_hint_tree.replay_on_change(ea_index);
}
else {
m_hint_tree.deactivate_player(ea_index);
}
m_stats.hint_time.stop();
}
//! Updates the external min tree after a remove() or a
//! wait_next_blocks() call.
//! \param ea_index index of the external array in question
inline void update_external_min_tree(size_t ea_index)
{
if (m_external_arrays[ea_index].has_em_data()) {
m_external_min_tree.replay_on_change(ea_index);
}
else {
m_external_min_tree.deactivate_player(ea_index);
}
}
//! Hints EA blocks which will be needed soon. Hints at most
//! m_num_prefetchers blocks globally.
inline void hint_external_arrays()
{
m_stats.hint_time.start();
LOG << "hint_external_arrays()"
" for free_size_prefetch=" << m_pool.free_size_prefetch();
size_t gmin_index;
while (m_pool.free_size_prefetch() > 0 &&
(gmin_index = m_hint_tree.top()) != m_hint_tree.invalid_key)
{
assert(gmin_index < m_external_arrays.size());
LOG << "Give hint in EA[" << gmin_index << "]";
m_external_arrays[gmin_index].hint_next_block();
++m_num_hinted_blocks;
if (m_external_arrays[gmin_index].has_unhinted_em_data()) {
m_hint_tree.replay_on_change(gmin_index);
}
else {
m_hint_tree.deactivate_player(gmin_index);
}
}
m_stats.hint_time.stop();
}
//! Print statistics.
void print_stats() const
{
LOG << "c_merge_sorted_heaps = " << c_merge_sorted_heaps;
LOG << "c_limit_extract_buffer = " << c_limit_extract_buffer;
LOG << "c_single_insert_limit = " << c_single_insert_limit;
if (c_limit_extract_buffer) {
LOG << "m_extract_buffer_limit = " << m_extract_buffer_limit;
LOG << "m_extract_buffer_limit * sizeof(value_type) = " << tlx::format_iec_units(m_extract_buffer_limit * sizeof(value_type));
}
#if STXXL_PARALLEL
LOG << "omp_get_max_threads() = " << omp_get_max_threads();
#endif
LOG << "m_mem_for_heaps = " << tlx::format_iec_units(m_mem_for_heaps);
LOG << "m_mem_left = " << tlx::format_iec_units(m_mem_left);
//if (num_extract_buffer_refills > 0) {
// LOG << "total_extract_buffer_size / num_extract_buffer_refills = " << total_extract_buffer_size / num_extract_buffer_refills;
// LOG << "total_extract_buffer_size / num_extract_buffer_refills * sizeof(value_type) = " << tlx::format_iec_units(total_extract_buffer_size / num_extract_buffer_refills * sizeof(value_type));
//}
LOG1 << m_stats;
m_minima.print_stats();
}
protected:
//! Calculates the sequences vector needed by the multiway merger,
//! considering inaccessible data from external arrays.
//! \param sizes The sizes vector stores the size of each sequence. (output parameter)
//! \param sequences Result vector which stores the begin and end iterators of the
//! merge sequences (also input parameter if reuse_previous_lower_bounds is true)
//! \param reuse_previous_lower_bounds If true, the method reuses the upper bounds
//! from previous runs. sequences[i].second must then be a valid upper bound
//! iterator from a previous run!
//! \returns the index of the external array which is the limiting factor,
//! or m_external_arrays.size() if not limited.
size_t calculate_merge_sequences(std::vector<size_type>& sizes,
std::vector<iterator_pair_type>& sequences,
bool reuse_previous_lower_bounds = false)
{
LOG <<
"calculate_merge_sequences() " <<
"reuse_previous_lower_bounds=" << reuse_previous_lower_bounds;
static const bool debug = false;
const size_type eas = m_external_arrays.size();
const size_type ias = m_internal_arrays.size();
assert(sizes.size() == eas + ias);
assert(sequences.size() == eas + ias);
/*
* determine minimum of each first block
*/
const size_t gmin_index = m_external_min_tree.top();
bool needs_limit = (gmin_index != m_external_min_tree.invalid_key);
LOG << "calculate_merge_sequences() gmin_index=" << gmin_index <<
" needs_limit=" << needs_limit;
// test correctness of external block min tree
#ifdef STXXL_EXPENSIVE_ASSERTIONS
m_stats.refill_minmax_time.start();
{
auto find_em = [&](size_t idx) {
for (size_t i = idx; i < m_external_arrays.size(); ++i) {
if (m_external_arrays[i].has_em_data())
return i;
}
return m_external_arrays.size();
};
size_t test_gmin_index = find_em(0);
if (test_gmin_index != eas) {
value_type test_gmin_value =
m_external_arrays[test_gmin_index].get_next_block_min();
for (size_t i = find_em(test_gmin_index + 1); i < eas; i = find_em(i + 1)) {
const value_type& min_value =
m_external_arrays[i].get_next_block_min();
LOG << "min[" << i << "]: " << min_value <<
" test: " << test_gmin_value <<
": " << m_inv_compare(min_value, test_gmin_value);
if (m_inv_compare(min_value, test_gmin_value)) {
test_gmin_value = min_value;
test_gmin_index = i;
}
}
assert(gmin_index == test_gmin_index);
tlx::unused(gmin_index);
}
else {
assert(!needs_limit);
tlx::unused(needs_limit);
}
}
m_stats.refill_minmax_time.stop();
#endif
/*
* calculate size and create sequences to merge
*/
#if STXXL_PARALLEL
// #pragma omp parallel for if(eas + ias > m_num_insertion_heaps)
// ATTENTION: change type of i to long if pragma is activated!
#endif
for (size_type i = 0; i < eas + ias; ++i) {
iterator begin, end;
if (i < eas) {
begin = m_external_arrays[i].begin();
end = m_external_arrays[i].end();
}
else {
size_type j = i - eas;
begin = m_internal_arrays[j].begin();
end = m_internal_arrays[j].end();
}
if (needs_limit) {
const value_type& gmin_value =
m_external_arrays[gmin_index].get_next_block_min();
// remove timer if parallel
//stats.refill_lower_bound_time.start();
if (reuse_previous_lower_bounds) {
// Be careful that sequences[i].second is really valid and
// set by a previous calculate_merge_sequences() run!
end = std::lower_bound(sequences[i].second, end,
gmin_value, m_inv_compare);
}
else
{
LOG << "lower_bound [" << begin << "," << end << ")" <<
" gmin_value " << gmin_value;
end = std::lower_bound(begin, end,
gmin_value, m_inv_compare);
}
//stats.refill_lower_bound_time.stop();
}
sizes[i] = std::distance(begin, end);
sequences[i] = std::make_pair(begin, end);
LOG << "sequence[" << i << "] " << (i < eas ? "ea " : "ia ") <<
begin << " - " << end <<
" size " << sizes[i] <<
(needs_limit ? " with ub limit" : "");
}
if (needs_limit) {
LOG << "return with needs_limit: gmin_index=" << gmin_index;
return gmin_index;
}
else {
LOG << "return with needs_limit: eas=" << eas;
return eas;
}
}
protected:
//! Convert extract buffer into a new internal array.
void convert_eb_into_ia(bool do_not_flush = false)
{
if (m_extract_buffer_size == 0) return;
LOG << "convert_eb_into_ia";
// tb: if in limit sequence and the EB gets flushed out to EM, then we
// have to re-merge items into the EB instead of returning the
// sentinel.
m_limit_has_full_range = false;
// TODO: memory is NOT allocated, but extract buffer is currently not
// counted
if (!do_not_flush)
flush_ia_ea_until_memory_free(
internal_array_type::int_memory(m_extract_buffer.size()));
if (m_extract_buffer_size == 0) return;
// first deactivate extract buffer to replay tree for new IA.
m_minima.deactivate_extract_buffer();
// add eb as internal array with current index
add_as_internal_array(m_extract_buffer, m_extract_buffer_index);
m_extract_buffer_index = 0;
m_extract_buffer_size = 0;
}
//! Refills the extract buffer from the external arrays.
//! \param minimum_size requested minimum size of the resulting extract buffer.
//! Prints a warning if there is not enough data to reach this size.
//! \param maximum_size maximum size of the extract buffer. Using
//! m_extract_buffer_limit if set to 0.
inline void refill_extract_buffer(size_t minimum_size = 0,
size_t maximum_size = 0)
{
LOG << "refill_extract_buffer()" <<
" ia_size=" << m_internal_arrays.size() <<
" ea_size=" << m_external_arrays.size();
if (maximum_size == 0)
maximum_size = m_extract_buffer_limit;
check_invariants();
assert(extract_buffer_empty());
m_extract_buffer_index = 0;
cleanup_external_arrays();
size_type ias, eas = m_external_arrays.size();
m_minima.clear_internal_arrays();
cleanup_internal_arrays();
ias = m_internal_arrays.size();
if (eas == 0 && ias == 0) {
m_extract_buffer.resize(0);
m_minima.deactivate_extract_buffer();
return;
}
m_stats.num_extract_buffer_refills++;
m_stats.refill_extract_buffer_time.start();
m_stats.refill_time_before_merge.start();
std::vector<size_type> sizes(eas + ias);
std::vector<iterator_pair_type> sequences(eas + ias);
size_type output_size = 0;
if (minimum_size > 0) {
size_t limiting_ea_index = eas + 1;
bool reuse_lower_bounds = false;
while (output_size < minimum_size)
{
LOG << "refill: request more data," <<
" output_size=" << output_size <<
" minimum_size=" << minimum_size <<
" limiting_ea_index=" << limiting_ea_index;
if (limiting_ea_index < eas) {
LOG << "refill: limiting_ea_index";
if (m_external_arrays[limiting_ea_index].num_hinted_blocks() == 0)
break;
wait_next_ea_blocks(limiting_ea_index);
reuse_lower_bounds = true;
}
else if (limiting_ea_index == eas) {
// no more inaccessible EM data
LOG1 << "Warning: refill_extract_buffer(n): minimum_size > # mergeable elements!";
break;
}
limiting_ea_index = calculate_merge_sequences(
sizes, sequences, reuse_lower_bounds);
output_size = std::accumulate(sizes.begin(), sizes.end(), 0u);
}
}
else {
calculate_merge_sequences(sizes, sequences);
output_size = std::accumulate(sizes.begin(), sizes.end(), 0u);
}
if (c_limit_extract_buffer) {
output_size = std::min<size_t>(output_size, maximum_size);
}
m_stats.max_extract_buffer_size.set_max(output_size);
m_stats.total_extract_buffer_size += output_size;
assert(output_size > 0);
m_extract_buffer.resize(output_size);
m_extract_buffer_size = output_size;
m_stats.refill_time_before_merge.stop();
m_stats.refill_merge_time.start();
potentially_parallel::multiway_merge(
sequences.begin(), sequences.end(),
m_extract_buffer.begin(), output_size, m_inv_compare);
m_stats.refill_merge_time.stop();
m_stats.refill_time_after_merge.start();
advance_arrays(sequences, sizes, eas, ias);
m_minima.update_extract_buffer();
m_stats.refill_time_after_merge.stop();
m_stats.refill_extract_buffer_time.stop();
check_invariants();
}
//! Requests more EM data from a given EA and updates
//! the winner trees and hints accordingly.
inline void wait_next_ea_blocks(const size_t ea_index)
{
LOG << "wait_next_ea_blocks() ea_index=" << ea_index;
size_t used_blocks =
m_external_arrays[ea_index].wait_next_blocks();
m_num_hinted_blocks -= used_blocks;
m_num_used_read_blocks += used_blocks;
update_external_min_tree(ea_index);
}
//! Removes merged items from the arrays, drops empty arrays, and updates the
//! winner trees accordingly.
inline void advance_arrays(std::vector<iterator_pair_type>& sequences,
std::vector<size_type>& sizes,
size_t eas, size_t ias)
{
size_t total_freed_blocks = 0;
for (size_type i = 0; i < eas + ias; ++i) {
// dist represents the number of elements that haven't been merged
size_type dist = std::distance(sequences[i].first,
sequences[i].second);
const size_t diff = sizes[i] - dist;
if (diff == 0) continue;
if (i < eas) {
// remove items and free blocks in RAM.
const size_t freed_blocks =
m_external_arrays[i].remove_items(diff);
m_num_used_read_blocks -= freed_blocks;
total_freed_blocks += freed_blocks;
// correct item count.
assert(m_external_size >= diff);
m_external_size -= diff;
}
else {
size_type j = i - eas;
m_internal_arrays[j].inc_min(diff);
assert(m_internal_size >= diff);
m_internal_size -= diff;
}
}
// remove empty arrays - important for the next round (may also reduce
// number of prefetch buffers, so must be before hinting).
cleanup_external_arrays();
// prefetch new blocks from EAs using freed blocks
if (total_freed_blocks)
hint_external_arrays();
m_stats.num_new_external_arrays = 0;
cleanup_internal_arrays();
}
//! Flushes the insertion heap p into an internal array.
inline void flush_insertion_heap(size_t p)
{
assert(m_proc[p]->insertion_heap.size() != 0);
heap_type& insheap = m_proc[p]->insertion_heap;
size_t size = insheap.size();
LOG0 <<
"Flushing insertion heap array p=" << p <<
" size=" << insheap.size() <<
" capacity=" << insheap.capacity() <<
" int_memory=" << internal_array_type::int_memory(insheap.size()) <<
" mem_left=" << m_mem_left;
m_stats.num_insertion_heap_flushes++;
stats_timer flush_time(true); // separate timer due to parallel sorting
// sort locally, independent of others
std::sort(insheap.begin(), insheap.end(), m_inv_compare);
#if STXXL_PARALLEL
#pragma omp critical(stxxl_flush_insertion_heap)
#endif
{
// test that enough RAM is available for merged internal array:
// otherwise flush the existing internal arrays out to disk.
flush_ia_ea_until_memory_free(
internal_array_type::int_memory(insheap.size()));
// invalidate player in minima tree (before adding the IA to tree)
m_minima.deactivate_heap(p);
// insheap is empty afterwards, as vector was swapped into new_array
add_as_internal_array(insheap);
// reserve new insertion heap
insheap.reserve(m_insertion_heap_capacity);
assert(insheap.capacity() * sizeof(value_type)
== insertion_heap_int_memory());
// update item counts
#if STXXL_PARALLEL
#pragma omp atomic
#endif
m_heaps_size -= size;
}
m_stats.insertion_heap_flush_time += flush_time;
}
//! Flushes all insertion heaps into an internal array.
inline void flush_insertion_heaps()
{
size_type max_mem_needed;
if (c_merge_sorted_heaps) {
max_mem_needed = m_mem_for_heaps;
}
else {
max_mem_needed = insertion_heap_int_memory();
}
// test that enough RAM is available for merged internal array:
// otherwise flush the existing internal arrays out to disk.
flush_ia_ea_until_memory_free(max_mem_needed);
m_stats.num_insertion_heap_flushes++;
m_stats.insertion_heap_flush_time.start();
size_type size = m_heaps_size;
size_type int_memory = 0;
assert(size > 0);
std::vector<std::pair<value_iterator, value_iterator> > sequences(m_num_insertion_heaps);
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
for (long i = 0; i < static_cast<long>(m_num_insertion_heaps); ++i)
{
heap_type& insheap = m_proc[i]->insertion_heap;
std::sort(insheap.begin(), insheap.end(), m_inv_compare);
if (c_merge_sorted_heaps)
sequences[i] = std::make_pair(insheap.begin(), insheap.end());
int_memory += insheap.capacity();
}
if (c_merge_sorted_heaps)
{
m_stats.merge_sorted_heaps_time.start();
std::vector<value_type> merged_array(size);
potentially_parallel::multiway_merge(
sequences.begin(), sequences.end(),
merged_array.begin(), size, m_inv_compare);
m_stats.merge_sorted_heaps_time.stop();
add_as_internal_array(merged_array);
for (size_t i = 0; i < m_num_insertion_heaps; ++i)
{
m_proc[i]->insertion_heap.clear();
m_proc[i]->insertion_heap.reserve(m_insertion_heap_capacity);
}
m_minima.clear_heaps();
}
else
{
for (unsigned i = 0; i < m_num_insertion_heaps; ++i)
{
heap_type& insheap = m_proc[i]->insertion_heap;
if (insheap.size() == 0) continue;
add_as_internal_array(insheap);
// reserve new insertion heap
insheap.reserve(m_insertion_heap_capacity);
}
m_minima.clear_heaps();
}
m_heaps_size = 0;
m_stats.insertion_heap_flush_time.stop();
check_invariants();
}
//! Flushes the internal arrays into an external array.
void flush_internal_arrays()
{
LOG << "Flushing internal arrays" <<
" num_arrays=" << m_internal_arrays.size();
m_stats.num_internal_array_flushes++;
m_stats.internal_array_flush_time.start();
m_minima.clear_internal_arrays();
// also flush extract buffer items out to disk.
convert_eb_into_ia(true);
// clean up internal arrays that have been deleted in extract_min!
cleanup_internal_arrays();
size_type num_arrays = m_internal_arrays.size();
size_type size = m_internal_size;
size_type int_memory = 0;
std::vector<iterator_pair_type> sequences(num_arrays);
for (unsigned i = 0; i < num_arrays; ++i)
{
sequences[i] = std::make_pair(m_internal_arrays[i].begin(),
m_internal_arrays[i].end());
int_memory += m_internal_arrays[i].int_memory();
}
// must release more RAM in IAs than the EA takes, otherwise: merge
// external and internal arrays!
if (int_memory < external_array_type::int_memory(size)
+ static_cast<size_t>(ceil(m_num_read_blocks_per_ea)) * block_size)
{
return merge_external_arrays();
}
// construct new external array
external_array_type ea(size, &m_pool, 0);
m_stats.max_merge_buffer_size.set_max(size);
{
external_array_writer_type external_array_writer(ea);
potentially_parallel::multiway_merge(
sequences.begin(), sequences.end(),
external_array_writer.begin(), size, m_inv_compare);
}
LOG << "Merge done of new ea " << &ea << " size " << size;
m_external_arrays.swap_back(ea);
m_internal_size = 0;
m_external_size += size;
// register EA in min tree
// important for check_external_level()!
m_external_min_tree.activate_without_replay(m_external_arrays.size() - 1);
update_external_min_tree(m_external_arrays.size() - 1);
// register EA in hint tree
m_hint_tree.activate_without_replay(m_external_arrays.size() - 1);
if (!m_in_bulk_push)
update_hint_tree(m_external_arrays.size() - 1);
// else: done in bulk_push_end() -> rebuild_hint_tree()
m_internal_arrays.clear();
m_stats.num_new_internal_arrays = 0;
cleanup_internal_arrays();
// TODO: is this necessary? See cleanup_internal_arrays().
for (size_t i = 0; i < kMaxInternalLevels; ++i)
m_internal_levels[i] = 0;
m_mem_left += int_memory;
m_mem_left -= m_external_arrays.back().int_memory();
m_stats.max_num_external_arrays.set_max(m_external_arrays.size());
m_stats.internal_array_flush_time.stop();
// update EA level and potentially merge
++m_external_levels[0];
check_external_level(0);
resize_read_pool();
// Rebuild hint tree completely as the hint sequence may have changed.
if (!m_in_bulk_push)
rebuild_hint_tree();
else
assert(m_external_arrays.size() - 1 >= m_bulk_first_delayed_external_array);
check_invariants();
}
// Compares the largest accessible value of two external arrays.
struct s_min_tree_comparator {
const external_arrays_type& m_eas;
const std::vector<size_t>& m_indices;
const inv_compare_type& m_compare;
s_min_tree_comparator(const external_arrays_type& eas,
const inv_compare_type& compare,
const std::vector<size_t>& indices)
: m_eas(eas), m_indices(indices), m_compare(compare) { }
bool operator () (const size_t& a, const size_t& b) const
{
return m_compare(m_eas[m_indices[a]].get_next_hintable_min(),
m_eas[m_indices[b]].get_next_hintable_min());
}
};
//! Merges external arrays if there are too many external arrays on
//! the same level.
void check_external_level(const size_t level, const bool force_merge_all = false)
{
if (!force_merge_all)
LOG << "Checking external level " << level;
// return if EA level is not full
if (m_external_levels[level] < c_max_external_level_size && !force_merge_all)
return;
size_t level_size = 0;
size_type int_memory = 0;
std::vector<size_t> ea_index;
for (size_t i = 0; i < m_external_arrays.size(); ++i)
{
if (m_external_arrays[i].level() != level && !force_merge_all) continue;
if (m_external_arrays[i].empty()) continue;
level_size += m_external_arrays[i].size();
int_memory += m_external_arrays[i].int_memory();
ea_index.push_back(i);
}
// return if there is not enough RAM for the new array.
// TODO: force_merge_all==true is for freeing memory. Breaking here is not
// helpful in this case. But one should maybe reserve some space in advance.
if (m_mem_left < external_array_type::int_memory(level_size) && !force_merge_all)
return;
m_mem_left -= external_array_type::int_memory(level_size);
assert(force_merge_all || c_max_external_level_size == ea_index.size());
const size_t num_arrays_to_merge = ea_index.size();
LOG << "merging external arrays" <<
" level=" << level <<
" level_size=" << level_size <<
" sequences=" << num_arrays_to_merge <<
" force_merge_all=" << force_merge_all;
// if force_merge_all: create array in highest level to avoid merging
// of such a large EA.
const size_t new_level = force_merge_all ? kMaxExternalLevels - 1 : level + 1;
// construct new external array
external_array_type ea(level_size, &m_pool, new_level);
{
external_array_writer_type external_array_writer(ea);
typename external_array_writer_type::iterator out_iter
= external_array_writer.begin();
// === build minima_tree over the level's arrays ===
s_min_tree_comparator min_tree_comparator(m_external_arrays,
m_inv_compare, ea_index);
winner_tree<s_min_tree_comparator> min_tree(num_arrays_to_merge,
min_tree_comparator);
// =================================================
size_t num_arrays_done = 0;
while (num_arrays_to_merge != num_arrays_done)
{
LOG << "num_arrays_done = " << num_arrays_done;
// === build hints ===
for (size_t i = 0; i < num_arrays_to_merge; ++i) {
if (m_external_arrays[ea_index[i]].has_unhinted_em_data()) {
min_tree.activate_without_replay(i);
}
else {
min_tree.deactivate_without_replay(i);
}
}
min_tree.rebuild();
// === fill available memory with read blocks ===
while (m_mem_left >= block_size) {
block_type* new_block = new block_type();
m_pool.add_prefetch(new_block);
++m_num_read_blocks;
m_mem_left -= block_size;
}
// ==============================================
// cleanup hints (all arrays, not only the ones to merge)
for (size_t i = 0; i < m_external_arrays.size(); ++i) {
m_external_arrays[i].rebuild_hints_prepare();
}
// virtually release all hints
size_t free_prefetch_blocks =
m_pool.free_size_prefetch() + m_num_hinted_blocks;
m_num_hinted_blocks = 0;
size_t gmin_index_index; // index in ea_index
while (free_prefetch_blocks > 0 &&
(gmin_index_index = min_tree.top()) != min_tree.invalid_key)
{
const size_t gmin_index = ea_index[gmin_index_index];
assert(gmin_index < m_external_arrays.size());
LOG0 << "check_external_level():Give pre-hint in EA[" << gmin_index << "] min " <<
m_external_arrays[gmin_index].get_next_hintable_min();
m_external_arrays[gmin_index].rebuild_hints_prehint_next_block();
--free_prefetch_blocks;
++m_num_hinted_blocks;
if (m_external_arrays[gmin_index].has_unhinted_em_data()) {
min_tree.replay_on_change(gmin_index_index);
}
else {
min_tree.deactivate_player(gmin_index_index);
}
}
// invalidate all hinted blocks no longer needed
// (all arrays, not only the ones to merge)
for (size_t i = 0; i < m_external_arrays.size(); ++i)
m_external_arrays[i].rebuild_hints_cancel();
// perform real hinting on pre-hinted blocks
// (all arrays, not only the ones to merge)
for (size_t i = 0; i < m_external_arrays.size(); ++i)
m_external_arrays[i].rebuild_hints_finish();
assert(free_prefetch_blocks == m_pool.free_size_prefetch());
// ================================ end build hints ======
// === wait for data ===
for (size_t i = 0; i < num_arrays_to_merge; ++i) {
const size_t index = ea_index[i];
const size_t used_blocks =
m_external_arrays[index].wait_all_hinted_blocks();
m_num_hinted_blocks -= used_blocks;
m_num_used_read_blocks += used_blocks;
}
// =====================
// === build sequences ===
std::vector<iterator_pair_type> sequences(num_arrays_to_merge);
std::vector<size_type> sizes(num_arrays_to_merge);
gmin_index_index = min_tree.top();
bool needs_limit = (gmin_index_index != min_tree.invalid_key);
for (size_t i = 0; i < num_arrays_to_merge; ++i) {
const size_t index = ea_index[i];
iterator begin = m_external_arrays[index].begin();
iterator end = m_external_arrays[index].end();
if (needs_limit) {
const size_t gmin_index = ea_index[gmin_index_index];
const value_type& gmin_value =
m_external_arrays[gmin_index].get_next_block_min();
end = std::lower_bound(begin, end,
gmin_value, m_inv_compare);
}
sizes[i] = std::distance(begin, end);
sequences[i] = std::make_pair(begin, end);
LOG << "sequence[" << i << "] ea " <<
begin << " - " << end <<
" size " << sizes[i] <<
(needs_limit ? " with ub limit" : "");
}
// ==========================================
// === merge ===
size_type output_size = std::accumulate(sizes.begin(), sizes.end(), 0u);
out_iter = potentially_parallel::multiway_merge(
sequences.begin(), sequences.end(),
out_iter, output_size, m_inv_compare);
for (size_t i = 0; i < num_arrays_to_merge; ++i) {
const size_t index = ea_index[i];
if (!m_external_arrays[index].empty()) {
// remove items and free blocks in RAM.
size_t freed_blocks =
m_external_arrays[index].remove_items(sizes[i]);
m_num_used_read_blocks -= freed_blocks;
if (m_external_arrays[index].empty())
++num_arrays_done;
}
}
// reset read buffer
resize_read_pool();
// cannot call clear_external_arrays() here, since it
// corrupts ea_index.
}
if (m_in_bulk_push)
m_bulk_first_delayed_external_array = 0; // TODO: workaround
} // destroy external_array_writer
// clean up now empty arrays
cleanup_external_arrays();
m_external_arrays.swap_back(ea);
++m_external_levels[new_level];
// register EA in min tree
m_external_min_tree.activate_without_replay(m_external_arrays.size() - 1);
update_external_min_tree(m_external_arrays.size() - 1);
// register EA in hint tree
m_hint_tree.activate_without_replay(m_external_arrays.size() - 1);
if (!m_in_bulk_push)
update_hint_tree(m_external_arrays.size() - 1);
// else: done in bulk_push_end() -> rebuild_hint_tree()
LOG << "Merge done of new ea " << &ea;
if (!force_merge_all)
check_external_level(level + 1);
check_invariants();
}
//! Adds a new internal array; requires that the values are already sorted!
//! Automatically decreases m_mem_left. Also merges internal arrays if there
//! are too many internal arrays on the same level.
void add_as_internal_array(std::vector<value_type>& values,
const size_t used = 0,
const size_t level = 0)
{
const size_t size = values.size();
const size_t capacity = values.capacity();
assert(size > used); // at least one element
internal_array_type new_array(values, used, level);
assert(new_array.int_memory() ==
internal_array_type::int_memory(capacity));
m_internal_arrays.swap_back(new_array);
if (!extract_buffer_empty()) {
m_stats.num_new_internal_arrays++;
m_stats.max_num_new_internal_arrays.set_max(
m_stats.num_new_internal_arrays);
m_minima.add_internal_array(
static_cast<unsigned>(m_internal_arrays.size()) - 1);
}
m_internal_size += size - used;
m_mem_left -= internal_array_type::int_memory(capacity);
die_unless(level < kMaxInternalLevels &&
"Internal array level is larger than anything possible "
"in this universe. Increase the size of m_internal_levels");
++m_internal_levels[level];
m_stats.max_num_internal_arrays.set_max(m_internal_arrays.size());
// if IA level is too large ...
if (m_internal_levels[level] < c_max_internal_level_size) return;
size_t level_size = 0;
size_type int_memory = 0;
std::vector<iterator_pair_type> sequences;
std::vector<size_t> ia_index;
for (size_t i = 0; i < m_internal_arrays.size(); ++i)
{
if (m_internal_arrays[i].level() != level) continue;
if (m_internal_arrays[i].empty()) continue;
level_size += m_internal_arrays[i].size();
int_memory += m_internal_arrays[i].int_memory();
sequences.push_back(std::make_pair(m_internal_arrays[i].begin(),
m_internal_arrays[i].end()));
ia_index.push_back(i);
}
// AND there is enough RAM to merge it (without flushing out to EA).
if (m_mem_left < internal_array_type::int_memory(level_size)) return;
// must free up more memory than the new array needs.
assert(int_memory >= internal_array_type::int_memory(level_size));
LOG << "merging internal arrays" <<
" level=" << level <<
" level_size=" << level_size <<
" sequences=" << sequences.size();
std::vector<value_type> merged_array(level_size);
potentially_parallel::multiway_merge(
sequences.begin(), sequences.end(),
merged_array.begin(), level_size, m_inv_compare);
// release memory of old internal arrays immediately
for (const size_t& ia : ia_index) {
m_internal_arrays[ia].make_empty();
}
cleanup_internal_arrays();
// in add_as_internal_array the level_size is re-added!
m_internal_size -= level_size;
// add as new internal array at next level (and maybe recursively merge)
add_as_internal_array(merged_array, 0, level + 1);
}
/*!
 * Sorts the given values vector and stores it as an internal array.
* Don't use the value vector afterwards!
*
* \param values the vector to sort and store
*/
void flush_array_internal(std::vector<value_type>& values)
{
potentially_parallel::sort(values.begin(), values.end(), m_inv_compare);
// flush until enough memory for new array
flush_ia_ea_until_memory_free(
internal_array_type::int_memory(values.size()));
add_as_internal_array(values);
}
//! Struct of all statistical counters and timers. Turn on/off statistics
//! using the stats_counter and stats_timer typedefs.
struct stats_type
{
//! Largest number of elements in the extract buffer at the same time
stats_counter max_extract_buffer_size;
//! Sum of the sizes of each extract buffer refill. Used for average
//! size.
stats_counter total_extract_buffer_size;
//! Largest number of elements in the merge buffer when running
//! flush_internal_arrays()
stats_counter max_merge_buffer_size;
//! Total number of extracts
stats_counter num_extracts;
//! Number of refill_extract_buffer() calls
stats_counter num_extract_buffer_refills;
//! Number of flush_insertion_heaps() calls
stats_counter num_insertion_heap_flushes;
//! Number of flush_directly_to_hd() calls
stats_counter num_direct_flushes;
//! Number of flush_internal_arrays() calls
stats_counter num_internal_array_flushes;
//! Number of merge_external_arrays() calls
stats_counter num_external_array_merges;
//! Largest number of internal arrays at the same time
stats_counter max_num_internal_arrays;
//! Largest number of external arrays at the same time
stats_counter max_num_external_arrays;
//! Temporary number of new external arrays at the same time (which
//! were created while the extract buffer hadn't been empty)
stats_counter num_new_external_arrays;
//! Largest number of new external arrays at the same time (which were
//! created while the extract buffer hadn't been empty)
stats_counter max_num_new_external_arrays;
//! Temporary number of new internal arrays at the same time (which
//! were created while the extract buffer hadn't been empty)
stats_counter num_new_internal_arrays;
//! Largest number of new internal arrays at the same time (which were
//! created while the extract buffer hadn't been empty)
stats_counter max_num_new_internal_arrays;
//! Total time for flush_insertion_heaps()
stats_timer insertion_heap_flush_time;
//! Total time for flush_directly_to_hd()
stats_timer direct_flush_time;
//! Total time for flush_internal_arrays()
stats_timer internal_array_flush_time;
//! Total time for merge_external_arrays()
stats_timer external_array_merge_time;
//! Total time for extract_min()
stats_timer extract_min_time;
//! Total time for refill_extract_buffer()
stats_timer refill_extract_buffer_time;
//! Total time for the merging in refill_extract_buffer()
//! Part of refill_extract_buffer_time.
stats_timer refill_merge_time;
//! Total time for all things before merging in refill_extract_buffer()
//! Part of refill_extract_buffer_time.
stats_timer refill_time_before_merge;
//! Total time for all things after merging in refill_extract_buffer()
//! Part of refill_extract_buffer_time.
stats_timer refill_time_after_merge;
//! Total time of wait() calls in first part of
//! refill_extract_buffer(). Part of refill_time_before_merge and
//! refill_extract_buffer_time.
stats_timer refill_wait_time;
//! Total time for pop_heap() in extract_min().
//! Part of extract_min_time.
stats_timer pop_heap_time;
//! Total time for merging the sorted heaps.
//! Part of flush_insertion_heaps.
stats_timer merge_sorted_heaps_time;
//! Total time for std::lower_bound calls in refill_extract_buffer()
//! Part of refill_extract_buffer_time and refill_time_before_merge.
// stats_timer refill_lower_bound_time;
//! Total time for std::accumulate calls in refill_extract_buffer()
//! Part of refill_extract_buffer_time and refill_time_before_merge.
stats_timer refill_accumulate_time;
//! Total time for determining the smallest max value in refill_extract_buffer()
//! Part of refill_extract_buffer_time and refill_time_before_merge.
stats_timer refill_minmax_time;
stats_timer hint_time;
friend std::ostream& operator << (std::ostream& os, const stats_type& o)
{
return os << "max_extract_buffer_size=" << o.max_extract_buffer_size.as_memory_amount(sizeof(value_type)) << std::endl
<< "total_extract_buffer_size=" << o.total_extract_buffer_size.as_memory_amount(sizeof(value_type)) << std::endl
<< "max_merge_buffer_size=" << o.max_merge_buffer_size.as_memory_amount(sizeof(value_type)) << std::endl
<< "num_extracts=" << o.num_extracts << std::endl
<< "num_extract_buffer_refills=" << o.num_extract_buffer_refills << std::endl
<< "num_insertion_heap_flushes=" << o.num_insertion_heap_flushes << std::endl
<< "num_direct_flushes=" << o.num_direct_flushes << std::endl
<< "num_internal_array_flushes=" << o.num_internal_array_flushes << std::endl
<< "num_external_array_merges=" << o.num_external_array_merges << std::endl
<< "max_num_internal_arrays=" << o.max_num_internal_arrays << std::endl
<< "max_num_external_arrays=" << o.max_num_external_arrays << std::endl
<< "num_new_external_arrays=" << o.num_new_external_arrays << std::endl
<< "max_num_new_external_arrays=" << o.max_num_new_external_arrays << std::endl
<< "num_new_internal_arrays=" << o.num_new_internal_arrays << std::endl
<< "max_num_new_internal_arrays=" << o.max_num_new_internal_arrays << std::endl
<< "insertion_heap_flush_time=" << o.insertion_heap_flush_time << std::endl
<< "direct_flush_time=" << o.direct_flush_time << std::endl
<< "internal_array_flush_time=" << o.internal_array_flush_time << std::endl
<< "external_array_merge_time=" << o.external_array_merge_time << std::endl
<< "extract_min_time=" << o.extract_min_time << std::endl
<< "refill_extract_buffer_time=" << o.refill_extract_buffer_time << std::endl
<< "refill_merge_time=" << o.refill_merge_time << std::endl
<< "refill_time_before_merge=" << o.refill_time_before_merge << std::endl
<< "refill_time_after_merge=" << o.refill_time_after_merge << std::endl
<< "refill_wait_time=" << o.refill_wait_time << std::endl
<< "pop_heap_time=" << o.pop_heap_time << std::endl
<< "merge_sorted_heaps_time=" << o.merge_sorted_heaps_time << std::endl
// << "refill_lower_bound_time=" << o.refill_lower_bound_time << std::endl
<< "refill_accumulate_time=" << o.refill_accumulate_time << std::endl
<< "refill_minmax_time=" << o.refill_minmax_time << std::endl
<< "hint_time=" << o.hint_time << std::endl;
}
};
stats_type m_stats;
};
} // namespace stxxl
#endif // !STXXL_CONTAINERS_PARALLEL_PRIORITY_QUEUE_HEADER
|
sparselu.c
|
/**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <libgen.h>
#include "bots.h"
#include "sparselu.h"
/***********************************************************************
* checkmat:
**********************************************************************/
int checkmat (float *M, float *N)
{
int i, j;
float r_err;
for (i = 0; i < bots_arg_size_1; i++)
{
for (j = 0; j < bots_arg_size_1; j++)
{
r_err = M[i*bots_arg_size_1+j] - N[i*bots_arg_size_1+j];
if ( r_err == 0.0 ) continue;
if (r_err < 0.0 ) r_err = -r_err;
if ( M[i*bots_arg_size_1+j] == 0 )
{
bots_message("Checking failure: A[%d][%d]=%f B[%d][%d]=%f; \n",
i,j, M[i*bots_arg_size_1+j], i,j, N[i*bots_arg_size_1+j]);
return FALSE;
}
r_err = r_err / M[i*bots_arg_size_1+j];
if(r_err > EPSILON)
{
bots_message("Checking failure: A[%d][%d]=%f B[%d][%d]=%f; Relative Error=%f\n",
i,j, M[i*bots_arg_size_1+j], i,j, N[i*bots_arg_size_1+j], r_err);
return FALSE;
}
}
}
return TRUE;
}
/***********************************************************************
* genmat:
**********************************************************************/
void genmat (float *M[])
{
int null_entry, init_val, i, j, ii, jj;
float *p;
int a=0,b=0;
init_val = 1325;
/* generating the structure */
for (ii=0; ii < bots_arg_size; ii++)
{
for (jj=0; jj < bots_arg_size; jj++)
{
/* computing null entries */
null_entry=FALSE;
if ((ii<jj) && (ii%3 !=0)) null_entry = TRUE;
if ((ii>jj) && (jj%3 !=0)) null_entry = TRUE;
if (ii%2==1) null_entry = TRUE;
if (jj%2==1) null_entry = TRUE;
if (ii==jj) null_entry = FALSE;
if (ii==jj-1) null_entry = FALSE;
if (ii-1 == jj) null_entry = FALSE;
/* allocating matrix */
if (null_entry == FALSE){
a++;
M[ii*bots_arg_size+jj] = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float));
if ((M[ii*bots_arg_size+jj] == NULL))
{
bots_message("Error: Out of memory\n");
exit(101);
}
/* initializing matrix */
p = M[ii*bots_arg_size+jj];
for (i = 0; i < bots_arg_size_1; i++)
{
for (j = 0; j < bots_arg_size_1; j++)
{
init_val = (3125 * init_val) % 65536;
(*p) = (float)((init_val - 32768.0) / 16384.0);
p++;
}
}
}
else
{
b++;
M[ii*bots_arg_size+jj] = NULL;
}
}
}
bots_debug("allo = %d, no = %d, total = %d, factor = %f\n",a,b,a+b,(float)((float)a/(float)(a+b)));
}
/***********************************************************************
* print_structure:
**********************************************************************/
void print_structure(char *name, float *M[])
{
int ii, jj;
bots_message("Structure for matrix %s @ 0x%p\n",name, M);
for (ii = 0; ii < bots_arg_size; ii++) {
for (jj = 0; jj < bots_arg_size; jj++) {
if (M[ii*bots_arg_size+jj]!=NULL) {bots_message("x");}
else bots_message(" ");
}
bots_message("\n");
}
bots_message("\n");
}
/***********************************************************************
* allocate_clean_block:
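 *   Allocates one bots_arg_size_1 x bots_arg_size_1 block, zero-initializes
 *   it, and aborts if the allocation fails.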
**********************************************************************/
float * allocate_clean_block()
{
int i,j;
float *p, *q;
p = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float));
q=p;
if (p!=NULL){
for (i = 0; i < bots_arg_size_1; i++)
for (j = 0; j < bots_arg_size_1; j++){(*p)=0.0; p++;}
}
else
{
bots_message("Error: Out of memory\n");
exit (101);
}
return (q);
}
/***********************************************************************
* lu0:
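 *   In-place LU factorization (no pivoting) of one dense
 *   bots_arg_size_1 x bots_arg_size_1 block: L (unit lower) is stored
 *   strictly below the diagonal, U on and above it.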
**********************************************************************/
void lu0(float *diag)
{
int i, j, k;
for (k=0; k<bots_arg_size_1; k++)
for (i=k+1; i<bots_arg_size_1; i++)
{
diag[i*bots_arg_size_1+k] = diag[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k];
for (j=k+1; j<bots_arg_size_1; j++)
diag[i*bots_arg_size_1+j] = diag[i*bots_arg_size_1+j] - diag[i*bots_arg_size_1+k] * diag[k*bots_arg_size_1+j];
}
}
/***********************************************************************
* bdiv:
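 *   Block division: solves X * U = row for a sub-diagonal block,
 *   i.e. row := row * U^{-1}, using the U factor stored in the already
 *   factored diagonal block.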
**********************************************************************/
void bdiv(float *diag, float *row)
{
int i, j, k;
for (i=0; i<bots_arg_size_1; i++)
for (k=0; k<bots_arg_size_1; k++)
{
row[i*bots_arg_size_1+k] = row[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k];
for (j=k+1; j<bots_arg_size_1; j++)
row[i*bots_arg_size_1+j] = row[i*bots_arg_size_1+j] - row[i*bots_arg_size_1+k]*diag[k*bots_arg_size_1+j];
}
}
/***********************************************************************
* bmod:
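 *   Block modification (Schur complement update): inner := inner - row * col.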
**********************************************************************/
void bmod(float *row, float *col, float *inner)
{
int i, j, k;
for (i=0; i<bots_arg_size_1; i++)
for (j=0; j<bots_arg_size_1; j++)
for (k=0; k<bots_arg_size_1; k++)
inner[i*bots_arg_size_1+j] = inner[i*bots_arg_size_1+j] - row[i*bots_arg_size_1+k]*col[k*bots_arg_size_1+j];
}
/***********************************************************************
* fwd:
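 *   Forward elimination: col := L^{-1} * col, using the unit lower
 *   triangular factor stored in the already factored diagonal block.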
**********************************************************************/
void fwd(float *diag, float *col)
{
int i, j, k;
for (j=0; j<bots_arg_size_1; j++)
for (k=0; k<bots_arg_size_1; k++)
for (i=k+1; i<bots_arg_size_1; i++)
col[i*bots_arg_size_1+j] = col[i*bots_arg_size_1+j] - diag[i*bots_arg_size_1+k]*col[k*bots_arg_size_1+j];
}
void sparselu_init (float ***pBENCH, char *pass)
{
*pBENCH = (float **) malloc(bots_arg_size*bots_arg_size*sizeof(float *));
genmat(*pBENCH);
print_structure(pass, *pBENCH);
}
void sparselu_seq_call(float **BENCH)
{
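   /* Right-looking blocked LU: factor the diagonal block (lu0), update the
      blocks to its right (fwd) and below it (bdiv), then apply the Schur
      complement update (bmod) to the trailing submatrix. */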
int ii, jj, kk;
for (kk=0; kk<bots_arg_size; kk++)
{
lu0(BENCH[kk*bots_arg_size+kk]);
for (jj=kk+1; jj<bots_arg_size; jj++)
if (BENCH[kk*bots_arg_size+jj] != NULL)
{
fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]);
}
for (ii=kk+1; ii<bots_arg_size; ii++)
if (BENCH[ii*bots_arg_size+kk] != NULL)
{
bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]);
}
for (ii=kk+1; ii<bots_arg_size; ii++)
if (BENCH[ii*bots_arg_size+kk] != NULL)
for (jj=kk+1; jj<bots_arg_size; jj++)
if (BENCH[kk*bots_arg_size+jj] != NULL)
{
if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block();
bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
}
}
}
void sparselu_par_call(float **BENCH)
{
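   /* Same blocked algorithm as sparselu_seq_call(): the fwd/bdiv/bmod block
      kernels are created as OpenMP tasks from worksharing loops inside one
      parallel region; the implicit barriers order the dependent phases. */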
int ii, jj, kk;
bots_message("Computing SparseLU Factorization (%dx%d matrix with %dx%d blocks) ",
bots_arg_size,bots_arg_size,bots_arg_size_1,bots_arg_size_1);
#pragma omp parallel private(kk)
{
for (kk=0; kk<bots_arg_size; kk++)
{
#pragma omp single
lu0(BENCH[kk*bots_arg_size+kk]);
#pragma omp for nowait
for (jj=kk+1; jj<bots_arg_size; jj++)
if (BENCH[kk*bots_arg_size+jj] != NULL)
#pragma omp task firstprivate(kk, jj) shared(BENCH)
{
fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]);
}
#pragma omp for
for (ii=kk+1; ii<bots_arg_size; ii++)
if (BENCH[ii*bots_arg_size+kk] != NULL)
#pragma omp task firstprivate(kk, ii) shared(BENCH)
{
bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]);
}
#pragma omp for private(jj)
for (ii=kk+1; ii<bots_arg_size; ii++)
if (BENCH[ii*bots_arg_size+kk] != NULL)
for (jj=kk+1; jj<bots_arg_size; jj++)
if (BENCH[kk*bots_arg_size+jj] != NULL)
#pragma omp task firstprivate(kk, jj, ii) shared(BENCH)
{
if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block();
bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
}
}
}
bots_message(" completed!\n");
}
void sparselu_fini (float **BENCH, char *pass)
{
print_structure(pass, BENCH);
}
int sparselu_check(float **SEQ, float **BENCH)
{
int ii,jj,ok=1;
for (ii=0; ((ii<bots_arg_size) && ok); ii++)
{
for (jj=0; ((jj<bots_arg_size) && ok); jj++)
{
if ((SEQ[ii*bots_arg_size+jj] == NULL) && (BENCH[ii*bots_arg_size+jj] != NULL)) ok = FALSE;
if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] == NULL)) ok = FALSE;
if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] != NULL))
ok = checkmat(SEQ[ii*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
}
}
if (ok) return BOTS_RESULT_SUCCESSFUL;
else return BOTS_RESULT_UNSUCCESSFUL;
}
|
CorotateFEMConstraintActuate.h
|
#ifndef __COROTATE_FEM_CONSTRAINT_ACTUATE_H__
#define __COROTATE_FEM_CONSTRAINT_ACTUATE_H__
#include "Constraint.h"
#include "Tensor.h"
#include <Eigen/Core>
#include <Eigen/Sparse>
#include <Eigen/Geometry>
#include <Eigen/SVD>
#include <Eigen/Eigenvalues>
#include <iostream>
namespace FEM
{
template <typename TinyScalar, typename TinyConstants>
class CorotateFEMConstraintActuate : public Constraint<TinyScalar, TinyConstants>
{
public:
CorotateFEMConstraintActuate(
const TinyScalar& stiffness,
const TinyScalar& poisson_ratio,
int i0,int i1,int i2,int i3,
TinyScalar volume,const Eigen::Matrix<TinyScalar, 3, 3>& invDm);
int GetI0() {return mi0;}
int GetI1() {return mi1;}
int GetI2() {return mi2;}
int GetI3() {return mi3;}
int GetDof() override;
ConstraintType GetType() override;
void SetActivationLevel(const TinyScalar& a);
// private:
void ComputeF(const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x);
void ComputeP(Eigen::Matrix<TinyScalar, 3, 3>& P);
void ComputedPdF(Tensor3333& dPdF);
void ComputeSVD(const Eigen::Matrix<TinyScalar, 3, 3>& F);
void EvaluateJMatrix(int index, std::vector<Eigen::Triplet<TinyScalar>>& J_triplets);
void EvaluateLMatrix(std::vector<Eigen::Triplet<TinyScalar>>& L_triplets);
void EvaluateDVector(const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x);
void GetDVector(int& index,Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& d);
void fixIndex(int offset);
// protected:
int mi0,mi1,mi2,mi3;
TinyScalar mVol;
TinyScalar mMu,mLambda;
TinyScalar mPoissonRatio;
Eigen::Matrix<TinyScalar, 3, 3> mInvDm;
	Eigen::Matrix<TinyScalar, 3, 3> mDs; // columns: x1-x0, x2-x0, x3-x0
Eigen::Matrix<TinyScalar, 3, 3> mF;
	Eigen::Matrix<TinyScalar, 3, 3> mR,mU,mV,mD; // R=U*V^T; D=diag(svd.singularValues());
Eigen::Matrix<TinyScalar, 3, 3> md;
Eigen::Matrix<TinyScalar, 3, 3> md_volume;
TinyScalar mActivationLevel = 1;
};
#define EPS 1E-4
template <typename TinyScalar, typename TinyConstants>
CorotateFEMConstraintActuate<TinyScalar, TinyConstants>::
CorotateFEMConstraintActuate(const TinyScalar& stiffness,const TinyScalar& poisson_ratio,
int i0,int i1,int i2,int i3,TinyScalar vol,const Eigen::Matrix<TinyScalar, 3, 3>& invDm)
:Constraint<TinyScalar, TinyConstants>(stiffness),
mPoissonRatio(poisson_ratio),
mi0(i0),mi1(i1),mi2(i2),mi3(i3),
mMu(stiffness/((1.0+poisson_ratio))),
mLambda(stiffness*poisson_ratio/((1.0+poisson_ratio)*(1-2.0*poisson_ratio))),
mVol(vol),mInvDm(invDm),mDs(Eigen::Matrix<TinyScalar, 3, 3>::Zero())
{
mF.setZero();
mR.setZero();
mU.setZero();
mV.setZero();
}
template <typename TinyScalar, typename TinyConstants>
void CorotateFEMConstraintActuate<TinyScalar, TinyConstants>::
ComputeF
(const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x)
{
Eigen::Matrix<TinyScalar, 3, 1> x0(x.template block<3,1>(mi0*3,0));
Eigen::Matrix<TinyScalar, 3, 3> Ds;
Ds.template block<3,1>(0,0) = x.template block<3,1>(mi1*3,0)-x0;
Ds.template block<3,1>(0,1) = x.template block<3,1>(mi2*3,0)-x0;
Ds.template block<3,1>(0,2) = x.template block<3,1>(mi3*3,0)-x0;
mDs = Ds;
mF = mDs * mInvDm;
ComputeSVD(mF);
}
template <typename TinyScalar, typename TinyConstants>
void CorotateFEMConstraintActuate<TinyScalar, TinyConstants>::fixIndex(int offset) {
mi0 += offset;
mi1 += offset;
mi2 += offset;
mi3 += offset;
}
template <typename TinyScalar, typename TinyConstants>
void CorotateFEMConstraintActuate<TinyScalar, TinyConstants>::
ComputeP
(Eigen::Matrix<TinyScalar, 3, 3>& P)
{
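	// Corotational linear elasticity (first Piola-Kirchhoff stress):
	// P(F) = mu*(F - R) + lambda*tr(R^T F - I)*R, with R = U*V^T from the SVD of F.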
P = mMu*(mF - mR)
+ mLambda*((mR.transpose()*mF-Eigen::Matrix<TinyScalar, 3, 3>::Identity()).trace())*mR;
}
template <typename TinyScalar, typename TinyConstants>
void CorotateFEMConstraintActuate<TinyScalar, TinyConstants>::
ComputedPdF
(Tensor3333& dPdF)
{
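	// Assembles dP/dF for the corotational model. dR/dF is obtained from the
	// SVD of F; the branch below special-cases (nearly) equal singular values.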
Tensor3333 dFdF, dRdF;
dFdF.SetIdentity();
for(int i =0;i<3;i++) {
for(int j=0;j<3;j++) {
Eigen::Matrix<TinyScalar, 3, 3> M = mU.transpose()*dFdF(i,j)*mV;
if(fabs(mD(0,0)-mD(1,1)) < EPS && fabs(mD(0,0)-mD(2,2)) < EPS) {
Eigen::Matrix<TinyScalar, 3, 3> off_diag_M;
off_diag_M.setZero();
for(int a=0; a<3; a++) {
for(int b=0; b<3; b++) {
if(a==b)
continue;
else
off_diag_M(a,b) = M(a,b) / mD(0,0);
}
}
dRdF(i,j) = mU*off_diag_M*mV.transpose();
} else {
Eigen::Matrix<TinyScalar, 2, 1> unknown_side, known_side;
Eigen::Matrix2d known_matrix;
Eigen::Matrix<TinyScalar, 3, 3> U_tilde, V_tilde;
U_tilde.setZero();
V_tilde.setZero();
Eigen::Matrix2d reg;
reg.setZero();
reg(0,0) = reg(1,1) = EPS;
for (unsigned int row = 0; row < 3; row++) {
for (unsigned int col = 0; col < row; col++) {
known_side = Eigen::Matrix<TinyScalar, 2, 1>(M(col, row), M(row, col));
known_matrix.block<2, 1>(0, 0) = Eigen::Matrix<TinyScalar, 2, 1>(-mD(row,row), mD(col,col));
known_matrix.block<2, 1>(0, 1) = Eigen::Matrix<TinyScalar, 2, 1>(-mD(col,col), mD(row,row));
if (fabs(mD(row,row) - mD(col,col)) < EPS)
known_matrix += reg;
else
assert(fabs(known_matrix.determinant()) > 1E-6);
unknown_side = known_matrix.inverse() * known_side;
U_tilde(row, col) = unknown_side[0];
U_tilde(col, row) = -U_tilde(row, col);
V_tilde(row, col) = unknown_side[1];
V_tilde(col, row) = -V_tilde(row, col);
}
}
Eigen::Matrix<TinyScalar, 3, 3> deltaU = mU*U_tilde;
Eigen::Matrix<TinyScalar, 3, 3> deltaV = V_tilde*mV.transpose();
dRdF(i, j) = deltaU*mV.transpose() + mU*deltaV;
}
}
}
Tensor3333 lambda_term;
for(int i=0; i<3; i++) {
for(int j=0; j<3; j++) {
lambda_term(i,j) =
(dRdF(i,j).transpose()*mF+mR.transpose()*dFdF(i,j)).trace()*mR +
(mR.transpose()*mF-Eigen::Matrix<TinyScalar, 3, 3>::Identity()).trace()*dRdF(i,j);
}
}
dPdF = (dFdF-dRdF)*mMu + mLambda*lambda_term;
}
template <typename TinyScalar, typename TinyConstants>
void CorotateFEMConstraintActuate<TinyScalar, TinyConstants>::
ComputeSVD(const Eigen::Matrix<TinyScalar, 3, 3>& F)
{
// #pragma omp critical
// {
Eigen::JacobiSVD<Eigen::Matrix<TinyScalar, 3, 3>> svd(F, Eigen::ComputeFullU | Eigen::ComputeFullV);
Eigen::Matrix<TinyScalar, 3, 1> D = svd.singularValues();
mD.setZero();
mD(0,0) = D[0];
mD(1,1) = D[1];
mD(2,2) = D[2];
mU = svd.matrixU();
mV = svd.matrixV();
mR = mU*mV.transpose();
mF = F;
// }
}
template <typename TinyScalar, typename TinyConstants>
int CorotateFEMConstraintActuate<TinyScalar, TinyConstants>::
GetDof()
{
return 6;
}
template <typename TinyScalar, typename TinyConstants>
ConstraintType CorotateFEMConstraintActuate<TinyScalar, TinyConstants>::
GetType()
{
return ConstraintType::PNEUMATIC;
}
template <typename TinyScalar, typename TinyConstants>
void CorotateFEMConstraintActuate<TinyScalar, TinyConstants>::
EvaluateJMatrix(int index, std::vector<Eigen::Triplet<TinyScalar>>& J_triplets)
{
Eigen::Matrix<TinyScalar, Eigen::Dynamic, Eigen::Dynamic> Ai(3*3,3*4);
TinyScalar d11 = mInvDm(0,0);
TinyScalar d12 = mInvDm(0,1);
TinyScalar d13 = mInvDm(0,2);
TinyScalar d21 = mInvDm(1,0);
TinyScalar d22 = mInvDm(1,1);
TinyScalar d23 = mInvDm(1,2);
TinyScalar d31 = mInvDm(2,0);
TinyScalar d32 = mInvDm(2,1);
TinyScalar d33 = mInvDm(2,2);
Ai<<
-d11-d21-d31,0,0,d11,0,0,d21,0,0,d31,0,0,
0,-d11-d21-d31,0,0,d11,0,0,d21,0,0,d31,0,
0,0,-d11-d21-d31,0,0,d11,0,0,d21,0,0,d31,
-d12-d22-d32,0,0,d12,0,0,d22,0,0,d32,0,0,
0,-d12-d22-d32,0,0,d12,0,0,d22,0,0,d32,0,
0,0,-d12-d22-d32,0,0,d12,0,0,d22,0,0,d32,
-d13-d23-d33,0,0,d13,0,0,d23,0,0,d33,0,0,
0,-d13-d23-d33,0,0,d13,0,0,d23,0,0,d33,0,
0,0,-d13-d23-d33,0,0,d13,0,0,d23,0,0,d33;
Eigen::Matrix<TinyScalar, Eigen::Dynamic, Eigen::Dynamic> MuAiT = mMu*mVol*Ai.transpose();
int idx[4] = {mi0,mi1,mi2,mi3};
for(int i =0;i<4;i++)
{
for(int j=0;j<3;j++)
{
//MuAiT.block [i,j] -- 3x3 matrix
J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*(index+j)+0, MuAiT(3*i+0, 3*j+0)));
J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*(index+j)+1, MuAiT(3*i+0, 3*j+1)));
J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*(index+j)+2, MuAiT(3*i+0, 3*j+2)));
J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*(index+j)+0, MuAiT(3*i+1, 3*j+0)));
J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*(index+j)+1, MuAiT(3*i+1, 3*j+1)));
J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*(index+j)+2, MuAiT(3*i+1, 3*j+2)));
J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*(index+j)+0, MuAiT(3*i+2, 3*j+0)));
J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*(index+j)+1, MuAiT(3*i+2, 3*j+1)));
J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*(index+j)+2, MuAiT(3*i+2, 3*j+2)));
}
}
index+=3;
MuAiT = (MuAiT*mPoissonRatio).eval();
for(int i =0;i<4;i++)
{
for(int j=0;j<3;j++)
{
//MuAiT.block [i,j] -- 3x3 matrix
J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*(index+j)+0, MuAiT(3*i+0, 3*j+0)));
J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*(index+j)+1, MuAiT(3*i+0, 3*j+1)));
J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*(index+j)+2, MuAiT(3*i+0, 3*j+2)));
J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*(index+j)+0, MuAiT(3*i+1, 3*j+0)));
J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*(index+j)+1, MuAiT(3*i+1, 3*j+1)));
J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*(index+j)+2, MuAiT(3*i+1, 3*j+2)));
J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*(index+j)+0, MuAiT(3*i+2, 3*j+0)));
J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*(index+j)+1, MuAiT(3*i+2, 3*j+1)));
J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*(index+j)+2, MuAiT(3*i+2, 3*j+2)));
}
}
index+=3;
}
template <typename TinyScalar, typename TinyConstants>
void CorotateFEMConstraintActuate<TinyScalar, TinyConstants>::
EvaluateLMatrix(std::vector<Eigen::Triplet<TinyScalar>>& L_triplets)
{
Eigen::Matrix<TinyScalar, Eigen::Dynamic, Eigen::Dynamic> Ai(3*3,3*4);
TinyScalar d11 = mInvDm(0,0);
TinyScalar d12 = mInvDm(0,1);
TinyScalar d13 = mInvDm(0,2);
TinyScalar d21 = mInvDm(1,0);
TinyScalar d22 = mInvDm(1,1);
TinyScalar d23 = mInvDm(1,2);
TinyScalar d31 = mInvDm(2,0);
TinyScalar d32 = mInvDm(2,1);
TinyScalar d33 = mInvDm(2,2);
Ai<<
-d11-d21-d31,0,0,d11,0,0,d21,0,0,d31,0,0,
0,-d11-d21-d31,0,0,d11,0,0,d21,0,0,d31,0,
0,0,-d11-d21-d31,0,0,d11,0,0,d21,0,0,d31,
-d12-d22-d32,0,0,d12,0,0,d22,0,0,d32,0,0,
0,-d12-d22-d32,0,0,d12,0,0,d22,0,0,d32,0,
0,0,-d12-d22-d32,0,0,d12,0,0,d22,0,0,d32,
-d13-d23-d33,0,0,d13,0,0,d23,0,0,d33,0,0,
0,-d13-d23-d33,0,0,d13,0,0,d23,0,0,d33,0,
0,0,-d13-d23-d33,0,0,d13,0,0,d23,0,0,d33;
Eigen::Matrix<TinyScalar, Eigen::Dynamic, Eigen::Dynamic> MuAiTAi = mMu*mVol*((Ai.transpose())*Ai);
int idx[4] = {mi0,mi1,mi2,mi3};
//MuAiT --- 12x12 matrix
for(int i =0;i<4;i++)
{
for(int j=0;j<4;j++)
{
//MuAiTAi.block [i,j] -- 3x3 matrix
L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*idx[j]+0, MuAiTAi(3*i+0, 3*j+0)));
L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*idx[j]+1, MuAiTAi(3*i+0, 3*j+1)));
L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*idx[j]+2, MuAiTAi(3*i+0, 3*j+2)));
L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*idx[j]+0, MuAiTAi(3*i+1, 3*j+0)));
L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*idx[j]+1, MuAiTAi(3*i+1, 3*j+1)));
L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*idx[j]+2, MuAiTAi(3*i+1, 3*j+2)));
L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*idx[j]+0, MuAiTAi(3*i+2, 3*j+0)));
L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*idx[j]+1, MuAiTAi(3*i+2, 3*j+1)));
L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*idx[j]+2, MuAiTAi(3*i+2, 3*j+2)));
}
}
MuAiTAi = (MuAiTAi*mPoissonRatio).eval();
for(int i =0;i<4;i++)
{
for(int j=0;j<4;j++)
{
//MuAiTAi.block [i,j] -- 3x3 matrix
L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*idx[j]+0, MuAiTAi(3*i+0, 3*j+0)));
L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*idx[j]+1, MuAiTAi(3*i+0, 3*j+1)));
L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*idx[j]+2, MuAiTAi(3*i+0, 3*j+2)));
L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*idx[j]+0, MuAiTAi(3*i+1, 3*j+0)));
L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*idx[j]+1, MuAiTAi(3*i+1, 3*j+1)));
L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*idx[j]+2, MuAiTAi(3*i+1, 3*j+2)));
L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*idx[j]+0, MuAiTAi(3*i+2, 3*j+0)));
L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*idx[j]+1, MuAiTAi(3*i+2, 3*j+1)));
L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*idx[j]+2, MuAiTAi(3*i+2, 3*j+2)));
}
}
}
template <typename TinyScalar, typename TinyConstants>
void CorotateFEMConstraintActuate<TinyScalar, TinyConstants>::
EvaluateDVector(const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x)
{
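	// Projection targets for the local step: md is the rotation R (last column
	// flipped if det(F) < 0 to avoid reflections); md_volume rescales the
	// singular values with a few Newton iterations so that their product
	// equals mActivationLevel (volumetric actuation).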
ComputeF(x);
md = mR;
if(mF.determinant()<0)
md.template block<3,1>(0,2) = -mR.template block<3,1>(0,2);
Eigen::Matrix<TinyScalar, 3, 1> S = mD.diagonal();
Eigen::Matrix<TinyScalar, 3, 1> D;
D.setZero();
TinyScalar CD;
for(int i=0;i<5;i++)
{
CD = (S[0]+D[0])*(S[1]+D[1])*(S[2]+D[2])-mActivationLevel;
Eigen::Matrix<TinyScalar, 3, 1> gradCD( (S[1]+D[1])*(S[2]+D[2]),
(S[0]+D[0])*(S[2]+D[2]),
(S[0]+D[0])*(S[1]+D[1]));
D = (gradCD.dot(D) -CD)/(gradCD.squaredNorm())*gradCD;
}
md_volume = mU*((S+D).asDiagonal())*mV.transpose();
}
template <typename TinyScalar, typename TinyConstants>
void CorotateFEMConstraintActuate<TinyScalar, TinyConstants>::
GetDVector(int& index,Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& d)
{
d.template block<3,1>(3*(index+0),0) = md.template block<3,1>(0,0);
d.template block<3,1>(3*(index+1),0) = md.template block<3,1>(0,1);
d.template block<3,1>(3*(index+2),0) = md.template block<3,1>(0,2);
index+=3;
d.template block<3,1>(3*(index+0),0) = md_volume.template block<3,1>(0,0);
d.template block<3,1>(3*(index+1),0) = md_volume.template block<3,1>(0,1);
d.template block<3,1>(3*(index+2),0) = md_volume.template block<3,1>(0,2);
index+=3;
}
template <typename TinyScalar, typename TinyConstants>
void CorotateFEMConstraintActuate<TinyScalar, TinyConstants>::
SetActivationLevel(const TinyScalar& a)
{
mActivationLevel = a;
}
#undef EPS
};
#endif
|
decorate.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE %
% D D E C O O R R A A T E %
% D D EEE C O O RRRR AAAAA T EEE %
% D D E C O O R R A A T E %
% DDDD EEEEE CCCC OOO R R A A T EEEEE %
% %
% %
% MagickCore Image Decoration Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/decorate.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
/*
Define declarations.
*/
#define AccentuateModulate ScaleCharToQuantum(80)
#define HighlightModulate ScaleCharToQuantum(125)
#define ShadowModulate ScaleCharToQuantum(135)
#define DepthModulate ScaleCharToQuantum(185)
#define TroughModulate ScaleCharToQuantum(110)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B o r d e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BorderImage() surrounds the image with a border of the color defined by
% the bordercolor member of the image structure. The width and height
% of the border are defined by the corresponding members of the border_info
% structure.
%
% The format of the BorderImage method is:
%
% Image *BorderImage(const Image *image,const RectangleInfo *border_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o border_info: Define the width and height of the border.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BorderImage(const Image *image,
const RectangleInfo *border_info,ExceptionInfo *exception)
{
Image
*border_image,
*clone_image;
FrameInfo
frame_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(border_info != (RectangleInfo *) NULL);
frame_info.width=image->columns+(border_info->width << 1);
frame_info.height=image->rows+(border_info->height << 1);
frame_info.x=(ssize_t) border_info->width;
frame_info.y=(ssize_t) border_info->height;
frame_info.inner_bevel=0;
frame_info.outer_bevel=0;
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
clone_image->matte_color=image->border_color;
border_image=FrameImage(clone_image,&frame_info,exception);
clone_image=DestroyImage(clone_image);
if (border_image != (Image *) NULL)
border_image->matte_color=image->matte_color;
return(border_image);
}
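/*
Illustrative usage (a sketch, not part of MagickCore; "image" and "exception"
are assumed to come from the caller, e.g. via ReadImage() and
AcquireExceptionInfo()):

RectangleInfo border_info;
Image *bordered;

border_info.width=5;
border_info.height=5;
border_info.x=0;
border_info.y=0;
bordered=BorderImage(image,&border_info,exception);
if (bordered != (Image *) NULL)
{
image=DestroyImage(image);
image=bordered;
}
*/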
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F r a m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FrameImage() adds a simulated three-dimensional border around the image.
% The color of the border is defined by the matte_color member of image.
% Members width and height of frame_info specify the border width of the
% vertical and horizontal sides of the frame. Members inner_bevel and
% outer_bevel indicate the width of the inner and outer shadows of the frame.
%
% The format of the FrameImage method is:
%
% Image *FrameImage(const Image *image,const FrameInfo *frame_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o frame_info: Define the width and height of the frame and its bevels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info,
ExceptionInfo *exception)
{
#define FrameImageTag "Frame/Image"
CacheView
*image_view,
*frame_view;
Image
*frame_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
accentuate,
border,
highlight,
matte,
shadow,
trough;
register ssize_t
x;
size_t
bevel_width,
height,
width;
ssize_t
y;
/*
Check frame geometry.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(frame_info != (FrameInfo *) NULL);
if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0))
ThrowImageException(OptionError,"FrameIsLessThanImageSize");
bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel);
x=(ssize_t) frame_info->width-frame_info->x-bevel_width;
y=(ssize_t) frame_info->height-frame_info->y-bevel_width;
if ((x < (ssize_t) image->columns) || (y < (ssize_t) image->rows))
ThrowImageException(OptionError,"FrameIsLessThanImageSize");
/*
Initialize framed image attributes.
*/
frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue,
exception);
if (frame_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(frame_image,DirectClass) == MagickFalse)
{
InheritException(exception,&frame_image->exception);
frame_image=DestroyImage(frame_image);
return((Image *) NULL);
}
if ((IsPixelGray(&frame_image->border_color) == MagickFalse) &&
(IsGrayColorspace(frame_image->colorspace) != MagickFalse))
(void) SetImageColorspace(frame_image,sRGBColorspace);
if ((frame_image->border_color.opacity != OpaqueOpacity) &&
(frame_image->matte == MagickFalse))
(void) SetImageAlphaChannel(frame_image,OpaqueAlphaChannel);
frame_image->page=image->page;
if ((image->page.width != 0) && (image->page.height != 0))
{
frame_image->page.width+=frame_image->columns-image->columns;
frame_image->page.height+=frame_image->rows-image->rows;
}
/*
Initialize 3D effects color.
*/
GetMagickPixelPacket(frame_image,&matte);
matte.colorspace=sRGBColorspace;
SetMagickPixelPacket(frame_image,&image->matte_color,(IndexPacket *) NULL,
&matte);
GetMagickPixelPacket(frame_image,&border);
border.colorspace=sRGBColorspace;
SetMagickPixelPacket(frame_image,&image->border_color,(IndexPacket *) NULL,
&border);
GetMagickPixelPacket(frame_image,&accentuate);
accentuate.red=(MagickRealType) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate)));
accentuate.green=(MagickRealType) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate)));
accentuate.blue=(MagickRealType) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate)));
accentuate.opacity=matte.opacity;
GetMagickPixelPacket(frame_image,&highlight);
highlight.red=(MagickRealType) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.red+(QuantumRange*HighlightModulate)));
highlight.green=(MagickRealType) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.green+(QuantumRange*HighlightModulate)));
highlight.blue=(MagickRealType) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate)));
highlight.opacity=matte.opacity;
GetMagickPixelPacket(frame_image,&shadow);
shadow.red=QuantumScale*matte.red*ShadowModulate;
shadow.green=QuantumScale*matte.green*ShadowModulate;
shadow.blue=QuantumScale*matte.blue*ShadowModulate;
shadow.opacity=matte.opacity;
GetMagickPixelPacket(frame_image,&trough);
trough.red=QuantumScale*matte.red*TroughModulate;
trough.green=QuantumScale*matte.green*TroughModulate;
trough.blue=QuantumScale*matte.blue*TroughModulate;
trough.opacity=matte.opacity;
if (image->colorspace == CMYKColorspace)
{
ConvertRGBToCMYK(&matte);
ConvertRGBToCMYK(&border);
ConvertRGBToCMYK(&accentuate);
ConvertRGBToCMYK(&highlight);
ConvertRGBToCMYK(&shadow);
ConvertRGBToCMYK(&trough);
}
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
frame_view=AcquireAuthenticCacheView(frame_image,exception);
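/*
Height of the top border section: the outer bevel, then the flat top margin
(frame_info->y less both bevel widths), then the inner bevel.
*/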
height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
frame_info->inner_bevel);
if (height != 0)
{
register IndexPacket
*magick_restrict frame_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
/*
Draw top of ornamental border.
*/
q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns,
height,exception);
frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
if (q != (PixelPacket *) NULL)
{
/*
Draw top of ornamental border.
*/
for (y=0; y < (ssize_t) frame_info->outer_bevel; y++)
{
for (x=0; x < (ssize_t) (frame_image->columns-y); x++)
{
if (x < y)
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
else
SetPixelPacket(frame_image,&accentuate,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) frame_image->columns; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_image->columns-2*frame_info->outer_bevel;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
for (y=0; y < (ssize_t) frame_info->inner_bevel; y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
width=image->columns+((size_t) frame_info->inner_bevel << 1)-
y;
for (x=0; x < (ssize_t) width; x++)
{
if (x < y)
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
else
SetPixelPacket(frame_image,&trough,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
(void) SyncCacheViewAuthenticPixels(frame_view,exception);
}
}
/*
Draw sides of ornamental border.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,frame_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict frame_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
/*
Initialize scanline with matte color.
*/
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y,
frame_image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
/*
Set frame interior pixels.
*/
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelPacket(frame_image,&border,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FrameImage)
#endif
proceed=SetImageProgress(image,FrameImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
height=(size_t) (frame_info->inner_bevel+frame_info->height-
frame_info->y-image->rows-bevel_width+frame_info->outer_bevel);
if (height != 0)
{
register IndexPacket
*magick_restrict frame_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
/*
Draw bottom of ornamental border.
*/
q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows-
height),frame_image->columns,height,exception);
if (q != (PixelPacket *) NULL)
{
/*
Draw bottom of ornamental border.
*/
frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
for (y=frame_info->inner_bevel-1; y >= 0; y--)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < y; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
{
if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y))
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
else
SetPixelPacket(frame_image,&accentuate,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
height=frame_info->height-frame_info->y-image->rows-bevel_width;
for (y=0; y < (ssize_t) height; y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_image->columns-2*frame_info->outer_bevel;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
for (y=frame_info->outer_bevel-1; y >= 0; y--)
{
for (x=0; x < y; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) frame_image->columns; x++)
{
if (x >= (ssize_t) (frame_image->columns-y))
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
else
SetPixelPacket(frame_image,&trough,q,frame_indexes);
q++;
frame_indexes++;
}
}
(void) SyncCacheViewAuthenticPixels(frame_view,exception);
}
}
frame_view=DestroyCacheView(frame_view);
image_view=DestroyCacheView(image_view);
x=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+
frame_info->inner_bevel);
y=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
frame_info->inner_bevel);
if (status != MagickFalse)
status=CompositeImage(frame_image,image->compose,image,x,y);
if (status == MagickFalse)
frame_image=DestroyImage(frame_image);
return(frame_image);
}
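/*
Illustrative FrameInfo setup (a sketch, not part of MagickCore; "image" and
"exception" are assumed to exist). For a 10 pixel border with 3 pixel bevels
the frame geometry must include the source image dimensions:

FrameInfo frame_info;
Image *framed;

frame_info.width=image->columns+2*10;
frame_info.height=image->rows+2*10;
frame_info.x=10;
frame_info.y=10;
frame_info.inner_bevel=3;
frame_info.outer_bevel=3;
framed=FrameImage(image,&frame_info,exception);
*/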
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RaiseImage() creates a simulated three-dimensional button-like effect
% by lightening and darkening the edges of the image. Members width and
% height of raise_info define the width of the vertical and horizontal
% edge of the effect.
%
% The format of the RaiseImage method is:
%
% MagickBooleanType RaiseImage(const Image *image,
% const RectangleInfo *raise_info,const MagickBooleanType raise)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o raise_info: Define the width and height of the raise area.
%
% o raise: A value other than zero creates a 3-D raise effect,
% otherwise it has a lowered effect.
%
*/
MagickExport MagickBooleanType RaiseImage(Image *image,
const RectangleInfo *raise_info,const MagickBooleanType raise)
{
#define AccentuateFactor ScaleCharToQuantum(135)
#define HighlightFactor ScaleCharToQuantum(190)
#define ShadowFactor ScaleCharToQuantum(190)
#define RaiseImageTag "Raise/Image"
#define TroughFactor ScaleCharToQuantum(135)
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
Quantum
foreground,
background;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(raise_info != (RectangleInfo *) NULL);
exception=(&image->exception);
if ((image->columns <= (raise_info->width << 1)) ||
(image->rows <= (raise_info->height << 1)))
ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth",
image->filename);
foreground=QuantumRange;
background=(Quantum) 0;
if (raise == MagickFalse)
{
foreground=(Quantum) 0;
background=QuantumRange;
}
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
/*
Raise image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,raise_info->height,1)
#endif
for (y=0; y < (ssize_t) raise_info->height; y++)
{
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
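/*
Each edge pixel below is blended toward the foreground (raised) or background
(lowered) color as a convex combination:
new=QuantumScale*(old*Factor+fore_or_back*(QuantumRange-Factor)).
*/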
for (x=0; x < y; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
(QuantumRange-HighlightFactor))));
SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
(QuantumRange-HighlightFactor))));
SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
(QuantumRange-HighlightFactor))));
q++;
}
for ( ; x < (ssize_t) (image->columns-y); x++)
{
SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelRed(q)*AccentuateFactor+(MagickRealType) foreground*
(QuantumRange-AccentuateFactor))));
SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelGreen(q)*AccentuateFactor+(MagickRealType) foreground*
(QuantumRange-AccentuateFactor))));
SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelBlue(q)*AccentuateFactor+(MagickRealType) foreground*
(QuantumRange-AccentuateFactor))));
q++;
}
for ( ; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
(QuantumRange-ShadowFactor))));
SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
(QuantumRange-ShadowFactor))));
SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
(QuantumRange-ShadowFactor))));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_RaiseImage)
#endif
proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows-2*raise_info->height,1)
#endif
for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++)
{
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) raise_info->width; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
(QuantumRange-HighlightFactor))));
SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
(QuantumRange-HighlightFactor))));
SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
(QuantumRange-HighlightFactor))));
q++;
}
for ( ; x < (ssize_t) (image->columns-raise_info->width); x++)
q++;
for ( ; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
(QuantumRange-ShadowFactor))));
SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
(QuantumRange-ShadowFactor))));
SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
(QuantumRange-ShadowFactor))));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_RaiseImage)
#endif
proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows-raise_info->height,1)
#endif
for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) (image->rows-y); x++)
{
SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
(QuantumRange-HighlightFactor))));
SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
(QuantumRange-HighlightFactor))));
SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
(QuantumRange-HighlightFactor))));
q++;
}
for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++)
{
SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelRed(q)*TroughFactor+(MagickRealType) background*
(QuantumRange-TroughFactor))));
SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelGreen(q)*TroughFactor+(MagickRealType) background*
(QuantumRange-TroughFactor))));
SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelBlue(q)*TroughFactor+(MagickRealType) background*
(QuantumRange-TroughFactor))));
q++;
}
for ( ; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
(QuantumRange-ShadowFactor))));
SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
(QuantumRange-ShadowFactor))));
SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
(QuantumRange-ShadowFactor))));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_RaiseImage)
#endif
proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
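/*
Illustrative usage (a sketch, not part of MagickCore; "image" is assumed to
come from the caller):

RectangleInfo raise_info;

raise_info.width=6;
raise_info.height=6;
raise_info.x=0;
raise_info.y=0;
(void) RaiseImage(image,&raise_info,MagickTrue);

Passing MagickFalse instead produces the lowered (sunken) effect.
*/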
|
msmGOptions.h
|
/* msmGOptions.h
Emma Robinson, FMRIB Image Analysis Group
Copyright (C) 2008 University of Oxford
Some sections of code inspired by A. Petrovic.
*/
/* CCOPYRIGHT */
#if !defined(msmGOptions_h)
#define msmGOptions_h
#include <string>
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <stdio.h>
#include "utils/options.h"
#include "utils/log.h"
using namespace Utilities;
using namespace NEWMESH;
class msmGOptions {
public:
static msmGOptions& getInstance();
~msmGOptions() { delete gopt; }
Option<bool> help;
Option<bool> verbose;
Option<bool> printoptions;
Option<bool> version;
Option<bool> debug;
Option<string> meshes;
Option<string> templatemesh;
Option<string> data;
Option<string> outbase;
Option<string> parameters; /// slowly replace most of these with the parameter file??
// Option<string> L1matlabpath;
Option<int> multiresolutionlevels;
Option<float> smoothoutput;
bool parse_command_line(int argc, char** argv,Log& logger);
vector<int> return_datarange(NEWMESH::newmesh);
private:
msmGOptions();
const msmGOptions& operator=(msmGOptions&);
msmGOptions(msmGOptions&);
OptionParser options;
static msmGOptions* gopt;
};
inline msmGOptions& msmGOptions::getInstance(){
if(gopt == NULL)
gopt = new msmGOptions();
return *gopt;
}
inline msmGOptions::msmGOptions() :
help(string("-h,--help"), false,
string("display this message"),
false, no_argument),
verbose(string("-v,--verbose"), false,
string("switch on diagnostic messages"),
false, no_argument),
printoptions(string("-p,--printoptions"), false,
string("print configuration file options"),
false, no_argument),
version(string("--version"), false,
string("report version informations"),
false, no_argument),
debug(string("--debug"), false,
string("run debugging or optimising options"),
false, no_argument),
meshes(string("--meshes"), string(""),
string("list of paths to input meshes (available formats: VTK, ASCII, GIFTI). Needs to be a sphere"),
true , requires_argument),
templatemesh(string("--template"), string(""),
string("templates sphere for resampling (available formats: VTK, ASCII, GIFTI). Needs to be a sphere"),
true , requires_argument),
data(string("--data"), string(""),
string("list of paths to the data"),
false , requires_argument),
outbase(string("-o,--out"), string(""),
string("output basename"),
true, requires_argument),
parameters(string("--conf"), string(""),
string("\tconfiguration file "),
false, requires_argument),
// L1matlabpath(string("--L1path"), string(""),
// string("\tPath to L1min matlab code"),
// false, requires_argument),
multiresolutionlevels(string("--levels"),0,
string("number of resolution levels (default = number of resolution levels specified by --opt in config file)"),
false, requires_argument),
smoothoutput(string("--smoothout"), 0,
string("smooth tranformed output with this sigma (default=0)"),
false, requires_argument),
options("msm", "msm [options]\n")
{
try {
options.add(help);
options.add(verbose);
options.add(printoptions);
options.add(version);
options.add(debug);
options.add(meshes);
options.add(templatemesh);
options.add(data);
options.add(outbase);
options.add(parameters);
// options.add(L1matlabpath);
options.add(multiresolutionlevels);
options.add(smoothoutput);
}
catch(X_OptionError& e) {
options.usage();
cerr << endl << e.what() << endl;
}
catch(std::exception &e) {
cerr << e.what() << endl;
}
}
inline bool msmGOptions::parse_command_line(int argc, char** argv,Log& logger){
for(int a = options.parse_command_line(argc, argv); a < argc; a++) ;
if(!(printoptions.value() || version.value()) ){
if(help.value() || ! options.check_compulsory_arguments())
{
options.usage();
//throw NEWMAT::Exception("Not all of the compulsory arguments have been provided");
exit(2);
}
logger.makeDir(outbase.value()+"logdir","MSM.log");
// do again so that options are logged
for(int a = 0; a < argc; a++)
logger.str() << argv[a] << " ";
logger.str() << endl << "---------------------------------------------" << endl << endl;
}
return true;
}
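/* Illustrative driver use (a sketch; "logger" is assumed to be a
Utilities::Log instance owned by the caller):

msmGOptions& opts = msmGOptions::getInstance();
opts.parse_command_line(argc, argv, logger);
if (opts.verbose.value())
cout << "registering meshes listed in: " << opts.meshes.value() << endl;
*/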
/* inline vector<int> msmOptions::return_datarange(NEWMESH::newmesh in){ */
/* vector<int> V; */
/* int range2; */
/* int i; */
/* int indexval=index.value(); */
/* if(range.value()<1E-7){ */
/* range2=in.nvertices(); */
/* cout << " range2 " << range2 << endl;; */
/* } */
/* else{ */
/* range2=range.value(); */
/* cout << " here2 " << endl; */
/* } */
/* if(patch.value()==""){ */
/* // D.ReSize(range); */
/* cout << " in return datarange" << range2 << " range.value() " << range.value() << endl; */
/* // cout << " range " << range << " patch " <<opts.patch.value() << " V.Nrows() " << V.Nrows() << endl; */
/* //shared(V,indexval,range2) private(i) */
/* #pragma omp parallel */
/* { */
/* // cout << " here " << endl; */
/* //cout << "omp_get_thread_num" << omp_get_thread_num() << endl; */
/* #pragma omp for nowait */
/* for( i=0;i<range2;i++) */
/* V.push_back(indexval+i); // labels index from 1 */
/* } */
/* } */
/* else{ */
/* Matrix patchm=read_ascii_matrix(patch.value()); */
/* if(in.get_pvalue(0)){ */
/* for (int i=0;i<in.nvertices();i++) */
/* in.set_pvalue(i,0); */
/* } */
/* cout << " label load not available for newmesh yet " << endl; */
/* exit(0); */
/* // in.load_fs_label(patch.value()); */
/* // cout << "here " << endl; */
/* int ind=0; */
/* for(vector<boost::shared_ptr<Mpoint> >::const_iterator i=in.vbegin();i!=in.vend();i++){ */
/* // cout << " here 1 " << (*i)->get_value() << endl; */
/* if(in.get_pvalue((*i)->get_no()) > 0){ */
/* V.push_back((*i)->get_no()+1); /// labels indexing runs from 1 !!! */
/* ind++; */
/* /// note this was in error before inasmuch as if I was using a patch I would count indexes from 0 but the datarange assumed indexing was running from 1 */
/* // cout << ind << " V(ind) " << V(ind) << endl; */
/* } */
/* } */
/* } */
/* return V; */
/* } */
#endif
|
bml_submatrix_ellsort_typed.c
|
#ifdef BML_USE_MAGMA
#include "magma_v2.h"
#endif
#include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_logger.h"
#include "../bml_submatrix.h"
#include "../bml_types.h"
#include "../dense/bml_allocate_dense.h"
#include "bml_allocate_ellsort.h"
#include "bml_submatrix_ellsort.h"
#include "bml_types_ellsort.h"
#include <complex.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Determine element indices for submatrix, given a set of nodes/orbitals.
*
* \ingroup submatrix_group_C
*
* \param A Hamiltonian matrix A
* \param B Graph matrix B
* \param nodelist List of node/orbital indices
* \param nsize Size of nodelist
* \param core_halo_index List of core+halo indices
* \param vsize Size of core_halo_index and number of cores
* \param double_jump_flag Flag to use double jump (0=no, 1=yes)
*/
void TYPED_FUNC(
bml_matrix2submatrix_index_ellsort) (
bml_matrix_ellsort_t * A,
bml_matrix_ellsort_t * B,
int *nodelist,
int nsize,
int *core_halo_index,
int *vsize,
int double_jump_flag)
{
int l, ll, ii, ls, k;
int A_N = A->N;
int A_M = A->M;
int *A_nnz = A->nnz;
int *A_index = A->index;
int B_N = B->N;
int B_M = B->M;
int *B_nnz = B->nnz;
int *B_index = B->index;
int ix[A_N];
memset(ix, 0, A_N * sizeof(int));
l = 0;
ll = 0;
// Cores are first followed by halos
for (int j = 0; j < nsize; j++)
{
ii = nodelist[j];
if (ix[ii] == 0)
{
ix[ii] = ii + 1;
core_halo_index[l] = ii;
l++;
ll++;
}
}
// Collect halo indices from graph
for (int j = 0; j < nsize; j++)
{
ii = nodelist[j];
for (int jp = 0; jp < B_nnz[ii]; jp++)
{
k = B_index[ROWMAJOR(ii, jp, B_N, B_M)];
if (ix[k] == 0)
{
ix[k] = ii + 1;
core_halo_index[l] = k;
l++;
}
}
}
// Add more halo elements from H
for (int j = 0; j < nsize; j++)
{
ii = nodelist[j];
for (int jp = 0; jp < A_nnz[ii]; jp++)
{
k = A_index[ROWMAJOR(ii, jp, A_N, A_M)];
if (ix[k] == 0)
{
ix[k] = ii + 1;
core_halo_index[l] = k;
l++;
}
}
}
// Perform a "double jump" for extra halo elements
// based on graph, like performing a symbolic X^2
if (double_jump_flag == 1)
{
ls = l;
for (int j = 0; j < ls; j++)
{
ii = core_halo_index[j];
for (int jp = 0; jp < B_nnz[ii]; jp++)
{
k = B_index[ROWMAJOR(ii, jp, B_N, B_M)];
if (ix[k] == 0)
{
ix[k] = ii + 1;
core_halo_index[l] = k;
l++;
}
}
}
}
vsize[0] = l;
vsize[1] = ll;
}
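/* Illustrative call (a sketch, not library documentation; the plain name below
* stands for whichever typed instantiation TYPED_FUNC expands to). For a core
* of two orbitals {3, 7} on an N-row Hamiltonian A with connectivity graph B:
*
* int nodelist[2] = { 3, 7 };
* int core_halo_index[N];
* int vsize[2];
* bml_matrix2submatrix_index_ellsort(A, B, nodelist, 2,
* core_halo_index, vsize, 1);
*
* vsize[0] is then the number of core+halo rows and vsize[1] the number of
* core rows; core_halo_index is sized for the worst case of all N rows.
*/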
/** Determine element indices for submatrix, given a set of nodes/orbitals.
*
* \ingroup submatrix_group_C
*
* \param B Graph matrix B
* \param nodelist List of node/orbital indices
* \param nsize Size of nodelist
* \param core_halo_index List of core+halo indices
* \param vsize Size of core_halo_index and number of cores
* \param double_jump_flag Flag to use double jump (0=no, 1=yes)
*/
void TYPED_FUNC(
bml_matrix2submatrix_index_graph_ellsort) (
bml_matrix_ellsort_t * B,
int *nodelist,
int nsize,
int *core_halo_index,
int *vsize,
int double_jump_flag)
{
int l, ll, ii, ls, k;
int B_N = B->N;
int B_M = B->M;
int *B_index = B->index;
int *B_nnz = B->nnz;
int ix[B_N];
memset(ix, 0, B_N * sizeof(int));
l = 0;
ll = 0;
// Cores are first followed by halos
for (int j = 0; j < nsize; j++)
{
ii = nodelist[j];
if (ix[ii] == 0)
{
ix[ii] = ii + 1;
core_halo_index[l] = ii;
l++;
ll++;
}
}
// Collect halo indices from graph
for (int j = 0; j < nsize; j++)
{
ii = nodelist[j];
for (int jp = 0; jp < B_nnz[ii]; jp++)
{
k = B_index[ROWMAJOR(ii, jp, B_N, B_M)];
if (ix[k] == 0)
{
ix[k] = ii + 1;
core_halo_index[l] = k;
l++;
}
}
}
// Use graph for double jumps
if (double_jump_flag == 1)
{
ls = l;
for (int j = 0; j < ls; j++)
{
ii = core_halo_index[j];
for (int jp = 0; jp < B_nnz[ii]; jp++)
{
k = B_index[ROWMAJOR(ii, jp, B_N, B_M)];
if (ix[k] == 0)
{
ix[k] = ii + 1;
core_halo_index[l] = k;
l++;
}
}
}
}
vsize[0] = l;
vsize[1] = ll;
}
/** Extract a submatrix from a matrix given a set of core+halo rows.
*
* \ingroup submatrix_group_C
*
* \param A Matrix A
* \param B Submatrix B
* \param core_halo_index Set of row indices for submatrix
* \param lsize Number of indices
*/
void TYPED_FUNC(
bml_matrix2submatrix_ellsort) (
bml_matrix_ellsort_t * A,
bml_matrix_dense_t * B,
int *core_halo_index,
int lsize)
{
REAL_T *rvalue;
int B_N = B->N;
#ifdef BML_USE_MAGMA
REAL_T *B_matrix = bml_allocate_memory(sizeof(REAL_T) * B->N * B->N);
#else
REAL_T *B_matrix = B->matrix;
#endif
#pragma omp parallel for \
private(rvalue) \
shared(core_halo_index) \
shared(A, B_matrix, B_N)
for (int jb = 0; jb < lsize; jb++)
{
rvalue = TYPED_FUNC(bml_getVector_ellsort) (A, core_halo_index,
core_halo_index[jb],
lsize);
for (int j = 0; j < lsize; j++)
{
B_matrix[ROWMAJOR(jb, j, B_N, B_N)] = rvalue[j];
}
free(rvalue);
}
#ifdef BML_USE_MAGMA
MAGMA(setmatrix) (B_N, B_N, (MAGMA_T *) B_matrix, B_N,
B->matrix, B->ld, bml_queue());
bml_free_memory(B_matrix);
#endif
}
/** Assemble submatrix into a full matrix based on core+halo indeces.
*
* \ingroup submatrix_group_C
*
* \param A Submatrix A
* \param B Matrix B
* \param core_halo_index Set of submatrix row indices
* \param lsize Number of indices
* \param llsize Number of core positions
*/
void TYPED_FUNC(
bml_submatrix2matrix_ellsort) (
bml_matrix_dense_t * A,
bml_matrix_ellsort_t * B,
int *core_halo_index,
int lsize,
int llsize,
double threshold)
{
int A_N = A->N;
#ifdef BML_USE_MAGMA
REAL_T *A_matrix = bml_allocate_memory(sizeof(REAL_T) * A->N * A->N);
MAGMA(getmatrix) (A->N, A->N,
A->matrix, A->ld, (MAGMA_T *) A_matrix, A->N,
bml_queue());
#else
REAL_T *A_matrix = A->matrix;
#endif
int B_N = B->N;
int B_M = B->M;
int *B_nnz = B->nnz;
int *B_index = B->index;
REAL_T *B_value = B->value;
int ii, icol;
#pragma omp parallel for \
private(ii, icol) \
shared(core_halo_index) \
shared(A_N, A_matrix) \
shared(B_N, B_M, B_nnz, B_index, B_value)
for (int ja = 0; ja < llsize; ja++)
{
ii = core_halo_index[ja];
icol = 0;
for (int jb = 0; jb < lsize; jb++)
{
if (ABS(A_matrix[ROWMAJOR(ja, jb, A_N, A_N)]) > threshold)
{
B_index[ROWMAJOR(ii, icol, B_N, B_M)] = core_halo_index[jb];
B_value[ROWMAJOR(ii, icol, B_N, B_M)] =
A_matrix[ROWMAJOR(ja, jb, A_N, A_N)];
icol++;
}
}
if (icol > B_M)
{
LOG_ERROR("Number of non-zeroes per row >= M, Increase M\n");
}
B_nnz[ii] = icol;
}
#ifdef BML_USE_MAGMA
bml_free_memory(A_matrix);
#endif
}
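/* Typical core+halo flow these routines are combined into (a sketch; matrix
* and buffer names here are placeholders, not library symbols, and the plain
* function names stand for the typed instantiations):
*
* bml_matrix2submatrix_index_ellsort(H, G, nodelist, nsize,
* core_halo_index, vsize, 1);
* bml_matrix2submatrix_ellsort(H, Hsub, core_halo_index, vsize[0]);
* ... dense work on the small matrix Hsub ...
* bml_submatrix2matrix_ellsort(Hsub, P, core_halo_index,
* vsize[0], vsize[1], threshold);
*/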
// Get matching vector of values
void *TYPED_FUNC(
bml_getVector_ellsort) (
bml_matrix_ellsort_t * A,
int *jj,
int irow,
int colCnt)
{
REAL_T ZERO = 0.0;
int A_N = A->N;
int A_M = A->M;
int *A_nnz = A->nnz;
int *A_index = A->index;
REAL_T *A_value = A->value;
REAL_T *rvalue = bml_noinit_allocate_memory(colCnt * sizeof(REAL_T));
for (int i = 0; i < colCnt; i++)
{
for (int j = 0; j < A_nnz[irow]; j++)
{
if (A_index[ROWMAJOR(irow, j, A_N, A_M)] == jj[i])
{
rvalue[i] = A_value[ROWMAJOR(irow, j, A_N, A_M)];
break;
}
rvalue[i] = ZERO;
}
}
return rvalue;
}
/** Assemble matrix based on groups of rows from a matrix.
*
* \ingroup submatrix_group_C
*
* \param A Matrix A
* \param hindex Indices of nodes
* \param ngroups Number of groups
* \param threshold Threshold for graph
*/
bml_matrix_ellsort_t
* TYPED_FUNC(bml_group_matrix_ellsort) (bml_matrix_ellsort_t * A,
int *hindex, int ngroups,
double threshold)
{
int A_N = A->N;
int A_M = A->M;
int *A_index = A->index;
int *A_nnz = A->nnz;
REAL_T *A_value = A->value;
#if !(defined(__IBMC__) || defined(__ibmxl__))
int ix[ngroups];
memset(ix, 0, sizeof(int) * ngroups);
#endif
int hnode[A_N];
int hend;
bml_matrix_dimension_t matrix_dimension = { ngroups, ngroups, ngroups };
bml_matrix_ellsort_t *B =
TYPED_FUNC(bml_noinit_matrix_ellsort) (matrix_dimension,
A->distribution_mode);
int B_N = B->N;
int B_M = B->M;
int *B_index = B->index;
int *B_nnz = B->nnz;
REAL_T *B_value = B->value;
#pragma omp parallel for \
private(hend) \
shared(hindex, hnode, A_N)
for (int i = 0; i < ngroups; i++)
{
hend = hindex[i + 1] - 1;
if (i == ngroups - 1)
hend = A_N;
for (int j = hindex[i] - 1; j < hend; j++)
{
hnode[j] = i;
}
}
#if defined(__IBMC__) || defined(__ibmxl__)
#pragma omp parallel for \
private(hend) \
shared(hindex, hnode) \
shared(A_nnz, A_index, A_value, A_N, A_M) \
shared(B_nnz, B_index, B_value, B_N, B_M)
#else
#pragma omp parallel for \
private(hend) \
shared(hindex, hnode) \
shared(A_nnz, A_index, A_value, A_N, A_M) \
shared(B_nnz, B_index, B_value, B_N, B_M) \
firstprivate(ix)
#endif
for (int i = 0; i < B_N; i++)
{
#if defined(__IBMC__) || defined(__ibmxl__)
int ix[ngroups];
memset(ix, 0, sizeof(int) * ngroups);
#endif
B_nnz[i] = 0;
hend = hindex[i + 1] - 1;
if (i == B_N - 1)
hend = A_N;
for (int j = hindex[i] - 1; j < hend; j++)
{
for (int k = 0; k < A_nnz[j]; k++)
{
int ii = hnode[A_index[ROWMAJOR(j, k, A_N, A_M)]];
if (ix[ii] == 0 &&
is_above_threshold(A_value[ROWMAJOR(j, k, A_N, A_M)],
threshold))
{
ix[ii] = i + 1;
B_index[ROWMAJOR(i, B_nnz[i], B_N, B_M)] = ii;
B_value[ROWMAJOR(i, B_nnz[i], B_N, B_M)] = 1.0;
B_nnz[i]++;
}
}
}
}
return B;
}
/** Extract submatrix into new matrix of same format
*
* \ingroup submatrix_group_C
*
* \param A Matrix A to extract submatrix from
* \param irow Index of first row to extract
* \param icol Index of first column to extract
* \param B_N Number of rows/columns to extract
* \param B_M Max number of non-zero elements/row in extracted matrix
*/
bml_matrix_ellsort_t
* TYPED_FUNC(bml_extract_submatrix_ellsort) (bml_matrix_ellsort_t * A,
int irow, int icol,
int B_N, int B_M)
{
int A_N = A->N;
int A_M = A->M;
int *A_index = A->index;
int *A_nnz = A->nnz;
REAL_T *A_value = A->value;
bml_matrix_ellsort_t *B;
B = TYPED_FUNC(bml_zero_matrix_ellsort) (B_N, B_M, A->distribution_mode);
int *B_index = B->index;
int *B_nnz = B->nnz;
REAL_T *B_value = B->value;
// loop over subset of rows of A
for (int i = irow; i < irow + B_N; i++)
{
for (int jp = 0; jp < A_nnz[i]; jp++)
{
int j = A_index[ROWMAJOR(i, jp, A_N, A_M)];
if (j >= icol && j < icol + B_N)
{
int iB = i - irow;
B_index[ROWMAJOR(i - irow, B_nnz[iB], B_N, B_M)] = j - icol;
B_value[ROWMAJOR(i - irow, B_nnz[iB], B_N, B_M)] =
A_value[ROWMAJOR(i, jp, A_N, A_M)];
B_nnz[iB]++;
}
}
}
return B;
}
/** Assign a block B into matrix A
*
* \param A Matrix A
* \param B Matrix B
* \param irow First row where to insert block B
* \param icol Offset column to insert block B
*/
void TYPED_FUNC(
bml_assign_submatrix_ellsort) (
bml_matrix_ellsort_t * A,
bml_matrix_ellsort_t * B,
int irow,
int icol)
{
int A_N = A->N;
int A_M = A->M;
int *A_index = A->index;
int *A_nnz = A->nnz;
REAL_T *A_value = A->value;
int B_N = B->N;
int B_M = B->M;
int *B_index = B->index;
int *B_nnz = B->nnz;
REAL_T *B_value = B->value;
// loop over rows of B
for (int i = 0; i < B_N; i++)
{
for (int jp = 0; jp < B_nnz[i]; jp++)
{
int jB = B_index[ROWMAJOR(i, jp, B_N, B_M)];
int jpA = A_nnz[i + irow];
A_value[ROWMAJOR(i + irow, jpA, A_N, A_M)] =
B_value[ROWMAJOR(i, jp, B_N, B_M)];
A_index[ROWMAJOR(i + irow, jpA, A_N, A_M)] = jB + icol;
A_nnz[i + irow]++;
}
}
}
|
ZQ_FaceDatabaseMaker.h
|
#ifndef _ZQ_FACE_DATABASE_MAKER_H_
#define _ZQ_FACE_DATABASE_MAKER_H_
#pragma once
#include <sstream>
#include <direct.h>
#include <windows.h>
#include <io.h>
#include <omp.h>
#include <opencv2\opencv.hpp>
#include "ZQ_FaceDetector.h"
#include "ZQ_FaceRecognizer.h"
#include "ZQ_FaceDatabase.h"
#include "ZQ_FaceDatabaseCompact.h"
#include "ZQ_FaceRecognizerSphereFace.h"
#include "ZQ_MergeSort.h"
namespace ZQ
{
class ZQ_FaceDatabaseMaker
{
public:
enum ErrorCode
{
ERR_WARNING = 0,
ERR_FATAL = 1
};
enum MakeDatabaseType
{
ONLY_MERGE_FEATS = 0,
UPDATE_WHO_NOT_HAVE_FEATS = 1,
FORCE_UPDATE_ALL = 2
};
public:
static bool MakeDatabase(std::vector<ZQ_FaceDetector*>& detectors, std::vector<ZQ_FaceRecognizer*> recognizers,
const std::string& database_root, const std::string& database_featsfile, const std::string& database_namesfile,
MakeDatabaseType type = ONLY_MERGE_FEATS, bool show_face = false, int max_thread_num = 1)
{
for (int i = 0; i < detectors.size(); i++)
if (detectors[i] == 0)
return false;
for (int i = 0; i < recognizers.size(); i++)
if (recognizers[i] == 0)
return false;
return _make_database(detectors, recognizers, database_root, database_featsfile, database_namesfile, type, show_face,
max_thread_num, false);
}
static bool MakeDatabaseAlreadyCropped(std::vector<ZQ_FaceRecognizer*> recognizers,
const std::string& database_root, const std::string& database_featsfile, const std::string& database_namesfile,
MakeDatabaseType type = ONLY_MERGE_FEATS, bool show_face = false, int max_thread_num = 1)
{
for (int i = 0; i < recognizers.size(); i++)
if (recognizers[i] == 0)
return false;
return _make_database_already_cropped(recognizers, database_root, database_featsfile, database_namesfile, type, show_face,
max_thread_num, false);
}
static bool MakeDatabaseCompact(std::vector<ZQ_FaceDetector*>& detectors, std::vector<ZQ_FaceRecognizer*> recognizers,
const std::string& database_root, const std::string& database_featsfile, const std::string& database_namesfile,
MakeDatabaseType type = ONLY_MERGE_FEATS, bool show_face = false, int max_thread_num = 1)
{
for (int i = 0; i < detectors.size(); i++)
if (detectors[i] == 0)
return false;
for (int i = 0; i < recognizers.size(); i++)
if (recognizers[i] == 0)
return false;
return _make_database(detectors, recognizers, database_root, database_featsfile, database_namesfile, type, show_face,
max_thread_num, true);
}
static bool MakeDatabaseCompactAlreadyCropped(std::vector<ZQ_FaceRecognizer*> recognizers,
const std::string& database_root, const std::string& database_featsfile, const std::string& database_namesfile,
MakeDatabaseType type = ONLY_MERGE_FEATS, bool show_face = false, int max_thread_num = 1)
{
for (int i = 0; i < recognizers.size(); i++)
if (recognizers[i] == 0)
return false;
return _make_database_already_cropped(recognizers, database_root, database_featsfile, database_namesfile, type, show_face,
max_thread_num, true);
}
static bool CropImagesForDatabase(const std::vector<ZQ_FaceDetector*>& detectors, const std::vector<ZQ_FaceRecognizer*>& recognizers,
const std::string& src_root, const std::string& dst_root, int max_thread_num = 4, bool strict_check = true,
std::string err_logfile = "err_log.txt", bool only_for_high_quality = false)
{
return _crop_images_for_database(detectors, recognizers, src_root, dst_root, max_thread_num, strict_check, err_logfile, only_for_high_quality);
}
/* the input images must already be cropped */
static bool DetectOutliersInDatabase(const std::vector<ZQ_FaceRecognizer*>& recognizers, const std::string& src_root, int max_thread_num = 4,
const std::string out_file = "outlier_score.txt")
{
return _detect_outliers_in_database(recognizers, src_root, max_thread_num, out_file);
}
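/* Illustrative usage (a sketch, not part of this header): create one detector
and one recognizer per worker thread, then for example

std::vector<ZQ_FaceDetector*> detectors; // e.g. 4 detector instances
std::vector<ZQ_FaceRecognizer*> recognizers; // e.g. 4 recognizer instances
ZQ_FaceDatabaseMaker::MakeDatabase(detectors, recognizers,
"database_root", "feats.bin", "names.txt",
ZQ_FaceDatabaseMaker::UPDATE_WHO_NOT_HAVE_FEATS, false, 4);

"database_root", "feats.bin" and "names.txt" are placeholder paths.
*/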
private:
static bool _make_database(std::vector<ZQ_FaceDetector*>& detectors, std::vector<ZQ_FaceRecognizer*> recognizers,
const std::string& database_root, const std::string& database_featsfile, const std::string& database_namesfile,
MakeDatabaseType type = ONLY_MERGE_FEATS, bool show_face = false, int max_thread_num = 1, bool compact = false)
{
if (type != ONLY_MERGE_FEATS && type != UPDATE_WHO_NOT_HAVE_FEATS && type != FORCE_UPDATE_ALL)
{
printf("type must be : ONLY_MERGE_FEATS(%d), UPDATE_WHO_NOT_HAVE_FEATS(%d), FORCE_UPDATE_ALL(%d)\n",
ONLY_MERGE_FEATS, UPDATE_WHO_NOT_HAVE_FEATS, FORCE_UPDATE_ALL);
return false;
}
int num_detectors = detectors.size();
int num_recognizers = recognizers.size();
if (num_detectors == 0 || num_recognizers == 0)
{
printf("You should use at least one detector and one recognizer\n");
return false;
}
ZQ_FaceDatabase database;
std::vector<ErrorCode> ErrorCodes;
std::vector<std::string> error_messages;
std::string err_logfile = "err_log.txt";
std::ostringstream oss;
std::vector<std::string> person_names;
std::vector<std::vector<std::string> > filenames;
std::vector<std::vector<ZQ_CNN_BBox> > boxes;
std::vector<std::vector<bool> > fail_flag;
_auto_detect_database(database_root, person_names, filenames);
int num_cores = omp_get_num_procs() - 1;
int real_thread_num = __min(max_thread_num, __min(num_cores, __min(num_detectors, num_recognizers)));
printf("real_thread_num = %d\n", real_thread_num);
int feat_dim = 0;
feat_dim = recognizers[0]->GetFeatDim();
/****************************************/
int person_num = person_names.size();
database.persons.resize(person_num);
database.names = person_names;
double start_time = omp_get_wtime();
printf("begin\n");
std::vector<std::pair<int, int> > pairs;
for (int i = 0; i < person_num; i++)
{
for (int j = 0; j < filenames[i].size(); j++)
pairs.push_back(std::make_pair(i, j));
}
#pragma omp parallel for schedule(dynamic, 100) num_threads(real_thread_num)
for (int p = 0; p < pairs.size(); p++)
{
int i = pairs[p].first;
int j = pairs[p].second;
int id = omp_get_thread_num();
std::ostringstream oss;
ZQ_FaceFeature feat;
cv::Mat crop;
ErrorCode err_code;
std::string err_msg;
bool has_feat = false;
bool need_detect = false;
if (type == ONLY_MERGE_FEATS)
{
if (!_load_feature_from_file(filenames[i][j], feat))
{
need_detect = false;
has_feat = false;
}
else
has_feat = true;
}
else if (type == UPDATE_WHO_NOT_HAVE_FEATS)
{
if (!_load_feature_from_file(filenames[i][j], feat))
{
need_detect = true;
has_feat = false;
}
else
has_feat = true;
}
else if (type == FORCE_UPDATE_ALL)
need_detect = true;
bool need_write = false;
bool ret = true;
if (need_detect)
{
if (!_extract_feature_from_img(*detectors[id], *recognizers[id], filenames[i][j], feat, crop, err_code, err_msg, false))
{
#pragma omp critical
{
ErrorCodes.push_back(err_code);
error_messages.push_back(err_msg);
}
continue;
}
need_write = true;
has_feat = true;
if (show_face)
{
if (id == 0)
{
cv::namedWindow("crop");
cv::imshow("crop", crop);
cv::waitKey(5);
}
}
}
#pragma omp critical
{
if (has_feat)
{
database.persons[i].features.push_back(feat);
database.persons[i].filenames.push_back(filenames[i][j]);
}
}
if (need_write)
_write_feature_to_file(filenames[i][j], feat);
}
double end_time = omp_get_wtime();
printf("detect_and_extract total_cost:%.3f s\n", (end_time - start_time));
/*******************/
for (int i = person_num - 1; i >= 0; i--)
{
if (database.persons[i].features.size() == 0)
{
printf("person [%d]: %s has no data\n", i, person_names[i].c_str());
oss.str("");
oss << "person [" << i << "]: " << person_names[i] << " has no data";
ErrorCodes.push_back(ERR_WARNING);
error_messages.push_back(oss.str());
database.persons.erase(database.persons.begin() + i);
person_names.erase(person_names.begin() + i);
}
}
database.names = person_names;
if (compact)
{
if (!database.SaveToFileBinaryCompact(database_featsfile, database_namesfile))
{
printf("failed to save database\n");
return false;
}
}
else
{
if (!database.SaveToFileBinary(database_featsfile, database_namesfile))
{
printf("failed to save database\n");
return false;
}
}
printf("all done\n");
_write_error_messages(err_logfile, ErrorCodes, error_messages);
return true;
}
static bool _make_database_already_cropped(std::vector<ZQ_FaceRecognizer*> recognizers,
const std::string& database_root, const std::string& database_featsfile, const std::string& database_namesfile,
MakeDatabaseType type = ONLY_MERGE_FEATS, bool show_face = false, int max_thread_num = 1, bool compact = false)
{
if (type != ONLY_MERGE_FEATS && type != UPDATE_WHO_NOT_HAVE_FEATS && type != FORCE_UPDATE_ALL)
{
printf("type must be : ONLY_MERGE_FEATS(%d), UPDATE_WHO_NOT_HAVE_FEATS(%d), FORCE_UPDATE_ALL(%d)\n",
ONLY_MERGE_FEATS, UPDATE_WHO_NOT_HAVE_FEATS, FORCE_UPDATE_ALL);
return false;
}
int num_recognizers = recognizers.size();
if (num_recognizers == 0)
{
printf("You should use at least one recognizer\n");
return false;
}
ZQ_FaceDatabase database;
std::vector<ErrorCode> ErrorCodes;
std::vector<std::string> error_messages;
std::string err_logfile = "err_log.txt";
std::ostringstream oss;
std::vector<std::string> person_names;
std::vector<std::vector<std::string> > filenames;
std::vector<std::vector<ZQ_CNN_BBox> > boxes;
std::vector<std::vector<bool> > fail_flag;
_auto_detect_database(database_root, person_names, filenames);
int num_cores = omp_get_num_procs() - 1;
int real_thread_num = __min(max_thread_num, __min(num_cores, num_recognizers));
printf("real_thread_num = %d\n", real_thread_num);
int feat_dim = 0;
feat_dim = recognizers[0]->GetFeatDim();
/****************************************/
int person_num = person_names.size();
database.persons.resize(person_num);
database.names = person_names;
double start_time = omp_get_wtime();
printf("begin\n");
std::vector<std::pair<int, int> > pairs;
for (int i = 0; i < person_num; i++)
{
for (int j = 0; j < filenames[i].size(); j++)
pairs.push_back(std::make_pair(i, j));
}
#pragma omp parallel for schedule(dynamic, 100) num_threads(real_thread_num)
for (int p = 0; p < pairs.size(); p++)
{
int i = pairs[p].first;
int j = pairs[p].second;
int id = omp_get_thread_num();
std::ostringstream oss;
ZQ_FaceFeature feat;
cv::Mat crop;
ErrorCode err_code;
std::string err_msg;
bool has_feat = false;
bool need_detect = false;
if (type == ONLY_MERGE_FEATS)
{
if (!_load_feature_from_file(filenames[i][j], feat))
{
need_detect = false;
has_feat = false;
}
else
has_feat = true;
}
else if (type == UPDATE_WHO_NOT_HAVE_FEATS)
{
if (!_load_feature_from_file(filenames[i][j], feat))
{
need_detect = true;
has_feat = false;
}
else
has_feat = true;
}
else if (type == FORCE_UPDATE_ALL)
need_detect = true;
bool need_write = false;
bool ret = true;
if (need_detect)
{
if (!_extract_feature_from_cropped_image(*recognizers[id], filenames[i][j], feat, crop, err_code, err_msg))
{
#pragma omp critical
{
ErrorCodes.push_back(err_code);
error_messages.push_back(err_msg);
}
continue;
}
need_write = true;
has_feat = true;
if (show_face)
{
if (id == 0)
{
cv::namedWindow("crop");
cv::imshow("crop", crop);
cv::waitKey(5);
}
}
}
#pragma omp critical
{
if (has_feat)
{
database.persons[i].features.push_back(feat);
database.persons[i].filenames.push_back(filenames[i][j]);
}
}
if (need_write)
_write_feature_to_file(filenames[i][j], feat);
}
double end_time = omp_get_wtime();
printf("detect_and_extract total_cost:%.3f s\n", (end_time - start_time));
/*******************/
for (int i = person_num - 1; i >= 0; i--)
{
if (database.persons[i].features.size() == 0)
{
printf("person [%d]: %s has no data\n", i, person_names[i].c_str());
oss.str("");
oss << "person [" << i << "]: " << person_names[i] << " has no data";
ErrorCodes.push_back(ERR_WARNING);
error_messages.push_back(oss.str());
database.persons.erase(database.persons.begin() + i);
person_names.erase(person_names.begin() + i);
}
}
database.names = person_names;
if (compact)
{
if (!database.SaveToFileBinaryCompact(database_featsfile, database_namesfile))
{
printf("failed to save database\n");
return false;
}
}
else
{
if (!database.SaveToFileBinary(database_featsfile, database_namesfile))
{
printf("failed to save database\n");
return false;
}
}
printf("all done\n");
_write_error_messages(err_logfile, ErrorCodes, error_messages);
return true;
}
static bool _crop_images_for_database(const std::vector<ZQ_FaceDetector*>& detectors, const std::vector<ZQ_FaceRecognizer*>& recognizers,
const std::string& src_root, const std::string& dst_root, int max_thread_num = 4, bool strict_check = true, std::string err_logfile = "err_log.txt",
bool only_for_high_quality = false)
{
int num_detector = detectors.size();
int num_recognizer = recognizers.size();
if (num_detector == 0 || num_recognizer == 0)
return false;
std::vector<ErrorCode> ErrorCodes;
std::vector<std::string> error_messages;
int num_cores = omp_get_num_procs();
int real_thread_num = __max(1, __min(num_cores - 1, max_thread_num));
real_thread_num = __min(real_thread_num, __min(detectors.size(), recognizers.size()));
std::vector<std::string> person_names;
std::vector<std::vector<std::string> > filenames;
_auto_detect_database(src_root, person_names, filenames);
int person_num = person_names.size();
_mkdir(dst_root.c_str());
for (int i = 0; i < person_num; i++)
{
std::string path = dst_root + "\\" + person_names[i];
_mkdir(path.c_str());
}
clock_t start_time = clock();
//double start_time = omp_get_wtime();
printf("begin\n");
std::vector<std::pair<int, int> > pairs;
for (int i = 0; i < person_num; i++)
{
for (int j = 0; j < filenames[i].size(); j++)
pairs.push_back(std::make_pair(i, j));
}
clock_t start = clock();
if (real_thread_num == 1)
{
for (int p = 0; p < pairs.size(); p++)
{
int i = pairs[p].first;
int j = pairs[p].second;
int id = omp_get_thread_num();
int crop_width = recognizers[id]->GetCropWidth();
int crop_height = recognizers[id]->GetCropHeight();
std::ostringstream oss;
cv::Mat crop(crop_height, crop_width, CV_MAKETYPE(CV_8U, 3));
ErrorCode err_code;
std::string err_msg;
bool ret = true;
cv::Mat image = cv::imread(filenames[i][j]);
if (image.empty())
{
printf("failed to read image: %s\n", filenames[i][j].c_str());
oss.str("");
oss << "failed to read image: " << filenames[i][j];
err_code = ERR_WARNING;
err_msg = oss.str();
ret = false;
}
if (!ret)
{
#pragma omp critical
{
ErrorCodes.push_back(err_code);
error_messages.push_back(err_msg);
}
continue;
}
ZQ_CNN_BBox box;
ret = _get_face5point_from_img(*detectors[id], filenames[i][j], image, box, err_code, err_msg, strict_check, only_for_high_quality);
if (!ret)
{
#pragma omp critical
{
ErrorCodes.push_back(err_code);
error_messages.push_back(err_msg);
}
continue;
}
float facial5point[10] =
{
box.ppoint[0],box.ppoint[5],
box.ppoint[1],box.ppoint[6],
box.ppoint[2],box.ppoint[7],
box.ppoint[3],box.ppoint[8],
box.ppoint[4],box.ppoint[9]
};
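// Note: facial5point packs the landmarks into (x, y) pairs but is not used below;
// CropImage is passed the detector's box.ppoint arrays directly.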
ret = recognizers[id]->CropImage(image.data, image.cols, image.rows, image.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, box.ppoint, box.ppoint + 5, crop.data, crop.step[0]);
if (!ret)
{
#pragma omp critical
{
ErrorCodes.push_back(err_code);
error_messages.push_back(err_msg);
}
continue;
}
size_t pos = filenames[i][j].find_last_of('\\');
if (pos != std::string::npos)
{
std::string real_name(filenames[i][j].c_str() + pos + 1);
std::string fullname = dst_root + "\\" + person_names[i] + "\\" + real_name;
cv::imwrite(fullname, crop);
#pragma omp critical
{
//std::cout << fullname << "\n";
}
}
}
}
else
{
#pragma omp parallel for schedule(dynamic, 10) num_threads(real_thread_num)
for (int p = 0; p < pairs.size(); p++)
{
int i = pairs[p].first;
int j = pairs[p].second;
int id = omp_get_thread_num();
int crop_width = recognizers[id]->GetCropWidth();
int crop_height = recognizers[id]->GetCropHeight();
std::ostringstream oss;
cv::Mat crop(crop_height, crop_width, CV_MAKETYPE(8, 3));
ErrorCode err_code;
std::string err_msg;
bool ret = true;
cv::Mat image = cv::imread(filenames[i][j]);
if (image.empty())
{
printf("failed to read image: %s\n", filenames[i][j].c_str());
oss.str("");
oss << "failed to read image: " << filenames[i][j];
err_code = ERR_WARNING;
err_msg = oss.str();
ret = false;
}
if (!ret)
{
#pragma omp critical
{
ErrorCodes.push_back(err_code);
error_messages.push_back(err_msg);
}
continue;
}
ZQ_CNN_BBox box;
ret = _get_face5point_from_img(*detectors[id], filenames[i][j], image, box, err_code, err_msg, strict_check, only_for_high_quality);
if (!ret)
{
#pragma omp critical
{
ErrorCodes.push_back(err_code);
error_messages.push_back(err_msg);
}
continue;
}
float facial5point[10] =
{
box.ppoint[0],box.ppoint[5],
box.ppoint[1],box.ppoint[6],
box.ppoint[2],box.ppoint[7],
box.ppoint[3],box.ppoint[8],
box.ppoint[4],box.ppoint[9]
};
ret = recognizers[id]->CropImage(image.data, image.cols, image.rows, image.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, box.ppoint, box.ppoint + 5, crop.data, crop.step[0]);
if (!ret)
{
#pragma omp critical
{
ErrorCodes.push_back(err_code);
error_messages.push_back(err_msg);
}
continue;
}
size_t pos = filenames[i][j].find_last_of('\\');
if (pos != std::string::npos)
{
std::string real_name(filenames[i][j].c_str() + pos + 1);
std::string fullname = dst_root + "\\" + person_names[i] + "\\" + real_name;
cv::imwrite(fullname, crop);
#pragma omp critical
{
//std::cout << fullname << "\n";
}
}
}
}
clock_t end = clock();
printf("time: %f\n", 0.001*(end - start));
_write_error_messages(err_logfile, ErrorCodes, error_messages);
return true;
}
static bool _extract_feature_from_box(ZQ_FaceRecognizer& recognizer, const std::string& imgfile, const cv::Mat& image, const ZQ_CNN_BBox& box,
ZQ_FaceFeature& feat, cv::Mat& crop, ErrorCode& err_code, std::string& err_msg)
{
int nChannels = image.channels();
ZQ_PixelFormat pixFmt = (nChannels == 1) ? ZQ_PIXEL_FMT_GRAY : ZQ_PIXEL_FMT_BGR;
int width = recognizer.GetCropWidth();
int height = recognizer.GetCropHeight();
int feat_dim = recognizer.GetFeatDim();
crop = cv::Mat(cv::Size(width, height), CV_MAKETYPE(8U, 3));
std::ostringstream oss;
if (!recognizer.CropImage(image.data, image.cols, image.rows, image.step[0], pixFmt, box.ppoint, box.ppoint + 5, crop.data, crop.step[0]))
{
printf("failed to crop face in image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to crop face in image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();
return false;
}
feat.ChangeSize(feat_dim);
if (!recognizer.ExtractFeature(crop.data, crop.step[0], pixFmt, feat.pData, true))
{
printf("failed to extract feature in image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to extract feature in image: " << imgfile.c_str();
err_code = ERR_WARNING;
err_msg = oss.str();
return false;
}
return true;
}
static bool _extract_feature_from_cropped_image(ZQ_FaceRecognizer& recognizer, const std::string& imgfile, const cv::Mat& image,
ZQ_FaceFeature& feat, ErrorCode& err_code, std::string& err_msg)
{
int nChannels = image.channels();
ZQ_PixelFormat pixFmt = (nChannels == 1) ? ZQ_PIXEL_FMT_GRAY : ZQ_PIXEL_FMT_BGR;
int width = recognizer.GetCropWidth();
int height = recognizer.GetCropHeight();
int feat_dim = recognizer.GetFeatDim();
std::ostringstream oss;
feat.ChangeSize(feat_dim);
if (image.cols != width || image.rows != height
|| !recognizer.ExtractFeature(image.data, image.step[0], pixFmt, feat.pData, true))
{
printf("failed to extract feature in image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to extract feature in image: " << imgfile.c_str();
err_code = ERR_WARNING;
err_msg = oss.str();
return false;
}
return true;
}
static bool _extract_feature_from_img(ZQ_FaceDetector& detector, ZQ_FaceRecognizer& recognizer,
const std::string& imgfile, ZQ_FaceFeature& feat, cv::Mat& crop, ErrorCode& err_code, std::string& err_msg,
bool only_for_high_quality)
{
std::ostringstream oss;
cv::Mat image = cv::imread(imgfile);
if (image.empty())
{
printf("failed to read image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to read image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();
return false;
}
double t1 = omp_get_wtime();
ZQ_CNN_BBox box;
if (!_get_face5point_from_img(detector, imgfile, image, box, err_code, err_msg, true, only_for_high_quality))
return false;
double t2 = omp_get_wtime();
if (!_extract_feature_from_box(recognizer, imgfile, image, box, feat, crop, err_code, err_msg))
return false;
double t3 = omp_get_wtime();
printf("image: %s done! findface: %.3f, extract: %.3f\n", imgfile.c_str(), t2 - t1, t3 - t2);
return true;
}
static bool _extract_feature_from_cropped_image(ZQ_FaceRecognizer& recognizer,
const std::string& imgfile, ZQ_FaceFeature& feat, cv::Mat& crop, ErrorCode& err_code, std::string& err_msg)
{
std::ostringstream oss;
cv::Mat image = cv::imread(imgfile);
if (image.empty())
{
printf("failed to read image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to read image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();
return false;
}
double t1 = omp_get_wtime();
if (!_extract_feature_from_cropped_image(recognizer, imgfile, image, feat, err_code, err_msg))
return false;
double t2 = omp_get_wtime();
printf("image: %s done! extract: %.3f\n", imgfile.c_str(), t2 - t1);
crop = image;
return true;
}
static bool _load_feature_from_file(const std::string& imgfile, ZQ_FaceFeature& feat)
{
std::string feat_file = imgfile + ".imgfeat";
FILE* in = 0;
if( 0 != fopen_s(&in, feat_file.c_str(), "rb"))
return false;
int feat_dim = 0;
fread(&feat_dim, sizeof(int), 1, in);
feat.ChangeSize(feat_dim);
fread(feat.pData, sizeof(float), feat_dim, in);
fclose(in);
return true;
}
static bool _write_feature_to_file(const std::string& imgfile, const ZQ_FaceFeature& feat)
{
std::string feat_file = imgfile + ".imgfeat";
FILE* out = 0;
if (0 != fopen_s(&out, feat_file.c_str(), "wb"))
return false;
int feat_dim = feat.length;
fwrite(&feat_dim, sizeof(int), 1, out);
fwrite(feat.pData, sizeof(float), feat_dim, out);
fclose(out);
return true;
}
static bool _write_error_messages(const std::string& file, const std::vector<ErrorCode>& ErrorCodes, const std::vector<std::string>& error_messages)
{
FILE* out = 0;
if(0 != fopen_s(&out, file.c_str(), "w"))
return false;
int num = ErrorCodes.size();
if (num != error_messages.size())
return false;
for (int i = 0; i < num; i++)
{
fprintf(out, "err_code: %d: msg: %s\n", ErrorCodes[i], error_messages[i].c_str());
}
fclose(out);
return true;
}
static bool _auto_detect_database(const std::string& root_path, std::vector<std::string>& person_names, std::vector<std::vector<std::string> >& filenames)
{
std::string dir(root_path);
dir.append("\\*.*");
_finddata_t fileDir;
intptr_t lfDir;
person_names.clear();
filenames.clear();
if ((lfDir = _findfirst(dir.c_str(), &fileDir)) == -1l)
{
//printf("No file is found\n");
}
else
{
do {
std::string str(fileDir.name);
if (fileDir.attrib & _A_SUBDIR && 0 != strcmp(str.c_str(), ".") && 0 != strcmp(str.c_str(), ".."))
person_names.push_back(str);
} while (_findnext(lfDir, &fileDir) == 0);
_findclose(lfDir);
}
int person_num = person_names.size();
filenames.resize(person_num);
for (int i = 0; i < person_num; i++)
{
dir = root_path + "\\" + person_names[i] + "\\*.jpg";
if ((lfDir = _findfirst(dir.c_str(), &fileDir)) == -1l)
{
//printf("No file is found\n");
}
else
{
do {
std::string str(fileDir.name);
filenames[i].push_back(root_path + "\\" + person_names[i] + "\\" + str);
} while (_findnext(lfDir, &fileDir) == 0);
_findclose(lfDir);
}
}
return true;
}
static bool _write_database_txt(const std::string& data_base_file, const std::vector<std::vector<std::string> >& filenames)
{
int num = filenames.size();
FILE* out = 0;
if(0 != fopen_s(&out, data_base_file.c_str(), "w"))
{
return false;
}
fprintf(out, "%d\n", num);
for (int i = 0; i < num; i++)
{
for (int j = 0; j < filenames[i].size(); j++)
{
fprintf(out, "%d\n%s\n", i, filenames[i][j].c_str());
}
}
fprintf(out, "\n");
fclose(out);
return true;
}
static bool _get_face5point_from_img(ZQ_FaceDetector& detector, const std::string& imgfile, const cv::Mat& image, ZQ_CNN_BBox& box,
ErrorCode& err_code, std::string& err_msg, bool strict_check = true, bool only_for_high_quality = false)
{
std::ostringstream oss;
bool use_cuda = false;
std::vector<ZQ_CNN_BBox> bbox;
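// The detector is retried with progressively smaller minimum face sizes (60, 40, 30, 20, 12 pixels)
// and larger scale factors; the extra passes are skipped when only_for_high_quality is set.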
//first try
bool has_found = false;
ZQ_PixelFormat pixFmt = image.channels() == 1 ? ZQ_PIXEL_FMT_GRAY : ZQ_PIXEL_FMT_BGR;
if (!detector.FindFace(image.data, image.cols, image.rows, image.step[0], pixFmt, 60, 0.709, bbox) || bbox.size() == 0)
{
/*printf("failed to find face in image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to find face in image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();*/
has_found = false;
}
else
has_found = true;
if (!only_for_high_quality)
{
//second try
if (!has_found)
{
printf("second try\n");
if (!detector.FindFace(image.data, image.cols, image.rows, image.step[0], pixFmt, 40, 0.709, bbox) || bbox.size() == 0)
{
/*printf("failed to find face in image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to find face in image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();*/
has_found = false;
}
else
has_found = true;
}
//third try
if (!has_found)
{
printf("third try\n");
if (!detector.FindFace(image.data, image.cols, image.rows, image.step[0], pixFmt, 30, 0.8, bbox) || bbox.size() == 0)
{
/*printf("failed to find face in image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to find face in image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();*/
has_found = false;
}
else
has_found = true;
}
//fourth try
if (!has_found)
{
printf("fourth try\n");
if (!detector.FindFace(image.data, image.cols, image.rows, image.step[0], pixFmt, 20, 0.85, bbox) || bbox.size() == 0)
{
/*printf("failed to find face in image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to find face in image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();*/
has_found = false;
}
else
has_found = true;
}
//fifth try
if (!has_found)
{
printf("fifth try\n");
if (!detector.FindFace(image.data, image.cols, image.rows, image.step[0], pixFmt, 12, 0.9, bbox) || bbox.size() == 0)
{
/*printf("failed to find face in image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to find face in image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();*/
has_found = false;
}
else
has_found = true;
}
}
if (!has_found)
{
printf("failed to find face in image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to find face in image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();
return false;
}
if (bbox.size() > 1)
{
if (strict_check)
{
printf("find more than one face in image: %s\n", imgfile.c_str());
oss.str("");
oss << "find more than one face in image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();
return false;
}
//pick the face closest to the center
float center[2] = { image.cols*0.5f, image.rows*0.5f };
std::vector<float> distance(bbox.size());
for (int i = 0; i < bbox.size(); i++)
{
float cx = 0.5*(bbox[i].col1 + bbox[i].col2);
float cy = 0.5*(bbox[i].row1 + bbox[i].row2);
distance[i] = (center[0] - cx)*(center[0] - cx) + (center[1] - cy)*(center[1] - cy);
}
float min_dis = distance[0];
int min_id = 0;
for (int i = 1; i < bbox.size(); i++)
{
if (min_dis > distance[i])
{
min_dis = distance[i];
min_id = i;
}
}
box = bbox[min_id];
return true;
}
else
box = bbox[0];
return true;
}
static bool _detect_outliers_in_database(const std::vector<ZQ_FaceRecognizer*>& recognizers, const std::string& src_root, int max_thread_num,
const std::string& out_file)
{
int num_recognizer = recognizers.size();
if (num_recognizer == 0)
return false;
std::vector<ErrorCode> ErrorCodes;
std::vector<std::string> error_messages;
int num_cores = omp_get_num_procs();
int real_thread_num = __max(1, __min(num_cores - 1, max_thread_num));
real_thread_num = __min(real_thread_num, recognizers.size());
std::vector<std::string> person_names;
std::vector<float> person_min_scores;
std::vector<int> person_min_scores_i;
std::vector<int> person_min_scores_j;
std::vector<std::vector<std::string> > filenames;
_auto_detect_database(src_root, person_names, filenames);
int person_num = person_names.size();
if (person_num == 0)
{
printf("no person in %s\n", src_root.c_str());
return false;
}
person_min_scores.resize(person_num);
person_min_scores_i.resize(person_num);
person_min_scores_j.resize(person_num);
if (real_thread_num == 1)
{
for (int i = 0; i < person_num; i++)
{
float out_min_score;
int out_i, out_j;
if (!_detect_outlier_for_one_person(*(recognizers[0]), filenames[i], out_min_score, out_i, out_j))
{
printf("failed to detect outliter for %s\n", person_names[i].c_str());
return false;
}
if (filenames[i].size() == 0)
out_min_score = 100;
else if (filenames[i].size() == 1)
out_min_score = 10;
person_min_scores[i] = out_min_score;
person_min_scores_i[i] = out_i;
person_min_scores_j[i] = out_j;
//if ((i + 1) % 100 == 0)
{
printf("%d/%d handled\n", i + 1, person_num);
}
}
}
else
{
int handled[1] = { 0 };
#pragma omp parallel for num_threads(real_thread_num)
for (int i = 0; i < person_num; i++)
{
int thread_id = omp_get_thread_num();
float out_min_score;
int out_i, out_j;
if (!_detect_outlier_for_one_person(*(recognizers[thread_id]), filenames[i], out_min_score, out_i, out_j))
{
printf("failed to detect outliter for %s\n", person_names[i].c_str());
out_min_score = -1000;
}
if (filenames[i].size() == 0)
out_min_score = 100;
else if (filenames[i].size() == 1)
out_min_score = 10;
person_min_scores[i] = out_min_score;
person_min_scores_i[i] = out_i;
person_min_scores_j[i] = out_j;
#pragma omp critical
{
(*handled)++;
//if (handled % 100 == 0)
{
printf("%d/%d handled\n", *handled,person_num);
}
}
}
}
std::vector<int> sort_indices(person_num);
for (int i = 0; i < person_num; i++)
sort_indices[i] = i;
ZQ_MergeSort::MergeSort(&person_min_scores[0], &sort_indices[0], person_num, true);
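// Sort persons by their minimum pairwise similarity so the most suspicious (lowest-score) entries
// are written first; the trailing boolean is assumed here to request ascending order.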
FILE* out = 0;
if (0 != fopen_s(&out,out_file.c_str(), "w"))
{
printf("failed to create file %s\n", out_file.c_str());
return false;
}
for (int i = 0; i < person_num; i++)
{
int id = sort_indices[i];
fprintf(out, "%12.3f %s ", person_min_scores[i], person_names[id].c_str());
int img_num = filenames[id].size();
int out_i = person_min_scores_i[id];
int out_j = person_min_scores_j[id];
if (img_num > 1 && out_i >= 0 && out_i < img_num
&& out_j >= 0 && out_j < img_num)
{
fprintf(out, "%s %s\n", filenames[id][out_i].c_str(), filenames[id][out_j].c_str());
}
else
{
fprintf(out, "\n");
}
}
fclose(out);
return true;
}
static bool _detect_outlier_for_one_person(ZQ_FaceRecognizer& recognier, const std::vector<std::string>& filenames,
float& out_min_score, int& out_i, int& out_j)
{
out_i = 0;
out_j = 0;
out_min_score = 1;
int num = filenames.size();
if (num <= 1)
return true;
std::vector<ZQ_FaceFeature> feats(num);
int W = recognier.GetCropWidth();
int H = recognier.GetCropHeight();
int dim = recognier.GetFeatDim();
for (int i = 0; i < num; i++)
{
//printf("%d/%d\n", i + 1, num);
cv::Mat img = cv::imread(filenames[i]);
if (img.empty())
return false;
if (img.rows != H || img.cols != W || img.channels() != 3)
return false;
feats[i].ChangeSize(dim);
if (!recognier.ExtractFeature(img.data, img.step[0], ZQ_PIXEL_FMT_BGR, feats[i].pData, true))
return false;
}
out_min_score = FLT_MAX;
for (int i = 0; i < num - 1; i++)
{
for (int j = i + 1; j < num; j++)
{
float tmp_score = ZQ_MathBase::DotProduct(dim, feats[i].pData, feats[j].pData);
if (tmp_score <= out_min_score)
{
out_min_score = tmp_score;
out_i = i;
out_j = j;
}
}
}
return true;
}
};
}
#endif
|
libperf.c
|
/**
* Copyright (C) Mellanox Technologies Ltd. 2001-2019. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015-2016. ALL RIGHTS RESERVED.
* Copyright (C) ARM Ltd. 2017. ALL RIGHTS RESERVED.
* See file LICENSE for terms.
*/
#include <ucs/debug/log.h>
#include <ucs/arch/bitops.h>
#include <ucs/sys/module.h>
#include <string.h>
#include <malloc.h>
#include <tools/perf/lib/libperf_int.h>
#include <unistd.h>
#if _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#define ATOMIC_OP_CONFIG(_size, _op32, _op64, _op, _msg, _params, _status) \
_status = __get_atomic_flag((_size), (_op32), (_op64), (_op)); \
if (_status != UCS_OK) { \
ucs_error("%s/%s does not support atomic %s for message size %zu bytes", \
(_params)->uct.tl_name, (_params)->uct.dev_name, \
(_msg)[_op], (_size)); \
return _status; \
}
#define ATOMIC_OP_CHECK(_size, _attr, _required, _params, _msg) \
if (!ucs_test_all_flags(_attr, _required)) { \
if ((_params)->flags & UCX_PERF_TEST_FLAG_VERBOSE) { \
ucs_error("%s/%s does not support required "#_size"-bit atomic: %s", \
(_params)->uct.tl_name, (_params)->uct.dev_name, \
(_msg)[ucs_ffs64(~(_attr) & (_required))]); \
} \
return UCS_ERR_UNSUPPORTED; \
}
typedef struct {
union {
struct {
size_t dev_addr_len;
size_t iface_addr_len;
size_t ep_addr_len;
} uct;
struct {
size_t addr_len;
} ucp;
};
size_t rkey_size;
unsigned long recv_buffer;
} ucx_perf_ep_info_t;
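/* Fixed-size header exchanged between peers; in the RTE message it is followed by the
 * packed rkey and the transport/worker addresses, as assembled in the setup routines below. */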
const ucx_perf_allocator_t* ucx_perf_mem_type_allocators[UCT_MD_MEM_TYPE_LAST];
static const char *perf_iface_ops[] = {
[ucs_ilog2(UCT_IFACE_FLAG_AM_SHORT)] = "am short",
[ucs_ilog2(UCT_IFACE_FLAG_AM_BCOPY)] = "am bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_AM_ZCOPY)] = "am zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_PUT_SHORT)] = "put short",
[ucs_ilog2(UCT_IFACE_FLAG_PUT_BCOPY)] = "put bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_PUT_ZCOPY)] = "put zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_GET_SHORT)] = "get short",
[ucs_ilog2(UCT_IFACE_FLAG_GET_BCOPY)] = "get bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_GET_ZCOPY)] = "get zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE)] = "peer failure handler",
[ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_IFACE)] = "connect to iface",
[ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_EP)] = "connect to ep",
[ucs_ilog2(UCT_IFACE_FLAG_AM_DUP)] = "full reliability",
[ucs_ilog2(UCT_IFACE_FLAG_CB_SYNC)] = "sync callback",
[ucs_ilog2(UCT_IFACE_FLAG_CB_ASYNC)] = "async callback",
[ucs_ilog2(UCT_IFACE_FLAG_EVENT_SEND_COMP)] = "send completion event",
[ucs_ilog2(UCT_IFACE_FLAG_EVENT_RECV)] = "tag or active message event",
[ucs_ilog2(UCT_IFACE_FLAG_EVENT_RECV_SIG)] = "signaled message event",
[ucs_ilog2(UCT_IFACE_FLAG_PENDING)] = "pending",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_SHORT)] = "tag eager short",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_BCOPY)] = "tag eager bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_ZCOPY)] = "tag eager zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_RNDV_ZCOPY)] = "tag rndv zcopy"
};
static const char *perf_atomic_op[] = {
[UCT_ATOMIC_OP_ADD] = "add",
[UCT_ATOMIC_OP_AND] = "and",
[UCT_ATOMIC_OP_OR] = "or" ,
[UCT_ATOMIC_OP_XOR] = "xor"
};
static const char *perf_atomic_fop[] = {
[UCT_ATOMIC_OP_ADD] = "fetch-add",
[UCT_ATOMIC_OP_AND] = "fetch-and",
[UCT_ATOMIC_OP_OR] = "fetch-or",
[UCT_ATOMIC_OP_XOR] = "fetch-xor",
[UCT_ATOMIC_OP_SWAP] = "swap",
[UCT_ATOMIC_OP_CSWAP] = "cswap"
};
/*
* This Quickselect routine is based on the algorithm described in
* "Numerical recipes in C", Second Edition,
* Cambridge University Press, 1992, Section 8.5, ISBN 0-521-43108-5
* This code by Nicolas Devillard - 1998. Public domain.
*/
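/* Used below to pick the median ("typical") latency out of the timing queue. */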
static ucs_time_t __find_median_quick_select(ucs_time_t arr[], int n)
{
int low, high ;
int median;
int middle, ll, hh;
#define ELEM_SWAP(a,b) { register ucs_time_t t=(a);(a)=(b);(b)=t; }
low = 0 ; high = n-1 ; median = (low + high) / 2;
for (;;) {
if (high <= low) /* One element only */
return arr[median] ;
if (high == low + 1) { /* Two elements only */
if (arr[low] > arr[high])
ELEM_SWAP(arr[low], arr[high]) ;
return arr[median] ;
}
/* Find median of low, middle and high items; swap into position low */
middle = (low + high) / 2;
if (arr[middle] > arr[high]) ELEM_SWAP(arr[middle], arr[high]) ;
if (arr[low] > arr[high]) ELEM_SWAP(arr[low], arr[high]) ;
if (arr[middle] > arr[low]) ELEM_SWAP(arr[middle], arr[low]) ;
/* Swap low item (now in position middle) into position (low+1) */
ELEM_SWAP(arr[middle], arr[low+1]) ;
/* Nibble from each end towards middle, swapping items when stuck */
ll = low + 1;
hh = high;
for (;;) {
do ll++; while (arr[low] > arr[ll]) ;
do hh--; while (arr[hh] > arr[low]) ;
if (hh < ll)
break;
ELEM_SWAP(arr[ll], arr[hh]) ;
}
/* Swap middle item (in position low) back into correct position */
ELEM_SWAP(arr[low], arr[hh]) ;
/* Re-set active partition */
if (hh <= median)
low = ll;
if (hh >= median)
high = hh - 1;
}
}
static ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf)
{
ucx_perf_params_t *params = &perf->params;
ucs_status_t status;
unsigned flags;
size_t buffer_size;
if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && params->iov_stride) {
buffer_size = params->msg_size_cnt * params->iov_stride;
} else {
buffer_size = ucx_perf_get_message_size(params);
}
/* TODO use params->alignment */
flags = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ?
UCT_MD_MEM_FLAG_NONBLOCK : 0;
flags |= UCT_MD_MEM_ACCESS_ALL;
/* Allocate send buffer memory */
status = uct_iface_mem_alloc(perf->uct.iface,
buffer_size * params->thread_count,
flags, "perftest", &perf->uct.send_mem);
if (status != UCS_OK) {
ucs_error("Failed allocate send buffer: %s", ucs_status_string(status));
goto err;
}
ucs_assert(perf->uct.send_mem.md == perf->uct.md);
perf->send_buffer = perf->uct.send_mem.address;
/* Allocate receive buffer memory */
status = uct_iface_mem_alloc(perf->uct.iface,
buffer_size * params->thread_count,
flags, "perftest", &perf->uct.recv_mem);
if (status != UCS_OK) {
ucs_error("Failed allocate receive buffer: %s", ucs_status_string(status));
goto err_free_send;
}
ucs_assert(perf->uct.recv_mem.md == perf->uct.md);
perf->recv_buffer = perf->uct.recv_mem.address;
/* Allocate IOV datatype memory */
perf->params.msg_size_cnt = params->msg_size_cnt;
perf->uct.iov = malloc(sizeof(*perf->uct.iov) *
perf->params.msg_size_cnt *
params->thread_count);
if (NULL == perf->uct.iov) {
status = UCS_ERR_NO_MEMORY;
ucs_error("Failed allocate send IOV(%lu) buffer: %s",
perf->params.msg_size_cnt, ucs_status_string(status));
goto err_free_send;
}
perf->offset = 0;
ucs_debug("allocated memory. Send buffer %p, Recv buffer %p",
perf->send_buffer, perf->recv_buffer);
return UCS_OK;
err_free_send:
uct_iface_mem_free(&perf->uct.send_mem);
err:
return status;
}
static void uct_perf_test_free_mem(ucx_perf_context_t *perf)
{
uct_iface_mem_free(&perf->uct.send_mem);
uct_iface_mem_free(&perf->uct.recv_mem);
free(perf->uct.iov);
}
void ucx_perf_test_start_clock(ucx_perf_context_t *perf)
{
ucs_time_t start_time = ucs_get_time();
perf->start_time_acc = ucs_get_accurate_time();
perf->end_time = (perf->params.max_time == 0.0) ? UINT64_MAX :
ucs_time_from_sec(perf->params.max_time) + start_time;
perf->prev_time = start_time;
perf->prev.time = start_time;
perf->prev.time_acc = perf->start_time_acc;
perf->current.time_acc = perf->start_time_acc;
}
/* Initialize/reset all parameters that could be modified by the warm-up run */
static void ucx_perf_test_prepare_new_run(ucx_perf_context_t *perf,
ucx_perf_params_t *params)
{
unsigned i;
perf->max_iter = (perf->params.max_iter == 0) ? UINT64_MAX :
perf->params.max_iter;
perf->report_interval = ucs_time_from_sec(perf->params.report_interval);
perf->current.time = 0;
perf->current.msgs = 0;
perf->current.bytes = 0;
perf->current.iters = 0;
perf->prev.msgs = 0;
perf->prev.bytes = 0;
perf->prev.iters = 0;
perf->timing_queue_head = 0;
for (i = 0; i < TIMING_QUEUE_SIZE; ++i) {
perf->timing_queue[i] = 0;
}
ucx_perf_test_start_clock(perf);
}
static void ucx_perf_test_init(ucx_perf_context_t *perf,
ucx_perf_params_t *params)
{
perf->params = *params;
perf->offset = 0;
perf->allocator = ucx_perf_mem_type_allocators[params->mem_type];
ucx_perf_test_prepare_new_run(perf, params);
}
void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result)
{
ucs_time_t median;
double factor;
if (perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG) {
factor = 2.0;
} else {
factor = 1.0;
}
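/* In ping-pong mode each iteration covers a full round trip, so the factor halves the
 * reported latencies and doubles the bandwidth and message rates below. */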
result->iters = perf->current.iters;
result->bytes = perf->current.bytes;
result->elapsed_time = perf->current.time_acc - perf->start_time_acc;
/* Latency */
median = __find_median_quick_select(perf->timing_queue, TIMING_QUEUE_SIZE);
result->latency.typical = ucs_time_to_sec(median) / factor;
result->latency.moment_average =
(perf->current.time_acc - perf->prev.time_acc)
/ (perf->current.iters - perf->prev.iters)
/ factor;
result->latency.total_average =
(perf->current.time_acc - perf->start_time_acc)
/ perf->current.iters
/ factor;
/* Bandwidth */
result->bandwidth.typical = 0.0; // Undefined
result->bandwidth.moment_average =
(perf->current.bytes - perf->prev.bytes) /
(perf->current.time_acc - perf->prev.time_acc) * factor;
result->bandwidth.total_average =
perf->current.bytes /
(perf->current.time_acc - perf->start_time_acc) * factor;
/* Packet rate */
result->msgrate.typical = 0.0; // Undefined
result->msgrate.moment_average =
(perf->current.msgs - perf->prev.msgs) /
(perf->current.time_acc - perf->prev.time_acc) * factor;
result->msgrate.total_average =
perf->current.msgs /
(perf->current.time_acc - perf->start_time_acc) * factor;
}
static ucs_status_t ucx_perf_test_check_params(ucx_perf_params_t *params)
{
size_t it;
if (ucx_perf_get_message_size(params) < 1) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Message size too small, need to be at least 1");
}
return UCS_ERR_INVALID_PARAM;
}
if (params->max_outstanding < 1) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("max_outstanding, need to be at least 1");
}
return UCS_ERR_INVALID_PARAM;
}
/* check that each message size fits into the stride size */
if (params->iov_stride) {
for (it = 0; it < params->msg_size_cnt; ++it) {
if (params->msg_size_list[it] > params->iov_stride) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Buffer size %lu bigger than stride %lu",
params->msg_size_list[it], params->iov_stride);
}
return UCS_ERR_INVALID_PARAM;
}
}
}
return UCS_OK;
}
void uct_perf_iface_flush_b(ucx_perf_context_t *perf)
{
ucs_status_t status;
do {
status = uct_iface_flush(perf->uct.iface, 0, NULL);
uct_worker_progress(perf->uct.worker);
} while (status == UCS_INPROGRESS);
}
static inline uint64_t __get_flag(uct_perf_data_layout_t layout, uint64_t short_f,
uint64_t bcopy_f, uint64_t zcopy_f)
{
return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_f :
(layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_f :
(layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_f :
0;
}
static inline ucs_status_t __get_atomic_flag(size_t size, uint64_t *op32,
uint64_t *op64, uint64_t op)
{
if (size == sizeof(uint32_t)) {
*op32 = UCS_BIT(op);
return UCS_OK;
} else if (size == sizeof(uint64_t)) {
*op64 = UCS_BIT(op);
return UCS_OK;
}
return UCS_ERR_UNSUPPORTED;
}
static inline size_t __get_max_size(uct_perf_data_layout_t layout, size_t short_m,
size_t bcopy_m, uint64_t zcopy_m)
{
return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_m :
(layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_m :
(layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_m :
0;
}
static ucs_status_t uct_perf_test_check_capabilities(ucx_perf_params_t *params,
uct_iface_h iface)
{
uint64_t required_flags = 0;
uint64_t atomic_op32 = 0;
uint64_t atomic_op64 = 0;
uint64_t atomic_fop32 = 0;
uint64_t atomic_fop64 = 0;
uct_iface_attr_t attr;
ucs_status_t status;
size_t min_size, max_size, max_iov, message_size;
status = uct_iface_query(iface, &attr);
if (status != UCS_OK) {
return status;
}
min_size = 0;
max_iov = 1;
message_size = ucx_perf_get_message_size(params);
switch (params->command) {
case UCX_PERF_CMD_AM:
required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_AM_SHORT,
UCT_IFACE_FLAG_AM_BCOPY, UCT_IFACE_FLAG_AM_ZCOPY);
required_flags |= UCT_IFACE_FLAG_CB_SYNC;
min_size = __get_max_size(params->uct.data_layout, 0, 0,
attr.cap.am.min_zcopy);
max_size = __get_max_size(params->uct.data_layout, attr.cap.am.max_short,
attr.cap.am.max_bcopy, attr.cap.am.max_zcopy);
max_iov = attr.cap.am.max_iov;
break;
case UCX_PERF_CMD_PUT:
required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_PUT_SHORT,
UCT_IFACE_FLAG_PUT_BCOPY, UCT_IFACE_FLAG_PUT_ZCOPY);
min_size = __get_max_size(params->uct.data_layout, 0, 0,
attr.cap.put.min_zcopy);
max_size = __get_max_size(params->uct.data_layout, attr.cap.put.max_short,
attr.cap.put.max_bcopy, attr.cap.put.max_zcopy);
max_iov = attr.cap.put.max_iov;
break;
case UCX_PERF_CMD_GET:
required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_GET_SHORT,
UCT_IFACE_FLAG_GET_BCOPY, UCT_IFACE_FLAG_GET_ZCOPY);
min_size = __get_max_size(params->uct.data_layout, 0, 0,
attr.cap.get.min_zcopy);
max_size = __get_max_size(params->uct.data_layout, attr.cap.get.max_short,
attr.cap.get.max_bcopy, attr.cap.get.max_zcopy);
max_iov = attr.cap.get.max_iov;
break;
case UCX_PERF_CMD_ADD:
ATOMIC_OP_CONFIG(message_size, &atomic_op32, &atomic_op64, UCT_ATOMIC_OP_ADD,
perf_atomic_op, params, status);
max_size = 8;
break;
case UCX_PERF_CMD_FADD:
ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_ADD,
perf_atomic_fop, params, status);
max_size = 8;
break;
case UCX_PERF_CMD_SWAP:
ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_SWAP,
perf_atomic_fop, params, status);
max_size = 8;
break;
case UCX_PERF_CMD_CSWAP:
ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_CSWAP,
perf_atomic_fop, params, status);
max_size = 8;
break;
default:
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Invalid test command");
}
return UCS_ERR_INVALID_PARAM;
}
status = ucx_perf_test_check_params(params);
if (status != UCS_OK) {
return status;
}
/* check atomics first */
ATOMIC_OP_CHECK(32, attr.cap.atomic32.op_flags, atomic_op32, params, perf_atomic_op);
ATOMIC_OP_CHECK(64, attr.cap.atomic64.op_flags, atomic_op64, params, perf_atomic_op);
ATOMIC_OP_CHECK(32, attr.cap.atomic32.fop_flags, atomic_fop32, params, perf_atomic_fop);
ATOMIC_OP_CHECK(64, attr.cap.atomic64.fop_flags, atomic_fop64, params, perf_atomic_fop);
/* check iface flags */
if (!(atomic_op32 | atomic_op64 | atomic_fop32 | atomic_fop64) &&
(!ucs_test_all_flags(attr.cap.flags, required_flags) || !required_flags)) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("%s/%s does not support operation %s",
params->uct.tl_name, params->uct.dev_name,
perf_iface_ops[ucs_ffs64(~attr.cap.flags & required_flags)]);
}
return UCS_ERR_UNSUPPORTED;
}
if (message_size < min_size) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Message size (%zu) is smaller than min supported (%zu)",
message_size, min_size);
}
return UCS_ERR_UNSUPPORTED;
}
if (message_size > max_size) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Message size (%zu) is larger than max supported (%zu)",
message_size, max_size);
}
return UCS_ERR_UNSUPPORTED;
}
if (params->command == UCX_PERF_CMD_AM) {
if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_SHORT) &&
(params->am_hdr_size != sizeof(uint64_t)))
{
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Short AM header size must be 8 bytes");
}
return UCS_ERR_INVALID_PARAM;
}
if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_ZCOPY) &&
(params->am_hdr_size > attr.cap.am.max_hdr))
{
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM header size (%zu) is larger than max supported (%zu)",
params->am_hdr_size, attr.cap.am.max_hdr);
}
return UCS_ERR_UNSUPPORTED;
}
if (params->am_hdr_size > message_size) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM header size (%zu) is larger than message size (%zu)",
params->am_hdr_size, message_size);
}
return UCS_ERR_INVALID_PARAM;
}
if (params->uct.fc_window > UCT_PERF_TEST_MAX_FC_WINDOW) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM flow-control window (%d) too large (should be <= %d)",
params->uct.fc_window, UCT_PERF_TEST_MAX_FC_WINDOW);
}
return UCS_ERR_INVALID_PARAM;
}
if ((params->flags & UCX_PERF_TEST_FLAG_ONE_SIDED) &&
(params->flags & UCX_PERF_TEST_FLAG_VERBOSE))
{
ucs_warn("Running active-message test with on-sided progress");
}
}
if (UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) {
if (params->msg_size_cnt > max_iov) {
if ((params->flags & UCX_PERF_TEST_FLAG_VERBOSE) ||
!params->msg_size_cnt) {
ucs_error("Wrong number of IOV entries. Requested is %lu, "
"should be in the range 1...%lu", params->msg_size_cnt,
max_iov);
}
return UCS_ERR_UNSUPPORTED;
}
/* if msg_size_cnt == 1, the message size was already checked above */
if ((UCX_PERF_CMD_AM == params->command) && (params->msg_size_cnt > 1)) {
if (params->am_hdr_size > params->msg_size_list[0]) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM header size (%lu) larger than the first IOV "
"message size (%lu)", params->am_hdr_size,
params->msg_size_list[0]);
}
return UCS_ERR_INVALID_PARAM;
}
}
}
return UCS_OK;
}
static ucs_status_t uct_perf_test_setup_endpoints(ucx_perf_context_t *perf)
{
const size_t buffer_size = 2048;
ucx_perf_ep_info_t info, *remote_info;
unsigned group_size, i, group_index;
uct_device_addr_t *dev_addr;
uct_iface_addr_t *iface_addr;
uct_ep_addr_t *ep_addr;
uct_iface_attr_t iface_attr;
uct_md_attr_t md_attr;
uct_ep_params_t ep_params;
void *rkey_buffer;
ucs_status_t status;
struct iovec vec[5];
void *buffer;
void *req;
buffer = malloc(buffer_size);
if (buffer == NULL) {
ucs_error("Failed to allocate RTE buffer");
status = UCS_ERR_NO_MEMORY;
goto err;
}
status = uct_iface_query(perf->uct.iface, &iface_attr);
if (status != UCS_OK) {
ucs_error("Failed to uct_iface_query: %s", ucs_status_string(status));
goto err_free;
}
status = uct_md_query(perf->uct.md, &md_attr);
if (status != UCS_OK) {
ucs_error("Failed to uct_md_query: %s", ucs_status_string(status));
goto err_free;
}
if (md_attr.cap.flags & (UCT_MD_FLAG_ALLOC|UCT_MD_FLAG_REG)) {
info.rkey_size = md_attr.rkey_packed_size;
} else {
info.rkey_size = 0;
}
info.uct.dev_addr_len = iface_attr.device_addr_len;
info.uct.iface_addr_len = iface_attr.iface_addr_len;
info.uct.ep_addr_len = iface_attr.ep_addr_len;
info.recv_buffer = (uintptr_t)perf->recv_buffer;
rkey_buffer = buffer;
dev_addr = (void*)rkey_buffer + info.rkey_size;
iface_addr = (void*)dev_addr + info.uct.dev_addr_len;
ep_addr = (void*)iface_addr + info.uct.iface_addr_len;
ucs_assert_always((void*)ep_addr + info.uct.ep_addr_len <= buffer + buffer_size);
status = uct_iface_get_device_address(perf->uct.iface, dev_addr);
if (status != UCS_OK) {
ucs_error("Failed to uct_iface_get_device_address: %s",
ucs_status_string(status));
goto err_free;
}
status = uct_iface_get_address(perf->uct.iface, iface_addr);
if (status != UCS_OK) {
ucs_error("Failed to uct_iface_get_address: %s", ucs_status_string(status));
goto err_free;
}
if (info.rkey_size > 0) {
memset(rkey_buffer, 0, info.rkey_size);
status = uct_md_mkey_pack(perf->uct.md, perf->uct.recv_mem.memh, rkey_buffer);
if (status != UCS_OK) {
ucs_error("Failed to uct_rkey_pack: %s", ucs_status_string(status));
goto err_free;
}
}
group_size = rte_call(perf, group_size);
group_index = rte_call(perf, group_index);
perf->uct.peers = calloc(group_size, sizeof(*perf->uct.peers));
if (perf->uct.peers == NULL) {
status = UCS_ERR_NO_MEMORY;
goto err_free;
}
ep_params.field_mask = UCT_EP_PARAM_FIELD_IFACE;
ep_params.iface = perf->uct.iface;
if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
for (i = 0; i < group_size; ++i) {
if (i == group_index) {
continue;
}
status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep);
if (status != UCS_OK) {
ucs_error("Failed to uct_ep_create: %s", ucs_status_string(status));
goto err_destroy_eps;
}
status = uct_ep_get_address(perf->uct.peers[i].ep, ep_addr);
if (status != UCS_OK) {
ucs_error("Failed to uct_ep_get_address: %s", ucs_status_string(status));
goto err_destroy_eps;
}
}
} else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
ep_params.field_mask |= UCT_EP_PARAM_FIELD_DEV_ADDR |
UCT_EP_PARAM_FIELD_IFACE_ADDR;
}
vec[0].iov_base = &info;
vec[0].iov_len = sizeof(info);
vec[1].iov_base = buffer;
vec[1].iov_len = info.rkey_size + info.uct.dev_addr_len +
info.uct.iface_addr_len + info.uct.ep_addr_len;
rte_call(perf, post_vec, vec, 2, &req);
rte_call(perf, exchange_vec, req);
for (i = 0; i < group_size; ++i) {
if (i == group_index) {
continue;
}
rte_call(perf, recv, i, buffer, buffer_size, req);
remote_info = buffer;
rkey_buffer = remote_info + 1;
dev_addr = (void*)rkey_buffer + remote_info->rkey_size;
iface_addr = (void*)dev_addr + remote_info->uct.dev_addr_len;
ep_addr = (void*)iface_addr + remote_info->uct.iface_addr_len;
perf->uct.peers[i].remote_addr = remote_info->recv_buffer;
if (!uct_iface_is_reachable(perf->uct.iface, dev_addr,
remote_info->uct.iface_addr_len ?
iface_addr : NULL)) {
ucs_error("Destination is unreachable");
status = UCS_ERR_UNREACHABLE;
goto err_destroy_eps;
}
if (remote_info->rkey_size > 0) {
status = uct_rkey_unpack(NULL, rkey_buffer,
&perf->uct.peers[i].rkey);
if (status != UCS_OK) {
ucs_error("Failed to uct_rkey_unpack: %s", ucs_status_string(status));
goto err_destroy_eps;
}
} else {
perf->uct.peers[i].rkey.handle = NULL;
perf->uct.peers[i].rkey.type = NULL;
perf->uct.peers[i].rkey.rkey = UCT_INVALID_RKEY;
}
if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
status = uct_ep_connect_to_ep(perf->uct.peers[i].ep, dev_addr, ep_addr);
} else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
ep_params.dev_addr = dev_addr;
ep_params.iface_addr = iface_addr;
status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep);
} else {
status = UCS_ERR_UNSUPPORTED;
}
if (status != UCS_OK) {
ucs_error("Failed to connect endpoint: %s", ucs_status_string(status));
goto err_destroy_eps;
}
}
uct_perf_iface_flush_b(perf);
free(buffer);
uct_perf_barrier(perf);
return UCS_OK;
err_destroy_eps:
for (i = 0; i < group_size; ++i) {
if (perf->uct.peers[i].rkey.type != NULL) {
uct_rkey_release(NULL, &perf->uct.peers[i].rkey);
}
if (perf->uct.peers[i].ep != NULL) {
uct_ep_destroy(perf->uct.peers[i].ep);
}
}
free(perf->uct.peers);
err_free:
free(buffer);
err:
return status;
}
static void uct_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
unsigned group_size, group_index, i;
uct_perf_barrier(perf);
uct_iface_set_am_handler(perf->uct.iface, UCT_PERF_TEST_AM_ID, NULL, NULL, 0);
group_size = rte_call(perf, group_size);
group_index = rte_call(perf, group_index);
for (i = 0; i < group_size; ++i) {
if (i != group_index) {
if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) {
uct_rkey_release(NULL, &perf->uct.peers[i].rkey);
}
if (perf->uct.peers[i].ep) {
uct_ep_destroy(perf->uct.peers[i].ep);
}
}
}
free(perf->uct.peers);
}
static ucs_status_t ucp_perf_test_fill_params(ucx_perf_params_t *params,
ucp_params_t *ucp_params)
{
ucs_status_t status;
size_t message_size;
message_size = ucx_perf_get_message_size(params);
switch (params->command) {
case UCX_PERF_CMD_PUT:
case UCX_PERF_CMD_GET:
ucp_params->features |= UCP_FEATURE_RMA;
break;
case UCX_PERF_CMD_ADD:
case UCX_PERF_CMD_FADD:
case UCX_PERF_CMD_SWAP:
case UCX_PERF_CMD_CSWAP:
if (message_size == sizeof(uint32_t)) {
ucp_params->features |= UCP_FEATURE_AMO32;
} else if (message_size == sizeof(uint64_t)) {
ucp_params->features |= UCP_FEATURE_AMO64;
} else {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Atomic size should be either 32 or 64 bit");
}
return UCS_ERR_INVALID_PARAM;
}
break;
case UCX_PERF_CMD_TAG:
case UCX_PERF_CMD_TAG_SYNC:
ucp_params->features |= UCP_FEATURE_TAG;
ucp_params->field_mask |= UCP_PARAM_FIELD_REQUEST_SIZE;
ucp_params->request_size = sizeof(ucp_perf_request_t);
break;
case UCX_PERF_CMD_STREAM:
ucp_params->features |= UCP_FEATURE_STREAM;
ucp_params->field_mask |= UCP_PARAM_FIELD_REQUEST_SIZE;
ucp_params->request_size = sizeof(ucp_perf_request_t);
break;
default:
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Invalid test command");
}
return UCS_ERR_INVALID_PARAM;
}
status = ucx_perf_test_check_params(params);
if (status != UCS_OK) {
return status;
}
return UCS_OK;
}
static ucs_status_t ucp_perf_test_alloc_iov_mem(ucp_perf_datatype_t datatype,
size_t iovcnt, unsigned thread_count,
ucp_dt_iov_t **iov_p)
{
ucp_dt_iov_t *iov;
if (UCP_PERF_DATATYPE_IOV == datatype) {
iov = malloc(sizeof(*iov) * iovcnt * thread_count);
if (NULL == iov) {
ucs_error("Failed allocate IOV buffer with iovcnt=%lu", iovcnt);
return UCS_ERR_NO_MEMORY;
}
*iov_p = iov;
}
return UCS_OK;
}
static ucs_status_t
ucp_perf_test_alloc_host(ucx_perf_context_t *perf, size_t length,
void **address_p, ucp_mem_h *memh, int non_blk_flag)
{
ucp_mem_map_params_t mem_map_params;
ucp_mem_attr_t mem_attr;
ucs_status_t status;
mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS |
UCP_MEM_MAP_PARAM_FIELD_LENGTH |
UCP_MEM_MAP_PARAM_FIELD_FLAGS;
mem_map_params.address = *address_p;
mem_map_params.length = length;
mem_map_params.flags = UCP_MEM_MAP_ALLOCATE;
if (perf->params.flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) {
mem_map_params.flags |= non_blk_flag;
}
status = ucp_mem_map(perf->ucp.context, &mem_map_params, memh);
if (status != UCS_OK) {
goto err;
}
mem_attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS;
status = ucp_mem_query(*memh, &mem_attr);
if (status != UCS_OK) {
goto err;
}
*address_p = mem_attr.address;
return UCS_OK;
err:
return status;
}
static void ucp_perf_test_free_host(ucx_perf_context_t *perf, void *address,
ucp_mem_h memh)
{
ucs_status_t status;
status = ucp_mem_unmap(perf->ucp.context, memh);
if (status != UCS_OK) {
ucs_warn("ucp_mem_unmap() failed: %s", ucs_status_string(status));
}
}
static ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf)
{
ucx_perf_params_t *params = &perf->params;
ucs_status_t status;
size_t buffer_size;
if (params->iov_stride) {
buffer_size = params->msg_size_cnt * params->iov_stride;
} else {
buffer_size = ucx_perf_get_message_size(params);
}
/* Allocate send buffer memory */
perf->send_buffer = NULL;
status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count,
&perf->send_buffer, &perf->ucp.send_memh,
UCP_MEM_MAP_NONBLOCK);
if (status != UCS_OK) {
goto err;
}
/* Allocate receive buffer memory */
perf->recv_buffer = NULL;
status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count,
&perf->recv_buffer, &perf->ucp.recv_memh,
0);
if (status != UCS_OK) {
goto err_free_send_buffer;
}
/* Allocate IOV datatype memory */
perf->ucp.send_iov = NULL;
status = ucp_perf_test_alloc_iov_mem(params->ucp.send_datatype,
perf->params.msg_size_cnt,
params->thread_count,
&perf->ucp.send_iov);
if (UCS_OK != status) {
goto err_free_buffers;
}
perf->ucp.recv_iov = NULL;
status = ucp_perf_test_alloc_iov_mem(params->ucp.recv_datatype,
perf->params.msg_size_cnt,
params->thread_count,
&perf->ucp.recv_iov);
if (UCS_OK != status) {
goto err_free_send_iov_buffers;
}
return UCS_OK;
err_free_send_iov_buffers:
free(perf->ucp.send_iov);
err_free_buffers:
perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh);
err_free_send_buffer:
perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh);
err:
return UCS_ERR_NO_MEMORY;
}
static void ucp_perf_test_free_mem(ucx_perf_context_t *perf)
{
free(perf->ucp.recv_iov);
free(perf->ucp.send_iov);
perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh);
perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh);
}
static void ucp_perf_test_destroy_eps(ucx_perf_context_t* perf,
unsigned group_size)
{
ucs_status_ptr_t *reqs;
ucp_tag_recv_info_t info;
ucs_status_t status;
unsigned i;
reqs = calloc(group_size, sizeof(*reqs));
for (i = 0; i < group_size; ++i) {
if (perf->ucp.peers[i].rkey != NULL) {
ucp_rkey_destroy(perf->ucp.peers[i].rkey);
}
if (perf->ucp.peers[i].ep != NULL) {
reqs[i] = ucp_disconnect_nb(perf->ucp.peers[i].ep);
}
}
for (i = 0; i < group_size; ++i) {
if (!UCS_PTR_IS_PTR(reqs[i])) {
continue;
}
do {
ucp_worker_progress(perf->ucp.worker);
status = ucp_request_test(reqs[i], &info);
} while (status == UCS_INPROGRESS);
ucp_request_release(reqs[i]);
}
free(reqs);
free(perf->ucp.peers);
}
static ucs_status_t ucp_perf_test_exchange_status(ucx_perf_context_t *perf,
ucs_status_t status)
{
unsigned group_size = rte_call(perf, group_size);
ucs_status_t collective_status = status;
struct iovec vec;
void *req = NULL;
unsigned i;
vec.iov_base = &status;
vec.iov_len = sizeof(status);
rte_call(perf, post_vec, &vec, 1, &req);
rte_call(perf, exchange_vec, req);
for (i = 0; i < group_size; ++i) {
rte_call(perf, recv, i, &status, sizeof(status), req);
if (status != UCS_OK) {
collective_status = status;
}
}
return collective_status;
}
static ucs_status_t ucp_perf_test_setup_endpoints(ucx_perf_context_t *perf,
uint64_t features)
{
const size_t buffer_size = 2048;
ucx_perf_ep_info_t info, *remote_info;
unsigned group_size, i, group_index;
ucp_address_t *address;
size_t address_length = 0;
ucp_ep_params_t ep_params;
ucs_status_t status;
struct iovec vec[3];
void *rkey_buffer;
void *req = NULL;
void *buffer;
group_size = rte_call(perf, group_size);
group_index = rte_call(perf, group_index);
status = ucp_worker_get_address(perf->ucp.worker, &address, &address_length);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("ucp_worker_get_address() failed: %s", ucs_status_string(status));
}
goto err;
}
info.ucp.addr_len = address_length;
info.recv_buffer = (uintptr_t)perf->recv_buffer;
vec[0].iov_base = &info;
vec[0].iov_len = sizeof(info);
vec[1].iov_base = address;
vec[1].iov_len = address_length;
if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
status = ucp_rkey_pack(perf->ucp.context, perf->ucp.recv_memh,
&rkey_buffer, &info.rkey_size);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("ucp_rkey_pack() failed: %s", ucs_status_string(status));
}
ucp_worker_release_address(perf->ucp.worker, address);
goto err;
}
vec[2].iov_base = rkey_buffer;
vec[2].iov_len = info.rkey_size;
rte_call(perf, post_vec, vec, 3, &req);
ucp_rkey_buffer_release(rkey_buffer);
} else {
info.rkey_size = 0;
rte_call(perf, post_vec, vec, 2, &req);
}
ucp_worker_release_address(perf->ucp.worker, address);
rte_call(perf, exchange_vec, req);
perf->ucp.peers = calloc(group_size, sizeof(*perf->ucp.peers));
if (perf->ucp.peers == NULL) {
status = UCS_ERR_NO_MEMORY;
goto err;
}
buffer = malloc(buffer_size);
if (buffer == NULL) {
ucs_error("Failed to allocate RTE receive buffer");
status = UCS_ERR_NO_MEMORY;
goto err_destroy_eps;
}
for (i = 0; i < group_size; ++i) {
if (i == group_index) {
continue;
}
rte_call(perf, recv, i, buffer, buffer_size, req);
remote_info = buffer;
address = (void*)(remote_info + 1);
rkey_buffer = (void*)address + remote_info->ucp.addr_len;
perf->ucp.peers[i].remote_addr = remote_info->recv_buffer;
ep_params.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS;
ep_params.address = address;
status = ucp_ep_create(perf->ucp.worker, &ep_params, &perf->ucp.peers[i].ep);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("ucp_ep_create() failed: %s", ucs_status_string(status));
}
goto err_free_buffer;
}
if (remote_info->rkey_size > 0) {
status = ucp_ep_rkey_unpack(perf->ucp.peers[i].ep, rkey_buffer,
&perf->ucp.peers[i].rkey);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_fatal("ucp_rkey_unpack() failed: %s", ucs_status_string(status));
}
goto err_free_buffer;
}
} else {
perf->ucp.peers[i].rkey = NULL;
}
}
free(buffer);
status = ucp_perf_test_exchange_status(perf, UCS_OK);
if (status != UCS_OK) {
ucp_perf_test_destroy_eps(perf, group_size);
}
/* force wireup completion */
status = ucp_worker_flush(perf->ucp.worker);
if (status != UCS_OK) {
ucs_warn("ucp_worker_flush() failed: %s", ucs_status_string(status));
}
return status;
err_free_buffer:
free(buffer);
err_destroy_eps:
ucp_perf_test_destroy_eps(perf, group_size);
err:
(void)ucp_perf_test_exchange_status(perf, status);
return status;
}
static void ucp_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
unsigned group_size;
ucp_perf_barrier(perf);
group_size = rte_call(perf, group_size);
ucp_perf_test_destroy_eps(perf, group_size);
}
static void ucx_perf_set_warmup(ucx_perf_context_t* perf, ucx_perf_params_t* params)
{
perf->max_iter = ucs_min(params->warmup_iter, params->max_iter / 10);
perf->report_interval = -1;
}
static ucs_status_t uct_perf_create_md(ucx_perf_context_t *perf)
{
uct_component_h *uct_components;
uct_component_attr_t component_attr;
uct_tl_resource_desc_t *tl_resources;
unsigned md_index, num_components;
unsigned tl_index, num_tl_resources;
unsigned cmpt_index;
ucs_status_t status;
uct_md_h md;
uct_md_config_t *md_config;
status = uct_query_components(&uct_components, &num_components);
if (status != UCS_OK) {
goto out;
}
for (cmpt_index = 0; cmpt_index < num_components; ++cmpt_index) {
component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCE_COUNT;
status = uct_component_query(uct_components[cmpt_index], &component_attr);
if (status != UCS_OK) {
goto out_release_components_list;
}
component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCES;
component_attr.md_resources = alloca(sizeof(*component_attr.md_resources) *
component_attr.md_resource_count);
status = uct_component_query(uct_components[cmpt_index], &component_attr);
if (status != UCS_OK) {
goto out_release_components_list;
}
for (md_index = 0; md_index < component_attr.md_resource_count; ++md_index) {
status = uct_md_config_read(component_attr.md_resources[md_index].md_name,
NULL, NULL, &md_config);
if (status != UCS_OK) {
goto out_release_components_list;
}
status = uct_md_open(uct_components[cmpt_index],
component_attr.md_resources[md_index].md_name,
md_config, &md);
uct_config_release(md_config);
if (status != UCS_OK) {
goto out_release_components_list;
}
status = uct_md_query_tl_resources(md, &tl_resources, &num_tl_resources);
if (status != UCS_OK) {
uct_md_close(md);
goto out_release_components_list;
}
for (tl_index = 0; tl_index < num_tl_resources; ++tl_index) {
if (!strcmp(perf->params.uct.tl_name, tl_resources[tl_index].tl_name) &&
!strcmp(perf->params.uct.dev_name, tl_resources[tl_index].dev_name))
{
uct_release_tl_resource_list(tl_resources);
perf->uct.md = md;
status = UCS_OK;
goto out_release_components_list;
}
}
uct_md_close(md);
uct_release_tl_resource_list(tl_resources);
}
}
ucs_error("Cannot use transport %s on device %s", perf->params.uct.tl_name,
perf->params.uct.dev_name);
status = UCS_ERR_NO_DEVICE;
out_release_components_list:
uct_release_component_list(uct_components);
out:
return status;
}
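/* Both barrier helpers hand the RTE a progress callback so the local worker keeps
 * driving outstanding communication while waiting for the remote peers. */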
void uct_perf_barrier(ucx_perf_context_t *perf)
{
rte_call(perf, barrier, (void(*)(void*))uct_worker_progress,
(void*)perf->uct.worker);
}
void ucp_perf_barrier(ucx_perf_context_t *perf)
{
rte_call(perf, barrier, (void(*)(void*))ucp_worker_progress,
(void*)perf->ucp.worker);
}
static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf)
{
ucx_perf_params_t *params = &perf->params;
uct_iface_config_t *iface_config;
ucs_status_t status;
uct_iface_params_t iface_params = {
.field_mask = UCT_IFACE_PARAM_FIELD_OPEN_MODE |
UCT_IFACE_PARAM_FIELD_STATS_ROOT |
UCT_IFACE_PARAM_FIELD_RX_HEADROOM |
UCT_IFACE_PARAM_FIELD_CPU_MASK,
.open_mode = UCT_IFACE_OPEN_MODE_DEVICE,
.mode.device.tl_name = params->uct.tl_name,
.mode.device.dev_name = params->uct.dev_name,
.stats_root = ucs_stats_get_root(),
.rx_headroom = 0
};
UCS_CPU_ZERO(&iface_params.cpu_mask);
status = ucs_async_context_init(&perf->uct.async, params->async_mode);
if (status != UCS_OK) {
goto out;
}
status = uct_worker_create(&perf->uct.async, params->thread_mode,
&perf->uct.worker);
if (status != UCS_OK) {
goto out_cleanup_async;
}
status = uct_perf_create_md(perf);
if (status != UCS_OK) {
goto out_destroy_worker;
}
status = uct_md_iface_config_read(perf->uct.md, params->uct.tl_name, NULL,
NULL, &iface_config);
if (status != UCS_OK) {
goto out_destroy_md;
}
status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params,
iface_config, &perf->uct.iface);
uct_config_release(iface_config);
if (status != UCS_OK) {
ucs_error("Failed to open iface: %s", ucs_status_string(status));
goto out_destroy_md;
}
status = uct_perf_test_check_capabilities(params, perf->uct.iface);
/* sync status across all processes */
status = ucp_perf_test_exchange_status(perf, status);
if (status != UCS_OK) {
goto out_iface_close;
}
status = uct_perf_test_alloc_mem(perf);
if (status != UCS_OK) {
goto out_iface_close;
}
/* Enable progress before `uct_iface_flush` and `uct_worker_progress` are called,
 * to give some transports (ib/ud, tcp) a chance to finish establishing connections.
 * They may return UCS_INPROGRESS from `uct_iface_flush` while connections are
 * still in progress */
uct_iface_progress_enable(perf->uct.iface,
UCT_PROGRESS_SEND | UCT_PROGRESS_RECV);
status = uct_perf_test_setup_endpoints(perf);
if (status != UCS_OK) {
ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
goto out_free_mem;
}
return UCS_OK;
out_free_mem:
uct_perf_test_free_mem(perf);
out_iface_close:
uct_iface_close(perf->uct.iface);
out_destroy_md:
uct_md_close(perf->uct.md);
out_destroy_worker:
uct_worker_destroy(perf->uct.worker);
out_cleanup_async:
ucs_async_context_cleanup(&perf->uct.async);
out:
return status;
}
static void uct_perf_cleanup(ucx_perf_context_t *perf)
{
uct_perf_test_cleanup_endpoints(perf);
uct_perf_test_free_mem(perf);
uct_iface_close(perf->uct.iface);
uct_md_close(perf->uct.md);
uct_worker_destroy(perf->uct.worker);
ucs_async_context_cleanup(&perf->uct.async);
}
static ucs_status_t ucp_perf_setup(ucx_perf_context_t *perf)
{
ucp_params_t ucp_params;
ucp_worker_params_t worker_params;
ucp_config_t *config;
ucs_status_t status;
ucp_params.field_mask = UCP_PARAM_FIELD_FEATURES;
ucp_params.features = 0;
status = ucp_perf_test_fill_params(&perf->params, &ucp_params);
if (status != UCS_OK) {
goto err;
}
status = ucp_config_read(NULL, NULL, &config);
if (status != UCS_OK) {
goto err;
}
status = ucp_init(&ucp_params, config, &perf->ucp.context);
ucp_config_release(config);
if (status != UCS_OK) {
goto err;
}
worker_params.field_mask = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
worker_params.thread_mode = perf->params.thread_mode;
status = ucp_worker_create(perf->ucp.context, &worker_params,
&perf->ucp.worker);
if (status != UCS_OK) {
goto err_cleanup;
}
status = ucp_perf_test_alloc_mem(perf);
if (status != UCS_OK) {
ucs_warn("ucp test failed to alocate memory");
goto err_destroy_worker;
}
status = ucp_perf_test_setup_endpoints(perf, ucp_params.features);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
}
goto err_free_mem;
}
return UCS_OK;
err_free_mem:
ucp_perf_test_free_mem(perf);
err_destroy_worker:
ucp_worker_destroy(perf->ucp.worker);
err_cleanup:
ucp_cleanup(perf->ucp.context);
err:
return status;
}
static void ucp_perf_cleanup(ucx_perf_context_t *perf)
{
ucp_perf_test_cleanup_endpoints(perf);
ucp_perf_barrier(perf);
ucp_perf_test_free_mem(perf);
ucp_worker_destroy(perf->ucp.worker);
ucp_cleanup(perf->ucp.context);
}
static struct {
ucs_status_t (*setup)(ucx_perf_context_t *perf);
void (*cleanup)(ucx_perf_context_t *perf);
ucs_status_t (*run)(ucx_perf_context_t *perf);
void (*barrier)(ucx_perf_context_t *perf);
} ucx_perf_funcs[] = {
[UCX_PERF_API_UCT] = {uct_perf_setup, uct_perf_cleanup,
uct_perf_test_dispatch, uct_perf_barrier},
[UCX_PERF_API_UCP] = {ucp_perf_setup, ucp_perf_cleanup,
ucp_perf_test_dispatch, ucp_perf_barrier}
};
static int ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result);
ucs_status_t ucx_perf_run(ucx_perf_params_t *params, ucx_perf_result_t *result)
{
ucx_perf_context_t *perf;
ucs_status_t status;
ucx_perf_global_init();
if (params->command == UCX_PERF_CMD_LAST) {
ucs_error("Test is not selected");
status = UCS_ERR_INVALID_PARAM;
goto out;
}
if ((params->api != UCX_PERF_API_UCT) && (params->api != UCX_PERF_API_UCP)) {
ucs_error("Invalid test API parameter (should be UCT or UCP)");
status = UCS_ERR_INVALID_PARAM;
goto out;
}
perf = malloc(sizeof(*perf));
if (perf == NULL) {
status = UCS_ERR_NO_MEMORY;
goto out;
}
ucx_perf_test_init(perf, params);
if (perf->allocator == NULL) {
ucs_error("Unsupported memory type");
status = UCS_ERR_UNSUPPORTED;
goto out_free;
}
status = perf->allocator->init(perf);
if (status != UCS_OK) {
goto out_free;
}
status = ucx_perf_funcs[params->api].setup(perf);
if (status != UCS_OK) {
goto out_free;
}
if (UCS_THREAD_MODE_SINGLE == params->thread_mode) {
if (params->warmup_iter > 0) {
ucx_perf_set_warmup(perf, params);
status = ucx_perf_funcs[params->api].run(perf);
if (status != UCS_OK) {
goto out_cleanup;
}
ucx_perf_funcs[params->api].barrier(perf);
ucx_perf_test_prepare_new_run(perf, params);
}
/* Run test */
status = ucx_perf_funcs[params->api].run(perf);
ucx_perf_funcs[params->api].barrier(perf);
if (status == UCS_OK) {
ucx_perf_calc_result(perf, result);
rte_call(perf, report, result, perf->params.report_arg, 1);
}
} else {
status = ucx_perf_thread_spawn(perf, result);
}
out_cleanup:
ucx_perf_funcs[params->api].cleanup(perf);
out_free:
free(perf);
out:
return status;
}
#if _OPENMP
/* multiple threads sharing the same worker/iface */
typedef struct {
pthread_t pt;
int tid;
int ntid;
ucs_status_t* statuses;
ucx_perf_context_t perf;
ucx_perf_result_t result;
} ucx_perf_thread_context_t;
static void* ucx_perf_thread_run_test(void* arg)
{
ucx_perf_thread_context_t* tctx = (ucx_perf_thread_context_t*) arg;
ucx_perf_result_t* result = &tctx->result;
ucx_perf_context_t* perf = &tctx->perf;
ucx_perf_params_t* params = &perf->params;
ucs_status_t* statuses = tctx->statuses;
int tid = tctx->tid;
int i;
if (params->warmup_iter > 0) {
ucx_perf_set_warmup(perf, params);
statuses[tid] = ucx_perf_funcs[params->api].run(perf);
ucx_perf_funcs[params->api].barrier(perf);
for (i = 0; i < tctx->ntid; i++) {
if (UCS_OK != statuses[i]) {
goto out;
}
}
ucx_perf_test_prepare_new_run(perf, params);
}
/* Run test */
#pragma omp barrier
statuses[tid] = ucx_perf_funcs[params->api].run(perf);
ucx_perf_funcs[params->api].barrier(perf);
for (i = 0; i < tctx->ntid; i++) {
if (UCS_OK != statuses[i]) {
goto out;
}
}
#pragma omp master
{
/* Assuming all threads are fairly treated, reporting only tid==0
TODO: aggregate reports */
ucx_perf_calc_result(perf, result);
rte_call(perf, report, result, perf->params.report_arg, 1);
}
out:
return &statuses[tid];
}
static int ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result)
{
ucx_perf_thread_context_t* tctx;
ucs_status_t* statuses;
size_t message_size;
ucs_status_t status;
int ti, nti;
message_size = ucx_perf_get_message_size(&perf->params);
omp_set_num_threads(perf->params.thread_count);
nti = perf->params.thread_count;
tctx = calloc(nti, sizeof(ucx_perf_thread_context_t));
statuses = calloc(nti, sizeof(ucs_status_t));
if ((tctx == NULL) || (statuses == NULL)) {
status = UCS_ERR_NO_MEMORY;
goto out_free;
}
#pragma omp parallel private(ti)
{
ti = omp_get_thread_num();
tctx[ti].tid = ti;
tctx[ti].ntid = nti;
tctx[ti].statuses = statuses;
tctx[ti].perf = *perf;
/* Doctor the src and dst buffers to make them thread specific */
tctx[ti].perf.send_buffer += ti * message_size;
tctx[ti].perf.recv_buffer += ti * message_size;
tctx[ti].perf.offset = ti * message_size;
ucx_perf_thread_run_test((void*)&tctx[ti]);
}
status = UCS_OK;
for (ti = 0; ti < nti; ti++) {
if (UCS_OK != statuses[ti]) {
ucs_error("Thread %d failed to run test: %s", tctx[ti].tid,
ucs_status_string(statuses[ti]));
status = statuses[ti];
}
}
out_free:
free(statuses);
free(tctx);
return status;
}
#else
static int ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result) {
ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)");
return UCS_ERR_INVALID_PARAM;
}
#endif /* _OPENMP */
void ucx_perf_global_init()
{
static ucx_perf_allocator_t host_allocator = {
.init = ucs_empty_function_return_success,
.ucp_alloc = ucp_perf_test_alloc_host,
.ucp_free = ucp_perf_test_free_host,
.memset = memset
};
UCS_MODULE_FRAMEWORK_DECLARE(ucx_perftest);
ucx_perf_mem_type_allocators[UCT_MD_MEM_TYPE_HOST] = &host_allocator;
    /* FIXME Memtype allocator modules must be loaded into the global scope, otherwise
     * alloc hooks, which use dlsym() to get a pointer to the original function,
     * do not work. Need to use bistro for memtype hooks to fix this.
     */
UCS_MODULE_FRAMEWORK_LOAD(ucx_perftest, UCS_MODULE_LOAD_FLAG_GLOBAL);
}
|
dyn_sssp.h
|
#ifndef DYN_SSSP_H_
#define DYN_SSSP_H_
#include <limits>
#include <vector>
#include <algorithm>
#include "traversal.h"
#include "sliding_queue_dynamic.h"
#include "../common/timer.h"
#include "../common/pvector.h"
#include <fstream>
extern std::ofstream algF;
/* Algorithm: Incremental SSSP and SSSP from scratch */
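/* Overview (descriptive note): SSSPIter0 scans only the vertices marked as
 * affected by the latest batch of edge updates, re-relaxes their incoming
 * edges, and pushes the out-neighbors of any vertex whose distance improved
 * onto the sliding queue. dynSSSPAlg then keeps draining that queue until no
 * distance changes, so unaffected parts of the graph are never revisited. */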
template<typename T>
void SSSPIter0(T* ds, SlidingQueue<NodeID>& queue){
pvector<bool> visited(ds->num_nodes, false);
#pragma omp parallel
{
QueueBuffer<NodeID> lqueue(queue);
#pragma omp for schedule(dynamic, 64)
for(NodeID n=0; n < ds->num_nodes; n++){
if(ds->affected[n]){
float old_path = ds->property[n];
float new_path = kDistInf;
neighborhood<T> neigh = in_neigh(n, ds);
          // pull new distance from incoming neighbors
for(neighborhood_iter<T> it = neigh.begin(); it != neigh.end(); it++){
new_path = std::min(new_path, ds->property[*it] + it.extractWeight());
}
bool trigger = (((new_path < old_path) && (new_path != kDistInf)));
if(trigger){
ds->property[n] = new_path;
//put the out-neighbors into active list
for(auto v: out_neigh(n, ds)){
bool curr_val = visited[v];
if(!curr_val){
if(compare_and_swap(visited[v], curr_val, true))
lqueue.push_back(v);
}
}
}
}
}
lqueue.flush();
}
}
template<typename T>
void dynSSSPAlg(T* ds, NodeID source){
std::cout << "Running dynamic SSSP" << std::endl;
Timer t;
t.Start();
SlidingQueue<NodeID> queue(ds->num_nodes);
  // set all new vertices' distances to inf, otherwise reuse old values
#pragma omp parallel for schedule(dynamic, 64)
for(NodeID n = 0; n < ds->num_nodes; n++){
if(ds->property[n] == -1){
if(n == source) ds->property[n] = 0;
else ds->property[n] = kDistInf;
}
}
SSSPIter0(ds, queue);
queue.slide_window();
while(!queue.empty()){
//std::cout << "Not empty queue, Queue Size:" << queue.size() << std::endl;
pvector<bool> visited(ds->num_nodes, false);
#pragma omp parallel
{
QueueBuffer<NodeID> lqueue(queue);
#pragma omp for schedule(dynamic, 64)
for (auto q_iter = queue.begin(); q_iter < queue.end(); q_iter++){
NodeID n = *q_iter;
float old_path = ds->property[n];
float new_path = kDistInf;
neighborhood<T> neigh = in_neigh(n, ds);
        // pull new distance from incoming neighbors
for(neighborhood_iter<T> it = neigh.begin(); it != neigh.end(); it++){
new_path = std::min(new_path, ds->property[*it] + it.extractWeight());
}
// valid depth + lower than before = trigger
bool trigger = (((new_path < old_path) && (new_path != kDistInf)));
if(trigger){
ds->property[n] = new_path;
for(auto v: out_neigh(n, ds)){
bool curr_val = visited[v];
if(!curr_val){
if(compare_and_swap(visited[v], curr_val, true))
lqueue.push_back(v);
}
}
}
}
lqueue.flush();
}
queue.slide_window();
}
// clear affected array to get ready for the next update round
#pragma omp parallel for schedule(dynamic, 64)
for(NodeID i = 0; i < ds->num_nodes; i++){
ds->affected[i] = false;
}
t.Stop();
algF << t.Seconds() << std::endl;
}
template<typename T>
void SSSPStartFromScratch(T* ds, NodeID source, float delta){
std::cout <<"Running SSSP from scratch" << std::endl;
Timer t;
t.Start();
int num_edges_directed = ds->directed ? ds->num_edges : 2*ds->num_edges;
#pragma omp parallel for
for(NodeID n = 0; n < ds->num_nodes; n++)
ds->property[n] = kDistInf;
ds->property[source] = 0;
pvector<NodeID> frontier(num_edges_directed);
// two element arrays for double buffering curr=iter&1, next=(iter+1)&1
size_t shared_indexes[2] = {0, kMaxBin};
size_t frontier_tails[2] = {1, 0};
frontier[0] = source;
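  /* Delta-stepping sketch (descriptive note): each thread keeps local_bins,
   * where a vertex with tentative distance d goes into bucket floor(d/delta).
   * Buckets are processed in increasing order; shared_indexes/frontier_tails
   * double-buffer the "current" and "next" bucket index and frontier size. */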
#pragma omp parallel
{
std::vector<std::vector<NodeID> > local_bins(0);
size_t iter = 0;
while (shared_indexes[iter&1] != kMaxBin) {
size_t &curr_bin_index = shared_indexes[iter&1];
size_t &next_bin_index = shared_indexes[(iter+1)&1];
size_t &curr_frontier_tail = frontier_tails[iter&1];
size_t &next_frontier_tail = frontier_tails[(iter+1)&1];
#pragma omp for nowait schedule(dynamic, 64)
for (size_t i=0; i < curr_frontier_tail; i++) {
NodeID u = frontier[i];
if (ds->property[u] >= delta * static_cast<float>(curr_bin_index)) {
neighborhood<T> neigh = out_neigh(u, ds);
for(neighborhood_iter<T> it = neigh.begin(); it != neigh.end(); it++){
float old_dist = ds->property[*it];
float new_dist = ds->property[u] + it.extractWeight();
if (new_dist < old_dist) {
bool changed_dist = true;
while (!compare_and_swap(ds->property[*it], old_dist, new_dist)){
old_dist = ds->property[*it];
if (old_dist <= new_dist) {
changed_dist = false;
break;
}
}
if (changed_dist) {
size_t dest_bin = new_dist/delta;
if (dest_bin >= local_bins.size()) {
local_bins.resize(dest_bin+1);
}
local_bins[dest_bin].push_back(*it);
}
}
}
}
}
for (size_t i=curr_bin_index; i < local_bins.size(); i++) {
if (!local_bins[i].empty()) {
#pragma omp critical
next_bin_index = std::min(next_bin_index, i);
break;
}
}
#pragma omp barrier
#pragma omp single nowait
{
//t.Stop();
//PrintStep(curr_bin_index, t.Millisecs(), curr_frontier_tail);
//t.Start();
curr_bin_index = kMaxBin;
curr_frontier_tail = 0;
}
if (next_bin_index < local_bins.size()) {
size_t copy_start = fetch_and_add(next_frontier_tail,
local_bins[next_bin_index].size());
std::copy(local_bins[next_bin_index].begin(),
local_bins[next_bin_index].end(), frontier.data() + copy_start);
local_bins[next_bin_index].resize(0);
}
iter++;
#pragma omp barrier
}
//#pragma omp single
//std::cout << "took " << iter << " iterations" << std::endl;
}
t.Stop();
algF << t.Seconds() << std::endl;
}
#endif // DYN_SSSP_H_
|
residual_based_bdf_scheme.h
|
// | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUAL_BASED_BDF_SCHEME )
#define KRATOS_RESIDUAL_BASED_BDF_SCHEME
/* System includes */
/* External includes */
/* Project includes */
#include "includes/checks.h"
#include "utilities/time_discretization.h"
#include "solving_strategies/schemes/residual_based_implicit_time_scheme.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedBDFScheme
* @ingroup KratosCore
* @brief BDF integration scheme (for dynamic problems)
 * @details The order \f$ n \f$ Backward Differentiation Formula (BDF) method is an \f$ n \f$ step, order \f$ n \f$ accurate implicit method.
 * This scheme is designed to solve a system of the type:
 *\f[
 * \mathbf{M} \frac{d^2(u_{n0})}{dt^2} + \mathbf{D} \frac{d(u_{n0})}{dt} + \mathbf{K} u_{n0} = \mathbf{f}_{ext}
 * \f]
 *
 * If we call:
 *
 * - Second derivative:
 *     -# \f$ \ddot{u}_{ni} \f$ the second derivative at the step i
 * - First derivative:
 *     -# \f$ \dot{u}_{ni} \f$ the first derivative at the step i
 * - Variable:
 *     -# \f$ u_{ni} \f$ the variable at the step i
 *
 * Then we assume:
 * \f[ \frac{d^2(u_{n0})}{dt^2} \Big|_{t_{n0}} = \sum_i c_i \dot{u}_{ni} \f]
 * \f[ \frac{d(u_{n0})}{dt} \Big|_{t_{n0}} = \sum_i c_i u_{ni} \f]
* with for order 2 (BDF2):
* -# \f$ c_0 = \frac{1.5}{dt} \f$
* -# \f$ c_1 = \frac{-2.0}{dt} \f$
* -# \f$ c_2 = \frac{0.5}{dt} \f$
*
* The LHS and RHS can be defined as:
* \f[ RHS = \mathbf{f}_{ext} - \mathbf{M} \frac{d(\dot{u}_{n0})}{dt} - \mathbf{D} \frac{d(u_{n0})}{dt} - \mathbf{K} u_{n0} \f]
* and
 * \f[ LHS = \frac{d(-RHS)}{d(u_{n0})} = c_0^2 \mathbf{M} + c_0 \mathbf{D} + \mathbf{K} \f]
* @note This implies that elements are expected to be written in terms
* of a variable with two time derivatives
* <a href="https://mediatum.ub.tum.de/doc/1223319/80942.pdf">Main reference</a>
* @todo Create a BibTeX file https://www.stack.nl/~dimitri/doxygen/manual/commands.html#cmdcite
* @author Vicente Mataix Ferrandiz
*/
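// Illustrative note (not part of the original Kratos documentation): for a
// constant time step dt = 0.1 the BDF2 coefficients listed above become
//   c_0 = 1.5/dt = 15,  c_1 = -2.0/dt = -20,  c_2 = 0.5/dt = 5,
// so the first derivative is approximated as
//   du/dt |_{n0} ~= 15*u_{n0} - 20*u_{n1} + 5*u_{n2},
// and the dynamic LHS assembled by AddDynamicsToLHS reads K + 15*D + 225*M.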
template<class TSparseSpace, class TDenseSpace>
class ResidualBasedBDFScheme
: public ResidualBasedImplicitTimeScheme<TSparseSpace, TDenseSpace>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedBDFScheme );
typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
typedef typename BaseType::Pointer BaseTypePointer;
typedef ResidualBasedImplicitTimeScheme<TSparseSpace,TDenseSpace> ImplicitBaseType;
typedef typename ImplicitBaseType::TDataType TDataType;
typedef typename ImplicitBaseType::DofsArrayType DofsArrayType;
typedef typename Element::DofsVectorType DofsVectorType;
typedef typename ImplicitBaseType::TSystemMatrixType TSystemMatrixType;
typedef typename ImplicitBaseType::TSystemVectorType TSystemVectorType;
typedef typename ImplicitBaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename ImplicitBaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef ModelPart::NodesContainerType NodesArrayType;
/// Definition of epsilon
static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon();
///@}
///@name Life Cycle
///@{
/**
* @brief Constructor. The BDF method
* @param Order The integration order
     * @todo The ideal would be to use directly the dof or the variable itself to identify the type of variable and its derivatives
*/
explicit ResidualBasedBDFScheme(const std::size_t Order = 2)
:ImplicitBaseType(),
mOrder(Order),
mpBDFUtility(Kratos::make_unique<TimeDiscretization::BDF>(Order))
{
// Allocate auxiliary memory
const std::size_t num_threads = OpenMPUtils::GetNumThreads();
mVector.dotun0.resize(num_threads);
mVector.dot2un0.resize(num_threads);
// Doing a minimal check
KRATOS_ERROR_IF(mOrder < 1) << "ERROR:: Not possible to compute a BDF of order less than 1" << std::endl;
// We resize the BDF coefficients
if (mBDF.size() != (mOrder + 1))
mBDF.resize(mOrder + 1);
}
/** Copy Constructor.
*/
explicit ResidualBasedBDFScheme(ResidualBasedBDFScheme& rOther)
:ImplicitBaseType(rOther)
,mOrder(rOther.mOrder)
,mBDF(rOther.mBDF)
,mVector(rOther.mVector)
,mpBDFUtility(nullptr)
{
Kratos::unique_ptr<TimeDiscretization::BDF> auxiliar_pointer = Kratos::make_unique<TimeDiscretization::BDF>(mOrder);
mpBDFUtility.swap(auxiliar_pointer);
}
/**
* Clone
*/
BaseTypePointer Clone() override
{
return BaseTypePointer( new ResidualBasedBDFScheme(*this) );
}
/** Destructor.
*/
~ResidualBasedBDFScheme
() override {}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Performing the update of the solution
* @details Incremental update within newton iteration. It updates the state variables at the end of the time step
* \f[ u_{n+1}^{k+1}= u_{n+1}^{k}+ \Delta u\f]
* @param rModelPart The model of the problem to solve
* @param rDofSet Set of all primary variables
* @param rA LHS matrix
* @param rDx incremental update of primary variables
* @param rb RHS Vector
*/
void Update(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
// Update of displacement (by DOF)
mpDofUpdater->UpdateDofs(rDofSet, rDx);
UpdateDerivatives(rModelPart, rDofSet, rA, rDx, rb);
KRATOS_CATCH( "" );
}
/**
* @brief Performing the prediction of the solution
* @details It predicts the solution for the current step x = xold + vold * Dt
* @param rModelPart The model of the problem to solve
* @param rDofSet set of all primary variables
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
*/
void Predict(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
KRATOS_ERROR << "Calling base BDF class" << std::endl;
KRATOS_CATCH( "" );
}
/**
     * @brief It initializes the time step solution. Only needed if the time step solution is restarted
* @param rModelPart The model of the problem to solve
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
     * @todo I cannot find the formula for the higher orders with variable time step. I tried to deduce it myself, but the result was very unstable
*/
void InitializeSolutionStep(
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
ImplicitBaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);
mpBDFUtility->ComputeAndSaveBDFCoefficients(r_current_process_info);
mBDF = r_current_process_info[BDF_COEFFICIENTS];
KRATOS_WARNING_IF("ResidualBasedBDFScheme", mOrder > 2)
<< "For higher orders than 2 the time step is assumed to be constant.\n";
KRATOS_CATCH( "" );
}
/**
* @brief This function is designed to be called once to perform all the checks needed on the input provided.
* @details Checks can be "expensive" as the function is designed to catch user's errors.
* @param rModelPart The model of the problem to solve
* @return Zero means all ok
*/
int Check(const ModelPart& rModelPart) const override
{
KRATOS_TRY;
const int err = ImplicitBaseType::Check(rModelPart);
if(err!=0) return err;
// Check for minimum value of the buffer index
// Verify buffer size
KRATOS_ERROR_IF(rModelPart.GetBufferSize() < mOrder + 1) << "Insufficient buffer size. Buffer size should be greater than " << mOrder + 1 << ". Current size is " << rModelPart.GetBufferSize() << std::endl;
KRATOS_CATCH( "" );
return 0;
}
/// Free memory allocated by this class.
void Clear() override
{
this->mpDofUpdater->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "ResidualBasedBDFScheme";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
struct GeneralVectors
{
std::vector< Vector > dotun0; /// First derivative
std::vector< Vector > dot2un0; /// Second derivative
};
const std::size_t mOrder; /// The integration order
Vector mBDF; /// The BDF coefficients
GeneralVectors mVector; /// The structure containing the derivatives
Kratos::unique_ptr<TimeDiscretization::BDF> mpBDFUtility; /// Utility to compute BDF coefficients
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief Performing the update of the derivatives
* @param rModelPart The model of the problem to solve
* @param rDofSet Set of all primary variables
* @param rA LHS matrix
* @param rDx incremental update of primary variables
* @param rb RHS Vector
*/
inline void UpdateDerivatives(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
)
{
// Updating time derivatives (nodally for efficiency)
const int num_nodes = static_cast<int>( rModelPart.Nodes().size() );
// Getting first node iterator
const auto it_node_begin = rModelPart.Nodes().begin();
#pragma omp parallel for
for(int i = 0; i< num_nodes; ++i) {
auto it_node = it_node_begin + i;
UpdateFirstDerivative(it_node);
UpdateSecondDerivative(it_node);
}
}
/**
* @brief Updating first time derivative (velocity)
     * @param itNode the node iterator
*/
virtual inline void UpdateFirstDerivative(NodesArrayType::iterator itNode)
{
KRATOS_ERROR << "Calling base BDF class" << std::endl;
}
/**
* @brief Updating second time derivative (acceleration)
     * @param itNode the node iterator
*/
virtual inline void UpdateSecondDerivative(NodesArrayType::iterator itNode)
{
KRATOS_ERROR << "Calling base BDF class" << std::endl;
}
/**
* @brief It adds the dynamic LHS contribution of the elements
* \f[ LHS = \frac{d(-RHS)}{d(u_{n0})} = c_0^2\mathbf{M} + c_0 \mathbf{D} + \mathbf{K} \f]
* @param rLHS_Contribution The dynamic contribution for the LHS
* @param rD The damping matrix
* @param rM The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToLHS(
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
const ProcessInfo& rCurrentProcessInfo
) override
{
// Adding mass contribution to the dynamic stiffness
if (rM.size1() != 0) { // if M matrix declared
noalias(rLHS_Contribution) += rM * std::pow(mBDF[0], 2);
}
// Adding damping contribution
if (rD.size1() != 0) { // if D matrix declared
noalias(rLHS_Contribution) += rD * mBDF[0];
}
}
/**
* @brief It adds the dynamic RHS contribution of the objects
* \f[ \mathbf{b} - \mathbf{M} a - \mathbf{D} v \f]
* @param rObject The object to compute
* @param rRHS_Contribution The dynamic contribution for the RHS
* @param rD The damping matrix
* @param rM The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
template <class TObjectType>
void TemplateAddDynamicsToRHS(
TObjectType& rObject,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
const ProcessInfo& rCurrentProcessInfo
)
{
const std::size_t this_thread = OpenMPUtils::ThisThread();
// Adding inertia contribution
if (rM.size1() != 0) {
rObject.GetSecondDerivativesVector(mVector.dot2un0[this_thread], 0);
noalias(rRHS_Contribution) -= prod(rM, mVector.dot2un0[this_thread]);
}
// Adding damping contribution
if (rD.size1() != 0) {
rObject.GetFirstDerivativesVector(mVector.dotun0[this_thread], 0);
noalias(rRHS_Contribution) -= prod(rD, mVector.dotun0[this_thread]);
}
}
/**
* @brief It adds the dynamic RHS contribution of the elements
* \f[ \mathbf{b} - \mathbf{M} a - \mathbf{D} v \f]
* @param rElement The element to compute
* @param RHS_Contribution The dynamic contribution for the RHS
* @param D The damping matrix
* @param M The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToRHS(
Element& rElement,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
const ProcessInfo& rCurrentProcessInfo
) override
{
TemplateAddDynamicsToRHS<Element>(rElement, rRHS_Contribution, rD, rM, rCurrentProcessInfo);
}
/**
* @brief It adds the dynamic RHS contribution of the condition
     * \f[ RHS = \mathbf{f}_{ext} - \mathbf{M} \ddot{u}_{n0} - \mathbf{D} \dot{u}_{n0} - \mathbf{K} u_{n0} \f]
* @param rCondition The condition to compute
* @param RHS_Contribution The dynamic contribution for the RHS
* @param D The damping matrix
* @param M The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToRHS(
Condition& rCondition,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
const ProcessInfo& rCurrentProcessInfo
) override
{
TemplateAddDynamicsToRHS<Condition>(rCondition, rRHS_Contribution, rD, rM, rCurrentProcessInfo);
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
    ///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
/// Utility class to perform the update after solving the system, will be different in MPI runs.
typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedBDFScheme */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BDF_SCHEME defined */
|
cOpenMP.c
|
//
//  cOpenMP.c
// CISC372_Project
//
// Created by Zehe Luan on 11/27/21.
//
#include <math.h>
#include <stdio.h>
#include <float.h>
#include <sys/time.h>
#include <omp.h>
int main() {
struct timeval start, end;
int digs = DECIMAL_DIG;
long long precision = pow(2,31) - 1;
double result = 0;
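    /* Note: the two loops below evaluate the Leibniz series
     *   pi/4 = 1 - 1/3 + 1/5 - 1/7 + ...
     * The first loop accumulates the positive terms (i = 1, 5, 9, ...) and the
     * second subtracts the negative ones (i = 3, 7, 11, ...), each with its own
     * OpenMP reduction; the result is multiplied by 4 when printed. */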
gettimeofday(&start, NULL);
#pragma omp parallel
{
#pragma omp for reduction(+: result)
for (long long i=1; i<precision;i+=4)
result += (double)1/i;
#pragma omp for reduction(-: result)
for (long long i=3; i<precision;i+=4)
result -= (double)1/i;
}
gettimeofday(&end, NULL);
long sec_take = (end.tv_sec-start.tv_sec);
long usec_take = (end.tv_usec-start.tv_usec);
if (usec_take < 0) {
sec_take -= 1;
usec_take += 1000000;
}
printf("Pi is approximately %.*e\n", digs, result*4);
printf("Time taken: %ld seconds, %ld microseconds\n", sec_take, usec_take);
}
|
GB_unaryop__lnot_uint64_int64.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint64_int64
// op(A') function: GB_tran__lnot_uint64_int64
// C type: uint64_t
// A type: int64_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint64_int64
(
uint64_t *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint64_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
force.c
|
/*
* This file is part of the GROMACS molecular simulation package.
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team,
* check out http://www.gromacs.org for more information.
* Copyright (c) 2012,2013, by the GROMACS development team, led by
* David van der Spoel, Berk Hess, Erik Lindahl, and including many
* others, as listed in the AUTHORS file in the top-level source
* directory and at http://www.gromacs.org.
*
* GROMACS is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2.1
* of the License, or (at your option) any later version.
*
* GROMACS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with GROMACS; if not, see
* http://www.gnu.org/licenses, or write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* If you want to redistribute modifications to GROMACS, please
* consider that scientific software is very special. Version
* control is crucial - bugs must be traceable. We will be happy to
* consider code for inclusion in the official distribution, but
* derived work must not be called official GROMACS. Details are found
* in the README & COPYING files - if they are missing, get the
* official version at http://www.gromacs.org.
*
* To help us fund GROMACS development, we humbly ask that you cite
* the research papers on the package. Check out http://www.gromacs.org.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <math.h>
#include <string.h>
#include <assert.h>
#include "sysstuff.h"
#include "typedefs.h"
#include "macros.h"
#include "smalloc.h"
#include "macros.h"
#include "physics.h"
#include "force.h"
#include "nonbonded.h"
#include "names.h"
#include "network.h"
#include "pbc.h"
#include "ns.h"
#include "nrnb.h"
#include "bondf.h"
#include "mshift.h"
#include "txtdump.h"
#include "coulomb.h"
#include "pme.h"
#include "mdrun.h"
#include "domdec.h"
#include "partdec.h"
#include "qmmm.h"
#include "mpelogging.h"
#include "gmx_omp_nthreads.h"
void ns(FILE *fp,
t_forcerec *fr,
rvec x[],
matrix box,
gmx_groups_t *groups,
t_grpopts *opts,
gmx_localtop_t *top,
t_mdatoms *md,
t_commrec *cr,
t_nrnb *nrnb,
real *lambda,
real *dvdlambda,
gmx_grppairener_t *grppener,
gmx_bool bFillGrid,
gmx_bool bDoLongRangeNS)
{
char *ptr;
int nsearch;
GMX_MPE_LOG(ev_ns_start);
if (!fr->ns.nblist_initialized)
{
init_neighbor_list(fp, fr, md->homenr);
}
if (fr->bTwinRange)
{
fr->nlr = 0;
}
nsearch = search_neighbours(fp, fr, x, box, top, groups, cr, nrnb, md,
lambda, dvdlambda, grppener,
bFillGrid, bDoLongRangeNS, TRUE);
if (debug)
{
fprintf(debug, "nsearch = %d\n", nsearch);
}
/* Check whether we have to do dynamic load balancing */
/*if ((nsb->nstDlb > 0) && (mod(step,nsb->nstDlb) == 0))
count_nb(cr,nsb,&(top->blocks[ebCGS]),nns,fr->nlr,
&(top->idef),opts->ngener);
*/
if (fr->ns.dump_nl > 0)
{
dump_nblist(fp, cr, fr, fr->ns.dump_nl);
}
GMX_MPE_LOG(ev_ns_finish);
}
static void reduce_thread_forces(int n, rvec *f,
tensor vir,
real *Vcorr,
int efpt_ind, real *dvdl,
int nthreads, f_thread_t *f_t)
{
int t, i;
/* This reduction can run over any number of threads */
#pragma omp parallel for num_threads(gmx_omp_nthreads_get(emntBonded)) private(t) schedule(static)
for (i = 0; i < n; i++)
{
for (t = 1; t < nthreads; t++)
{
rvec_inc(f[i], f_t[t].f[i]);
}
}
for (t = 1; t < nthreads; t++)
{
*Vcorr += f_t[t].Vcorr;
*dvdl += f_t[t].dvdl[efpt_ind];
m_add(vir, f_t[t].vir, vir);
}
}
void do_force_lowlevel(FILE *fplog, gmx_large_int_t step,
t_forcerec *fr, t_inputrec *ir,
t_idef *idef, t_commrec *cr,
t_nrnb *nrnb, gmx_wallcycle_t wcycle,
t_mdatoms *md,
t_grpopts *opts,
rvec x[], history_t *hist,
rvec f[],
rvec f_longrange[],
gmx_enerdata_t *enerd,
t_fcdata *fcd,
gmx_mtop_t *mtop,
gmx_localtop_t *top,
gmx_genborn_t *born,
t_atomtypes *atype,
gmx_bool bBornRadii,
matrix box,
t_lambda *fepvals,
real *lambda,
t_graph *graph,
t_blocka *excl,
rvec mu_tot[],
int flags,
float *cycles_pme)
{
int i, j, status;
int donb_flags;
gmx_bool bDoEpot, bSepDVDL, bSB;
int pme_flags;
matrix boxs;
rvec box_size;
real Vsr, Vlr, Vcorr = 0;
t_pbc pbc;
real dvdgb;
char buf[22];
double clam_i, vlam_i;
real dvdl_dum[efptNR], dvdl, dvdl_nb[efptNR], lam_i[efptNR];
real dvdlsum;
#ifdef GMX_MPI
double t0 = 0.0, t1, t2, t3; /* time measurement for coarse load balancing */
#endif
#define PRINT_SEPDVDL(s, v, dvdlambda) if (bSepDVDL) {fprintf(fplog, sepdvdlformat, s, v, dvdlambda); }
GMX_MPE_LOG(ev_force_start);
set_pbc(&pbc, fr->ePBC, box);
/* reset free energy components */
for (i = 0; i < efptNR; i++)
{
dvdl_nb[i] = 0;
dvdl_dum[i] = 0;
}
/* Reset box */
for (i = 0; (i < DIM); i++)
{
box_size[i] = box[i][i];
}
bSepDVDL = (fr->bSepDVDL && do_per_step(step, ir->nstlog));
debug_gmx();
/* do QMMM first if requested */
if (fr->bQMMM)
{
enerd->term[F_EQM] = calculate_QMMM(cr, x, f, fr, md);
}
if (bSepDVDL)
{
fprintf(fplog, "Step %s: non-bonded V and dVdl for node %d:\n",
gmx_step_str(step, buf), cr->nodeid);
}
/* Call the short range functions all in one go. */
GMX_MPE_LOG(ev_do_fnbf_start);
#ifdef GMX_MPI
/*#define TAKETIME ((cr->npmenodes) && (fr->timesteps < 12))*/
#define TAKETIME FALSE
if (TAKETIME)
{
MPI_Barrier(cr->mpi_comm_mygroup);
t0 = MPI_Wtime();
}
#endif
if (ir->nwall)
{
/* foreign lambda component for walls */
dvdl = do_walls(ir, fr, box, md, x, f, lambda[efptVDW],
enerd->grpp.ener[egLJSR], nrnb);
PRINT_SEPDVDL("Walls", 0.0, dvdl);
enerd->dvdl_lin[efptVDW] += dvdl;
}
/* If doing GB, reset dvda and calculate the Born radii */
if (ir->implicit_solvent)
{
wallcycle_sub_start(wcycle, ewcsNONBONDED);
for (i = 0; i < born->nr; i++)
{
fr->dvda[i] = 0;
}
if (bBornRadii)
{
calc_gb_rad(cr, fr, ir, top, atype, x, &(fr->gblist), born, md, nrnb);
}
wallcycle_sub_stop(wcycle, ewcsNONBONDED);
}
where();
/* We only do non-bonded calculation with group scheme here, the verlet
* calls are done from do_force_cutsVERLET(). */
if (fr->cutoff_scheme == ecutsGROUP && (flags & GMX_FORCE_NONBONDED))
{
donb_flags = 0;
/* Add short-range interactions */
donb_flags |= GMX_NONBONDED_DO_SR;
if (flags & GMX_FORCE_FORCES)
{
donb_flags |= GMX_NONBONDED_DO_FORCE;
}
if (flags & GMX_FORCE_ENERGY)
{
donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
}
if (flags & GMX_FORCE_DO_LR)
{
donb_flags |= GMX_NONBONDED_DO_LR;
}
wallcycle_sub_start(wcycle, ewcsNONBONDED);
do_nonbonded(cr, fr, x, f, f_longrange, md, excl,
&enerd->grpp, box_size, nrnb,
lambda, dvdl_nb, -1, -1, donb_flags);
/* If we do foreign lambda and we have soft-core interactions
* we have to recalculate the (non-linear) energies contributions.
*/
if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
{
for (i = 0; i < enerd->n_lambda; i++)
{
for (j = 0; j < efptNR; j++)
{
lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
}
reset_foreign_enerdata(enerd);
do_nonbonded(cr, fr, x, f, f_longrange, md, excl,
&(enerd->foreign_grpp), box_size, nrnb,
lam_i, dvdl_dum, -1, -1,
(donb_flags & ~GMX_NONBONDED_DO_FORCE) | GMX_NONBONDED_DO_FOREIGNLAMBDA);
sum_epot(&ir->opts, &(enerd->foreign_grpp), enerd->foreign_term);
enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
}
}
wallcycle_sub_stop(wcycle, ewcsNONBONDED);
where();
}
/* If we are doing GB, calculate bonded forces and apply corrections
* to the solvation forces */
/* MRS: Eventually, many need to include free energy contribution here! */
if (ir->implicit_solvent)
{
wallcycle_sub_start(wcycle, ewcsBONDED);
calc_gb_forces(cr, md, born, top, atype, x, f, fr, idef,
ir->gb_algorithm, ir->sa_algorithm, nrnb, bBornRadii, &pbc, graph, enerd);
wallcycle_sub_stop(wcycle, ewcsBONDED);
}
#ifdef GMX_MPI
if (TAKETIME)
{
t1 = MPI_Wtime();
fr->t_fnbf += t1-t0;
}
#endif
if (fepvals->sc_alpha != 0)
{
enerd->dvdl_nonlin[efptVDW] += dvdl_nb[efptVDW];
}
else
{
enerd->dvdl_lin[efptVDW] += dvdl_nb[efptVDW];
}
if (fepvals->sc_alpha != 0)
    /* even though the coulomb part is linear, we already added it, because we
       need to go through the vdw calculation anyway */
{
enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
}
else
{
enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
}
Vsr = 0;
if (bSepDVDL)
{
for (i = 0; i < enerd->grpp.nener; i++)
{
Vsr +=
(fr->bBHAM ?
enerd->grpp.ener[egBHAMSR][i] :
enerd->grpp.ener[egLJSR][i])
+ enerd->grpp.ener[egCOULSR][i] + enerd->grpp.ener[egGB][i];
}
dvdlsum = dvdl_nb[efptVDW] + dvdl_nb[efptCOUL];
PRINT_SEPDVDL("VdW and Coulomb SR particle-p.", Vsr, dvdlsum);
}
debug_gmx();
GMX_MPE_LOG(ev_do_fnbf_finish);
if (debug)
{
pr_rvecs(debug, 0, "fshift after SR", fr->fshift, SHIFTS);
}
/* Shift the coordinates. Must be done before bonded forces and PPPM,
* but is also necessary for SHAKE and update, therefore it can NOT
* go when no bonded forces have to be evaluated.
*/
/* Here sometimes we would not need to shift with NBFonly,
* but we do so anyhow for consistency of the returned coordinates.
*/
if (graph)
{
shift_self(graph, box, x);
if (TRICLINIC(box))
{
inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes);
}
else
{
inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes);
}
}
/* Check whether we need to do bondeds or correct for exclusions */
if (fr->bMolPBC &&
((flags & GMX_FORCE_BONDED)
|| EEL_RF(fr->eeltype) || EEL_FULL(fr->eeltype)))
{
/* Since all atoms are in the rectangular or triclinic unit-cell,
* only single box vector shifts (2 in x) are required.
*/
set_pbc_dd(&pbc, fr->ePBC, cr->dd, TRUE, box);
}
debug_gmx();
if (flags & GMX_FORCE_BONDED)
{
GMX_MPE_LOG(ev_calc_bonds_start);
wallcycle_sub_start(wcycle, ewcsBONDED);
calc_bonds(fplog, cr->ms,
idef, x, hist, f, fr, &pbc, graph, enerd, nrnb, lambda, md, fcd,
DOMAINDECOMP(cr) ? cr->dd->gatindex : NULL, atype, born,
flags,
fr->bSepDVDL && do_per_step(step, ir->nstlog), step);
/* Check if we have to determine energy differences
* at foreign lambda's.
*/
if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) &&
idef->ilsort != ilsortNO_FE)
{
if (idef->ilsort != ilsortFE_SORTED)
{
gmx_incons("The bonded interactions are not sorted for free energy");
}
for (i = 0; i < enerd->n_lambda; i++)
{
reset_foreign_enerdata(enerd);
for (j = 0; j < efptNR; j++)
{
lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
}
calc_bonds_lambda(fplog, idef, x, fr, &pbc, graph, &(enerd->foreign_grpp), enerd->foreign_term, nrnb, lam_i, md,
fcd, DOMAINDECOMP(cr) ? cr->dd->gatindex : NULL);
sum_epot(&ir->opts, &(enerd->foreign_grpp), enerd->foreign_term);
enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
}
}
debug_gmx();
GMX_MPE_LOG(ev_calc_bonds_finish);
wallcycle_sub_stop(wcycle, ewcsBONDED);
}
where();
*cycles_pme = 0;
if (EEL_FULL(fr->eeltype))
{
bSB = (ir->nwall == 2);
if (bSB)
{
copy_mat(box, boxs);
svmul(ir->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
box_size[ZZ] *= ir->wall_ewald_zfac;
}
clear_mat(fr->vir_el_recip);
if (fr->bEwald)
{
Vcorr = 0;
dvdl = 0;
/* With the Verlet scheme exclusion forces are calculated
* in the non-bonded kernel.
*/
/* The TPI molecule does not have exclusions with the rest
* of the system and no intra-molecular PME grid contributions
* will be calculated in gmx_pme_calc_energy.
*/
if ((ir->cutoff_scheme == ecutsGROUP && fr->n_tpi == 0) ||
ir->ewald_geometry != eewg3D ||
ir->epsilon_surface != 0)
{
int nthreads, t;
wallcycle_sub_start(wcycle, ewcsEWALD_CORRECTION);
if (fr->n_tpi > 0)
{
gmx_fatal(FARGS, "TPI with PME currently only works in a 3D geometry with tin-foil boundary conditions");
}
nthreads = gmx_omp_nthreads_get(emntBonded);
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (t = 0; t < nthreads; t++)
{
int s, e, i;
rvec *fnv;
tensor *vir;
real *Vcorrt, *dvdlt;
if (t == 0)
{
fnv = fr->f_novirsum;
vir = &fr->vir_el_recip;
Vcorrt = &Vcorr;
dvdlt = &dvdl;
}
else
{
fnv = fr->f_t[t].f;
vir = &fr->f_t[t].vir;
Vcorrt = &fr->f_t[t].Vcorr;
dvdlt = &fr->f_t[t].dvdl[efptCOUL];
for (i = 0; i < fr->natoms_force; i++)
{
clear_rvec(fnv[i]);
}
clear_mat(*vir);
}
*dvdlt = 0;
*Vcorrt =
ewald_LRcorrection(fplog,
fr->excl_load[t], fr->excl_load[t+1],
cr, t, fr,
md->chargeA,
md->nChargePerturbed ? md->chargeB : NULL,
ir->cutoff_scheme != ecutsVERLET,
excl, x, bSB ? boxs : box, mu_tot,
ir->ewald_geometry,
ir->epsilon_surface,
fnv, *vir,
lambda[efptCOUL], dvdlt);
}
if (nthreads > 1)
{
reduce_thread_forces(fr->natoms_force, fr->f_novirsum,
fr->vir_el_recip,
&Vcorr, efptCOUL, &dvdl,
nthreads, fr->f_t);
}
wallcycle_sub_stop(wcycle, ewcsEWALD_CORRECTION);
}
if (fr->n_tpi == 0)
{
Vcorr += ewald_charge_correction(cr, fr, lambda[efptCOUL], box,
&dvdl, fr->vir_el_recip);
}
PRINT_SEPDVDL("Ewald excl./charge/dip. corr.", Vcorr, dvdl);
enerd->dvdl_lin[efptCOUL] += dvdl;
}
status = 0;
Vlr = 0;
dvdl = 0;
switch (fr->eeltype)
{
case eelPME:
case eelPMESWITCH:
case eelPMEUSER:
case eelPMEUSERSWITCH:
case eelP3M_AD:
if (cr->duty & DUTY_PME)
{
assert(fr->n_tpi >= 0);
if (fr->n_tpi == 0 || (flags & GMX_FORCE_STATECHANGED))
{
pme_flags = GMX_PME_SPREAD_Q | GMX_PME_SOLVE;
if (flags & GMX_FORCE_FORCES)
{
pme_flags |= GMX_PME_CALC_F;
}
if (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY))
{
pme_flags |= GMX_PME_CALC_ENER_VIR;
}
if (fr->n_tpi > 0)
{
/* We don't calculate f, but we do want the potential */
pme_flags |= GMX_PME_CALC_POT;
}
wallcycle_start(wcycle, ewcPMEMESH);
status = gmx_pme_do(fr->pmedata,
md->start, md->homenr - fr->n_tpi,
x, fr->f_novirsum,
md->chargeA, md->chargeB,
bSB ? boxs : box, cr,
DOMAINDECOMP(cr) ? dd_pme_maxshift_x(cr->dd) : 0,
DOMAINDECOMP(cr) ? dd_pme_maxshift_y(cr->dd) : 0,
nrnb, wcycle,
fr->vir_el_recip, fr->ewaldcoeff,
&Vlr, lambda[efptCOUL], &dvdl,
pme_flags);
*cycles_pme = wallcycle_stop(wcycle, ewcPMEMESH);
/* We should try to do as little computation after
* this as possible, because parallel PME synchronizes
* the nodes, so we want all load imbalance of the rest
* of the force calculation to be before the PME call.
* DD load balancing is done on the whole time of
* the force call (without PME).
*/
}
if (fr->n_tpi > 0)
{
/* Determine the PME grid energy of the test molecule
* with the PME grid potential of the other charges.
*/
gmx_pme_calc_energy(fr->pmedata, fr->n_tpi,
x + md->homenr - fr->n_tpi,
md->chargeA + md->homenr - fr->n_tpi,
&Vlr);
}
PRINT_SEPDVDL("PME mesh", Vlr, dvdl);
}
break;
case eelEWALD:
Vlr = do_ewald(fplog, FALSE, ir, x, fr->f_novirsum,
md->chargeA, md->chargeB,
box_size, cr, md->homenr,
fr->vir_el_recip, fr->ewaldcoeff,
lambda[efptCOUL], &dvdl, fr->ewald_table);
PRINT_SEPDVDL("Ewald long-range", Vlr, dvdl);
break;
default:
gmx_fatal(FARGS, "No such electrostatics method implemented %s",
eel_names[fr->eeltype]);
}
if (status != 0)
{
gmx_fatal(FARGS, "Error %d in long range electrostatics routine %s",
status, EELTYPE(fr->eeltype));
}
/* Note that with separate PME nodes we get the real energies later */
enerd->dvdl_lin[efptCOUL] += dvdl;
enerd->term[F_COUL_RECIP] = Vlr + Vcorr;
if (debug)
{
fprintf(debug, "Vlr = %g, Vcorr = %g, Vlr_corr = %g\n",
Vlr, Vcorr, enerd->term[F_COUL_RECIP]);
pr_rvecs(debug, 0, "vir_el_recip after corr", fr->vir_el_recip, DIM);
pr_rvecs(debug, 0, "fshift after LR Corrections", fr->fshift, SHIFTS);
}
}
else
{
if (EEL_RF(fr->eeltype))
{
/* With the Verlet scheme exclusion forces are calculated
* in the non-bonded kernel.
*/
if (ir->cutoff_scheme != ecutsVERLET && fr->eeltype != eelRF_NEC)
{
dvdl = 0;
enerd->term[F_RF_EXCL] =
RF_excl_correction(fplog, fr, graph, md, excl, x, f,
fr->fshift, &pbc, lambda[efptCOUL], &dvdl);
}
enerd->dvdl_lin[efptCOUL] += dvdl;
PRINT_SEPDVDL("RF exclusion correction",
enerd->term[F_RF_EXCL], dvdl);
}
}
where();
debug_gmx();
if (debug)
{
print_nrnb(debug, nrnb);
}
debug_gmx();
#ifdef GMX_MPI
if (TAKETIME)
{
t2 = MPI_Wtime();
MPI_Barrier(cr->mpi_comm_mygroup);
t3 = MPI_Wtime();
fr->t_wait += t3-t2;
if (fr->timesteps == 11)
{
fprintf(stderr, "* PP load balancing info: node %d, step %s, rel wait time=%3.0f%% , load string value: %7.2f\n",
cr->nodeid, gmx_step_str(fr->timesteps, buf),
100*fr->t_wait/(fr->t_wait+fr->t_fnbf),
(fr->t_fnbf+fr->t_wait)/fr->t_fnbf);
}
fr->timesteps++;
}
#endif
if (debug)
{
pr_rvecs(debug, 0, "fshift after bondeds", fr->fshift, SHIFTS);
}
GMX_MPE_LOG(ev_force_finish);
}
void init_enerdata(int ngener, int n_lambda, gmx_enerdata_t *enerd)
{
int i, n2;
for (i = 0; i < F_NRE; i++)
{
enerd->term[i] = 0;
enerd->foreign_term[i] = 0;
}
for (i = 0; i < efptNR; i++)
{
enerd->dvdl_lin[i] = 0;
enerd->dvdl_nonlin[i] = 0;
}
n2 = ngener*ngener;
if (debug)
{
fprintf(debug, "Creating %d sized group matrix for energies\n", n2);
}
enerd->grpp.nener = n2;
enerd->foreign_grpp.nener = n2;
for (i = 0; (i < egNR); i++)
{
snew(enerd->grpp.ener[i], n2);
snew(enerd->foreign_grpp.ener[i], n2);
}
if (n_lambda)
{
enerd->n_lambda = 1 + n_lambda;
snew(enerd->enerpart_lambda, enerd->n_lambda);
}
else
{
enerd->n_lambda = 0;
}
}
void destroy_enerdata(gmx_enerdata_t *enerd)
{
int i;
for (i = 0; (i < egNR); i++)
{
sfree(enerd->grpp.ener[i]);
}
for (i = 0; (i < egNR); i++)
{
sfree(enerd->foreign_grpp.ener[i]);
}
if (enerd->n_lambda)
{
sfree(enerd->enerpart_lambda);
}
}
static real sum_v(int n, real v[])
{
real t;
int i;
t = 0.0;
for (i = 0; (i < n); i++)
{
t = t + v[i];
}
return t;
}
void sum_epot(t_grpopts *opts, gmx_grppairener_t *grpp, real *epot)
{
int i;
/* Accumulate energies */
epot[F_COUL_SR] = sum_v(grpp->nener, grpp->ener[egCOULSR]);
epot[F_LJ] = sum_v(grpp->nener, grpp->ener[egLJSR]);
epot[F_LJ14] = sum_v(grpp->nener, grpp->ener[egLJ14]);
epot[F_COUL14] = sum_v(grpp->nener, grpp->ener[egCOUL14]);
epot[F_COUL_LR] = sum_v(grpp->nener, grpp->ener[egCOULLR]);
epot[F_LJ_LR] = sum_v(grpp->nener, grpp->ener[egLJLR]);
/* We have already added 1-2,1-3, and 1-4 terms to F_GBPOL */
epot[F_GBPOL] += sum_v(grpp->nener, grpp->ener[egGB]);
    /* lattice part of LR doesn't belong to any group
* and has been added earlier
*/
epot[F_BHAM] = sum_v(grpp->nener, grpp->ener[egBHAMSR]);
epot[F_BHAM_LR] = sum_v(grpp->nener, grpp->ener[egBHAMLR]);
epot[F_EPOT] = 0;
for (i = 0; (i < F_EPOT); i++)
{
if (i != F_DISRESVIOL && i != F_ORIRESDEV)
{
epot[F_EPOT] += epot[i];
}
}
}
void sum_dhdl(gmx_enerdata_t *enerd, real *lambda, t_lambda *fepvals)
{
int i, j, index;
double dlam;
enerd->dvdl_lin[efptVDW] += enerd->term[F_DVDL_VDW]; /* include dispersion correction */
enerd->term[F_DVDL] = 0.0;
for (i = 0; i < efptNR; i++)
{
if (fepvals->separate_dvdl[i])
{
/* could this be done more readably/compactly? */
switch (i)
{
case (efptMASS):
index = F_DKDL;
break;
case (efptCOUL):
index = F_DVDL_COUL;
break;
case (efptVDW):
index = F_DVDL_VDW;
break;
case (efptBONDED):
index = F_DVDL_BONDED;
break;
case (efptRESTRAINT):
index = F_DVDL_RESTRAINT;
break;
default:
index = F_DVDL;
break;
}
enerd->term[index] = enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
if (debug)
{
fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
efpt_names[i], i, enerd->term[index], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
}
}
else
{
enerd->term[F_DVDL] += enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
if (debug)
{
                fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
                        efpt_names[i], i, enerd->term[F_DVDL], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
}
}
}
/* Notes on the foreign lambda free energy difference evaluation:
* Adding the potential and ekin terms that depend linearly on lambda
* as delta lam * dvdl to the energy differences is exact.
* For the constraints this is not exact, but we have no other option
* without literally changing the lengths and reevaluating the energies at each step.
* (try to remedy this post 4.6 - MRS)
* For the non-bonded LR term we assume that the soft-core (if present)
* no longer affects the energy beyond the short-range cut-off,
* which is a very good approximation (except for exotic settings).
* (investigate how to overcome this post 4.6 - MRS)
*/
if (fepvals->separate_dvdl[efptBONDED])
{
enerd->term[F_DVDL_BONDED] += enerd->term[F_DVDL_CONSTR];
}
else
{
enerd->term[F_DVDL] += enerd->term[F_DVDL_CONSTR];
}
enerd->term[F_DVDL_CONSTR] = 0;
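    /* Worked example of the linear correction applied in the loop below
     * (illustrative numbers only): with lambda[j] = 0.2, a foreign lambda of
     * 0.5 and dvdl_lin[j] = 10, the term added to enerpart_lambda[i+1] is
     * dlam*dvdl_lin = (0.5 - 0.2)*10 = 3. */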
for (i = 0; i < fepvals->n_lambda; i++)
{ /* note we are iterating over fepvals here!
For the current lam, dlam = 0 automatically,
so we don't need to add anything to the
enerd->enerpart_lambda[0] */
/* we don't need to worry about dvdl_lin contributions to dE at
current lambda, because the contributions to the current
lambda are automatically zeroed */
for (j = 0; j < efptNR; j++)
{
/* Note that this loop is over all dhdl components, not just the separated ones */
dlam = (fepvals->all_lambda[j][i]-lambda[j]);
enerd->enerpart_lambda[i+1] += dlam*enerd->dvdl_lin[j];
if (debug)
{
fprintf(debug, "enerdiff lam %g: (%15s), non-linear %f linear %f*%f\n",
fepvals->all_lambda[j][i], efpt_names[j],
(enerd->enerpart_lambda[i+1] - enerd->enerpart_lambda[0]),
dlam, enerd->dvdl_lin[j]);
}
}
}
}
void reset_foreign_enerdata(gmx_enerdata_t *enerd)
{
int i, j;
/* First reset all foreign energy components. Foreign energies always called on
neighbor search steps */
for (i = 0; (i < egNR); i++)
{
for (j = 0; (j < enerd->grpp.nener); j++)
{
enerd->foreign_grpp.ener[i][j] = 0.0;
}
}
/* potential energy components */
for (i = 0; (i <= F_EPOT); i++)
{
enerd->foreign_term[i] = 0.0;
}
}
void reset_enerdata(t_grpopts *opts,
t_forcerec *fr, gmx_bool bNS,
gmx_enerdata_t *enerd,
gmx_bool bMaster)
{
gmx_bool bKeepLR;
int i, j;
/* First reset all energy components, except for the long range terms
* on the master at non neighbor search steps, since the long range
* terms have already been summed at the last neighbor search step.
*/
bKeepLR = (fr->bTwinRange && !bNS);
for (i = 0; (i < egNR); i++)
{
if (!(bKeepLR && bMaster && (i == egCOULLR || i == egLJLR)))
{
for (j = 0; (j < enerd->grpp.nener); j++)
{
enerd->grpp.ener[i][j] = 0.0;
}
}
}
for (i = 0; i < efptNR; i++)
{
enerd->dvdl_lin[i] = 0.0;
enerd->dvdl_nonlin[i] = 0.0;
}
/* Normal potential energy components */
for (i = 0; (i <= F_EPOT); i++)
{
enerd->term[i] = 0.0;
}
    /* Initialize the dVdlambda terms with the long range contribution */
enerd->term[F_DVDL] = 0.0;
enerd->term[F_DVDL_COUL] = 0.0;
enerd->term[F_DVDL_VDW] = 0.0;
enerd->term[F_DVDL_BONDED] = 0.0;
enerd->term[F_DVDL_RESTRAINT] = 0.0;
enerd->term[F_DKDL] = 0.0;
if (enerd->n_lambda > 0)
{
for (i = 0; i < enerd->n_lambda; i++)
{
enerd->enerpart_lambda[i] = 0.0;
}
}
/* reset foreign energy data - separate function since we also call it elsewhere */
reset_foreign_enerdata(enerd);
}
|
matrix.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M AAA TTTTT RRRR IIIII X X %
% MM MM A A T R R I X X %
% M M M AAAAA T RRRR I X %
% M M A A T R R I X X %
% M M A A T R R IIIII X X %
% %
% %
% MagickCore Matrix Methods %
% %
% Software Design %
% Cristy %
% August 2007 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/matrix-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
/*
Typedef declaration.
*/
struct _MatrixInfo
{
CacheType
type;
size_t
columns,
rows,
stride;
MagickSizeType
length;
MagickBooleanType
mapped,
synchronize;
char
path[MagickPathExtent];
int
file;
void
*elements;
SemaphoreInfo
*semaphore;
size_t
signature;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMatrixInfo() allocates the MatrixInfo structure.
%
% The format of the AcquireMatrixInfo method is:
%
% MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows,
% const size_t stride,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: the matrix columns.
%
% o rows: the matrix rows.
%
% o stride: the matrix stride.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(SIGBUS)
static void MatrixSignalHandler(int status)
{
ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache");
}
#endif
static inline MagickOffsetType WriteMatrixElements(
const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
register MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PWRITE)
LockSemaphoreInfo(matrix_info->semaphore);
if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
{
UnlockSemaphoreInfo(matrix_info->semaphore);
return((MagickOffsetType) -1);
}
#endif
count=0;
for (i=0; i < (MagickOffsetType) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PWRITE)
count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX));
#else
count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
if (count <= 0)
{
count=0;
if (errno != EINTR)
break;
}
}
#if !defined(MAGICKCORE_HAVE_PWRITE)
UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
return(i);
}
static MagickBooleanType SetMatrixExtent(
MatrixInfo *magick_restrict matrix_info,MagickSizeType length)
{
MagickOffsetType
count,
extent,
offset;
if (length != (MagickSizeType) ((MagickOffsetType) length))
return(MagickFalse);
offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END);
if (offset < 0)
return(MagickFalse);
if ((MagickSizeType) offset >= length)
return(MagickTrue);
extent=(MagickOffsetType) length-1;
count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
if (matrix_info->synchronize != MagickFalse)
(void) posix_fallocate(matrix_info->file,offset+1,extent-offset);
#endif
#if defined(SIGBUS)
(void) signal(SIGBUS,MatrixSignalHandler);
#endif
return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}
MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns,
const size_t rows,const size_t stride,ExceptionInfo *exception)
{
char
*synchronize;
MagickBooleanType
status;
MatrixInfo
*matrix_info;
matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info));
if (matrix_info == (MatrixInfo *) NULL)
return((MatrixInfo *) NULL);
(void) memset(matrix_info,0,sizeof(*matrix_info));
matrix_info->signature=MagickCoreSignature;
matrix_info->columns=columns;
matrix_info->rows=rows;
matrix_info->stride=stride;
matrix_info->semaphore=AcquireSemaphoreInfo();
synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (synchronize != (const char *) NULL)
{
matrix_info->synchronize=IsStringTrue(synchronize);
synchronize=DestroyString(synchronize);
}
matrix_info->length=(MagickSizeType) columns*rows*stride;
if (matrix_info->columns != (size_t) (matrix_info->length/rows/stride))
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'","matrix cache");
return(DestroyMatrixInfo(matrix_info));
}
matrix_info->type=MemoryCache;
status=AcquireMagickResource(AreaResource,matrix_info->length);
if ((status != MagickFalse) &&
(matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length)))
{
status=AcquireMagickResource(MemoryResource,matrix_info->length);
if (status != MagickFalse)
{
matrix_info->mapped=MagickFalse;
matrix_info->elements=AcquireMagickMemory((size_t)
matrix_info->length);
if (matrix_info->elements == NULL)
{
matrix_info->mapped=MagickTrue;
matrix_info->elements=MapBlob(-1,IOMode,0,(size_t)
matrix_info->length);
}
if (matrix_info->elements == (unsigned short *) NULL)
RelinquishMagickResource(MemoryResource,matrix_info->length);
}
}
matrix_info->file=(-1);
if (matrix_info->elements == (unsigned short *) NULL)
{
status=AcquireMagickResource(DiskResource,matrix_info->length);
if (status == MagickFalse)
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'","matrix cache");
return(DestroyMatrixInfo(matrix_info));
}
matrix_info->type=DiskCache;
matrix_info->file=AcquireUniqueFileResource(matrix_info->path);
if (matrix_info->file == -1)
return(DestroyMatrixInfo(matrix_info));
status=AcquireMagickResource(MapResource,matrix_info->length);
if (status != MagickFalse)
{
status=SetMatrixExtent(matrix_info,matrix_info->length);
if (status != MagickFalse)
matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0,
(size_t) matrix_info->length);
if (matrix_info->elements != NULL)
matrix_info->type=MapCache;
else
RelinquishMagickResource(MapResource,matrix_info->length);
}
}
return(matrix_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMagickMatrix() allocates and returns a matrix in the form of an
% array of pointers to an array of doubles, with all values pre-set to zero.
%
% This is used to generate the two dimensional matrix and vectors required
% for the GaussJordanElimination() method below, for solving a system of
% simultaneous equations.
%
% The format of the AcquireMagickMatrix method is:
%
% double **AcquireMagickMatrix(const size_t number_rows,
% const size_t size)
%
% A description of each parameter follows:
%
%    o number_rows: the number of pointers for the array of pointers
% (first dimension).
%
% o size: the size of the array of doubles each pointer points to
% (second dimension).
%
*/
MagickExport double **AcquireMagickMatrix(const size_t number_rows,
const size_t size)
{
double
**matrix;
register ssize_t
i,
j;
matrix=(double **) AcquireQuantumMemory(number_rows,sizeof(*matrix));
if (matrix == (double **) NULL)
return((double **) NULL);
for (i=0; i < (ssize_t) number_rows; i++)
{
matrix[i]=(double *) AcquireQuantumMemory(size,sizeof(*matrix[i]));
if (matrix[i] == (double *) NULL)
{
for (j=0; j < i; j++)
matrix[j]=(double *) RelinquishMagickMemory(matrix[j]);
matrix=(double **) RelinquishMagickMemory(matrix);
return((double **) NULL);
}
for (j=0; j < (ssize_t) size; j++)
matrix[i][j]=0.0;
}
return(matrix);
}
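/*
  A minimal usage sketch (an illustration, not part of the formal documentation):
  rows are pre-set to zero on acquisition, and every matrix acquired with
  AcquireMagickMatrix() should be released with RelinquishMagickMatrix() using
  the same number_rows.

    double **m = AcquireMagickMatrix(3UL,4UL);
    if (m != (double **) NULL)
      {
        m[2][3]=1.0;
        m=RelinquishMagickMatrix(m,3UL);
      }
*/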
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMatrixInfo() dereferences a matrix, deallocating memory associated
% with the matrix.
%
% The format of the DestroyMatrixInfo method is:
%
% MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
{
assert(matrix_info != (MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
LockSemaphoreInfo(matrix_info->semaphore);
switch (matrix_info->type)
{
case MemoryCache:
{
if (matrix_info->mapped == MagickFalse)
matrix_info->elements=RelinquishMagickMemory(matrix_info->elements);
else
{
(void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
matrix_info->elements=(unsigned short *) NULL;
}
RelinquishMagickResource(MemoryResource,matrix_info->length);
break;
}
case MapCache:
{
(void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
matrix_info->elements=NULL;
RelinquishMagickResource(MapResource,matrix_info->length);
}
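    /* no break: a memory-mapped matrix also has a disk file to release below */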
case DiskCache:
{
if (matrix_info->file != -1)
(void) close(matrix_info->file);
(void) RelinquishUniqueFileResource(matrix_info->path);
RelinquishMagickResource(DiskResource,matrix_info->length);
break;
}
default:
break;
}
UnlockSemaphoreInfo(matrix_info->semaphore);
RelinquishSemaphoreInfo(&matrix_info->semaphore);
return((MatrixInfo *) RelinquishMagickMemory(matrix_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G a u s s J o r d a n E l i m i n a t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussJordanElimination() returns a matrix in reduced row echelon form,
% while simultaneously reducing and thus solving the augmented results
% matrix.
%
% See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
% The format of the GaussJordanElimination method is:
%
% MagickBooleanType GaussJordanElimination(double **matrix,
% double **vectors,const size_t rank,const size_t number_vectors)
%
% A description of each parameter follows:
%
% o matrix: the matrix to be reduced, as an 'array of row pointers'.
%
%    o vectors: the additional matrix augmenting the matrix for row reduction,
%      producing an 'array of column vectors'.
%
%    o rank: The size of the matrix (both rows and columns).
%      Also represents the number of terms that need to be solved.
%
%    o number_vectors: Number of vector columns augmenting the above matrix.
%      Usually 1, but can be more for more complex equation solving.
%
% Note that the 'matrix' is given as an 'array of row pointers' of rank size.
% That is, values can be assigned as matrix[row][column] where 'row' is
% typically the equation, and 'column' is the term of the equation.
% That is, the matrix is in the form of a 'row first array'.
%
% However 'vectors' is an 'array of column pointers' which can have any number
% of columns, with each column array the same 'rank' size as 'matrix'.
%
% This allows for simpler handling of the results, especially if only one
% column 'vector' is all that is required to produce the desired solution.
%
% For example, the 'vectors' can consist of a pointer to a simple array of
% doubles, when only one set of simultaneous equations is to be solved from
% the given set of coefficient-weighted terms.
%
%    double **matrix = AcquireMagickMatrix(8UL,8UL);
%    double coefficients[8];
%    double *vectors = coefficients;
%    ...
%    GaussJordanElimination(matrix, &vectors, 8UL, 1UL);
%
% However, by specifying more 'columns' (as an 'array of vector columns'),
% you can use this function to solve a set of 'separable' equations.
%
% For example, a distortion function where u = U(x,y) and v = V(x,y),
% and the functions U() and V() have separate coefficients but are being
% generated from a common x,y -> u,v data set.
%
% Another example is generation of a color gradient from a set of colors at
% specific coordinates, such as a list x,y -> r,g,b,a.
%
% You can also use the 'vectors' to generate an inverse of the given 'matrix'
% though as a 'column first array' rather than a 'row first array'. For
% details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
*/
MagickPrivate MagickBooleanType GaussJordanElimination(double **matrix,
double **vectors,const size_t rank,const size_t number_vectors)
{
#define GaussJordanSwap(x,y) \
{ \
if ((x) != (y)) \
{ \
(x)+=(y); \
(y)=(x)-(y); \
(x)=(x)-(y); \
} \
}
double
max,
scale;
register ssize_t
i,
j,
k;
ssize_t
column,
*columns,
*pivots,
row,
*rows;
columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns));
rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows));
pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots));
if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) ||
(pivots == (ssize_t *) NULL))
{
if (pivots != (ssize_t *) NULL)
pivots=(ssize_t *) RelinquishMagickMemory(pivots);
if (columns != (ssize_t *) NULL)
columns=(ssize_t *) RelinquishMagickMemory(columns);
if (rows != (ssize_t *) NULL)
rows=(ssize_t *) RelinquishMagickMemory(rows);
return(MagickFalse);
}
(void) memset(columns,0,rank*sizeof(*columns));
(void) memset(rows,0,rank*sizeof(*rows));
(void) memset(pivots,0,rank*sizeof(*pivots));
column=0;
row=0;
for (i=0; i < (ssize_t) rank; i++)
{
max=0.0;
for (j=0; j < (ssize_t) rank; j++)
if (pivots[j] != 1)
{
for (k=0; k < (ssize_t) rank; k++)
if (pivots[k] != 0)
{
if (pivots[k] > 1)
return(MagickFalse);
}
else
if (fabs(matrix[j][k]) >= max)
{
max=fabs(matrix[j][k]);
row=j;
column=k;
}
}
pivots[column]++;
if (row != column)
{
for (k=0; k < (ssize_t) rank; k++)
GaussJordanSwap(matrix[row][k],matrix[column][k]);
for (k=0; k < (ssize_t) number_vectors; k++)
GaussJordanSwap(vectors[k][row],vectors[k][column]);
}
rows[i]=row;
columns[i]=column;
if (matrix[column][column] == 0.0)
      return(MagickFalse);  /* singularity */
scale=PerceptibleReciprocal(matrix[column][column]);
matrix[column][column]=1.0;
for (j=0; j < (ssize_t) rank; j++)
matrix[column][j]*=scale;
for (j=0; j < (ssize_t) number_vectors; j++)
vectors[j][column]*=scale;
for (j=0; j < (ssize_t) rank; j++)
if (j != column)
{
scale=matrix[j][column];
matrix[j][column]=0.0;
for (k=0; k < (ssize_t) rank; k++)
matrix[j][k]-=scale*matrix[column][k];
for (k=0; k < (ssize_t) number_vectors; k++)
vectors[k][j]-=scale*vectors[k][column];
}
}
for (j=(ssize_t) rank-1; j >= 0; j--)
if (columns[j] != rows[j])
for (i=0; i < (ssize_t) rank; i++)
GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]);
pivots=(ssize_t *) RelinquishMagickMemory(pivots);
rows=(ssize_t *) RelinquishMagickMemory(rows);
columns=(ssize_t *) RelinquishMagickMemory(columns);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x C o l u m n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixColumns() returns the number of columns in the matrix.
%
% The format of the GetMatrixColumns method is:
%
% size_t GetMatrixColumns(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info)
{
assert(matrix_info != (MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
return(matrix_info->columns);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixElement() returns the specified element in the matrix.
%
% The format of the GetMatrixElement method is:
%
% MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
%    o matrix_info: the matrix.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: return the matrix element in this buffer.
%
*/
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
if (x < 0L)
return(0L);
if (x >= (ssize_t) columns)
return((ssize_t) (columns-1));
return(x);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
if (y < 0L)
return(0L);
if (y >= (ssize_t) rows)
return((ssize_t) (rows-1));
return(y);
}
static inline MagickOffsetType ReadMatrixElements(
const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
const MagickSizeType length,unsigned char *magick_restrict buffer)
{
register MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PREAD)
LockSemaphoreInfo(matrix_info->semaphore);
if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
{
UnlockSemaphoreInfo(matrix_info->semaphore);
return((MagickOffsetType) -1);
}
#endif
count=0;
for (i=0; i < (MagickOffsetType) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PREAD)
count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX));
#else
count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
if (count <= 0)
{
count=0;
if (errno != EINTR)
break;
}
}
#if !defined(MAGICKCORE_HAVE_PREAD)
UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
return(i);
}
MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
const ssize_t x,const ssize_t y,void *value)
{
MagickOffsetType
count,
i;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+
EdgeX(x,matrix_info->columns);
if (matrix_info->type != DiskCache)
{
(void) memcpy(value,(unsigned char *) matrix_info->elements+i*
matrix_info->stride,matrix_info->stride);
return(MagickTrue);
}
count=ReadMatrixElements(matrix_info,i*matrix_info->stride,
matrix_info->stride,(unsigned char *) value);
if (count != (MagickOffsetType) matrix_info->stride)
return(MagickFalse);
return(MagickTrue);
}
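/*
  A minimal usage sketch (an illustration only; `exception' is assumed to be an
  initialized ExceptionInfo pointer): a matrix of doubles is acquired with a
  stride of sizeof(double), written with SetMatrixElement() and read back with
  GetMatrixElement(), regardless of whether it is backed by memory, a map, or
  a disk file.

    MatrixInfo *m = AcquireMatrixInfo(640,480,sizeof(double),exception);
    if (m != (MatrixInfo *) NULL)
      {
        double v = 3.14;
        (void) SetMatrixElement(m,10,20,&v);
        (void) GetMatrixElement(m,10,20,&v);
        m=DestroyMatrixInfo(m);
      }
*/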
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x R o w s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixRows() returns the number of rows in the matrix.
%
% The format of the GetMatrixRows method is:
%
% size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
return(matrix_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L e a s t S q u a r e s A d d T e r m s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LeastSquaresAddTerms() adds one set of terms and associated results to the
% given matrix and vectors for solving using least-squares function fitting.
%
% The format of the LeastSquaresAddTerms method is:
%
% void LeastSquaresAddTerms(double **matrix,double **vectors,
% const double *terms,const double *results,const size_t rank,
% const size_t number_vectors);
%
% A description of each parameter follows:
%
% o matrix: the square matrix to add given terms/results to.
%
% o vectors: the result vectors to add terms/results to.
%
%    o terms: the pre-calculated terms (without the unknown coefficient
%      weights) that form the equation being added.
%
% o results: the result(s) that should be generated from the given terms
%      weighted by the yet-to-be-solved coefficients.
%
% o rank: the rank or size of the dimensions of the square matrix.
% Also the length of vectors, and number of terms being added.
%
%    o number_vectors: Number of result vectors, and number of results being
%      added.  Also represents the number of separable systems of equations
%      that are being solved.
%
% Example of use...
%
% 2 dimensional Affine Equations (which are separable)
% c0*x + c2*y + c4*1 => u
% c1*x + c3*y + c5*1 => v
%
% double **matrix = AcquireMagickMatrix(3UL,3UL);
% double **vectors = AcquireMagickMatrix(2UL,3UL);
% double terms[3], results[2];
% ...
% for each given x,y -> u,v
% terms[0] = x;
% terms[1] = y;
% terms[2] = 1;
% results[0] = u;
% results[1] = v;
% LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL);
% ...
% if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) {
% c0 = vectors[0][0];
% c2 = vectors[0][1];
% c4 = vectors[0][2];
% c1 = vectors[1][0];
% c3 = vectors[1][1];
% c5 = vectors[1][2];
% }
% else
% printf("Matrix unsolvable\n");
% RelinquishMagickMatrix(matrix,3UL);
% RelinquishMagickMatrix(vectors,2UL);
%
*/
MagickPrivate void LeastSquaresAddTerms(double **matrix,double **vectors,
const double *terms,const double *results,const size_t rank,
const size_t number_vectors)
{
register ssize_t
i,
j;
for (j=0; j < (ssize_t) rank; j++)
{
for (i=0; i < (ssize_t) rank; i++)
matrix[i][j]+=terms[i]*terms[j];
for (i=0; i < (ssize_t) number_vectors; i++)
vectors[i][j]+=results[i]*terms[j];
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a t r i x T o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MatrixToImage() returns a matrix as an image. The matrix elements must be
% of type double, otherwise nonsense is returned.
%
% The format of the MatrixToImage method is:
%
% Image *MatrixToImage(const MatrixInfo *matrix_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
double
max_value,
min_value,
scale_factor;
Image
*image;
MagickBooleanType
status;
ssize_t
y;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (matrix_info->stride < sizeof(double))
return((Image *) NULL);
/*
Determine range of matrix.
*/
(void) GetMatrixElement(matrix_info,0,0,&min_value);
max_value=min_value;
for (y=0; y < (ssize_t) matrix_info->rows; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) matrix_info->columns; x++)
{
double
value;
if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
continue;
if (value < min_value)
min_value=value;
else
if (value > max_value)
max_value=value;
}
}
if ((min_value == 0.0) && (max_value == 0.0))
scale_factor=0;
else
if (min_value == max_value)
{
scale_factor=(double) QuantumRange/min_value;
min_value=0;
}
else
scale_factor=(double) QuantumRange/(max_value-min_value);
/*
Convert matrix to image.
*/
image=AcquireImage((ImageInfo *) NULL,exception);
image->columns=matrix_info->columns;
image->rows=matrix_info->rows;
image->colorspace=GRAYColorspace;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
value;
register Quantum
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
continue;
value=scale_factor*(value-min_value);
*q=ClampToQuantum(value);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N u l l M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NullMatrix() sets all elements of the matrix to zero.
%
% The format of the NullMatrix method is:
%
% MagickBooleanType *NullMatrix(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
register ssize_t
x;
ssize_t
count,
y;
unsigned char
value;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
if (matrix_info->type != DiskCache)
{
(void) memset(matrix_info->elements,0,(size_t)
matrix_info->length);
return(MagickTrue);
}
value=0;
(void) lseek(matrix_info->file,0,SEEK_SET);
for (y=0; y < (ssize_t) matrix_info->rows; y++)
{
    for (x=0; x < (ssize_t) (matrix_info->columns*matrix_info->stride); x++)
    {
      count=write(matrix_info->file,&value,sizeof(value));
      if (count != (ssize_t) sizeof(value))
        break;
    }
    if (x < (ssize_t) (matrix_info->columns*matrix_info->stride))
break;
}
return(y < (ssize_t) matrix_info->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e l i n q u i s h M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RelinquishMagickMatrix() frees the previously acquired matrix (array of
% pointers to arrays of doubles).
%
% The format of the RelinquishMagickMatrix method is:
%
% double **RelinquishMagickMatrix(double **matrix,
% const size_t number_rows)
%
% A description of each parameter follows:
%
% o matrix: the matrix to relinquish
%
% o number_rows: the first dimension of the acquired matrix (number of
% pointers)
%
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
const size_t number_rows)
{
register ssize_t
i;
if (matrix == (double **) NULL )
return(matrix);
for (i=0; i < (ssize_t) number_rows; i++)
matrix[i]=(double *) RelinquishMagickMemory(matrix[i]);
matrix=(double **) RelinquishMagickMemory(matrix);
return(matrix);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMatrixElement() sets the specified element in the matrix.
%
% The format of the SetMatrixElement method is:
%
% MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
%        const ssize_t x,const ssize_t y,const void *value)
%
% A description of each parameter follows:
%
%    o matrix_info: the matrix.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: set the matrix element to this value.
%
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
const ssize_t x,const ssize_t y,const void *value)
{
MagickOffsetType
count,
i;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
i=(MagickOffsetType) y*matrix_info->columns+x;
if ((i < 0) ||
((MagickSizeType) (i*matrix_info->stride) >= matrix_info->length))
return(MagickFalse);
if (matrix_info->type != DiskCache)
{
(void) memcpy((unsigned char *) matrix_info->elements+i*
matrix_info->stride,value,matrix_info->stride);
return(MagickTrue);
}
count=WriteMatrixElements(matrix_info,i*matrix_info->stride,
matrix_info->stride,(unsigned char *) value);
if (count != (MagickOffsetType) matrix_info->stride)
return(MagickFalse);
return(MagickTrue);
}
|
fmm.h
|
#ifndef fmm_h
#define fmm_h
#include <cstring> // std::memset
#include <fstream> // std::ofstream
#include <type_traits> // std::is_same
#include "fmm_base.h"
#include "intrinsics.h"
#include "math_wrapper.h"
namespace exafmm_t {
template <typename T>
class Fmm : public FmmBase<T> {
public:
/* precomputation matrices */
std::vector<std::vector<T>> matrix_UC2E_U;
std::vector<std::vector<T>> matrix_UC2E_V;
std::vector<std::vector<T>> matrix_DC2E_U;
std::vector<std::vector<T>> matrix_DC2E_V;
std::vector<std::vector<std::vector<T>>> matrix_M2M;
std::vector<std::vector<std::vector<T>>> matrix_L2L;
std::vector<M2LData> m2ldata;
/* constructors */
Fmm() {}
Fmm(int p_, int ncrit_, std::string filename_=std::string()) : FmmBase<T>(p_, ncrit_, filename_) {}
/* precomputation */
//! Setup the sizes of precomputation matrices
void initialize_matrix() {
int& nsurf_ = this->nsurf;
int& depth_ = this->depth;
matrix_UC2E_V.resize(depth_+1, std::vector<T>(nsurf_*nsurf_));
matrix_UC2E_U.resize(depth_+1, std::vector<T>(nsurf_*nsurf_));
matrix_DC2E_V.resize(depth_+1, std::vector<T>(nsurf_*nsurf_));
matrix_DC2E_U.resize(depth_+1, std::vector<T>(nsurf_*nsurf_));
matrix_M2M.resize(depth_+1);
matrix_L2L.resize(depth_+1);
for (int level=0; level<=depth_; ++level) {
matrix_M2M[level].resize(REL_COORD[M2M_Type].size(), std::vector<T>(nsurf_*nsurf_));
matrix_L2L[level].resize(REL_COORD[L2L_Type].size(), std::vector<T>(nsurf_*nsurf_));
}
}
//! Precompute M2M and L2L
void precompute_M2M() {
int& nsurf_ = this->nsurf;
real_t parent_coord[3] = {0, 0, 0};
for (int level=0; level<=this->depth; level++) {
RealVec parent_up_check_surf = surface(this->p, this->r0, level, parent_coord, 2.95);
real_t s = this->r0 * powf(0.5, level+1);
int npos = REL_COORD[M2M_Type].size(); // number of relative positions
#pragma omp parallel for
for(int i=0; i<npos; i++) {
// compute kernel matrix
ivec3& coord = REL_COORD[M2M_Type][i];
real_t child_coord[3] = {parent_coord[0] + coord[0]*s,
parent_coord[1] + coord[1]*s,
parent_coord[2] + coord[2]*s};
RealVec child_up_equiv_surf = surface(this->p, this->r0, level+1, child_coord, 1.05);
std::vector<T> matrix_pc2ce(nsurf_*nsurf_);
this->kernel_matrix(parent_up_check_surf, child_up_equiv_surf, matrix_pc2ce);
// M2M
std::vector<T> buffer(nsurf_*nsurf_);
gemm(nsurf_, nsurf_, nsurf_, &(matrix_UC2E_U[level][0]), &matrix_pc2ce[0], &buffer[0]);
gemm(nsurf_, nsurf_, nsurf_, &(matrix_UC2E_V[level][0]), &buffer[0], &(matrix_M2M[level][i][0]));
// L2L
matrix_pc2ce = transpose(matrix_pc2ce, nsurf_, nsurf_);
gemm(nsurf_, nsurf_, nsurf_, &matrix_pc2ce[0], &(matrix_DC2E_V[level][0]), &buffer[0]);
gemm(nsurf_, nsurf_, nsurf_, &buffer[0], &(matrix_DC2E_U[level][0]), &(matrix_L2L[level][i][0]));
}
}
}
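    // In effect the loops above compose (a sketch, assuming gemm(m,k,n,A,B,C)
    // computes C = A*B as it is used throughout this file):
    //   matrix_M2M[level][i] = UC2E_V[level] * UC2E_U[level] * K(parent_check, child_equiv)
    //   matrix_L2L[level][i] = K(parent_check, child_equiv)^T * DC2E_V[level] * DC2E_U[level]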
//! Precompute UC2UE and DC2DE matrices
void precompute_check2equiv() {}
//! Precompute M2L
void precompute_M2L(std::ofstream& file) {}
//! Save precomputation matrices
void save_matrix(std::ofstream& file) {
file.write(reinterpret_cast<char*>(&this->r0), sizeof(real_t)); // r0
size_t size = this->nsurf * this->nsurf;
for(int l=0; l<=this->depth; l++) {
// UC2E, DC2E
file.write(reinterpret_cast<char*>(&matrix_UC2E_U[l][0]), size*sizeof(T));
file.write(reinterpret_cast<char*>(&matrix_UC2E_V[l][0]), size*sizeof(T));
file.write(reinterpret_cast<char*>(&matrix_DC2E_U[l][0]), size*sizeof(T));
file.write(reinterpret_cast<char*>(&matrix_DC2E_V[l][0]), size*sizeof(T));
// M2M, L2L
for (auto & vec : matrix_M2M[l]) {
file.write(reinterpret_cast<char*>(&vec[0]), size*sizeof(T));
}
for (auto & vec : matrix_L2L[l]) {
file.write(reinterpret_cast<char*>(&vec[0]), size*sizeof(T));
}
}
}
//! Check and load precomputation matrices
void load_matrix() {
int& nsurf_ = this->nsurf;
int& depth_ = this->depth;
size_t size_M2L = this->nfreq * 2 * NCHILD * NCHILD;
size_t file_size = (2*REL_COORD[M2M_Type].size()+4) * nsurf_ * nsurf_ * (depth_+1) * sizeof(T)
+ REL_COORD[M2L_Type].size() * size_M2L * depth_ * sizeof(real_t)
+ 1 * sizeof(real_t); // +1 denotes r0
std::ifstream file(this->filename, std::ifstream::binary);
if (file.good()) {
file.seekg(0, file.end);
if (size_t(file.tellg()) == file_size) { // if file size is correct
file.seekg(0, file.beg); // move the position back to the beginning
real_t r0_;
file.read(reinterpret_cast<char*>(&r0_), sizeof(real_t));
if (this->r0 == r0_) { // if radius match
size_t size = nsurf_ * nsurf_;
for (int l=0; l<=depth_; l++) {
// UC2E, DC2E
file.read(reinterpret_cast<char*>(&matrix_UC2E_U[l][0]), size*sizeof(T));
file.read(reinterpret_cast<char*>(&matrix_UC2E_V[l][0]), size*sizeof(T));
file.read(reinterpret_cast<char*>(&matrix_DC2E_U[l][0]), size*sizeof(T));
file.read(reinterpret_cast<char*>(&matrix_DC2E_V[l][0]), size*sizeof(T));
// M2M, L2L
for (auto& vec : matrix_M2M[l]) {
file.read(reinterpret_cast<char*>(&vec[0]), size*sizeof(T));
}
for (auto& vec : matrix_L2L[l]) {
file.read(reinterpret_cast<char*>(&vec[0]), size*sizeof(T));
}
}
this->is_precomputed = true;
}
}
}
file.close();
}
//! Precompute
void precompute() {
initialize_matrix();
load_matrix();
if (!this->is_precomputed) {
precompute_check2equiv();
precompute_M2M();
std::remove(this->filename.c_str());
std::ofstream file(this->filename, std::ofstream::binary);
save_matrix(file);
precompute_M2L(file);
file.close();
}
}
//! P2M operator
void P2M(NodePtrs<T>& leafs) {
int& nsurf_ = this->nsurf;
real_t c[3] = {0,0,0};
std::vector<RealVec> up_check_surf;
up_check_surf.resize(this->depth+1);
for (int level=0; level<=this->depth; level++) {
up_check_surf[level].resize(nsurf_*3);
up_check_surf[level] = surface(this->p, this->r0, level, c, 2.95);
}
#pragma omp parallel for
for (size_t i=0; i<leafs.size(); i++) {
Node<T>* leaf = leafs[i];
int level = leaf->level;
// calculate upward check potential induced by sources' charges
RealVec check_coord(nsurf_*3);
for (int k=0; k<nsurf_; k++) {
check_coord[3*k+0] = up_check_surf[level][3*k+0] + leaf->x[0];
check_coord[3*k+1] = up_check_surf[level][3*k+1] + leaf->x[1];
check_coord[3*k+2] = up_check_surf[level][3*k+2] + leaf->x[2];
}
this->potential_P2P(leaf->src_coord, leaf->src_value,
check_coord, leaf->up_equiv);
std::vector<T> buffer(nsurf_);
std::vector<T> equiv(nsurf_);
gemv(nsurf_, nsurf_, &(matrix_UC2E_U[level][0]), &(leaf->up_equiv[0]), &buffer[0]);
gemv(nsurf_, nsurf_, &(matrix_UC2E_V[level][0]), &buffer[0], &equiv[0]);
for (int k=0; k<nsurf_; k++)
leaf->up_equiv[k] = equiv[k];
}
}
//! L2P operator
void L2P(NodePtrs<T>& leafs) {
int& nsurf_ = this->nsurf;
real_t c[3] = {0,0,0};
std::vector<RealVec> dn_equiv_surf;
dn_equiv_surf.resize(this->depth+1);
for (int level=0; level<=this->depth; level++) {
dn_equiv_surf[level].resize(nsurf_*3);
dn_equiv_surf[level] = surface(this->p, this->r0, level, c, 2.95);
}
#pragma omp parallel for
for (size_t i=0; i<leafs.size(); i++) {
Node<T>* leaf = leafs[i];
int level = leaf->level;
// down check surface potential -> equivalent surface charge
std::vector<T> buffer(nsurf_);
std::vector<T> equiv(nsurf_);
gemv(nsurf_, nsurf_, &(matrix_DC2E_U[level][0]), &(leaf->dn_equiv[0]), &buffer[0]);
gemv(nsurf_, nsurf_, &(matrix_DC2E_V[level][0]), &buffer[0], &equiv[0]);
for (int k=0; k<nsurf_; k++)
leaf->dn_equiv[k] = equiv[k];
// equivalent surface charge -> target potential
RealVec equiv_coord(nsurf_*3);
for (int k=0; k<nsurf_; k++) {
equiv_coord[3*k+0] = dn_equiv_surf[level][3*k+0] + leaf->x[0];
equiv_coord[3*k+1] = dn_equiv_surf[level][3*k+1] + leaf->x[1];
equiv_coord[3*k+2] = dn_equiv_surf[level][3*k+2] + leaf->x[2];
}
this->gradient_P2P(equiv_coord, leaf->dn_equiv,
leaf->trg_coord, leaf->trg_value);
}
}
//! M2M operator
void M2M(Node<T>* node) {
int& nsurf_ = this->nsurf;
if (node->is_leaf) return;
for (int octant=0; octant<8; octant++) {
if (node->children[octant])
#pragma omp task untied
M2M(node->children[octant]);
}
#pragma omp taskwait
for (int octant=0; octant<8; octant++) {
if (node->children[octant]) {
Node<T>* child = node->children[octant];
std::vector<T> buffer(nsurf_);
int level = node->level;
gemv(nsurf_, nsurf_, &(matrix_M2M[level][octant][0]), &child->up_equiv[0], &buffer[0]);
for (int k=0; k<nsurf_; k++) {
node->up_equiv[k] += buffer[k];
}
}
}
}
//! L2L operator
void L2L(Node<T>* node) {
int& nsurf_ = this->nsurf;
if (node->is_leaf) return;
for (int octant=0; octant<8; octant++) {
if (node->children[octant]) {
Node<T>* child = node->children[octant];
std::vector<T> buffer(nsurf_);
int level = node->level;
gemv(nsurf_, nsurf_, &(matrix_L2L[level][octant][0]), &node->dn_equiv[0], &buffer[0]);
for (int k=0; k<nsurf_; k++)
child->dn_equiv[k] += buffer[k];
}
}
for (int octant=0; octant<8; octant++) {
if (node->children[octant])
#pragma omp task untied
L2L(node->children[octant]);
}
#pragma omp taskwait
}
void M2L_setup(NodePtrs<T>& nonleafs) {
int& nsurf_ = this->nsurf;
int& depth_ = this->depth;
int npos = REL_COORD[M2L_Type].size(); // number of M2L relative positions
m2ldata.resize(depth_); // initialize m2ldata
// construct lists of target nodes for M2L operator at each level
std::vector<NodePtrs<T>> trg_nodes(depth_);
for (size_t i=0; i<nonleafs.size(); i++) {
trg_nodes[nonleafs[i]->level].push_back(nonleafs[i]);
}
// prepare for m2ldata for each level
for (int l=0; l<depth_; l++) {
// construct M2L source nodes for current level
std::set<Node<T>*> src_nodes_;
for (size_t i=0; i<trg_nodes[l].size(); i++) {
NodePtrs<T>& M2L_list = trg_nodes[l][i]->M2L_list;
for (int k=0; k<npos; k++) {
if (M2L_list[k])
src_nodes_.insert(M2L_list[k]);
}
}
NodePtrs<T> src_nodes;
auto it = src_nodes_.begin();
for (; it!=src_nodes_.end(); it++) {
src_nodes.push_back(*it);
}
// prepare the indices of src_nodes & trg_nodes in all_up_equiv & all_dn_equiv
std::vector<size_t> fft_offset(src_nodes.size()); // displacement in all_up_equiv
std::vector<size_t> ifft_offset(trg_nodes[l].size()); // displacement in all_dn_equiv
for (size_t i=0; i<src_nodes.size(); i++) {
fft_offset[i] = src_nodes[i]->children[0]->idx * nsurf_;
}
for (size_t i=0; i<trg_nodes[l].size(); i++) {
ifft_offset[i] = trg_nodes[l][i]->children[0]->idx * nsurf_;
}
// calculate interaction_offset_f & interaction_count_offset
std::vector<size_t> interaction_offset_f;
std::vector<size_t> interaction_count_offset;
for (size_t i=0; i<src_nodes.size(); i++) {
src_nodes[i]->idx_M2L = i; // node_id: node's index in src_nodes list
}
size_t nblk_trg = trg_nodes[l].size() * sizeof(real_t) / CACHE_SIZE;
if (nblk_trg==0) nblk_trg = 1;
size_t interaction_count_offset_ = 0;
size_t fft_size = 2 * NCHILD * this->nfreq;
for (size_t iblk_trg=0; iblk_trg<nblk_trg; iblk_trg++) {
size_t blk_start = (trg_nodes[l].size()* iblk_trg ) / nblk_trg;
size_t blk_end = (trg_nodes[l].size()*(iblk_trg+1)) / nblk_trg;
for (int k=0; k<npos; k++) {
for (size_t i=blk_start; i<blk_end; i++) {
NodePtrs<T>& M2L_list = trg_nodes[l][i]->M2L_list;
if (M2L_list[k]) {
interaction_offset_f.push_back(M2L_list[k]->idx_M2L * fft_size); // src_node's displacement in fft_in
interaction_offset_f.push_back( i * fft_size); // trg_node's displacement in fft_out
interaction_count_offset_++;
}
}
interaction_count_offset.push_back(interaction_count_offset_);
}
}
m2ldata[l].fft_offset = fft_offset;
m2ldata[l].ifft_offset = ifft_offset;
m2ldata[l].interaction_offset_f = interaction_offset_f;
m2ldata[l].interaction_count_offset = interaction_count_offset;
}
}
void hadamard_product(std::vector<size_t>& interaction_count_offset, std::vector<size_t>& interaction_offset_f,
AlignedVec& fft_in, AlignedVec& fft_out, std::vector<AlignedVec>& matrix_M2L) {
size_t fft_size = 2 * NCHILD * this->nfreq;
AlignedVec zero_vec0(fft_size, 0.);
AlignedVec zero_vec1(fft_size, 0.);
size_t npos = matrix_M2L.size();
size_t nblk_inter = interaction_count_offset.size(); // num of blocks of interactions
size_t nblk_trg = nblk_inter / npos; // num of blocks based on trg_nodes
int BLOCK_SIZE = CACHE_SIZE * 2 / sizeof(real_t);
std::vector<real_t*> IN_(BLOCK_SIZE*nblk_inter);
std::vector<real_t*> OUT_(BLOCK_SIZE*nblk_inter);
// initialize fft_out with zero
#pragma omp parallel for
      for (size_t i=0; i<fft_out.size()/fft_size; ++i) {
std::memset(fft_out.data()+i*fft_size, 0, fft_size*sizeof(real_t));
}
#pragma omp parallel for
for (size_t iblk_inter=0; iblk_inter<nblk_inter; iblk_inter++) {
size_t interaction_count_offset0 = (iblk_inter==0 ? 0 : interaction_count_offset[iblk_inter-1]);
size_t interaction_count_offset1 = interaction_count_offset[iblk_inter];
size_t interaction_count = interaction_count_offset1 - interaction_count_offset0;
for (size_t j=0; j<interaction_count; j++) {
IN_ [BLOCK_SIZE*iblk_inter+j] = &fft_in[interaction_offset_f[(interaction_count_offset0+j)*2+0]];
OUT_[BLOCK_SIZE*iblk_inter+j] = &fft_out[interaction_offset_f[(interaction_count_offset0+j)*2+1]];
}
IN_ [BLOCK_SIZE*iblk_inter+interaction_count] = &zero_vec0[0];
OUT_[BLOCK_SIZE*iblk_inter+interaction_count] = &zero_vec1[0];
}
for (size_t iblk_trg=0; iblk_trg<nblk_trg; iblk_trg++) {
#pragma omp parallel for
for (int k=0; k<this->nfreq; k++) {
for (size_t ipos=0; ipos<npos; ipos++) {
size_t iblk_inter = iblk_trg*npos + ipos;
size_t interaction_count_offset0 = (iblk_inter==0 ? 0 : interaction_count_offset[iblk_inter-1]);
size_t interaction_count_offset1 = interaction_count_offset[iblk_inter];
size_t interaction_count = interaction_count_offset1 - interaction_count_offset0;
real_t** IN = &IN_[BLOCK_SIZE*iblk_inter];
real_t** OUT= &OUT_[BLOCK_SIZE*iblk_inter];
real_t* M = &matrix_M2L[ipos][k*2*NCHILD*NCHILD]; // k-th freq's (row) offset in matrix_M2L
for (size_t j=0; j<interaction_count; j+=2) {
real_t* M_ = M;
real_t* IN0 = IN [j+0] + k*NCHILD*2; // go to k-th freq chunk
real_t* IN1 = IN [j+1] + k*NCHILD*2;
real_t* OUT0 = OUT[j+0] + k*NCHILD*2;
real_t* OUT1 = OUT[j+1] + k*NCHILD*2;
matmult_8x8x2(M_, IN0, IN1, OUT0, OUT1);
}
}
}
}
}
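    // Layout note for the loops above: fft_in/fft_out hold, per node, nfreq
    // frequency chunks of NCHILD complex values (2 reals each). For every
    // frequency k, matmult_8x8x2 multiplies the k-th 8x8 complex block of
    // matrix_M2L with two nodes' chunks at a time, which is why interactions
    // are visited in pairs (j += 2) and padded with zero_vec0/zero_vec1.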
void fft_up_equiv(std::vector<size_t>& fft_offset, std::vector<T>& all_up_equiv, AlignedVec& fft_in) {}
void ifft_dn_check(std::vector<size_t>& ifft_offset, AlignedVec& fft_out, std::vector<T>& all_dn_equiv) {}
void M2L(Nodes<T>& nodes) {
int& nsurf_ = this->nsurf;
int& nfreq_ = this->nfreq;
int fft_size = 2 * NCHILD * nfreq_;
int nnodes = nodes.size();
int npos = REL_COORD[M2L_Type].size(); // number of relative positions
// allocate memory
      std::vector<T> all_up_equiv, all_dn_equiv;
      all_up_equiv.resize(nnodes*nsurf_);  // resize (not reserve) so operator[] access below stays in bounds
      all_dn_equiv.resize(nnodes*nsurf_);
std::vector<AlignedVec> matrix_M2L(npos, AlignedVec(fft_size*NCHILD, 0));
// setup ifstream of M2L precomputation matrix
std::ifstream ifile(this->filename, std::ifstream::binary);
ifile.seekg(0, ifile.end);
size_t fsize = ifile.tellg(); // file size in bytes
size_t msize = NCHILD * NCHILD * nfreq_ * 2 * sizeof(real_t); // size in bytes for each M2L matrix
ifile.seekg(fsize - this->depth*npos*msize, ifile.beg); // go to the start of M2L section
// collect all upward equivalent charges
#pragma omp parallel for collapse(2)
for (int i=0; i<nnodes; ++i) {
for (int j=0; j<nsurf_; ++j) {
all_up_equiv[i*nsurf_+j] = nodes[i].up_equiv[j];
all_dn_equiv[i*nsurf_+j] = nodes[i].dn_equiv[j];
}
}
// FFT-accelerate M2L
for (int l=0; l<this->depth; ++l) {
// load M2L matrix for current level
for (int i=0; i<npos; ++i) {
ifile.read(reinterpret_cast<char*>(matrix_M2L[i].data()), msize);
}
        AlignedVec fft_in, fft_out;
        fft_in.resize(m2ldata[l].fft_offset.size()*fft_size);   // resize so the FFT buffers are valid storage
        fft_out.resize(m2ldata[l].ifft_offset.size()*fft_size);
fft_up_equiv(m2ldata[l].fft_offset, all_up_equiv, fft_in);
hadamard_product(m2ldata[l].interaction_count_offset,
m2ldata[l].interaction_offset_f,
fft_in, fft_out, matrix_M2L);
ifft_dn_check(m2ldata[l].ifft_offset, fft_out, all_dn_equiv);
}
// update all downward check potentials
#pragma omp parallel for collapse(2)
for (int i=0; i<nnodes; ++i) {
for (int j=0; j<nsurf_; ++j) {
nodes[i].dn_equiv[j] = all_dn_equiv[i*nsurf_+j];
}
}
ifile.close(); // close ifstream
}
};
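  /* A rough sketch of how these operators are typically driven for one
     evaluation pass (the actual traversal lives in FmmBase; `fmm', `leafs',
     `nonleafs', `nodes' and `root' are placeholder names):

       fmm.precompute();        // build or load UC2E/DC2E, M2M/L2L and M2L tables
       fmm.P2M(leafs);          // sources -> upward equivalent charges
       fmm.M2M(root);           // aggregate upward equivalent charges
       fmm.M2L_setup(nonleafs);
       fmm.M2L(nodes);          // far-field interactions via FFT convolution
       fmm.L2L(root);           // distribute downward equivalent charges
       fmm.L2P(leafs);          // evaluate potentials/gradients at targets
  */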
/** Below are member function specializations
*/
template <>
void Fmm<real_t>::precompute_check2equiv() {
real_t c[3] = {0, 0, 0};
int& nsurf_ = this->nsurf;
#pragma omp parallel for
for (int level=0; level<=this->depth; ++level) {
// compute kernel matrix
RealVec up_check_surf = surface(this->p, this->r0, level, c, 2.95);
RealVec up_equiv_surf = surface(this->p, this->r0, level, c, 1.05);
RealVec matrix_c2e(nsurf_*nsurf_); // UC2UE
this->kernel_matrix(up_check_surf, up_equiv_surf, matrix_c2e);
// svd
RealVec S(nsurf_*nsurf_); // singular values
RealVec U(nsurf_*nsurf_), VT(nsurf_*nsurf_);
svd(nsurf_, nsurf_, &matrix_c2e[0], &S[0], &U[0], &VT[0]);
// pseudo-inverse
real_t max_S = 0;
for (int i=0; i<nsurf_; i++) {
max_S = fabs(S[i*nsurf_+i])>max_S ? fabs(S[i*nsurf_+i]) : max_S;
}
for (int i=0; i<nsurf_; i++) {
S[i*nsurf_+i] = S[i*nsurf_+i]>EPS*max_S*4 ? 1.0/S[i*nsurf_+i] : 0.0;
}
RealVec V = transpose(VT, nsurf_, nsurf_);
matrix_UC2E_U[level] = transpose(U, nsurf_, nsurf_);
gemm(nsurf_, nsurf_, nsurf_, &V[0], &S[0], &(matrix_UC2E_V[level][0]));
matrix_DC2E_U[level] = VT;
gemm(nsurf_, nsurf_, nsurf_, &U[0], &S[0], &(matrix_DC2E_V[level][0]));
}
}
template <>
void Fmm<complex_t>::precompute_check2equiv() {
real_t c[3] = {0, 0, 0};
int& nsurf_ = this->nsurf;
#pragma omp parallel for
for (int level=0; level<=this->depth; ++level) {
// compute kernel matrix
RealVec up_check_surf = surface(this->p, this->r0, level, c, 2.95);
RealVec up_equiv_surf = surface(this->p, this->r0, level, c, 1.05);
ComplexVec matrix_c2e(nsurf_*nsurf_); // UC2UE
this->kernel_matrix(up_check_surf, up_equiv_surf, matrix_c2e);
// svd
RealVec S(nsurf_*nsurf_); // singular values
ComplexVec U(nsurf_*nsurf_), VH(nsurf_*nsurf_);
svd(nsurf_, nsurf_, &matrix_c2e[0], &S[0], &U[0], &VH[0]);
// pseudo-inverse
real_t max_S = 0;
for (int i=0; i<nsurf_; i++) {
max_S = fabs(S[i*nsurf_+i])>max_S ? fabs(S[i*nsurf_+i]) : max_S;
}
for (int i=0; i<nsurf_; i++) {
S[i*nsurf_+i] = S[i*nsurf_+i]>EPS*max_S*4 ? 1.0/S[i*nsurf_+i] : 0.0;
}
ComplexVec S_(nsurf_*nsurf_);
for (size_t i=0; i<S_.size(); i++) { // convert S to complex type
S_[i] = S[i];
}
ComplexVec V = conjugate_transpose(VH, nsurf_, nsurf_);
ComplexVec UH = conjugate_transpose(U, nsurf_, nsurf_);
matrix_UC2E_U[level] = UH;
gemm(nsurf_, nsurf_, nsurf_, &V[0], &S_[0], &(matrix_UC2E_V[level][0]));
matrix_DC2E_U[level] = transpose(V, nsurf_, nsurf_);
ComplexVec UHT = transpose(UH, nsurf_, nsurf_);
gemm(nsurf_, nsurf_, nsurf_, &UHT[0], &S_[0], &(matrix_DC2E_V[level][0]));
}
}
//! member function specialization for real type
template <>
void Fmm<real_t>::precompute_M2L(std::ofstream& file) {
int n1 = this->p * 2;
int& nconv_ = this->nconv;
int& nfreq_ = this->nfreq;
int fft_size = 2 * nfreq_ * NCHILD * NCHILD;
std::vector<RealVec> matrix_M2L_Helper(REL_COORD[M2L_Helper_Type].size(),
RealVec(2*nfreq_));
std::vector<AlignedVec> matrix_M2L(REL_COORD[M2L_Type].size(), AlignedVec(fft_size));
// create fft plan
RealVec fftw_in(nconv_);
RealVec fftw_out(2*nfreq_);
int dim[3] = {n1, n1, n1};
fft_plan plan = fft_plan_dft_r2c(3, dim, fftw_in.data(), reinterpret_cast<fft_complex*>(fftw_out.data()), FFTW_ESTIMATE);
RealVec trg_coord(3,0);
for (int l=1; l<this->depth+1; ++l) {
// compute M2L kernel matrix, perform DFT
#pragma omp parallel for
for (size_t i=0; i<REL_COORD[M2L_Helper_Type].size(); ++i) {
real_t coord[3];
for (int d=0; d<3; d++) {
coord[d] = REL_COORD[M2L_Helper_Type][i][d] * this->r0 * powf(0.5, l-1); // relative coords
}
RealVec conv_coord = convolution_grid(this->p, this->r0, l, coord); // convolution grid
RealVec conv_value(nconv_); // potentials on convolution grid
this->kernel_matrix(conv_coord, trg_coord, conv_value);
fft_execute_dft_r2c(plan, conv_value.data(), reinterpret_cast<fft_complex*>(matrix_M2L_Helper[i].data()));
}
// convert M2L_Helper to M2L and reorder data layout to improve locality
#pragma omp parallel for
for (size_t i=0; i<REL_COORD[M2L_Type].size(); ++i) {
for (int j=0; j<NCHILD*NCHILD; j++) { // loop over child's relative positions
int child_rel_idx = M2L_INDEX_MAP[i][j];
if (child_rel_idx != -1) {
for (int k=0; k<nfreq_; k++) { // loop over frequencies
int new_idx = k*(2*NCHILD*NCHILD) + 2*j;
matrix_M2L[i][new_idx+0] = matrix_M2L_Helper[child_rel_idx][k*2+0] / nconv_; // real
matrix_M2L[i][new_idx+1] = matrix_M2L_Helper[child_rel_idx][k*2+1] / nconv_; // imag
}
}
}
}
// write to file
for(auto& vec : matrix_M2L) {
file.write(reinterpret_cast<char*>(vec.data()), fft_size*sizeof(real_t));
}
}
// destroy fftw plan
fft_destroy_plan(plan);
}
//! member function specialization for complex type
template <>
void Fmm<complex_t>::precompute_M2L(std::ofstream& file) {
int n1 = this->p * 2;
int& nconv_ = this->nconv;
int& nfreq_ = this->nfreq;
int fft_size = 2 * nfreq_ * NCHILD * NCHILD;
std::vector<RealVec> matrix_M2L_Helper(REL_COORD[M2L_Helper_Type].size(),
RealVec(2*nfreq_));
std::vector<AlignedVec> matrix_M2L(REL_COORD[M2L_Type].size(), AlignedVec(fft_size));
// create fft plan
RealVec fftw_in(nconv_);
RealVec fftw_out(2*nfreq_);
int dim[3] = {n1, n1, n1};
fft_plan plan = fft_plan_dft(3, dim,
reinterpret_cast<fft_complex*>(fftw_in.data()),
reinterpret_cast<fft_complex*>(fftw_out.data()),
FFTW_FORWARD, FFTW_ESTIMATE);
RealVec trg_coord(3,0);
for (int l=1; l<this->depth+1; ++l) {
// compute M2L kernel matrix, perform DFT
#pragma omp parallel for
for (size_t i=0; i<REL_COORD[M2L_Helper_Type].size(); ++i) {
real_t coord[3];
for (int d=0; d<3; d++) {
coord[d] = REL_COORD[M2L_Helper_Type][i][d] * this->r0 * powf(0.5, l-1); // relative coords
}
RealVec conv_coord = convolution_grid(this->p, this->r0, l, coord); // convolution grid
ComplexVec conv_value(nconv_); // potentials on convolution grid
this->kernel_matrix(conv_coord, trg_coord, conv_value);
fft_execute_dft(plan, reinterpret_cast<fft_complex*>(conv_value.data()),
reinterpret_cast<fft_complex*>(matrix_M2L_Helper[i].data()));
}
// convert M2L_Helper to M2L and reorder data layout to improve locality
#pragma omp parallel for
for (size_t i=0; i<REL_COORD[M2L_Type].size(); ++i) {
for (int j=0; j<NCHILD*NCHILD; j++) { // loop over child's relative positions
int child_rel_idx = M2L_INDEX_MAP[i][j];
if (child_rel_idx != -1) {
for (int k=0; k<nfreq_; k++) { // loop over frequencies
int new_idx = k*(2*NCHILD*NCHILD) + 2*j;
matrix_M2L[i][new_idx+0] = matrix_M2L_Helper[child_rel_idx][k*2+0] / nconv_; // real
matrix_M2L[i][new_idx+1] = matrix_M2L_Helper[child_rel_idx][k*2+1] / nconv_; // imag
}
}
}
}
// write to file
for(auto& vec : matrix_M2L) {
file.write(reinterpret_cast<char*>(vec.data()), fft_size*sizeof(real_t));
}
}
// destroy fftw plan
fft_destroy_plan(plan);
}
template <>
void Fmm<real_t>::fft_up_equiv(std::vector<size_t>& fft_offset, RealVec& all_up_equiv, AlignedVec& fft_in) {
int& nsurf_ = this->nsurf;
int& nconv_ = this->nconv;
int& nfreq_ = this->nfreq;
int n1 = this->p * 2;
auto map = generate_surf2conv_up(p);
size_t fft_size = 2 * NCHILD * nfreq_;
AlignedVec fftw_in(nconv_ * NCHILD);
AlignedVec fftw_out(fft_size);
int dim[3] = {n1, n1, n1};
fft_plan plan = fft_plan_many_dft_r2c(3, dim, NCHILD,
(real_t*)&fftw_in[0], nullptr, 1, nconv_,
(fft_complex*)(&fftw_out[0]), nullptr, 1, nfreq_,
FFTW_ESTIMATE);
#pragma omp parallel for
for (size_t node_idx=0; node_idx<fft_offset.size(); node_idx++) {
RealVec buffer(fft_size, 0);
RealVec equiv_t(NCHILD*nconv_, 0.);
real_t* up_equiv = &all_up_equiv[fft_offset[node_idx]]; // offset ptr of node's 8 child's up_equiv in all_up_equiv, size=8*nsurf_
real_t* up_equiv_f = &fft_in[fft_size*node_idx]; // offset ptr of node_idx in fft_in vector, size=fftsize
for (int k=0; k<nsurf_; k++) {
size_t idx = map[k];
for (int j=0; j<NCHILD; j++)
equiv_t[idx+j*nconv_] = up_equiv[j*nsurf_+k];
}
fft_execute_dft_r2c(plan, &equiv_t[0], (fft_complex*)&buffer[0]);
for (int k=0; k<nfreq_; k++) {
for (int j=0; j<NCHILD; j++) {
up_equiv_f[2*(NCHILD*k+j)+0] = buffer[2*(nfreq_*j+k)+0];
up_equiv_f[2*(NCHILD*k+j)+1] = buffer[2*(nfreq_*j+k)+1];
}
}
}
fft_destroy_plan(plan);
}
template <>
void Fmm<complex_t>::fft_up_equiv(std::vector<size_t>& fft_offset, ComplexVec& all_up_equiv, AlignedVec& fft_in) {
int& nsurf_ = this->nsurf;
int& nconv_ = this->nconv;
int& nfreq_ = this->nfreq;
int n1 = this->p * 2;
auto map = generate_surf2conv_up(p);
size_t fft_size = 2 * NCHILD * nfreq_;
ComplexVec fftw_in(nconv_ * NCHILD);
AlignedVec fftw_out(fft_size);
int dim[3] = {n1, n1, n1};
fft_plan plan = fft_plan_many_dft(3, dim, NCHILD, reinterpret_cast<fft_complex*>(&fftw_in[0]),
nullptr, 1, nconv_, (fft_complex*)(&fftw_out[0]), nullptr, 1, nfreq_,
FFTW_FORWARD, FFTW_ESTIMATE);
#pragma omp parallel for
for (size_t node_idx=0; node_idx<fft_offset.size(); node_idx++) {
RealVec buffer(fft_size, 0);
ComplexVec equiv_t(NCHILD*nconv_, complex_t(0.,0.));
complex_t* up_equiv = &all_up_equiv[fft_offset[node_idx]]; // offset ptr of node's 8 child's up_equiv in all_up_equiv, size=8*nsurf_
real_t* up_equiv_f = &fft_in[fft_size*node_idx]; // offset ptr of node_idx in fft_in vector, size=fftsize
for (int k=0; k<nsurf_; k++) {
size_t idx = map[k];
for (int j=0; j<NCHILD; j++)
equiv_t[idx+j*nconv_] = up_equiv[j*nsurf_+k];
}
fft_execute_dft(plan, reinterpret_cast<fft_complex*>(&equiv_t[0]), (fft_complex*)&buffer[0]);
for (int k=0; k<nfreq_; k++) {
for (int j=0; j<NCHILD; j++) {
up_equiv_f[2*(NCHILD*k+j)+0] = buffer[2*(nfreq_*j+k)+0];
up_equiv_f[2*(NCHILD*k+j)+1] = buffer[2*(nfreq_*j+k)+1];
}
}
}
fft_destroy_plan(plan);
}
template <>
void Fmm<real_t>::ifft_dn_check(std::vector<size_t>& ifft_offset, AlignedVec& fft_out, RealVec& all_dn_equiv) {
int& nsurf_ = this->nsurf;
int& nconv_ = this->nconv;
int& nfreq_ = this->nfreq;
int n1 = this->p * 2;
auto map = generate_surf2conv_dn(p);
size_t fft_size = 2 * NCHILD * nfreq_;
AlignedVec fftw_in(fft_size);
AlignedVec fftw_out(nconv_ * NCHILD);
int dim[3] = {n1, n1, n1};
fft_plan plan = fft_plan_many_dft_c2r(3, dim, NCHILD,
(fft_complex*)(&fftw_in[0]), nullptr, 1, nfreq_,
(real_t*)(&fftw_out[0]), nullptr, 1, nconv_,
FFTW_ESTIMATE);
#pragma omp parallel for
for (size_t node_idx=0; node_idx<ifft_offset.size(); node_idx++) {
RealVec buffer0(fft_size, 0);
RealVec buffer1(fft_size, 0);
real_t* dn_check_f = &fft_out[fft_size*node_idx]; // offset ptr for node_idx in fft_out vector, size=fftsize
real_t* dn_equiv = &all_dn_equiv[ifft_offset[node_idx]]; // offset ptr for node_idx's child's dn_equiv in all_dn_equiv, size=numChilds * nsurf_
for (int k=0; k<nfreq_; k++)
for (int j=0; j<NCHILD; j++) {
buffer0[2*(nfreq_*j+k)+0] = dn_check_f[2*(NCHILD*k+j)+0];
buffer0[2*(nfreq_*j+k)+1] = dn_check_f[2*(NCHILD*k+j)+1];
}
fft_execute_dft_c2r(plan, (fft_complex*)&buffer0[0], (real_t*)(&buffer1[0]));
for (int k=0; k<nsurf_; k++) {
size_t idx = map[k];
for (int j=0; j<NCHILD; j++)
dn_equiv[nsurf_*j+k] += buffer1[idx+j*nconv_];
}
}
fft_destroy_plan(plan);
}
template <>
void Fmm<complex_t>::ifft_dn_check(std::vector<size_t>& ifft_offset, AlignedVec& fft_out, ComplexVec& all_dn_equiv) {
int& nsurf_ = this->nsurf;
int& nconv_ = this->nconv;
int& nfreq_ = this->nfreq;
int n1 = this->p * 2;
auto map = generate_surf2conv_dn(p);
size_t fft_size = 2 * NCHILD * nfreq_;
AlignedVec fftw_in(fft_size);
ComplexVec fftw_out(nconv_*NCHILD);
int dim[3] = {n1, n1, n1};
fft_plan plan = fft_plan_many_dft(3, dim, NCHILD, (fft_complex*)(&fftw_in[0]), nullptr, 1, nfreq_,
reinterpret_cast<fft_complex*>(&fftw_out[0]), nullptr, 1, nconv_,
FFTW_BACKWARD, FFTW_ESTIMATE);
#pragma omp parallel for
for (size_t node_idx=0; node_idx<ifft_offset.size(); node_idx++) {
RealVec buffer0(fft_size, 0);
ComplexVec buffer1(NCHILD*nconv_, 0);
real_t* dn_check_f = &fft_out[fft_size*node_idx];
complex_t* dn_equiv = &all_dn_equiv[ifft_offset[node_idx]];
for (int k=0; k<nfreq_; k++)
for (int j=0; j<NCHILD; j++) {
buffer0[2*(nfreq_*j+k)+0] = dn_check_f[2*(NCHILD*k+j)+0];
buffer0[2*(nfreq_*j+k)+1] = dn_check_f[2*(NCHILD*k+j)+1];
}
fft_execute_dft(plan, (fft_complex*)&buffer0[0], reinterpret_cast<fft_complex*>(&buffer1[0]));
for (int k=0; k<nsurf_; k++) {
size_t idx = map[k];
for (int j=0; j<NCHILD; j++)
dn_equiv[nsurf_*j+k]+=buffer1[idx+j*nconv_];
}
}
fft_destroy_plan(plan);
}
} // end namespace
#endif
|
sum.h
|
#pragma once
#include <cstdlib>
#include <algorithm>
#include <array>
#include <vector>
#include <omp.h>
#include "_cuda.h"
using namespace std;
template <class T>
T sum(T *x, int N) {
T a = T();
for (int i=0; i<N; i++)
a += x[i];
return a;
}
template <class T, size_t N>
T sum(array<T, N>& x) {
return sum(x.data(), x.size());
}
template <class T>
T sum(vector<T>& x) {
return sum(x.data(), x.size());
}
template <class T>
T sumOmp(T *x, int N) {
T a = T();
#pragma omp parallel for reduction (+:a)
for (int i=0; i<N; i++)
a += x[i];
return a;
}
template <class T, size_t N>
T sumOmp(array<T, N>& x) {
return sumOmp(x.data(), x.size());
}
template <class T>
T sumOmp(vector<T>& x) {
return sumOmp(x.data(), x.size());
}
template <class T>
__device__ void sumKernelReduce(T* a, int N, int i) {
__syncthreads();
for (N=N/2; N>0; N/=2) {
if (i < N) a[i] += a[N+i];
__syncthreads();
}
}
template <class T>
__device__ T sumKernelLoop(T *x, int N, int i, int DI) {
T a = T();
for (; i<N; i+=DI)
a += x[i];
return a;
}
template <class T>
__global__ void sumKernel(T *a, T *x, int N) {
DEFINE(t, b, B, G);
__shared__ T cache[_THREADS];
cache[t] = sumKernelLoop(x, N, B*b+t, G*B);
sumKernelReduce(cache, B, t);
if (t == 0) a[b] = cache[0];
}
template <class T>
T sumCuda(T *x, int N) {
int threads = _THREADS;
int blocks = max(ceilDiv(N, threads), 1024);
size_t X1 = N * sizeof(T);
size_t A1 = blocks * sizeof(T);
T *aPartial = (T*) malloc(A1);
T *xD, *aPartialD;
TRY( cudaMalloc(&xD, X1) );
TRY( cudaMalloc(&aPartialD, A1) );
TRY( cudaMemcpy(xD, x, X1, cudaMemcpyHostToDevice) );
sumKernel<<<blocks, threads>>>(aPartialD, xD, N);
TRY( cudaMemcpy(aPartial, aPartialD, A1, cudaMemcpyDeviceToHost) );
TRY( cudaFree(xD) );
TRY( cudaFree(aPartialD) );
  T a = sum(aPartial, blocks);
  free(aPartial);  // release host-side buffer of per-block partial sums
  return a;
}
template <class T, size_t N>
T sumCuda(array<T, N>& x) {
return sumCuda(x.data(), x.size());
}
template <class T>
T sumCuda(vector<T>& x) {
return sumCuda(x.data(), x.size());
}
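// Example usage (a sketch; assumes this header is compiled with nvcc as CUDA
// C++ and that _cuda.h provides _THREADS, DEFINE, TRY and ceilDiv):
//   std::vector<double> x(1 << 20, 1.0);
//   double s = sum(x);      // sequential baseline
//   double o = sumOmp(x);   // OpenMP parallel-for reduction
//   double c = sumCuda(x);  // per-block partial sums on the GPU, finished on the host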
|
contraction_tests.h
|
/******************************************************************************
* contraction_tests.h
*
* Source of VieCut.
*
******************************************************************************
* Copyright (C) 2017 Alexander Noe <[email protected]>
*
* Published under the MIT license in the LICENSE file.
*****************************************************************************/
#pragma once
#include <algorithm>
#include <atomic>
#include <memory>
#include <utility>
#include <vector>
#include "common/definitions.h"
#include "data_structure/graph_access.h"
#include "tlx/logger.hpp"
#ifdef PARALLEL
#include "parallel/data_structure/union_find.h"
#else
#include "data_structure/union_find.h"
#endif
class tests {
private:
    static void sort3(EdgeWeight& w1, EdgeWeight& w2, EdgeWeight& w3) {
if (w1 < w2) {
if (w2 < w3) {
return;
} else if (w1 < w3) {
std::swap(w2, w3);
} else {
EdgeWeight tmp = std::move(w1);
w1 = std::move(w3);
w3 = std::move(w2);
w2 = std::move(tmp);
}
} else {
if (w1 < w3) {
std::swap(w1, w2);
} else if (w3 < w2) {
std::swap(w1, w3);
} else {
EdgeWeight tmp = std::move(w1);
w1 = std::move(w2);
w2 = std::move(w3);
w3 = std::move(tmp);
}
}
}
public:
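    // prTests12/prTests34 below apply Padberg-Rinaldi style contraction
    // criteria: an edge is contracted when its weight reaches weight_limit
    // (the current mincut estimate), when it carries more than half of an
    // endpoint's weighted degree, or (tests 3/4) when a triangle through a
    // common neighbour satisfies the analogous bounds. The returned union_find
    // groups vertices that may safely be merged.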
template <class GraphPtr>
static union_find prTests12(GraphPtr G,
EdgeWeight weight_limit,
bool find_all_cuts = false) {
union_find uf(G->number_of_nodes());
G->computeDegrees();
// workaround for std::vector<bool> not being usable in parallel
std::vector<uint8_t> contracted(G->number_of_nodes(), false);
NodeID end = G->number_of_nodes();
#pragma omp parallel for schedule(dynamic, 100)
for (NodeID n = 0; n < end; ++n) {
NodeWeight n_wgt = G->getWeightedNodeDegree(n);
for (EdgeID e : G->edges_of(n)) {
auto [tgt, wgt] = G->getEdge(n, e);
NodeWeight tgt_wgt = G->getWeightedNodeDegree(tgt);
if (wgt >= weight_limit) {
contracted[n] = true;
contracted[tgt] = true;
uf.Union(n, tgt);
}
// if we want to find all cuts
// we are not allowed to contract an edge
// when an incident vertex has degree mincut
// (as the singleton cut might be important)
if (!find_all_cuts ||
(n_wgt >= weight_limit && tgt_wgt >= weight_limit)) {
// node degrees change when we contract edges.
// thus, we only use PR 2 or 3
// when the incident vertices haven't been contracted yet
// keeping a data structure with current degrees
// would be too expensive in parallel
if ((2 * wgt) > n_wgt) {
if (__sync_bool_compare_and_swap(&contracted[n],
false, true)) {
contracted[tgt] = true;
uf.Union(n, tgt);
}
} else if ((2 * wgt) > tgt_wgt) {
if (__sync_bool_compare_and_swap(&contracted[tgt],
false, true)) {
contracted[n] = true;
uf.Union(n, tgt);
}
}
}
}
}
return uf;
}
template <class GraphPtr>
static union_find prTests34(GraphPtr G,
EdgeWeight weight_limit,
bool find_all_cuts = false) {
union_find uf(G->number_of_nodes());
G->computeDegrees();
std::vector<uint8_t> finished(G->number_of_nodes(), false);
std::vector<uint8_t> contracted(G->number_of_nodes(), 0);
#pragma omp parallel
{
std::vector<std::pair<NodeID, EdgeID> > marked(
G->number_of_nodes(),
std::make_pair(UNDEFINED_NODE, UNDEFINED_EDGE));
#pragma omp for schedule(dynamic, 100)
for (NodeID n = 0; n < G->number_of_nodes(); ++n) {
if (finished[n])
continue;
finished[n] = true;
for (EdgeID e : G->edges_of(n)) {
NodeID tgt = G->getEdgeTarget(n, e);
if (tgt > n) {
marked[tgt] = std::make_pair(n, e);
}
}
NodeID deg_n = G->getWeightedNodeDegree(n);
for (EdgeID e1 : G->edges_of(n)) {
auto [tgt, w1] = G->getEdge(n, e1);
NodeID deg_tgt = G->getWeightedNodeDegree(tgt);
finished[tgt] = true;
EdgeWeight wgt_sum = w1;
if (tgt > n) {
for (EdgeID e2 : G->edges_of(tgt)) {
NodeID tgt2 = G->getEdgeTarget(tgt, e2);
if (marked[tgt2].second == UNDEFINED_EDGE)
continue;
if (marked[tgt2].first != n)
continue;
EdgeWeight w2 = G->getEdgeWeight(tgt, e2);
EdgeWeight w3 = G->getEdgeWeight(
n, marked[tgt2].second);
wgt_sum += std::min(w2, w3);
bool contractible_one_cut =
!find_all_cuts
&& 2 * (w1 + w3) >= deg_n
&& 2 * (w1 + w2) >= deg_tgt;
bool contractible_all_cuts =
find_all_cuts
&& 2 * (w1 + w3) > deg_n
&& 2 * (w1 + w2) > deg_tgt
&& deg_n >= weight_limit
&& deg_tgt >= weight_limit;
if (contractible_one_cut ||
contractible_all_cuts) {
// node degrees change when we contract edges.
// thus, we only use PR 2 or 3 when the
// incident vertices haven't been contracted yet
// keeping a data structure with current
// degrees would be too expensive in parallel
if (__sync_bool_compare_and_swap(
&contracted[n], false, true)) {
if (__sync_bool_compare_and_swap(
&contracted[tgt], false, true)) {
uf.Union(n, tgt);
break;
}
}
}
}
if (wgt_sum >= weight_limit) {
contracted[n] = true;
contracted[tgt] = true;
uf.Union(n, tgt);
}
marked[tgt] = std::make_pair(UNDEFINED_NODE,
UNDEFINED_EDGE);
}
}
}
}
return uf;
}
static void findHeavyEdges(graphAccessPtr G,
union_find* uf,
EdgeWeight weight_limit) {
#pragma omp parallel for schedule(guided)
for (NodeID n = 0; n < G->number_of_nodes(); ++n) {
for (EdgeID e : G->edges_of(n)) {
if (G->getEdgeWeight(e) > weight_limit) {
if (!uf->SameSet(n, G->getEdgeTarget(e)))
uf->Union(n, G->getEdgeTarget(e));
}
}
}
}
static void findHeavyTriangles(graphAccessPtr G,
union_find* uf,
EdgeWeight weight_limit) {
#pragma omp parallel
{
std::vector<bool> marked(G->number_of_nodes(), false);
#pragma omp for schedule(guided)
for (NodeID n = 0; n < G->number_of_nodes(); ++n) {
for (EdgeID e : G->edges_of(n)) {
NodeID tgt = G->getEdgeTarget(e);
if (tgt > n) {
marked[tgt] = true;
}
}
for (EdgeID e1 : G->edges_of(n)) {
NodeID tgt = G->getEdgeTarget(e1);
if (tgt > n) {
for (EdgeID e2 : G->edges_of(tgt)) {
NodeID tgt2 = G->getEdgeTarget(e2);
if (marked[tgt2]) {
for (EdgeID e3 : G->edges_of(n)) {
if (G->getEdgeTarget(e3) == tgt2) {
EdgeWeight w1 = G->getEdgeWeight(e1);
EdgeWeight w2 = G->getEdgeWeight(e2);
EdgeWeight w3 = G->getEdgeWeight(e3);
sort3(w1, w2, w3);
if (w1 + w2 > weight_limit) {
#pragma omp critical
{
uf->Union(n, tgt);
uf->Union(n, tgt2);
}
}
break;
}
}
}
}
marked[tgt] = false;
}
}
}
}
}
};
|
schur_eliminator_impl.h
|
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: [email protected] (Sameer Agarwal)
//
// TODO(sameeragarwal): row_block_counter can perhaps be replaced by
// Chunk::start ?
#ifndef CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
#define CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
// Eigen has an internal threshold switching between different matrix
// multiplication algorithms. In particular for matrices larger than
// EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD it uses a cache friendly
// matrix matrix product algorithm that has a higher setup cost. For
// matrix sizes close to this threshold, especially when the matrices
// are thin and long, the default choice may not be optimal. This is
// the case for us, as the default choice caused a 30% performance
// regression when we moved from Eigen2 to Eigen3.
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#include <algorithm>
#include <map>
#include "ceres/block_random_access_matrix.h"
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/invert_psd_matrix.h"
#include "ceres/map_util.h"
#include "ceres/schur_eliminator.h"
#include "ceres/scoped_thread_token.h"
#include "ceres/small_blas.h"
#include "ceres/stl_util.h"
#include "ceres/thread_token_provider.h"
#include "Eigen/Dense"
#include "glog/logging.h"
#ifdef CERES_USE_TBB
#include <tbb/parallel_for.h>
#include <tbb/task_scheduler_init.h>
#endif
namespace ceres {
namespace internal {
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::~SchurEliminator() {
STLDeleteElements(&rhs_locks_);
}
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::Init(
int num_eliminate_blocks,
bool assume_full_rank_ete,
const CompressedRowBlockStructure* bs) {
CHECK_GT(num_eliminate_blocks, 0)
<< "SchurComplementSolver cannot be initialized with "
<< "num_eliminate_blocks = 0.";
num_eliminate_blocks_ = num_eliminate_blocks;
assume_full_rank_ete_ = assume_full_rank_ete;
const int num_col_blocks = bs->cols.size();
const int num_row_blocks = bs->rows.size();
buffer_size_ = 1;
chunks_.clear();
lhs_row_layout_.clear();
int lhs_num_rows = 0;
// Add a map object for each block in the reduced linear system
// and build the row/column block structure of the reduced linear
// system.
lhs_row_layout_.resize(num_col_blocks - num_eliminate_blocks_);
for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
lhs_row_layout_[i - num_eliminate_blocks_] = lhs_num_rows;
lhs_num_rows += bs->cols[i].size;
}
int r = 0;
// Iterate over the row blocks of A, and detect the chunks. The
// matrix should already have been ordered so that all rows
// containing the same y block are vertically contiguous. Along
// the way also compute the amount of space each chunk will need
// to perform the elimination.
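// For example (an illustrative sketch): if rows 0..3 all start with
// e-block 5 and row 4 starts with e-block 6, then rows 0..3 form a single
// chunk with chunk.start = 0 and chunk.size = 4, and the scan resumes at
// r = 4.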
while (r < num_row_blocks) {
const int chunk_block_id = bs->rows[r].cells.front().block_id;
if (chunk_block_id >= num_eliminate_blocks_) {
break;
}
chunks_.push_back(Chunk());
Chunk& chunk = chunks_.back();
chunk.size = 0;
chunk.start = r;
int buffer_size = 0;
const int e_block_size = bs->cols[chunk_block_id].size;
// Add to the chunk until the first block in the row is
// different than the one in the first row for the chunk.
while (r + chunk.size < num_row_blocks) {
const CompressedRow& row = bs->rows[r + chunk.size];
if (row.cells.front().block_id != chunk_block_id) {
break;
}
// Iterate over the blocks in the row, ignoring the first
// block since it is the one to be eliminated.
for (int c = 1; c < row.cells.size(); ++c) {
const Cell& cell = row.cells[c];
if (InsertIfNotPresent(
&(chunk.buffer_layout), cell.block_id, buffer_size)) {
buffer_size += e_block_size * bs->cols[cell.block_id].size;
}
}
buffer_size_ = std::max(buffer_size, buffer_size_);
++chunk.size;
}
CHECK_GT(chunk.size, 0);
r += chunk.size;
}
const Chunk& chunk = chunks_.back();
uneliminated_row_begins_ = chunk.start + chunk.size;
if (num_threads_ > 1) {
random_shuffle(chunks_.begin(), chunks_.end());
}
buffer_.reset(new double[buffer_size_ * num_threads_]);
// chunk_outer_product_buffer_ only needs to store e_block_size *
// f_block_size, which is always less than buffer_size_, so we just
// allocate buffer_size_ per thread.
chunk_outer_product_buffer_.reset(new double[buffer_size_ * num_threads_]);
STLDeleteElements(&rhs_locks_);
rhs_locks_.resize(num_col_blocks - num_eliminate_blocks_);
for (int i = 0; i < num_col_blocks - num_eliminate_blocks_; ++i) {
rhs_locks_[i] = new Mutex;
}
}
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
Eliminate(const BlockSparseMatrix* A,
const double* b,
const double* D,
BlockRandomAccessMatrix* lhs,
double* rhs) {
if (lhs->num_rows() > 0) {
lhs->SetZero();
VectorRef(rhs, lhs->num_rows()).setZero();
}
const CompressedRowBlockStructure* bs = A->block_structure();
const int num_col_blocks = bs->cols.size();
// Add the diagonal to the schur complement.
if (D != NULL) {
#ifdef CERES_USE_OPENMP
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
#endif // CERES_USE_OPENMP
#ifndef CERES_USE_TBB
for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
#else
tbb::task_scheduler_init tbb_task_scheduler_init(num_threads_);
tbb::parallel_for(num_eliminate_blocks_, num_col_blocks, [&](int i) {
#endif // !CERES_USE_TBB
const int block_id = i - num_eliminate_blocks_;
int r, c, row_stride, col_stride;
CellInfo* cell_info = lhs->GetCell(block_id, block_id,
&r, &c,
&row_stride, &col_stride);
if (cell_info != NULL) {
const int block_size = bs->cols[i].size;
typename EigenTypes<Eigen::Dynamic>::ConstVectorRef
diag(D + bs->cols[i].position, block_size);
CeresMutexLock l(&cell_info->m);
MatrixRef m(cell_info->values, row_stride, col_stride);
m.block(r, c, block_size, block_size).diagonal()
+= diag.array().square().matrix();
}
}
#ifdef CERES_USE_TBB
);
#endif // CERES_USE_TBB
}
ThreadTokenProvider thread_token_provider(num_threads_);
#ifdef CERES_USE_OPENMP
// Eliminate y blocks one chunk at a time. For each chunk, compute
// the entries of the normal equations and the gradient vector block
// corresponding to the y block and then apply Gaussian elimination
// to them. The matrix ete stores the normal matrix corresponding to
// the block being eliminated and array buffer_ contains the
// non-zero blocks in the row corresponding to this y block in the
// normal equations. This computation is done in
// ChunkDiagonalBlockAndGradient. UpdateRhs then applies Gaussian
// elimination to the rhs of the normal equations, updating the rhs
// of the reduced linear system by modifying rhs blocks for all the
// z blocks that share a row block/residual term with the y
// block. EliminateRowOuterProduct does the corresponding operation
// for the lhs of the reduced linear system.
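// In equations (an illustrative sketch of the per-chunk update performed
// below; the symbols E_k, F_k, b_k are not named in the code):
//
//   ete    = E_k' E_k                  (plus the D_k^2 term when D != NULL)
//   buffer = E_k' F_k
//   g      = E_k' b_k
//   S     += F_k' F_k - buffer' ete^{-1} buffer
//   rhs   += F_k' b_k - buffer' ete^{-1} g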
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
#endif // CERES_USE_OPENMP
#ifndef CERES_USE_TBB
for (int i = 0; i < chunks_.size(); ++i) {
#else
tbb::task_scheduler_init tbb_task_scheduler_init(num_threads_);
tbb::parallel_for(0, int(chunks_.size()), [&](int i) {
#endif // !CERES_USE_TBB
const ScopedThreadToken scoped_thread_token(&thread_token_provider);
const int thread_id = scoped_thread_token.token();
double* buffer = buffer_.get() + thread_id * buffer_size_;
const Chunk& chunk = chunks_[i];
const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
const int e_block_size = bs->cols[e_block_id].size;
VectorRef(buffer, buffer_size_).setZero();
typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
ete(e_block_size, e_block_size);
if (D != NULL) {
const typename EigenTypes<kEBlockSize>::ConstVectorRef
diag(D + bs->cols[e_block_id].position, e_block_size);
ete = diag.array().square().matrix().asDiagonal();
} else {
ete.setZero();
}
FixedArray<double, 8> g(e_block_size);
typename EigenTypes<kEBlockSize>::VectorRef gref(g.get(), e_block_size);
gref.setZero();
// We are going to be computing
//
// S += F'F - F'E(E'E)^{-1}E'F
//
// for each Chunk. The computation is broken down into a number of
// function calls as below.
// Compute the outer product of the e_blocks with themselves (ete
// = E'E). Compute the product of the e_blocks with the
// corresponding f_blocks (buffer = E'F), the gradient of the terms
// in this chunk (g) and add the outer product of the f_blocks to
// Schur complement (S += F'F).
ChunkDiagonalBlockAndGradient(
chunk, A, b, chunk.start, &ete, g.get(), buffer, lhs);
// Normally one wouldn't compute the inverse explicitly, but
// e_block_size will typically be a small number like 3, in
// which case it's much faster to compute the inverse once and
// use it to multiply other matrices/vectors instead of doing a
// Solve call over and over again.
typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix inverse_ete =
InvertPSDMatrix<kEBlockSize>(assume_full_rank_ete_, ete);
// For the current chunk compute and update the rhs of the reduced
// linear system.
//
// rhs = F'b - F'E(E'E)^(-1) E'b
FixedArray<double, 8> inverse_ete_g(e_block_size);
MatrixVectorMultiply<kEBlockSize, kEBlockSize, 0>(
inverse_ete.data(),
e_block_size,
e_block_size,
g.get(),
inverse_ete_g.get());
UpdateRhs(chunk, A, b, chunk.start, inverse_ete_g.get(), rhs);
// S -= F'E(E'E)^{-1}E'F
ChunkOuterProduct(
thread_id, bs, inverse_ete, buffer, chunk.buffer_layout, lhs);
}
#ifdef CERES_USE_TBB
);
#endif // CERES_USE_TBB
// For rows with no e_blocks, the schur complement update reduces to
// S += F'F.
NoEBlockRowsUpdate(A, b, uneliminated_row_begins_, lhs, rhs);
}
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
BackSubstitute(const BlockSparseMatrix* A,
const double* b,
const double* D,
const double* z,
double* y) {
const CompressedRowBlockStructure* bs = A->block_structure();
#ifdef CERES_USE_OPENMP
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
#endif // CERES_USE_OPENMP
#ifndef CERES_USE_TBB
for (int i = 0; i < chunks_.size(); ++i) {
#else
tbb::task_scheduler_init tbb_task_scheduler_init(num_threads_);
tbb::parallel_for(0, int(chunks_.size()), [&](int i) {
#endif // !CERES_USE_TBB
const Chunk& chunk = chunks_[i];
const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
const int e_block_size = bs->cols[e_block_id].size;
double* y_ptr = y + bs->cols[e_block_id].position;
typename EigenTypes<kEBlockSize>::VectorRef y_block(y_ptr, e_block_size);
typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
ete(e_block_size, e_block_size);
if (D != NULL) {
const typename EigenTypes<kEBlockSize>::ConstVectorRef
diag(D + bs->cols[e_block_id].position, e_block_size);
ete = diag.array().square().matrix().asDiagonal();
} else {
ete.setZero();
}
const double* values = A->values();
for (int j = 0; j < chunk.size; ++j) {
const CompressedRow& row = bs->rows[chunk.start + j];
const Cell& e_cell = row.cells.front();
DCHECK_EQ(e_block_id, e_cell.block_id);
FixedArray<double, 8> sj(row.block.size);
typename EigenTypes<kRowBlockSize>::VectorRef(sj.get(), row.block.size) =
typename EigenTypes<kRowBlockSize>::ConstVectorRef
(b + bs->rows[chunk.start + j].block.position, row.block.size);
for (int c = 1; c < row.cells.size(); ++c) {
const int f_block_id = row.cells[c].block_id;
const int f_block_size = bs->cols[f_block_id].size;
const int r_block = f_block_id - num_eliminate_blocks_;
MatrixVectorMultiply<kRowBlockSize, kFBlockSize, -1>(
values + row.cells[c].position, row.block.size, f_block_size,
z + lhs_row_layout_[r_block],
sj.get());
}
MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
values + e_cell.position, row.block.size, e_block_size,
sj.get(),
y_ptr);
MatrixTransposeMatrixMultiply
<kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
values + e_cell.position, row.block.size, e_block_size,
values + e_cell.position, row.block.size, e_block_size,
ete.data(), 0, 0, e_block_size, e_block_size);
}
y_block = InvertPSDMatrix<kEBlockSize>(assume_full_rank_ete_, ete)
* y_block;
}
#ifdef CERES_USE_TBB
);
#endif // CERES_USE_TBB
}
// Update the rhs of the reduced linear system. Compute
//
// F'b - F'E(E'E)^(-1) E'b
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
UpdateRhs(const Chunk& chunk,
const BlockSparseMatrix* A,
const double* b,
int row_block_counter,
const double* inverse_ete_g,
double* rhs) {
const CompressedRowBlockStructure* bs = A->block_structure();
const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
const int e_block_size = bs->cols[e_block_id].size;
int b_pos = bs->rows[row_block_counter].block.position;
const double* values = A->values();
for (int j = 0; j < chunk.size; ++j) {
const CompressedRow& row = bs->rows[row_block_counter + j];
const Cell& e_cell = row.cells.front();
typename EigenTypes<kRowBlockSize>::Vector sj =
typename EigenTypes<kRowBlockSize>::ConstVectorRef
(b + b_pos, row.block.size);
MatrixVectorMultiply<kRowBlockSize, kEBlockSize, -1>(
values + e_cell.position, row.block.size, e_block_size,
inverse_ete_g, sj.data());
for (int c = 1; c < row.cells.size(); ++c) {
const int block_id = row.cells[c].block_id;
const int block_size = bs->cols[block_id].size;
const int block = block_id - num_eliminate_blocks_;
CeresMutexLock l(rhs_locks_[block]);
MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
values + row.cells[c].position,
row.block.size, block_size,
sj.data(), rhs + lhs_row_layout_[block]);
}
b_pos += row.block.size;
}
}
// Given a Chunk - set of rows with the same e_block, e.g. in the
// following Chunk with two rows.
//
// E F
// [ y11 0 0 0 | z11 0 0 0 z51]
// [ y12 0 0 0 | z12 z22 0 0 0]
//
// this function computes two matrices. The diagonal block matrix
//
//   ete = y11' * y11 + y12' * y12
//
// and the off diagonal blocks in the Gauss-Newton Hessian.
//
//   buffer = [y11' * z11 + y12' * z12, y12' * z22, y11' * z51]
//
// which are zero compressed versions of the block sparse matrices E'E
// and E'F.
//
// and the gradient of the e_block, E'b.
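// Note (explanatory addition): "zero compressed" means that buffer holds
// only the nonzero E'F blocks of this chunk, packed contiguously at the
// offsets recorded in chunk.buffer_layout rather than in a full matrix.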
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
ChunkDiagonalBlockAndGradient(
const Chunk& chunk,
const BlockSparseMatrix* A,
const double* b,
int row_block_counter,
typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix* ete,
double* g,
double* buffer,
BlockRandomAccessMatrix* lhs) {
const CompressedRowBlockStructure* bs = A->block_structure();
int b_pos = bs->rows[row_block_counter].block.position;
const int e_block_size = ete->rows();
// Iterate over the rows in this chunk, for each row, compute the
// contribution of its F blocks to the Schur complement, the
// contribution of its E block to the matrix E'E (ete), and the
// corresponding block in the gradient vector.
const double* values = A->values();
for (int j = 0; j < chunk.size; ++j) {
const CompressedRow& row = bs->rows[row_block_counter + j];
if (row.cells.size() > 1) {
EBlockRowOuterProduct(A, row_block_counter + j, lhs);
}
// Extract the e_block, ETE += E_i' E_i
const Cell& e_cell = row.cells.front();
MatrixTransposeMatrixMultiply
<kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
values + e_cell.position, row.block.size, e_block_size,
values + e_cell.position, row.block.size, e_block_size,
ete->data(), 0, 0, e_block_size, e_block_size);
// g += E_i' b_i
MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
values + e_cell.position, row.block.size, e_block_size,
b + b_pos,
g);
// buffer = E'F. This computation is done by iterating over the
// f_blocks for each row in the chunk.
for (int c = 1; c < row.cells.size(); ++c) {
const int f_block_id = row.cells[c].block_id;
const int f_block_size = bs->cols[f_block_id].size;
double* buffer_ptr =
buffer + FindOrDie(chunk.buffer_layout, f_block_id);
MatrixTransposeMatrixMultiply
<kRowBlockSize, kEBlockSize, kRowBlockSize, kFBlockSize, 1>(
values + e_cell.position, row.block.size, e_block_size,
values + row.cells[c].position, row.block.size, f_block_size,
buffer_ptr, 0, 0, e_block_size, f_block_size);
}
b_pos += row.block.size;
}
}
// Compute the outer product F'E(E'E)^{-1}E'F and subtract it from the
// Schur complement matrix, i.e
//
// S -= F'E(E'E)^{-1}E'F.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
ChunkOuterProduct(int thread_id,
const CompressedRowBlockStructure* bs,
const Matrix& inverse_ete,
const double* buffer,
const BufferLayoutType& buffer_layout,
BlockRandomAccessMatrix* lhs) {
// This is the most computationally expensive part of this
// code. Profiling experiments reveal that the bottleneck is not the
// computation of the right-hand matrix product, but memory
// references to the left hand side.
const int e_block_size = inverse_ete.rows();
BufferLayoutType::const_iterator it1 = buffer_layout.begin();
double* b1_transpose_inverse_ete =
chunk_outer_product_buffer_.get() + thread_id * buffer_size_;
// S(i,j) -= bi' * ete^{-1} b_j
for (; it1 != buffer_layout.end(); ++it1) {
const int block1 = it1->first - num_eliminate_blocks_;
const int block1_size = bs->cols[it1->first].size;
MatrixTransposeMatrixMultiply
<kEBlockSize, kFBlockSize, kEBlockSize, kEBlockSize, 0>(
buffer + it1->second, e_block_size, block1_size,
inverse_ete.data(), e_block_size, e_block_size,
b1_transpose_inverse_ete, 0, 0, block1_size, e_block_size);
BufferLayoutType::const_iterator it2 = it1;
for (; it2 != buffer_layout.end(); ++it2) {
const int block2 = it2->first - num_eliminate_blocks_;
int r, c, row_stride, col_stride;
CellInfo* cell_info = lhs->GetCell(block1, block2,
&r, &c,
&row_stride, &col_stride);
if (cell_info != NULL) {
const int block2_size = bs->cols[it2->first].size;
CeresMutexLock l(&cell_info->m);
MatrixMatrixMultiply
<kFBlockSize, kEBlockSize, kEBlockSize, kFBlockSize, -1>(
b1_transpose_inverse_ete, block1_size, e_block_size,
buffer + it2->second, e_block_size, block2_size,
cell_info->values, r, c, row_stride, col_stride);
}
}
}
}
// For rows with no e_blocks, the schur complement update reduces to S
// += F'F. This function iterates over the rows of A with no e_block,
// and calls NoEBlockRowOuterProduct on each row.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
NoEBlockRowsUpdate(const BlockSparseMatrix* A,
const double* b,
int row_block_counter,
BlockRandomAccessMatrix* lhs,
double* rhs) {
const CompressedRowBlockStructure* bs = A->block_structure();
const double* values = A->values();
for (; row_block_counter < bs->rows.size(); ++row_block_counter) {
const CompressedRow& row = bs->rows[row_block_counter];
for (int c = 0; c < row.cells.size(); ++c) {
const int block_id = row.cells[c].block_id;
const int block_size = bs->cols[block_id].size;
const int block = block_id - num_eliminate_blocks_;
MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
values + row.cells[c].position, row.block.size, block_size,
b + row.block.position,
rhs + lhs_row_layout_[block]);
}
NoEBlockRowOuterProduct(A, row_block_counter, lhs);
}
}
// A row r of A, which has no e_blocks gets added to the Schur
// Complement as S += r r'. This function is responsible for computing
// the contribution of a single row r to the Schur complement. It is
// very similar in structure to EBlockRowOuterProduct except for
// one difference. It does not use any of the template
// parameters. This is because the algorithm used for detecting the
// static structure of the matrix A only pays attention to rows with
// e_blocks. This is because rows without e_blocks are rare and
// typically arise from regularization terms in the original
// optimization problem, and have a very different structure than the
// rows with e_blocks. Including them in the static structure
// detection will lead to most template parameters being set to
// dynamic. Since the number of rows without e_blocks is small, the
// lack of templating is not an issue.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
NoEBlockRowOuterProduct(const BlockSparseMatrix* A,
int row_block_index,
BlockRandomAccessMatrix* lhs) {
const CompressedRowBlockStructure* bs = A->block_structure();
const CompressedRow& row = bs->rows[row_block_index];
const double* values = A->values();
for (int i = 0; i < row.cells.size(); ++i) {
const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
DCHECK_GE(block1, 0);
const int block1_size = bs->cols[row.cells[i].block_id].size;
int r, c, row_stride, col_stride;
CellInfo* cell_info = lhs->GetCell(block1, block1,
&r, &c,
&row_stride, &col_stride);
if (cell_info != NULL) {
CeresMutexLock l(&cell_info->m);
// This multiply currently ignores the fact that this is a
// symmetric outer product.
MatrixTransposeMatrixMultiply
<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
values + row.cells[i].position, row.block.size, block1_size,
values + row.cells[i].position, row.block.size, block1_size,
cell_info->values, r, c, row_stride, col_stride);
}
for (int j = i + 1; j < row.cells.size(); ++j) {
const int block2 = row.cells[j].block_id - num_eliminate_blocks_;
DCHECK_GE(block2, 0);
DCHECK_LT(block1, block2);
int r, c, row_stride, col_stride;
CellInfo* cell_info = lhs->GetCell(block1, block2,
&r, &c,
&row_stride, &col_stride);
if (cell_info != NULL) {
const int block2_size = bs->cols[row.cells[j].block_id].size;
CeresMutexLock l(&cell_info->m);
MatrixTransposeMatrixMultiply
<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
values + row.cells[i].position, row.block.size, block1_size,
values + row.cells[j].position, row.block.size, block2_size,
cell_info->values, r, c, row_stride, col_stride);
}
}
}
}
// For a row with an e_block, compute the contribution S += F'F. This
// function has the same structure as NoEBlockRowOuterProduct, except
// that this function uses the template parameters.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
EBlockRowOuterProduct(const BlockSparseMatrix* A,
int row_block_index,
BlockRandomAccessMatrix* lhs) {
const CompressedRowBlockStructure* bs = A->block_structure();
const CompressedRow& row = bs->rows[row_block_index];
const double* values = A->values();
for (int i = 1; i < row.cells.size(); ++i) {
const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
DCHECK_GE(block1, 0);
const int block1_size = bs->cols[row.cells[i].block_id].size;
int r, c, row_stride, col_stride;
CellInfo* cell_info = lhs->GetCell(block1, block1,
&r, &c,
&row_stride, &col_stride);
if (cell_info != NULL) {
CeresMutexLock l(&cell_info->m);
// block += b1.transpose() * b1;
MatrixTransposeMatrixMultiply
<kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
values + row.cells[i].position, row.block.size, block1_size,
values + row.cells[i].position, row.block.size, block1_size,
cell_info->values, r, c, row_stride, col_stride);
}
for (int j = i + 1; j < row.cells.size(); ++j) {
const int block2 = row.cells[j].block_id - num_eliminate_blocks_;
DCHECK_GE(block2, 0);
DCHECK_LT(block1, block2);
const int block2_size = bs->cols[row.cells[j].block_id].size;
int r, c, row_stride, col_stride;
CellInfo* cell_info = lhs->GetCell(block1, block2,
&r, &c,
&row_stride, &col_stride);
if (cell_info != NULL) {
// block += b1.transpose() * b2;
CeresMutexLock l(&cell_info->m);
MatrixTransposeMatrixMultiply
<kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
values + row.cells[i].position, row.block.size, block1_size,
values + row.cells[j].position, row.block.size, block2_size,
cell_info->values, r, c, row_stride, col_stride);
}
}
}
}
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
|
scrypt_fmt.c
|
/*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2013 by Solar Designer
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*/
#include <stdio.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "escrypt/crypto_scrypt.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "base64_convert.h"
#include "memdbg.h"
#define FORMAT_LABEL "scrypt"
#define FORMAT_NAME ""
#define FMT_CISCO9 "$9$"
#define FMT_SCRYPTKDF "SCRYPT:"
#ifdef __XOP__
#define ALGORITHM_NAME "Salsa20/8 128/128 XOP"
#elif defined(__AVX__)
#define ALGORITHM_NAME "Salsa20/8 128/128 AVX"
#elif defined(__SSE2__)
#define ALGORITHM_NAME "Salsa20/8 128/128 SSE2"
#else
#define ALGORITHM_NAME "Salsa20/8 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT " (16384, 8, 1)"
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 256
#define BINARY_ALIGN 1
#define SALT_SIZE BINARY_SIZE
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests tests[] = {
{"$7$C6..../....SodiumChloride$kBGj9fHznVYFQMEn/qDCfrDevf9YDtcDdKvEqHJLV8D", "pleaseletmein"},
{"$7$C6..../....\x01\x09\x0a\x0d\x20\x7f\x80\xff$b7cKqzsQk7txdc9As1WZBHjUPNWQWJW8A.UUUTA5eD1", "\x01\x09\x0a\x0d\x20\x7f\x80\xff"},
{"$7$2/..../....$rNxJWVHNv/mCNcgE/f6/L4zO6Fos5c2uTzhyzoisI62", ""},
{"$7$86....E....NaCl$xffjQo7Bm/.SKRS4B2EuynbOLjAmXU5AbDbRXhoBl64", "password"},
// cisco type 9 hashes. They are $7$C/..../.... type (N=16384, r=1, p=1), with a different base-64 alphabet (same as WPA). The salt is used RAW
{"$9$nhEmQVczB7dqsO$X.HsgL6x1il0RxkOSSvyQYwucySCt7qFm4v7pqCxkKM", "cisco"},
{"$9$cvWdfQlRRDKq/U$VFTPha5VHTCbSgSUAo.nPoh50ZiXOw1zmljEjXkaq1g", "123456"},
{"$9$X9fA8mypebLFVj$Klp6X9hxNhkns0kwUIinvLRSIgWOvCwDhVTZqjsycyU", "JtR"},
// 3rd type ScryptKDF.pm format (we saw this in CMIYC 2013)
// Generate in perl with scrypt_hash($_[1],$salt,1<<$N,$r,$p,$bytes)
// to put into proper format, we mime->raw the salt and mime->cryptBS the hash, and fixup $N,$r,$p
{"SCRYPT:16384:8:1:VHRuaXZOZ05INWJs:JjrOzA8pdPhLvLh8sY64fLLaAjFUwYCXMmS16NXcn0A=","password"},
{"SCRYPT:16384:8:1:bjZkemVmZ3lWVi42:cmBflTPsqGIbg9ZIJRTQdbic8OCUH+904TFmNPBkuEA=","test123"},
{"SCRYPT:16384:8:1:VlVYUzBhQmlNbk5J:bJhm6VUS2UQRwMRqLTvSsljDeq193Ge4aqQDtb94bKg=","hello"},
{"SCRYPT:16384:8:1:VHRuaXZOZ05INWJs:JjrOzA8pdPhLvLh8sY64fLLaAjFUwYCXMmS16NXcn0BhlHpZJ3J2jcozCDM7t+sfjkgQ894R+f+ldVWM5atlkA==","password"},
{NULL}
};
// from crypto_scrypt-common.c (removed static from that file on these 3 functions)
extern const uint8_t * decode64_uint32(uint32_t * dst, uint32_t dstbits, const uint8_t * src);
extern uint8_t * encode64_uint32(uint8_t * dst, size_t dstlen, uint32_t src, uint32_t srcbits);
extern int decode64_one(uint32_t * dst, uint8_t src);
static int max_threads;
static escrypt_local_t *local;
static char saved_salt[SALT_SIZE];
static struct {
char key[PLAINTEXT_LENGTH + 1];
char out[BINARY_SIZE];
} *buffer;
static void init(struct fmt_main *self)
{
int i;
#ifdef _OPENMP
max_threads = omp_get_max_threads();
self->params.min_keys_per_crypt *= max_threads;
self->params.max_keys_per_crypt *= max_threads;
#else
max_threads = 1;
#endif
local = mem_alloc(sizeof(*local) * max_threads);
for (i = 0; i < max_threads; i++)
escrypt_init_local(&local[i]);
buffer = mem_alloc(sizeof(*buffer) * self->params.max_keys_per_crypt);
}
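// N_to_c() maps the scrypt N parameter to the single crypt-base64 character
// used in the "$7$" setting string. A worked example (assuming the standard
// itoa64 alphabet "./0-9A-Za-z"): N = 16384 = 2^14 gives itoa64[14] == 'C',
// which is why the converted cisco type 9 hashes start with "$7$C".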
static char N_to_c(int N) {
int b=0;
while (N>>=1) ++b;
return itoa64[b];
}
static char *prepare(char *fields[10], struct fmt_main *self)
{
static char Buf[256];
char tmp[256], tmp2[256], tmp3[256], tmp4[256], tmp5[6], tmp6[6];
int N, r, p;
if (strncmp(fields[1], FMT_CISCO9, 3) != 0 && strncmp(fields[1], FMT_SCRYPTKDF, 7))
return fields[1];
if (!strncmp(fields[1], FMT_CISCO9, 3)) {
// cisco type 9 hashes. scrypt params: N=16384, r=1, p=1; the hash is in crypt format. Change it to cryptBS.
// salt is 14 byte RAW, we can use it as is.
//from: {"$9$nhEmQVczB7dqsO$X.HsgL6x1il0RxkOSSvyQYwucySCt7qFm4v7pqCxkKM", "cisco"},
//to: {"$7$C/..../....nhEmQVczB7dqsO$AG.yl8LDCkiErlh4ttizmxYCXSiXYrNY6vKmLDKj/P4", "cisco"},
if (strlen(fields[1]) != 4+14+43)
return fields[1];
N=1<<14; r=1; p=1;
encode64_uint32((uint8_t*)tmp5, sizeof(tmp5), r, 30);
tmp5[5]=0;
encode64_uint32((uint8_t*)tmp6, sizeof(tmp6), p, 30);
tmp6[5]=0;
sprintf (Buf, "$7$%c%s%s%14.14s$%s", N_to_c(N), tmp5, tmp6, &(fields[1][3]),
base64_convert_cp(&(fields[1][3+14+1]), e_b64_crypt, 43, tmp, e_b64_cryptBS, sizeof(tmp), flg_Base64_NO_FLAGS));
} else {
// ScryptKDF.pm (perl) format scrypt, generated by: scrypt_hash($_[1],$salt,$N,$r,$p,$bytes); Since N, r, p
// AND bytes are variable, we have to handle computing all of them. NOTE, we may have to make changes to
// the crypto_scrypt-common.c to handle the variable number of bytes.
// to put into proper format, we mime->raw the salt and mime->cryptBS the hash, and fixup $N,$r,$p
//from: {"SCRYPT:16384:8:1:VHRuaXZOZ05INWJs:JjrOzA8pdPhLvLh8sY64fLLaAjFUwYCXMmS16NXcn0A=","password"},
//to: {"$7$C6..../....TtnivNgNH5bl$acXnAzE8oVzGwW9Tlu6iw7fq021J/1sZmEKhcLBrT02","password"},
char *cp = strrchr(fields[1], ':'), *cp2;
if (!cp || strlen(cp) > sizeof(tmp2))
return fields[1];
strcpy(tmp2, &cp[1]); // hash (mime format, we need cryptBS)
cp2 = cp;
--cp2;
while (cp2 > fields[1] && *cp2 != ':')
--cp2;
if (*cp2 != ':' || cp-cp2 > sizeof(tmp3))
return fields[1];
strnzcpy(tmp3, &cp2[1], cp-cp2); // salt (mime format, we need raw)
cp = &fields[1][7];
N = atoi(cp);
cp = strchr(cp, ':');
if (!cp) return fields[1];
++cp;
r = atoi(cp);
cp = strchr(cp, ':');
if (!cp) return fields[1];
++cp;
p = atoi(cp);
cp = strchr(cp, ':');
if (cp != cp2) return fields[1];
encode64_uint32((uint8_t*)tmp5, sizeof(tmp5), r, 30);
tmp5[5]=0;
encode64_uint32((uint8_t*)tmp6, sizeof(tmp6), p, 30);
tmp6[5]=0;
memset(tmp4, 0, sizeof(tmp4));
sprintf (Buf, "$7$%c%s%s%s$%s", N_to_c(N), tmp5, tmp6,
base64_convert_cp(tmp3, e_b64_mime, strlen(tmp3), tmp4, e_b64_raw, sizeof(tmp4), flg_Base64_NO_FLAGS),
base64_convert_cp(tmp2, e_b64_mime, strlen(tmp2), tmp, e_b64_cryptBS, sizeof(tmp),flg_Base64_NO_FLAGS));
}
return Buf;
}
static void done(void)
{
int i;
for (i = 0; i < max_threads; i++)
escrypt_free_local(&local[i]);
MEM_FREE(local);
MEM_FREE(buffer);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p;
int length;
unsigned tmp;
if (strncmp(ciphertext, "$7$", 3))
return 0;
for (p = ciphertext + 3; p < ciphertext + (3 + 1 + 5 + 5); p++)
if (atoi64[ARCH_INDEX(*p)] == 0x7F)
return 0;
p = strrchr(ciphertext, '$');
if (!p)
return 0;
if (p - ciphertext > BINARY_SIZE - (1 + 43))
return 0;
++p;
length = base64_valid_length(p, e_b64_cryptBS, flg_Base64_NO_FLAGS);
decode64_one(&tmp, ciphertext[3]);
if (!tmp)
return 0;
decode64_uint32(&tmp, 30, (const uint8_t *)&ciphertext[4]);
if (!tmp)
return 0;
decode64_uint32(&tmp, 30, (const uint8_t *)&ciphertext[4+5]);
if (!tmp)
return 0;
// we want the hash to use 32 bytes OR more. 43 base64 bytes is 32 raw bytes
return p[length]==0 && length >= 43;
}
static void *binary(char *ciphertext)
{
static char out[BINARY_SIZE];
strncpy(out, ciphertext, sizeof(out)); /* NUL padding is required */
return out;
}
static void *salt(char *ciphertext)
{
static char out[SALT_SIZE];
char *p = strrchr(ciphertext, '$');
/* NUL padding is required */
memset(out, 0, sizeof(out));
memcpy(out, ciphertext, p - ciphertext);
return out;
}
#define H(s, i) \
((int)(unsigned char)(atoi64[ARCH_INDEX((s)[(i)])] ^ (s)[(i) - 1]))
/*
* original Hx() macros simply looked at length-2 (last byte, and last byte -2)
* now we look at bytes 40 and 38 from the hash, so that longer hashes can
* be compared to shorter ones. The last byte may be different, so we
* do NOT use that one. This new method works for any number of bytes in
* the scrypt hash, 32 or more.
#define H0(s) \
int i = strlen(s) - 2; \
return i > 0 ? H((s), i) & 0xF : 0
*/
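/*
 * A concrete example (illustrative, not part of the original comment): the
 * first 40 base64 characters after the last '$' encode the first 30 raw
 * bytes of the hash, so they are identical whether the stored hash is 43
 * characters (32 bytes) or longer; basing the bucket on characters at
 * offsets <= 40 therefore puts long and short encodings of the same hash
 * into the same bucket.
 */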
#define H0(s) \
char *cp = strrchr(s,'$')+40; \
int i = cp-s; \
return i > 0 ? H((s), i) & 0xF : 0
#define H1(s) \
char *cp = strrchr(s,'$')+40; \
int i = cp-s; \
return i > 2 ? (H((s), i) ^ (H((s), i - 2) << 4)) & 0xFF : 0
#define H2(s) \
char *cp = strrchr(s,'$')+40; \
int i = cp-s; \
return i > 2 ? (H((s), i) ^ (H((s), i - 2) << 6)) & 0xFFF : 0
#define H3(s) \
char *cp = strrchr(s,'$')+40; \
int i = cp-s; \
return i > 4 ? (H((s), i) ^ (H((s), i - 2) << 5) ^ \
(H((s), i - 4) << 10)) & 0xFFFF : 0
#define H4(s) \
char *cp = strrchr(s,'$')+40; \
int i = cp-s; \
return i > 6 ? (H((s), i) ^ (H((s), i - 2) << 5) ^ \
(H((s), i - 4) << 10) ^ (H((s), i - 6) << 15)) & 0xFFFFF : 0
static int binary_hash_0(void *binary)
{
H0((char *)binary);
}
static int binary_hash_1(void *binary)
{
H1((char *)binary);
}
static int binary_hash_2(void *binary)
{
H2((char *)binary);
}
static int binary_hash_3(void *binary)
{
H3((char *)binary);
}
static int binary_hash_4(void *binary)
{
H4((char *)binary);
}
static int get_hash_0(int index)
{
H0(buffer[index].out);
}
static int get_hash_1(int index)
{
H1(buffer[index].out);
}
static int get_hash_2(int index)
{
H2(buffer[index].out);
}
static int get_hash_3(int index)
{
H3(buffer[index].out);
}
static int get_hash_4(int index)
{
H4(buffer[index].out);
}
static int salt_hash(void *salt)
{
int i, h;
i = strlen((char *)salt) - 1;
if (i > 1) i--;
h = (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i])];
h ^= ((unsigned char *)salt)[i - 1];
h <<= 6;
h ^= (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i - 1])];
h ^= ((unsigned char *)salt)[i];
return h & (SALT_HASH_SIZE - 1);
}
static void set_salt(void *salt)
{
strcpy(saved_salt, salt);
}
static void set_key(char *key, int index)
{
strnzcpy(buffer[index].key, key, PLAINTEXT_LENGTH + 1);
}
static char *get_key(int index)
{
return buffer[index].key;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index;
int failed = 0;
#ifdef _OPENMP
#pragma omp parallel for default(none) private(index) shared(count, failed, local, saved_salt, buffer)
#endif
for (index = 0; index < count; index++) {
uint8_t *hash;
hash = escrypt_r(&(local[index]),
(const uint8_t *)(buffer[index].key),
strlen(buffer[index].key),
(const uint8_t *)saved_salt,
(uint8_t *)&(buffer[index].out),
sizeof(buffer[index].out));
if (!hash) {
failed = 1;
buffer[index].out[0] = 0;
}
}
if (failed) {
fprintf(stderr, "scrypt memory allocation failed\n");
error();
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index;
// binary was created as 32 bytes. It will always be
// <= length of buffer.out. So we use the binary as
// our hash indication length (and avoid looking at the last byte)
int len = strlen(buffer[0].out)-2;
for (index = 0; index < count; index++)
if (!strncmp((char *)binary, buffer[index].out, len))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
int len = strlen(buffer[index].out)-2;
return !strncmp((char *)binary, buffer[index].out,len);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
#if FMT_MAIN_VERSION > 11
static unsigned int tunable_cost_N(void *salt)
{
const uint8_t * setting;
const uint8_t * src;
uint64_t N;
setting = salt;
if (setting[0] != '$' || setting[1] != '7' || setting[2] != '$')
return 0;
src = setting + 3;
{
uint32_t N_log2;
if (decode64_one(&N_log2, *src))
return 0;
src++;
N = (uint64_t)1 << N_log2;
}
return (unsigned int) N;
}
static unsigned int tunable_cost_r(void *salt)
{
const uint8_t * setting;
const uint8_t * src;
uint32_t r;
setting = salt;
if (setting[0] != '$' || setting[1] != '7' || setting[2] != '$')
return 0;
src = setting + 3;
{
uint32_t N_log2;
if (decode64_one(&N_log2, *src))
return 0;
src++;
}
src = decode64_uint32(&r, 30, src);
if (!src)
return 0;
return (unsigned int) r;
}
static unsigned int tunable_cost_p(void *salt)
{
const uint8_t * setting;
const uint8_t * src;
uint32_t r, p;
setting = salt;
if (setting[0] != '$' || setting[1] != '7' || setting[2] != '$')
return 0;
src = setting + 3;
{
uint32_t N_log2;
if (decode64_one(&N_log2, *src))
return 0;
src++;
}
src = decode64_uint32(&r, 30, src);
if (!src)
return 0;
src = decode64_uint32(&p, 30, src);
if (!src)
return 0;
return (unsigned int) p;
}
#endif
struct fmt_main fmt_scrypt = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{
"N",
"r",
"p"
},
#endif
tests
}, {
init,
done,
fmt_default_reset,
prepare,
valid,
fmt_default_split,
binary,
salt,
#if FMT_MAIN_VERSION > 11
{
tunable_cost_N,
tunable_cost_r,
tunable_cost_p
},
#endif
fmt_default_source,
{
binary_hash_0,
binary_hash_1,
binary_hash_2,
binary_hash_3,
binary_hash_4,
NULL,
NULL
},
salt_hash,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
NULL,
NULL
},
cmp_all,
cmp_one,
cmp_exact
}
};
|
nest_lock.c
|
// RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>
int main()
{
//need to use an OpenMP construct so that OMPT will be initialized
#pragma omp parallel num_threads(1)
print_ids(0);
omp_nest_lock_t nest_lock;
printf("%" PRIu64 ": &nest_lock: %" PRIu64 "\n",
ompt_get_thread_data()->value, (ompt_wait_id_t)(uintptr_t)&nest_lock);
omp_init_nest_lock(&nest_lock);
print_fuzzy_address(1);
omp_set_nest_lock(&nest_lock);
print_fuzzy_address(2);
omp_set_nest_lock(&nest_lock);
print_fuzzy_address(3);
omp_unset_nest_lock(&nest_lock);
print_fuzzy_address(4);
omp_unset_nest_lock(&nest_lock);
print_fuzzy_address(5);
omp_destroy_nest_lock(&nest_lock);
print_fuzzy_address(6);
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_nest_lock'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_init_nest_lock: wait_id=[[WAIT_ID:[0-9]+]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_nest_lock: wait_id=[[WAIT_ID]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_nest_lock_first: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_nest_lock: wait_id=[[WAIT_ID]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_nest_lock_next: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS]]
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_nest_lock_prev: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_nest_lock_last: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_destroy_nest_lock: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
return 0;
}
|
rwpng.c
|
/*---------------------------------------------------------------------------
pngquant: RGBA -> RGBA-palette quantization program rwpng.c
---------------------------------------------------------------------------
© 1998-2000 by Greg Roelofs.
© 2009-2015 by Kornel Lesiński.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include "png.h" /* if this include fails, you need to install libpng (e.g. libpng-devel package) and run ./configure */
#include "rwpng.h"
#if USE_LCMS
#include "lcms2.h"
#endif
#ifndef Z_BEST_COMPRESSION
#define Z_BEST_COMPRESSION 9
#endif
#ifndef Z_BEST_SPEED
#define Z_BEST_SPEED 1
#endif
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#endif
#if PNG_LIBPNG_VER < 10500
typedef png_const_charp png_const_bytep;
#endif
static void rwpng_error_handler(png_structp png_ptr, png_const_charp msg);
int rwpng_read_image24_cocoa(FILE *infile, png24_image *mainprog_ptr);
void rwpng_version_info(FILE *fp)
{
const char *pngver = png_get_header_ver(NULL);
#if USE_COCOA
fprintf(fp, " Color profiles are supported via Cocoa. Using libpng %s.\n", pngver);
#elif USE_LCMS
fprintf(fp, " Color profiles are supported via Little CMS. Using libpng %s.\n", pngver);
#else
fprintf(fp, " Compiled with no support for color profiles. Using libpng %s.\n", pngver);
#endif
#if PNG_LIBPNG_VER < 10600
if (strcmp(pngver, "1.3.") < 0) {
fputs("\nWARNING: Your version of libpng is outdated and may produce corrupted files.\n"
"Please recompile pngquant with the current version of libpng (1.6 or later).\n", fp);
} else if (strcmp(pngver, "1.6.") < 0) {
#if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED)
fputs("\nWARNING: Your version of libpng is old and has buggy support for custom chunks.\n"
"Please recompile pngquant with the current version of libpng (1.6 or later).\n", fp);
#endif
}
#endif
}
struct rwpng_read_data {
FILE *const fp;
png_size_t bytes_read;
};
#if !USE_COCOA
static void user_read_data(png_structp png_ptr, png_bytep data, png_size_t length)
{
struct rwpng_read_data *read_data = (struct rwpng_read_data *)png_get_io_ptr(png_ptr);
png_size_t read = fread(data, 1, length, read_data->fp);
if (!read) {
png_error(png_ptr, "Read error");
}
read_data->bytes_read += read;
}
#endif
struct rwpng_write_state {
FILE *outfile;
png_size_t maximum_file_size;
png_size_t bytes_written;
pngquant_error retval;
};
static void user_write_data(png_structp png_ptr, png_bytep data, png_size_t length)
{
struct rwpng_write_state *write_state = (struct rwpng_write_state *)png_get_io_ptr(png_ptr);
if (SUCCESS != write_state->retval) {
return;
}
if (!fwrite(data, length, 1, write_state->outfile)) {
write_state->retval = CANT_WRITE_ERROR;
}
write_state->bytes_written += length;
}
static void user_flush_data(png_structp png_ptr)
{
// libpng never calls this :(
}
static png_bytepp rwpng_create_row_pointers(png_infop info_ptr, png_structp png_ptr, unsigned char *base, unsigned int height, png_size_t rowbytes)
{
if (!rowbytes) {
rowbytes = png_get_rowbytes(png_ptr, info_ptr);
}
png_bytepp row_pointers = malloc(height * sizeof(row_pointers[0]));
if (!row_pointers) return NULL;
for(size_t row = 0; row < height; row++) {
row_pointers[row] = base + row * rowbytes;
}
return row_pointers;
}
#if !USE_COCOA
static int read_chunk_callback(png_structp png_ptr, png_unknown_chunkp in_chunk)
{
if (0 == memcmp("iCCP", in_chunk->name, 5) ||
0 == memcmp("cHRM", in_chunk->name, 5) ||
0 == memcmp("gAMA", in_chunk->name, 5)) {
return 0; // not handled
}
struct rwpng_chunk **head = (struct rwpng_chunk **)png_get_user_chunk_ptr(png_ptr);
struct rwpng_chunk *chunk = malloc(sizeof(struct rwpng_chunk));
memcpy(chunk->name, in_chunk->name, 5);
chunk->size = in_chunk->size;
chunk->location = in_chunk->location;
chunk->data = in_chunk->size ? malloc(in_chunk->size) : NULL;
if (in_chunk->size) {
memcpy(chunk->data, in_chunk->data, in_chunk->size);
}
chunk->next = *head;
*head = chunk;
return 1; // marks as "handled", libpng won't store it
}
#endif
/*
retval:
0 = success
21 = bad sig
22 = bad IHDR
24 = insufficient memory
25 = libpng error (via longjmp())
26 = wrong PNG color type (no alpha channel)
*/
#if !USE_COCOA
static void rwpng_warning_stderr_handler(png_structp png_ptr, png_const_charp msg) {
fprintf(stderr, " libpng warning: %s\n", msg);
}
static void rwpng_warning_silent_handler(png_structp png_ptr, png_const_charp msg) {
}
static pngquant_error rwpng_read_image24_libpng(FILE *infile, png24_image *mainprog_ptr, int verbose)
{
png_structp png_ptr = NULL;
png_infop info_ptr = NULL;
png_size_t rowbytes;
int color_type, bit_depth;
png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, mainprog_ptr,
rwpng_error_handler, verbose ? rwpng_warning_stderr_handler : rwpng_warning_silent_handler);
if (!png_ptr) {
return PNG_OUT_OF_MEMORY_ERROR; /* out of memory */
}
info_ptr = png_create_info_struct(png_ptr);
if (!info_ptr) {
png_destroy_read_struct(&png_ptr, NULL, NULL);
return PNG_OUT_OF_MEMORY_ERROR; /* out of memory */
}
/* setjmp() must be called in every function that calls a non-trivial
* libpng function */
if (setjmp(mainprog_ptr->jmpbuf)) {
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
return LIBPNG_FATAL_ERROR; /* fatal libpng error (via longjmp()) */
}
#if defined(PNG_SKIP_sRGB_CHECK_PROFILE) && defined(PNG_SET_OPTION_SUPPORTED)
png_set_option(png_ptr, PNG_SKIP_sRGB_CHECK_PROFILE, PNG_OPTION_ON);
#endif
#if PNG_LIBPNG_VER >= 10500 && defined(PNG_UNKNOWN_CHUNKS_SUPPORTED)
/* copy standard chunks too */
png_set_keep_unknown_chunks(png_ptr, PNG_HANDLE_CHUNK_IF_SAFE, (png_const_bytep)"pHYs\0iTXt\0tEXt\0zTXt", 4);
#endif
png_set_read_user_chunk_fn(png_ptr, &mainprog_ptr->chunks, read_chunk_callback);
struct rwpng_read_data read_data = {infile, 0};
png_set_read_fn(png_ptr, &read_data, user_read_data);
png_read_info(png_ptr, info_ptr); /* read all PNG info up to image data */
/* alternatively, could make separate calls to png_get_image_width(),
* etc., but want bit_depth and color_type for later [don't care about
* compression_type and filter_type => NULLs] */
png_get_IHDR(png_ptr, info_ptr, &mainprog_ptr->width, &mainprog_ptr->height,
&bit_depth, &color_type, NULL, NULL, NULL);
// For overflow safety reject images that won't fit in 32-bit
if (mainprog_ptr->width > INT_MAX/mainprog_ptr->height) {
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
return PNG_OUT_OF_MEMORY_ERROR; /* not quite true, but whatever */
}
/* expand palette images to RGB, low-bit-depth grayscale images to 8 bits,
* transparency chunks to full alpha channel; strip 16-bit-per-sample
* images to 8 bits per sample; and convert grayscale to RGB[A] */
/* GRR TO DO: preserve all safe-to-copy ancillary PNG chunks */
if (!(color_type & PNG_COLOR_MASK_ALPHA)) {
#ifdef PNG_READ_FILLER_SUPPORTED
png_set_expand(png_ptr);
png_set_filler(png_ptr, 65535L, PNG_FILLER_AFTER);
#else
fprintf(stderr, "pngquant readpng: image is neither RGBA nor GA\n");
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
mainprog_ptr->retval = WRONG_INPUT_COLOR_TYPE;
return mainprog_ptr->retval;
#endif
}
if (bit_depth == 16) {
png_set_strip_16(png_ptr);
}
if (!(color_type & PNG_COLOR_MASK_COLOR)) {
png_set_gray_to_rgb(png_ptr);
}
/* get source gamma for gamma correction, or use sRGB default */
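/* (0.45455 is approximately 1/2.2, i.e. the encoding gamma that corresponds
   to a 2.2 display gamma, which is roughly what sRGB assumes) */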
double gamma = 0.45455;
if (png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB)) {
mainprog_ptr->input_color = RWPNG_SRGB;
mainprog_ptr->output_color = RWPNG_SRGB;
} else {
png_get_gAMA(png_ptr, info_ptr, &gamma);
if (gamma > 0 && gamma <= 1.0) {
mainprog_ptr->input_color = RWPNG_GAMA_ONLY;
mainprog_ptr->output_color = RWPNG_GAMA_ONLY;
} else {
fprintf(stderr, "pngquant readpng: ignored out-of-range gamma %f\n", gamma);
mainprog_ptr->input_color = RWPNG_NONE;
mainprog_ptr->output_color = RWPNG_NONE;
gamma = 0.45455;
}
}
mainprog_ptr->gamma = gamma;
png_set_interlace_handling(png_ptr);
/* all transformations have been registered; now update info_ptr data,
* get rowbytes and channels, and allocate image memory */
png_read_update_info(png_ptr, info_ptr);
rowbytes = png_get_rowbytes(png_ptr, info_ptr);
if ((mainprog_ptr->rgba_data = malloc(rowbytes * mainprog_ptr->height)) == NULL) {
fprintf(stderr, "pngquant readpng: unable to allocate image data\n");
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
return PNG_OUT_OF_MEMORY_ERROR;
}
png_bytepp row_pointers = rwpng_create_row_pointers(info_ptr, png_ptr, mainprog_ptr->rgba_data, mainprog_ptr->height, 0);
/* now we can go ahead and just read the whole image */
png_read_image(png_ptr, row_pointers);
/* and we're done! (png_read_end() can be omitted if no processing of
* post-IDAT text/time/etc. is desired) */
png_read_end(png_ptr, NULL);
#if USE_LCMS
#if PNG_LIBPNG_VER < 10500
png_charp ProfileData;
#else
png_bytep ProfileData;
#endif
png_uint_32 ProfileLen;
cmsHPROFILE hInProfile = NULL;
/* color_type is read from the image before conversion to RGBA */
int COLOR_PNG = color_type & PNG_COLOR_MASK_COLOR;
/* embedded ICC profile */
if (png_get_iCCP(png_ptr, info_ptr, &(png_charp){0}, &(int){0}, &ProfileData, &ProfileLen)) {
hInProfile = cmsOpenProfileFromMem(ProfileData, ProfileLen);
cmsColorSpaceSignature colorspace = cmsGetColorSpace(hInProfile);
/* only RGB (and GRAY) valid for PNGs */
if (colorspace == cmsSigRgbData && COLOR_PNG) {
mainprog_ptr->input_color = RWPNG_ICCP;
mainprog_ptr->output_color = RWPNG_SRGB;
} else {
if (colorspace == cmsSigGrayData && !COLOR_PNG) {
mainprog_ptr->input_color = RWPNG_ICCP_WARN_GRAY;
mainprog_ptr->output_color = RWPNG_SRGB;
}
cmsCloseProfile(hInProfile);
hInProfile = NULL;
}
}
/* build RGB profile from cHRM and gAMA */
if (hInProfile == NULL && COLOR_PNG &&
!png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB) &&
png_get_valid(png_ptr, info_ptr, PNG_INFO_gAMA) &&
png_get_valid(png_ptr, info_ptr, PNG_INFO_cHRM)) {
cmsCIExyY WhitePoint;
cmsCIExyYTRIPLE Primaries;
png_get_cHRM(png_ptr, info_ptr, &WhitePoint.x, &WhitePoint.y,
&Primaries.Red.x, &Primaries.Red.y,
&Primaries.Green.x, &Primaries.Green.y,
&Primaries.Blue.x, &Primaries.Blue.y);
WhitePoint.Y = Primaries.Red.Y = Primaries.Green.Y = Primaries.Blue.Y = 1.0;
cmsToneCurve *GammaTable[3];
GammaTable[0] = GammaTable[1] = GammaTable[2] = cmsBuildGamma(NULL, 1/gamma);
hInProfile = cmsCreateRGBProfile(&WhitePoint, &Primaries, GammaTable);
cmsFreeToneCurve(GammaTable[0]);
mainprog_ptr->input_color = RWPNG_GAMA_CHRM;
mainprog_ptr->output_color = RWPNG_SRGB;
}
/* transform image to sRGB colorspace */
if (hInProfile != NULL) {
cmsHPROFILE hOutProfile = cmsCreate_sRGBProfile();
cmsHTRANSFORM hTransform = cmsCreateTransform(hInProfile, TYPE_RGBA_8,
hOutProfile, TYPE_RGBA_8,
INTENT_PERCEPTUAL,
omp_get_max_threads() > 1 ? cmsFLAGS_NOCACHE : 0);
#pragma omp parallel for \
if (mainprog_ptr->height*mainprog_ptr->width > 8000) \
schedule(static)
for (unsigned int i = 0; i < mainprog_ptr->height; i++) {
/* It is safe to use the same block for input and output,
when both are of the same TYPE. */
cmsDoTransform(hTransform, row_pointers[i],
row_pointers[i],
mainprog_ptr->width);
}
cmsDeleteTransform(hTransform);
cmsCloseProfile(hOutProfile);
cmsCloseProfile(hInProfile);
mainprog_ptr->gamma = 0.45455;
}
#endif
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
mainprog_ptr->file_size = read_data.bytes_read;
mainprog_ptr->row_pointers = (unsigned char **)row_pointers;
return SUCCESS;
}
#endif
static void rwpng_free_chunks(struct rwpng_chunk *chunk) {
if (!chunk) return;
rwpng_free_chunks(chunk->next);
free(chunk->data);
free(chunk);
}
void rwpng_free_image24(png24_image *image)
{
free(image->row_pointers);
image->row_pointers = NULL;
free(image->rgba_data);
image->rgba_data = NULL;
rwpng_free_chunks(image->chunks);
image->chunks = NULL;
}
void rwpng_free_image8(png8_image *image)
{
free(image->indexed_data);
image->indexed_data = NULL;
free(image->row_pointers);
image->row_pointers = NULL;
rwpng_free_chunks(image->chunks);
image->chunks = NULL;
}
pngquant_error rwpng_read_image24(FILE *infile, png24_image *input_image_p, int verbose)
{
#if USE_COCOA
return rwpng_read_image24_cocoa(infile, input_image_p);
#else
return rwpng_read_image24_libpng(infile, input_image_p, verbose);
#endif
}
static pngquant_error rwpng_write_image_init(rwpng_png_image *mainprog_ptr, png_structpp png_ptr_p, png_infopp info_ptr_p, int fast_compression)
{
/* could also replace libpng warning-handler (final NULL), but no need: */
*png_ptr_p = png_create_write_struct(PNG_LIBPNG_VER_STRING, mainprog_ptr, rwpng_error_handler, NULL);
if (!(*png_ptr_p)) {
return LIBPNG_INIT_ERROR; /* out of memory */
}
*info_ptr_p = png_create_info_struct(*png_ptr_p);
if (!(*info_ptr_p)) {
png_destroy_write_struct(png_ptr_p, NULL);
return LIBPNG_INIT_ERROR; /* out of memory */
}
/* setjmp() must be called in every function that calls a PNG-writing
* libpng function, unless an alternate error handler was installed--
* but compatible error handlers must either use longjmp() themselves
* (as in this program) or exit immediately, so here we go: */
if (setjmp(mainprog_ptr->jmpbuf)) {
png_destroy_write_struct(png_ptr_p, info_ptr_p);
return LIBPNG_INIT_ERROR; /* libpng error (via longjmp()) */
}
png_set_compression_level(*png_ptr_p, fast_compression ? Z_BEST_SPEED : Z_BEST_COMPRESSION);
png_set_compression_mem_level(*png_ptr_p, fast_compression ? 9 : 5); // judging by optipng results, smaller mem makes libpng compress slightly better
return SUCCESS;
}
static void rwpng_write_end(png_infopp info_ptr_p, png_structpp png_ptr_p, png_bytepp row_pointers)
{
png_write_info(*png_ptr_p, *info_ptr_p);
png_set_packing(*png_ptr_p);
png_write_image(*png_ptr_p, row_pointers);
png_write_end(*png_ptr_p, NULL);
png_destroy_write_struct(png_ptr_p, info_ptr_p);
}
static void rwpng_set_gamma(png_infop info_ptr, png_structp png_ptr, double gamma, rwpng_color_transform color)
{
if (color != RWPNG_GAMA_ONLY && color != RWPNG_NONE) {
png_set_gAMA(png_ptr, info_ptr, gamma);
}
if (color == RWPNG_SRGB) {
png_set_sRGB(png_ptr, info_ptr, 0); // 0 = Perceptual
}
}
pngquant_error rwpng_write_image8(FILE *outfile, const png8_image *mainprog_ptr)
{
png_structp png_ptr;
png_infop info_ptr;
if (mainprog_ptr->num_palette > 256) return INVALID_ARGUMENT;
pngquant_error retval = rwpng_write_image_init((rwpng_png_image*)mainprog_ptr, &png_ptr, &info_ptr, mainprog_ptr->fast_compression);
if (retval) return retval;
struct rwpng_write_state write_state;
write_state = (struct rwpng_write_state){
.outfile = outfile,
.maximum_file_size = mainprog_ptr->maximum_file_size,
.retval = SUCCESS,
};
png_set_write_fn(png_ptr, &write_state, user_write_data, user_flush_data);
// Palette images generally don't gain anything from filtering
png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE, PNG_FILTER_VALUE_NONE);
rwpng_set_gamma(info_ptr, png_ptr, mainprog_ptr->gamma, mainprog_ptr->output_color);
/* set the image parameters appropriately */
int sample_depth;
#if PNG_LIBPNG_VER > 10400 /* old libpng corrupts files with low depth */
if (mainprog_ptr->num_palette <= 2)
sample_depth = 1;
else if (mainprog_ptr->num_palette <= 4)
sample_depth = 2;
else if (mainprog_ptr->num_palette <= 16)
sample_depth = 4;
else
#endif
sample_depth = 8;
struct rwpng_chunk *chunk = mainprog_ptr->chunks;
int chunk_num=0;
while(chunk) {
png_unknown_chunk pngchunk = {
.size = chunk->size,
.data = chunk->data,
.location = chunk->location,
};
memcpy(pngchunk.name, chunk->name, 5);
png_set_unknown_chunks(png_ptr, info_ptr, &pngchunk, 1);
#if defined(PNG_HAVE_IHDR) && PNG_LIBPNG_VER < 10600
png_set_unknown_chunk_location(png_ptr, info_ptr, chunk_num, pngchunk.location ? pngchunk.location : PNG_HAVE_IHDR);
#endif
chunk = chunk->next;
chunk_num++;
}
png_set_IHDR(png_ptr, info_ptr, mainprog_ptr->width, mainprog_ptr->height,
sample_depth, PNG_COLOR_TYPE_PALETTE,
0, PNG_COMPRESSION_TYPE_DEFAULT,
PNG_FILTER_TYPE_BASE);
png_color palette[256];
png_byte trans[256];
unsigned int num_trans = 0;
for(unsigned int i = 0; i < mainprog_ptr->num_palette; i++) {
palette[i] = (png_color){
.red = mainprog_ptr->palette[i].r,
.green = mainprog_ptr->palette[i].g,
.blue = mainprog_ptr->palette[i].b,
};
trans[i] = mainprog_ptr->palette[i].a;
if (mainprog_ptr->palette[i].a < 255) {
num_trans = i+1;
}
}
png_set_PLTE(png_ptr, info_ptr, palette, mainprog_ptr->num_palette);
if (num_trans > 0) {
png_set_tRNS(png_ptr, info_ptr, trans, num_trans, NULL);
}
rwpng_write_end(&info_ptr, &png_ptr, mainprog_ptr->row_pointers);
if (SUCCESS == write_state.retval && write_state.maximum_file_size && write_state.bytes_written > write_state.maximum_file_size) {
return TOO_LARGE_FILE;
}
return write_state.retval;
}
pngquant_error rwpng_write_image24(FILE *outfile, const png24_image *mainprog_ptr)
{
png_structp png_ptr;
png_infop info_ptr;
pngquant_error retval = rwpng_write_image_init((rwpng_png_image*)mainprog_ptr, &png_ptr, &info_ptr, 0);
if (retval) return retval;
png_init_io(png_ptr, outfile);
rwpng_set_gamma(info_ptr, png_ptr, mainprog_ptr->gamma, mainprog_ptr->output_color);
png_set_IHDR(png_ptr, info_ptr, mainprog_ptr->width, mainprog_ptr->height,
8, PNG_COLOR_TYPE_RGB_ALPHA,
0, PNG_COMPRESSION_TYPE_DEFAULT,
PNG_FILTER_TYPE_BASE);
png_bytepp row_pointers = rwpng_create_row_pointers(info_ptr, png_ptr, mainprog_ptr->rgba_data, mainprog_ptr->height, 0);
rwpng_write_end(&info_ptr, &png_ptr, row_pointers);
free(row_pointers);
return SUCCESS;
}
static void rwpng_error_handler(png_structp png_ptr, png_const_charp msg)
{
rwpng_png_image *mainprog_ptr;
/* This function, aside from the extra step of retrieving the "error
* pointer" (below) and the fact that it exists within the application
* rather than within libpng, is essentially identical to libpng's
* default error handler. The second point is critical: since both
* setjmp() and longjmp() are called from the same code, they are
* guaranteed to have compatible notions of how big a jmp_buf is,
* regardless of whether _BSD_SOURCE or anything else has (or has not)
* been defined. */
fprintf(stderr, " error: %s (libpng failed)\n", msg);
fflush(stderr);
mainprog_ptr = png_get_error_ptr(png_ptr);
if (mainprog_ptr == NULL) abort();
longjmp(mainprog_ptr->jmpbuf, 1);
}
|
a.39.1.c
|
/* { dg-do run } */
#include <stdio.h>
#include <omp.h>
void
skip (int i)
{
}
void
work (int i)
{
}
int
main ()
{
omp_lock_t lck;
int id;
omp_init_lock (&lck);
#pragma omp parallel shared(lck) private(id)
{
id = omp_get_thread_num ();
omp_set_lock (&lck);
/* only one thread at a time can execute this printf */
printf ("My thread id is %d.\n", id);
omp_unset_lock (&lck);
while (!omp_test_lock (&lck))
{
skip (id); /* we do not yet have the lock,
so we must do something else */
}
work (id); /* we now have the lock
and can do the work */
omp_unset_lock (&lck);
}
omp_destroy_lock (&lck);
return 0;
}
|
mandel-omp-taskloop-Row.c
|
/*
* Sequential Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandel [-i maxiter -c x0 y0 -s size -w windowsize]
* where
* maxiter denotes the maximum number of iterations at each point -- by default 1000
* x0, y0, and size specify the range to examine (a square
* centered at (x0 + iy0) of size 2*size by 2*size -- by default,
* a square of size 4 by 4 centered at the origin)
* windowsize denotes the size of the image (display window) to compute
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters, plus execution
* time in seconds.
*
* Code based on the original code from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>
double getusec_() {
struct timeval time;
gettimeofday(&time, NULL);
return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6fs\n",(_m), stamp);
/* Default values for things. */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */
int row, col; // global indices kept from the sequential version; the parallel loops below use private local indices
/* Structure definition for complex numbers */
typedef struct {
double real, imag;
} complex;
#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif
void mandelbrot(int height,
int width,
double real_min,
double imag_min,
double scale_real,
double scale_imag,
int maxiter,
#if _DISPLAY_
int setup_return,
Display *display,
Window win,
GC gc,
double scale_color,
double min_color)
#else
int ** output)
#endif
{
/* Calculate points and save/display */
//#pragma omp for schedule(runtime)
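/* One thread (single) creates the tasks; taskloop splits the row loop into
tasks of roughly 8 consecutive rows each, executed by the whole team. */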
#pragma omp parallel
#pragma omp single
#pragma omp taskloop grainsize(8)
for (int row = 0; row < height; ++row) {
for (int col = 0; col < width; ++col) {
complex z, c;
z.real = z.imag = 0;
/* Scale display coordinates to actual region */
c.real = real_min + ((double) col * scale_real);
c.imag = imag_min + ((double) (height-1-row) * scale_imag);
/* height-1-row so y axis displays
* with larger values at top
*/
/* Calculate z0, z1, .... until divergence or maximum iterations */
int k = 0;
double lengthsq, temp;
do {
temp = z.real*z.real - z.imag*z.imag + c.real;
z.imag = 2*z.real*z.imag + c.imag;
z.real = temp;
lengthsq = z.real*z.real + z.imag*z.imag;
++k;
} while (lengthsq < (N*N) && k < maxiter);
#if _DISPLAY_
/* Scale color and display point */
long color = (long) ((k-1) * scale_color) + min_color;
if (setup_return == EXIT_SUCCESS) {
#pragma omp critical
{
XSetForeground (display, gc, color);
XDrawPoint (display, win, gc, col, row);
}
}
#else
output[row][col]=k;
#endif
}
}
}
int main(int argc, char *argv[]) {
int maxiter = 1000;
double real_min;
double real_max;
double imag_min;
double imag_max;
int width = NPIXELS; /* dimensions of display window */
int height = NPIXELS;
double size=N, x0 = 0, y0 = 0;
#if _DISPLAY_
Display *display;
Window win;
GC gc;
int setup_return;
long min_color = 0, max_color = 0;
double scale_color;
#else
int ** output;
FILE *fp = NULL;
#endif
double scale_real, scale_imag;
/* Process command-line arguments */
for (int i=1; i<argc; i++) {
if (strcmp(argv[i], "-i")==0) {
maxiter = atoi(argv[++i]);
}
else if (strcmp(argv[i], "-w")==0) {
width = atoi(argv[++i]);
height = width;
}
else if (strcmp(argv[i], "-s")==0) {
size = atof(argv[++i]);
}
#if !_DISPLAY_
else if (strcmp(argv[i], "-o")==0) {
if((fp=fopen("mandel.out", "wb"))==NULL) {
fprintf(stderr, "Unable to open file\n");
return EXIT_FAILURE;
}
}
#endif
else if (strcmp(argv[i], "-c")==0) {
x0 = atof(argv[++i]);
y0 = atof(argv[++i]);
}
else {
#if _DISPLAY_
fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
fprintf(stderr, " -o to write computed image to disk (default no file generated)\n");
#endif
fprintf(stderr, " -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
fprintf(stderr, " -w to specify the size of the display window (default 800x800 pixels)\n");
#else
fprintf(stderr, " -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
fprintf(stderr, " -c to specify the center x0+iy0 of the square to compute (default origin)\n");
fprintf(stderr, " -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
return EXIT_FAILURE;
}
}
real_min = x0 - size;
real_max = x0 + size;
imag_min = y0 - size;
imag_max = y0 + size;
/* Produce text output */
fprintf(stdout, "\n");
fprintf(stdout, "Mandelbrot program\n");
fprintf(stdout, "center = (%g, %g), size = %g\n",
(real_max + real_min)/2, (imag_max + imag_min)/2,
(real_max - real_min)/2);
fprintf(stdout, "maximum iterations = %d\n", maxiter);
fprintf(stdout, "\n");
#if _DISPLAY_
/* Initialize for graphical display */
setup_return =
setup(width, height, &display, &win, &gc, &min_color, &max_color);
if (setup_return != EXIT_SUCCESS) {
fprintf(stderr, "Unable to initialize display, continuing\n");
return EXIT_FAILURE;
}
#else
output = malloc(height*sizeof(int *));
for (int row = 0; row < height; ++row)
output[row] = malloc(width*sizeof(int));
#endif
/* Compute factors to scale computational region to window */
scale_real = (double) (real_max - real_min) / (double) width;
scale_imag = (double) (imag_max - imag_min) / (double) height;
#if _DISPLAY_
/* Compute factor for color scaling */
scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif
/* Start timing */
double stamp;
START_COUNT_TIME;
#if _DISPLAY_
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
setup_return, display, win, gc, scale_color, min_color);
#else
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
output);
#endif
/* End timing */
STOP_COUNT_TIME("Total execution time");
/* Be sure all output is written */
#if _DISPLAY_
if (setup_return == EXIT_SUCCESS) {
XFlush (display);
}
#else
if (fp != NULL)
{
for (int row = 0; row < height; ++row)
if(fwrite(output[row], sizeof(int), width, fp) != width) {
fprintf(stderr, "Output file not written correctly\n");
}
}
#endif
#if _DISPLAY_
/* Wait for user response, then exit program */
if (setup_return == EXIT_SUCCESS) {
interact(display, &win, width, height,
real_min, real_max, imag_min, imag_max);
}
return EXIT_SUCCESS;
#endif
}
|
ex03.c
|
/* Copyright (c) 2019 CSC Training */
/* Copyright (c) 2021 ENCCS */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
int main()
{
int num_devices = omp_get_num_devices();
printf("Number of available devices %d\n", num_devices);
#pragma omp target
{
if (omp_is_initial_device()) {
printf("Running on host\n");
} else {
int nteams= omp_get_num_teams();
int nthreads= omp_get_num_threads();
printf("Running on device with %d teams in total and %d threads in each team\n",nteams,nthreads);
}
}
}
|
section_firstprivate.c
|
#include <stdio.h>
#include "omp_testsuite.h"
int
check_section_firstprivate (FILE * logFile)
{
int sum = 7;
int sum0 = 11;
int known_sum;
#pragma omp parallel
{
#pragma omp sections firstprivate(sum0)
{
#pragma omp section
{
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
}
#pragma omp section
{
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
}
#pragma omp section
{
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
}
} /*end of sections */
} /* end of parallel */
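/* each of the three sections adds its firstprivate copy of sum0 (= 11)
to the shared sum, which starts at 7 */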
known_sum = 11 * 3 + 7;
return (known_sum == sum);
} /* end of check_section_firstprivate */
int
crosscheck_section_firstprivate (FILE * logFile)
{
int sum = 7;
int sum0 = 11;
int known_sum;
#pragma omp parallel
{
#pragma omp sections private(sum0)
{
#pragma omp section
{
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
}
#pragma omp section
{
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
}
#pragma omp section
{
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
}
} /*end of sections */
} /* end of parallel */
known_sum = 11 * 3 + 7;
return (known_sum == sum);
} /* end of crosscheck_section_firstprivate */
|
3d7pt_var.c
|
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx = 34, Ny = 34, Nz = 34, Nt = 10; /* arbitrary fallback sizes/steps, used only when not supplied on the command line */
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 16;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 7
#pragma scop
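/* Jacobi-style double buffering: read time level t%2, write (t+1)%2 */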
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
transpose.c
|
/*
Copyright (c) 2013, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/*******************************************************************
NAME: transpose
PURPOSE: This program tests the efficiency with which a square matrix
can be transposed and stored in another matrix. The matrices
are distributed identically.
USAGE: Program input is three command line arguments that give the
matrix order, the number of times to repeat the operation
(iterations), and the number of threads to use:
transpose <# threads> <# iterations> <matrix order> [tile size]
An optional parameter specifies the tile size used to divide the
individual matrix blocks for improved cache and TLB performance.
The output consists of diagnostics to make sure the
transpose worked and timing statistics.
FUNCTIONS CALLED:
Other than OpenMP or standard C functions, the following
functions are used in this program:
wtime() portable wall-timer interface.
bail_out()
test_results() Verify that the transpose worked
HISTORY: Written by Tim Mattson, April 1999.
Updated by Rob Van der Wijngaart, December 2005.
*******************************************************************/
#include <par-res-kern_general.h>
#include <par-res-kern_omp.h>
#define A(i,j) A[i+order*(j)]
#define B(i,j) B[i+order*(j)]
static double test_results (size_t , double*, int);
int main(int argc, char ** argv) {
size_t order; /* order of the matrix */
size_t i, j, it, jt; /* matrix/tile indices */
int Tile_order=32; /* default tile size for tiling of local transpose */
int iterations; /* number of times to do the transpose */
int iter; /* dummy */
int tiling; /* boolean: true if tiling is used */
double bytes; /* combined size of matrices */
double * RESTRICT A; /* buffer to hold original matrix */
double * RESTRICT B; /* buffer to hold transposed matrix */
double abserr; /* absolute error */
double epsilon=1.e-8; /* error tolerance */
double transpose_time,/* timing parameters */
avgtime;
int nthread_input,
nthread;
int num_error=0; /* flag that signals that the requested and
obtained numbers of threads differ */
/*********************************************************************
** read and test input parameters
*********************************************************************/
printf("Parallel Research Kernels version %s\n", PRKVERSION);
printf("OpenMP Matrix transpose: B = A^T\n");
if (argc != 4 && argc != 5){
printf("Usage: %s <# threads> <# iterations> <matrix order> [tile size]\n",
*argv);
exit(EXIT_FAILURE);
}
/* Take number of threads to request from command line */
nthread_input = atoi(*++argv);
if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) {
printf("ERROR: Invalid number of threads: %d\n", nthread_input);
exit(EXIT_FAILURE);
}
omp_set_num_threads(nthread_input);
iterations = atoi(*++argv);
if (iterations < 1){
printf("ERROR: iterations must be >= 1 : %d \n",iterations);
exit(EXIT_FAILURE);
}
order = atoi(*++argv);
if (order <= 0){
printf("ERROR: Matrix Order must be greater than 0 : %zu \n", order);
exit(EXIT_FAILURE);
}
if (argc == 5) Tile_order = atoi(*++argv);
/* a non-positive tile size means no tiling of the local transpose */
tiling = (Tile_order > 0) && ((size_t)Tile_order < order);
if (!tiling) Tile_order = order;
/*********************************************************************
** Allocate space for the input and transpose matrix
*********************************************************************/
A = (double *)prk_malloc(order*order*sizeof(double));
if (A == NULL){
printf(" ERROR: cannot allocate space for input matrix: %ld\n",
order*order*sizeof(double));
exit(EXIT_FAILURE);
}
B = (double *)prk_malloc(order*order*sizeof(double));
if (B == NULL){
printf(" ERROR: cannot allocate space for output matrix: %ld\n",
order*order*sizeof(double));
exit(EXIT_FAILURE);
}
bytes = 2.0 * sizeof(double) * order * order;
#pragma omp parallel private (i, j, it, jt, iter)
{
#pragma omp master
{
nthread = omp_get_num_threads();
if (nthread != nthread_input) {
num_error = 1;
printf("ERROR: number of requested threads %d does not equal ",
nthread_input);
printf("number of spawned threads %d\n", nthread);
}
else {
printf("Number of threads = %i;\n",nthread_input);
printf("Matrix order = %ld\n", order);
printf("Number of iterations = %d\n", iterations);
if (tiling) {
printf("Tile size = %d\n", Tile_order);
#if COLLAPSE
printf("Loop collapse = on\n");
#else
printf("Loop collapse = off\n");
#endif
}
else
printf("Untiled\n");
}
}
bail_out(num_error);
/* Fill the original matrix, set transpose to known garbage value. */
if (tiling) {
#if COLLAPSE
#pragma omp for collapse(2)
#else
#pragma omp for
#endif
for (j=0; j<order; j+=Tile_order)
for (i=0; i<order; i+=Tile_order)
for (jt=j; jt<MIN(order,j+Tile_order);jt++)
for (it=i; it<MIN(order,i+Tile_order); it++){
A(it,jt) = (double) (order*jt + it);
B(it,jt) = 0.0;
}
}
else {
#pragma omp for
for (j=0;j<order;j++)
for (i=0;i<order; i++) {
A(i,j) = (double) (order*j + i);
B(i,j) = 0.0;
}
}
for (iter = 0; iter<=iterations; iter++){
/* start timer after a warmup iteration */
if (iter == 1) {
#pragma omp barrier
#pragma omp master
{
transpose_time = wtime();
}
}
/* Transpose the matrix */
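/* Each iteration adds A^T into B and then increments every element of A,
so every iteration leaves a verifiable contribution (see test_results). */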
if (!tiling) {
#pragma omp for
for (i=0;i<order; i++)
for (j=0;j<order;j++) {
B(j,i) += A(i,j);
A(i,j) += 1.0;
}
}
else {
#if COLLAPSE
#pragma omp for collapse(2)
#else
#pragma omp for
#endif
for (i=0; i<order; i+=Tile_order)
for (j=0; j<order; j+=Tile_order)
for (it=i; it<MIN(order,i+Tile_order); it++)
for (jt=j; jt<MIN(order,j+Tile_order);jt++) {
B(jt,it) += A(it,jt);
A(it,jt) += 1.0;
}
}
} /* end of iter loop */
#pragma omp barrier
#pragma omp master
{
transpose_time = wtime() - transpose_time;
}
} /* end of OpenMP parallel region */
abserr = test_results (order, B, iterations);
prk_free(B);
prk_free(A);
/*********************************************************************
** Analyze and output results.
*********************************************************************/
if (abserr < epsilon) {
printf("Solution validates\n");
avgtime = transpose_time/iterations;
printf("Rate (MB/s): %lf Avg time (s): %lf\n",
1.0E-06 * bytes/avgtime, avgtime);
#if VERBOSE
printf("Squared errors: %f \n", abserr);
#endif
exit(EXIT_SUCCESS);
}
else {
printf("ERROR: Aggregate squared error %lf exceeds threshold %e\n",
abserr, epsilon);
exit(EXIT_FAILURE);
}
} /* end of main */
/* function that computes the error committed during the transposition */
double test_results (size_t order, double *B, int iterations) {
double abserr=0.0;
size_t i, j;
double addit = ((double)(iterations+1) * (double) (iterations))/2.0;
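/* B(i,j) accumulated A(j,i)+iter for iter = 0..iterations, i.e.
(iterations+1)*(i*order+j) plus the arithmetic series 0+1+...+iterations;
addit is that series, iterations*(iterations+1)/2. */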
#pragma omp parallel for reduction(+:abserr)
for (j=0;j<order;j++) {
for (i=0;i<order; i++) {
abserr += ABS(B(i,j) - ((i*order + j)*(iterations+1L)+addit));
}
}
#if VERBOSE
#pragma omp master
{
printf(" Squared sum of differences: %f\n",abserr);
}
#endif
return abserr;
}
|
elemwise_binary_op.h
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file elemwise_binary_op.h
* \brief Function definition of elementwise binary operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#include <mxnet/operator_util.h>
#include <mxnet/op_attr_types.h>
#include <vector>
#include <string>
#include <utility>
#include <typeinfo>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../../engine/openmp.h"
#include "elemwise_unary_op.h"
#include "../../common/utils.h"
#include "./init_op.h"
namespace mxnet {
namespace op {
/*! Gather binary operator functions into ElemwiseBinaryOp class */
class ElemwiseBinaryOp : public OpBase {
public:
/*! \brief For sparse, assume missing rvalue is 0 */
template<typename OP, int Req>
struct MissingRValueOp {
typedef OP Operation;
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0)));
}
};
/*! \brief For sparse, assume missing lvalue is 0 */
template<typename OP, int Req>
struct MissingLValueOp {
typedef OP Operation;
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i]));
}
};
private:
/*!
* \brief CSR operation requires temp space
*/
enum ResourceRequestType {
kTempSpace
};
/*!
* \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input
* CPU-Only version
*/
template<typename DType, typename OP, typename xpu>
static inline size_t FillDense(mshadow::Stream<xpu> *s,
const size_t idx_l,
const size_t idx_r,
const OpReqType req,
mshadow::Tensor<xpu, 2, DType> *out,
const size_t iter_out) {
const int index_out_min = static_cast<int>(std::min(idx_l, idx_r));
if (static_cast<size_t>(index_out_min) > iter_out) {
const DType zero_input_val = OP::Map(DType(0), DType(0));
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) {
Fill<false>(s, (*out)[i], req, zero_input_val);
}
}
return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int'
}
static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) {
return a1.var() == a2.var();
}
public:
/*! \brief Minimum of three */
static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) {
return a < b ? (a < c ? a : c) : (b < c ? b : c);
}
private:
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseNone_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
Stream<xpu> *s = ctx.get_stream<xpu>();
const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1)
/ DataType<DType>::kLanes);
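// element count rounded up to whole packed units of DataType<DType>::kLanes values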
const DType *ograd_dptr = inputs[0].dptr<DType>();
if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>());
} else if (req[0] != kNullOp) {
DType *lgrad_dptr = outputs[0].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr);
});
}
if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>());
} else if (req[1] != kNullOp) {
DType *rgrad_dptr = outputs[1].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr);
});
}
}
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseIn_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(outputs.size(), 2U);
DCHECK_EQ(inputs.size(), 3U);
mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>();
const DType *ograd_dptr = inputs[0].dptr<DType>();
const DType *lhs_dptr = inputs[1].dptr<DType>();
const DType *rhs_dptr = inputs[2].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
const int size = static_cast<int>(
(outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * lgrad_dptr = outputs[0].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>, xpu>::Launch(
s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
const int size = static_cast<int>(
(outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * rgrad_dptr = outputs[1].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>, xpu>::Launch(
s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
}
template<
typename xpu,
typename LOP,
typename ROP,
bool in0_ok_dense = false,
bool in1_ok_dense = false,
bool in2_ok_dense = false,
typename BackupCompute>
static inline void RspRspOpBackward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs,
BackupCompute backup_compute) {
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
// lhs grad
if (req[0] != kNullOp) {
// RspRspOp can handle dense outputs so long as OP(0, 0) == 0
RspRspOp<LOP>(
s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0],
false, false, false, false);
// lhs in-place
RspRspOp<op::mshadow_op::mul>(
s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0],
false, false, true, false);
}
// rhs grad
if (req[1] != kNullOp) {
RspRspOp<ROP>(
s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1],
false, false, false, false);
// rhs in-place
RspRspOp<op::mshadow_op::mul>(
s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1],
false, false, true, false);
}
}
template<typename xpu, typename LOP, typename ROP>
static inline void DnsCsrCsrOpBackward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
const bool supported_ops = std::is_same<mshadow_op::right, LOP>::value &&
std::is_same<mshadow_op::left, ROP>::value;
CHECK(supported_ops)
<< "Only backward for mul is supported (LOP should be right, ROP should be left)";
const NDArray& out_grad = inputs[0];
const NDArray& lhs_in = inputs[1];
const NDArray& rhs_in = inputs[2];
const NDArray& lhs_grad = outputs[0];
const NDArray& rhs_grad = outputs[1];
const bool reverse = (outputs[0].storage_type() == kCSRStorage);
if (reverse) {
DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, rhs_in, req[0], lhs_grad, false);
Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), lhs_in.data()}, {req[1]},
{rhs_grad.data()});
} else {
DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, lhs_in, req[1], rhs_grad, false);
Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), rhs_in.data()}, {req[0]},
{lhs_grad.data()});
}
}
public:
/*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template<typename OP>
static void RspRspOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
/*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template<typename OP>
static void RspRspOp(mshadow::Stream<gpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void CsrCsrOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void CsrCsrOp(mshadow::Stream<gpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output);
/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void DnsCsrDnsOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void DnsCsrDnsOp(mshadow::Stream<gpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template<typename xpu, typename OP>
static void DnsCsrCsrOp(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
/*! \brief DNS -op- RSP binary operator for non-canonical NDArray */
template<typename xpu, typename OP>
static void DnsRspDnsOp(mshadow::Stream<xpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
public:
/*!
* \brief Rsp-op-Rsp operation which produces a dense result
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
/*!
* \brief Allow one of the binary inputs to be dense and still produce a sparse output.
* Typically used for sparse * dense = sparse.
* Note: for csr inputs it dispatches to the fallback path, except for csr, csr -> csr
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool PreferSparseStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
using namespace common;
CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name;
CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name;
const auto& lhs_stype = in_attrs->at(0);
const auto& rhs_stype = in_attrs->at(1);
auto& out_stype = out_attrs->at(0);
bool dispatched = false;
const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask;
const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
DispatchMode::kFComputeEx;
if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
// dns, dns -> dns
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
if (!dispatched && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
// rsp, rsp -> rsp
dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
// csr, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched &&
((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage))) {
// rsp, dns -> rsp
// dns, rsp -> rsp
dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched &&
((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage))) {
// csr, dns -> csr
// dns, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
if (!dispatched) {
dispatched = dispatch_fallback(out_attrs, dispatch_mode);
}
return dispatched;
}
/*!
* \brief Allow one of the inputs to be dense and produce a dense output;
* for rsp inputs, a sparse output is produced only when both inputs are of rsp type.
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
template<bool cpu_only, bool rsp, bool csr>
static bool PreferDenseStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
using namespace common;
CHECK_EQ(in_attrs->size(), 2);
CHECK_EQ(out_attrs->size(), 1);
const auto lhs_stype = (*in_attrs)[0];
const auto rhs_stype = (*in_attrs)[1];
bool dispatched = false;
const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask;
const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
DispatchMode::kFComputeEx;
if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
// dns, dns ... -> dns
dispatched = storage_type_assign(out_attrs, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
// rsp, rsp, ... -> rsp
dispatched = storage_type_assign(out_attrs, kRowSparseStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
if (!dispatched && csr && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
// csr, csr, ... -> csr
dispatched = storage_type_assign(out_attrs, kCSRStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage) ||
(lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage))) {
// dense, csr -> dense / csr, dense -> dense
dispatched = storage_type_assign(out_attrs, kDefaultStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage) ||
(lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage))) {
// dense, rsp -> dense / rsp, dense -> dense
dispatched = storage_type_assign(out_attrs, kDefaultStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
if (!dispatched) {
dispatch_fallback(out_attrs, dispatch_mode);
}
return true;
}
/*!
* \brief Backward pass computing input gradient using forward inputs
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
template<typename xpu, typename OP>
static void ComputeInt(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
}
});
});
}
template<typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
if (outputs[0].type_flag_ == mshadow::kBool) {
LOG(FATAL) << "Operator " << attrs.op->name << " does not support boolean type";
}
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
}
});
});
}
template<typename xpu, typename OP>
static void MixedUnaryBackwardUseInCompute(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
if (mxnet::common::is_int(outputs[0].type_flag_) ||
outputs[0].type_flag_ == mshadow::kBool) {
LOG(FATAL) << "gradient computation of operator " << attrs.op->name << " for "
<< mshadow::dtype_string(outputs[0].type_flag_) << " type is not supported";
}
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
}
});
});
}
template<typename xpu, typename OP>
static void MixedUnaryBackwardUseInOutCompute(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 3U);
CHECK_EQ(outputs.size(), 1U);
if (mxnet::common::is_int(outputs[0].type_flag_) ||
outputs[0].type_flag_ == mshadow::kBool) {
LOG(FATAL) << "gradient computation of operator " << attrs.op->name << " for "
<< mshadow::dtype_string(outputs[0].type_flag_) << " type is not supported";
}
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[2].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[2].dptr<DType>());
}
});
});
}
template<typename xpu, typename OP>
static void ComputeWithBool(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
}
});
});
}
template<typename xpu, typename OP>
static void ComputeLogic(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[1].type_flag_, EType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<bool>(),
inputs[0].dptr<DType>(),
inputs[1].dptr<EType>());
}
});
});
});
}
template<typename xpu, typename OP>
static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
}
});
});
}
template<typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace common;
CHECK_EQ(inputs.size(), 2);
CHECK_EQ(outputs.size(), 1);
if (req[0] == kNullOp) return;
const auto lhs_stype = inputs[0].storage_type();
const auto rhs_stype = inputs[1].storage_type();
const auto out_stype = outputs[0].storage_type();
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
if ((ContainsOnlyStorage(inputs, kRowSparseStorage)) &&
(out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) {
// rsp, rsp -> rsp
// rsp, rsp -> dns
RspRspOp<OP>(
s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false);
} else if (ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) {
// csr, csr -> csr
CsrCsrOp<OP>(s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]);
} else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
out_stype == kDefaultStorage) {
const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
const bool reverse = (lhs_stype == kCSRStorage);
DnsCsrDnsOp<OP>(s, attrs, ctx, dns, csr, req[0], outputs[0], reverse);
} else if (((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
out_stype == kDefaultStorage) {
const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
const bool reverse = (lhs_stype == kRowSparseStorage);
const NDArray& rsp = (reverse)? inputs[0] : inputs[1];
DnsRspDnsOp<xpu, OP>(s, attrs, ctx, dns, rsp, req[0], outputs[0], reverse);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
/*! \brief ComputeEx allowing dense lvalue and/or rvalue */
template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense>
static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(inputs.size(), 2);
CHECK_EQ(outputs.size(), 1);
if (req[0] == kNullOp) return;
const auto lhs_stype = inputs[0].storage_type();
const auto rhs_stype = inputs[1].storage_type();
const auto out_stype = outputs[0].storage_type();
if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) &&
((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
(lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
lhs_may_be_dense && rhs_may_be_dense) {
// rsp, rsp -> rsp
// rsp, rsp -> dns
// rsp, dns -> rsp
// dns, rsp -> rsp
// More than one dense input not allowed (this will be checked in RspRspOp):
// rsp, dns -> dns <-- NOT ALLOWED
// dns, rsp -> dns <-- NOT ALLOWED
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
RspRspOp<OP>(
s, attrs, ctx, inputs[0], inputs[1],
req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false);
} else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
} else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
out_stype == kCSRStorage) {
const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
const bool reverse = (lhs_stype == kCSRStorage);
DnsCsrCsrOp<xpu, OP>(attrs, ctx, dns, csr, req[0], outputs[0], reverse);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
CHECK_EQ(inputs.size(), 1U); // output grad
CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad
const auto in_stype = inputs[0].storage_type();
const auto lhs_stype = outputs[0].storage_type();
const auto rhs_stype = outputs[1].storage_type();
// lhs grad
if (req[0] != kNullOp) {
if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
CHECK_EQ(outputs[0].storage_type(), in_stype);
// rsp -> rsp, _. op requires 0-input returns 0-output
DCHECK_LT(std::fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
// rhs grad
if (req[1] != kNullOp) {
if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
CHECK_EQ(outputs[0].storage_type(), in_stype);
// rsp -> _, rsp. op requires 0-input returns 0-output
DCHECK_LT(std::fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<
typename xpu, typename LOP, typename ROP,
bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false>
static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace common;
CHECK_EQ(inputs.size(), 3U);
CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad
const auto out_grad_stype = inputs[0].storage_type();
const auto lhs_grad_stype = outputs[0].storage_type();
const auto rhs_grad_stype = outputs[1].storage_type();
if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
(lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) &&
(rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) {
// rsp, rsp, rsp -> [dns, rsp], [dns, rsp]
RspRspOpBackward<xpu, LOP, ROP, in0_ok_dense, in1_ok_dense, in2_ok_dense>(
attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>);
}
if (((lhs_grad_stype == kDefaultStorage && rhs_grad_stype == kCSRStorage) ||
(lhs_grad_stype == kCSRStorage && rhs_grad_stype == kDefaultStorage)) &&
out_grad_stype == kDefaultStorage) {
// dns, csr, dns -> [csr, dns] / csr, dns, dns -> [dns, csr]
DnsCsrCsrOpBackward<xpu, LOP, ROP>(attrs, ctx, inputs, req, outputs);
}
}
}; // class ElemwiseBinaryOp
/*! \brief Binary launch */
#define MXNET_OPERATOR_REGISTER_BINARY(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(2) \
.set_num_outputs(1) \
.set_attr<nnvm::FListInputNames>("FListInputNames", \
[](const NodeAttrs& attrs) { \
return std::vector<std::string>{"lhs", "rhs"}; \
}) \
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \
}) \
.add_argument("lhs", "NDArray-or-Symbol", "first input") \
.add_argument("rhs", "NDArray-or-Symbol", "second input")
/*! \brief Binary launch, with FComputeEx for csr and rsp available */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseStorageType<2, 1, true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
/*! \brief Binary launch, with FComputeEx for csr and rsp available.
When inputs contain both sparse and dense, sparse output is preferred. */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PS(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::PreferSparseStorageType) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
/*! \brief Binary launch, dense result
* FInferStorageType attr is not set using this macro.
* By default DefaultStorageType is used.
*/
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::SparseSparseWithDenseResult) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)
/*! \brief Binary launch, with FComputeEx for prefer dense */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PD(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
|
GB_unop__ainv_int8_int8.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__ainv_int8_int8
// op(A') function: GB_unop_tran__ainv_int8_int8
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = -z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__ainv_int8_int8
(
int8_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = -z ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__ainv_int8_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
LookupTable.c
|
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/LookupTable.c"
#else
static void THNN_(LookupTable_resetCount)(
THInteger_t *count_data,
THIndexTensor *input)
{
ptrdiff_t i;
THIndex_t *input_data = THIndexTensor_(data)(input);
ptrdiff_t numel = THIndexTensor_(nElement)(input);
for (i = 0; i<numel; i++)
{
int64_t k = input_data[i] - TH_INDEX_BASE;
count_data[k] = 0;
}
for (i = 0; i<numel; i++)
{
int64_t k = input_data[i] - TH_INDEX_BASE;
count_data[k]++;
}
}
void THNN_(LookupTable_accGradParameters)(
THNNState *state,
THIndexTensor *input,
THTensor *gradOutput,
THTensor *gradWeight,
THIntegerTensor *count,
THTensor *sorted,
THIndexTensor *indices,
bool scaleGradByFreq,
int paddingValue,
accreal ascale)
{
real scale = TH_CONVERT_ACCREAL_TO_REAL(ascale);
ptrdiff_t i;
THInteger_t *count_data = NULL;
if (scaleGradByFreq)
{
THIntegerTensor_(resize1d)(count, THTensor_sizeLegacyNoScalars(gradWeight, 0));
count_data = THIntegerTensor_(data)(count);
}
if (!THTensor_(isContiguous)(gradWeight))
THError("gradWeight must be contiguous");
if (!THIndexTensor_(isContiguous)(input))
THError("input must be contiguous");
if (input->is_empty() || (THIndexTensor_(nDimensionLegacyNoScalars)(input) != 1 && THIndexTensor_(nDimensionLegacyNoScalars)(input) != 2)) {
THDescBuff s1 = THIndexTensor_(sizeDesc)(input);
THError("input must be a non-empty vector or matrix, but is of shape: %s", s1.str);
}
THIndex_t *input_data = THIndexTensor_(data)(input);
ptrdiff_t numel = THIndexTensor_(nElement)(input);
int64_t numw = THTensor_(size)(gradWeight, 0);
// check that inputs are all within range
for (i=0; i<numel; i++)
if (input_data[i] < TH_INDEX_BASE || input_data[i] >= numw + TH_INDEX_BASE) {
THError("inputs need to be in the range %ld <= input < %ld, "
"but got input of value: %ld", TH_INDEX_BASE, (numw + TH_INDEX_BASE),
input_data[i]);
}
gradOutput = THTensor_(newContiguous)(gradOutput);
real *gw = THTensor_(data)(gradWeight);
real *go = THTensor_(data)(gradOutput);
int64_t stride = THTensor_(stride)(gradWeight, 0);
if (count_data)
THNN_(LookupTable_resetCount)(count_data, input);
#ifdef _OPENMP
if (numel > 1000)
{
// The strategy is to parallelize over sections of the vocabulary, so that
// thread 1 handles updates to gradWeight[0..nVocab/nThreads]. Every thread
// has to traverse the entire input, but the dominating factor is the axpy
// BLAS call.
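// Hypothetical illustration (not in the original source): with numw = 10 and
// nthreads = 4, each thread owns numw/nthreads + 1 = 3 rows, i.e. [0,3), [3,6),
// [6,9), [9,12); chunk entries past numw simply never match an input index.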
#pragma omp parallel private(i)
{
int tid = omp_get_thread_num();
int nthreads = omp_get_num_threads();
int64_t start = tid * (numw/nthreads + 1);
int64_t end = start + (numw/nthreads + 1);
for (i=0; i<numel; i++)
{
if (input_data[i] != paddingValue)
{
int64_t k = input_data[i] - TH_INDEX_BASE;
if (k >= start && k < end)
{
real scale_ = scale;
if (count_data) scale_ /= count_data[k];
THBlas_(axpy)(stride, scale_, go + i*stride, 1, gw + k*stride, 1);
}
}
}
}
THTensor_(free)(gradOutput);
return;
}
#endif
for (i=0; i<numel; i++)
{
if (input_data[i] != paddingValue)
{
int64_t k = input_data[i] - TH_INDEX_BASE;
real scale_ = scale;
if (count_data) scale_ /= count_data[k];
THBlas_(axpy)(stride, scale_, go + i*stride, 1, gw + k*stride, 1);
}
}
THTensor_(free)(gradOutput);
}
/*
* Keep the norm of weight smaller than maxNorm
*/
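/*
 * In formulas (as implemented by the loop below): norm = (sum_j |row[j]|^p)^(1/p)
 * with p = normType; if norm > maxNorm, the row is rescaled in place by the
 * factor maxNorm / (norm + 1e-7).
 */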
static void THNN_(LookupTable_renormRow)(
real *row_data,
int64_t stride,
real maxNorm,
real normType)
{
real norm = 0;
real new_norm;
int64_t j;
for (j=0; j<stride; j++)
{
if (normType == 1) {
norm += fabs(row_data[j]);
} else if (normType == 2) {
norm += row_data[j] * row_data[j];
} else {
norm += pow(fabs(row_data[j]), normType);
}
}
norm = pow(norm, 1.0 / normType);
if (norm > maxNorm)
{
new_norm = maxNorm / (norm + 1e-7);
for (j=0; j<stride; j++) {
row_data[j] *= new_norm;
}
}
}
static int THNN_(compare_THIndex)(const void* a, const void* b)
{
return *(const THIndex_t*)a < *(const THIndex_t*)b ? -1 : 1;
}
void THNN_(LookupTable_renorm)(
THNNState *state,
THIndexTensor *idx,
THTensor *weight,
accreal maxNorm_,
accreal normType_)
{
real maxNorm = TH_CONVERT_ACCREAL_TO_REAL(maxNorm_);
real normType = TH_CONVERT_ACCREAL_TO_REAL(normType_);
if (!THTensor_(isContiguous)(weight))
THError("weight must be contiguous");
if (!THIndexTensor_(isContiguous)(idx))
THError("input must be contiguous");
if (idx->is_empty() || THIndexTensor_(nDimensionLegacyNoScalars)(idx) != 1)
THError("idx must be a non-empty vector");
if (normType <= 0)
THError("non-positive-norm not supported");
ptrdiff_t i;
THIndex_t *row_idx = THIndexTensor_(data)(idx);
ptrdiff_t numel = THIndexTensor_(nElement)(idx);
int64_t numw = THTensor_(size)(weight, 0);
int64_t stride = THTensor_(stride)(weight, 0);
real *gw = THTensor_(data)(weight);
for (i=0; i<numel; i++) {
if (row_idx[i] < TH_INDEX_BASE || row_idx[i] >= numw + TH_INDEX_BASE) {
THError("input need to be in the range %ld <= input < %ld, "
"but got input of value: %ld", TH_INDEX_BASE, (numw + TH_INDEX_BASE),
row_idx[i]);
}
}
// get unique indices
qsort(row_idx, numel, sizeof(THIndex_t), THNN_(compare_THIndex));
ptrdiff_t ptr = 0;
for (i=0; i<numel; i++)
if (i == 0 || row_idx[i] != row_idx[i-1])
row_idx[ptr++] = row_idx[i];
numel = ptr;
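// Hypothetical illustration: row_idx = {3, 1, 3, 2} is sorted to {1, 2, 3, 3},
// and the in-place compaction above leaves {1, 2, 3} with numel = 3.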
#ifdef _OPENMP
if (numel > 1000)
{
// The strategy is to parallelize over the rows that appear in
// row_idx, so that thread 1 handles the rows in row_idx[0..numel/nThreads].
// This distributes the work evenly to each thread.
#pragma omp parallel for private(i)
for (i=0; i<numel; i++)
{
int64_t k = row_idx[i] - TH_INDEX_BASE;
THNN_(LookupTable_renormRow)(gw + k*stride, stride, maxNorm, normType);
}
return;
}
#endif
for (i=0; i<numel; i++)
{
int64_t k = row_idx[i] - TH_INDEX_BASE;
THNN_(LookupTable_renormRow)(gw + k*stride, stride, maxNorm, normType);
}
}
#endif
|
HYPRE_parcsr_pcg.c
|
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
/*--------------------------------------------------------------------------
* HYPRE_ParCSRPCGCreate
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_ParCSRPCGCreate( MPI_Comm comm, HYPRE_Solver *solver )
{
hypre_PCGFunctions * pcg_functions;
if (!solver)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
pcg_functions =
hypre_PCGFunctionsCreate(
hypre_CAlloc, hypre_ParKrylovFree, hypre_ParKrylovCommInfo,
hypre_ParKrylovCreateVector,
hypre_ParKrylovDestroyVector, hypre_ParKrylovMatvecCreate,
hypre_ParKrylovMatvec, hypre_ParKrylovMatvecDestroy,
hypre_ParKrylovInnerProd, hypre_ParKrylovCopyVector,
hypre_ParKrylovClearVector,
hypre_ParKrylovScaleVector, hypre_ParKrylovAxpy,
hypre_ParKrylovIdentitySetup, hypre_ParKrylovIdentity );
*solver = ( (HYPRE_Solver) hypre_PCGCreate( pcg_functions ) );
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRPCGDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_ParCSRPCGDestroy( HYPRE_Solver solver )
{
return( hypre_PCGDestroy( (void *) solver ) );
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRPCGSetup
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_ParCSRPCGSetup( HYPRE_Solver solver,
HYPRE_ParCSRMatrix A,
HYPRE_ParVector b,
HYPRE_ParVector x )
{
return( HYPRE_PCGSetup( solver,
(HYPRE_Matrix) A,
(HYPRE_Vector) b,
(HYPRE_Vector) x ) );
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRPCGSolve
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_ParCSRPCGSolve( HYPRE_Solver solver,
HYPRE_ParCSRMatrix A,
HYPRE_ParVector b,
HYPRE_ParVector x )
{
return( HYPRE_PCGSolve( solver,
(HYPRE_Matrix) A,
(HYPRE_Vector) b,
(HYPRE_Vector) x ) );
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRPCGSetTol
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_ParCSRPCGSetTol( HYPRE_Solver solver,
HYPRE_Real tol )
{
return( HYPRE_PCGSetTol( solver, tol ) );
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRPCGSetAbsoluteTol
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_ParCSRPCGSetAbsoluteTol( HYPRE_Solver solver,
HYPRE_Real a_tol )
{
return( HYPRE_PCGSetAbsoluteTol( solver, a_tol ) );
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRPCGSetMaxIter
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_ParCSRPCGSetMaxIter( HYPRE_Solver solver,
HYPRE_Int max_iter )
{
return( HYPRE_PCGSetMaxIter( solver, max_iter ) );
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRPCGSetStopCrit
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_ParCSRPCGSetStopCrit( HYPRE_Solver solver,
HYPRE_Int stop_crit )
{
return( HYPRE_PCGSetStopCrit( solver, stop_crit ) );
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRPCGSetTwoNorm
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_ParCSRPCGSetTwoNorm( HYPRE_Solver solver,
HYPRE_Int two_norm )
{
return( HYPRE_PCGSetTwoNorm( solver, two_norm ) );
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRPCGSetRelChange
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_ParCSRPCGSetRelChange( HYPRE_Solver solver,
HYPRE_Int rel_change )
{
return( HYPRE_PCGSetRelChange( solver, rel_change ) );
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRPCGSetPrecond
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_ParCSRPCGSetPrecond( HYPRE_Solver solver,
HYPRE_PtrToParSolverFcn precond,
HYPRE_PtrToParSolverFcn precond_setup,
HYPRE_Solver precond_solver )
{
return( HYPRE_PCGSetPrecond( solver,
(HYPRE_PtrToSolverFcn) precond,
(HYPRE_PtrToSolverFcn) precond_setup,
precond_solver ) );
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRPCGGetPrecond
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_ParCSRPCGGetPrecond( HYPRE_Solver solver,
HYPRE_Solver *precond_data_ptr )
{
return( HYPRE_PCGGetPrecond( solver, precond_data_ptr ) );
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRPCGSetPrintLevel
* an obsolete function; use HYPRE_PCG* functions instead
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_ParCSRPCGSetPrintLevel( HYPRE_Solver solver,
HYPRE_Int level )
{
return( HYPRE_PCGSetPrintLevel( solver, level ) );
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRPCGSetLogging
* an obsolete function; use HYPRE_PCG* functions instead
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_ParCSRPCGSetLogging( HYPRE_Solver solver,
HYPRE_Int level )
{
return( HYPRE_PCGSetLogging( solver, level ) );
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRPCGGetNumIterations
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_ParCSRPCGGetNumIterations( HYPRE_Solver solver,
HYPRE_Int *num_iterations )
{
return( HYPRE_PCGGetNumIterations( solver, num_iterations ) );
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRPCGGetFinalRelativeResidualNorm
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_ParCSRPCGGetFinalRelativeResidualNorm( HYPRE_Solver solver,
HYPRE_Real *norm )
{
return( HYPRE_PCGGetFinalRelativeResidualNorm( solver, norm ) );
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRPCGGetResidual
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_ParCSRPCGGetResidual( HYPRE_Solver solver,
HYPRE_ParVector *residual )
{
return( HYPRE_PCGGetResidual( solver, (void *) residual ) );
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRDiagScaleSetup
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_ParCSRDiagScaleSetup( HYPRE_Solver solver,
HYPRE_ParCSRMatrix A,
HYPRE_ParVector y,
HYPRE_ParVector x )
{
return 0;
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRDiagScale
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_ParCSRDiagScale( HYPRE_Solver solver,
HYPRE_ParCSRMatrix HA,
HYPRE_ParVector Hy,
HYPRE_ParVector Hx )
{
hypre_ParCSRMatrix *A = (hypre_ParCSRMatrix *) HA;
hypre_ParVector *y = (hypre_ParVector *) Hy;
hypre_ParVector *x = (hypre_ParVector *) Hx;
HYPRE_Real *x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
HYPRE_Real *y_data = hypre_VectorData(hypre_ParVectorLocalVector(y));
HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A));
HYPRE_Int *A_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A));
HYPRE_Int local_size = hypre_VectorSize(hypre_ParVectorLocalVector(x));
HYPRE_Int ierr = 0;
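/* The loop below computes x = D^{-1} y, where D is the diagonal of the local
   diag block of A. It relies on the hypre convention (assumed here) that each
   row of the diag block stores its diagonal entry first, so A_data[A_i[i]] is
   a_ii for local row i. */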
#if defined(HYPRE_USING_CUDA)
hypreDevice_DiagScaleVector(local_size, A_i, A_data, y_data, x_data);
//hypre_SyncCudaComputeStream(hypre_handle());
#else /* #if defined(HYPRE_USING_CUDA) */
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(x_data,y_data,A_data,A_i)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < local_size; i++)
{
x_data[i] = y_data[i]/A_data[A_i[i]];
}
#endif /* #if defined(HYPRE_USING_CUDA) */
return ierr;
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRSymPrecondSetup
*--------------------------------------------------------------------------*/
/*
HYPRE_Int
HYPRE_ParCSRSymPrecondSetup( HYPRE_Solver solver,
HYPRE_ParCSRMatrix A,
HYPRE_ParVector b,
HYPRE_ParVector x )
{
hypre_ParCSRMatrix *A = (hypre_ParCSRMatrix *) A;
hypre_ParVector *y = (hypre_ParVector *) b;
hypre_ParVector *x = (hypre_ParVector *) x;
HYPRE_Real *x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
HYPRE_Real *y_data = hypre_VectorData(hypre_ParVectorLocalVector(y));
HYPRE_Real *A_diag = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A));
HYPRE_Real *A_offd = hypre_CSRMatrixData(hypre_ParCSRMatrixOffD(A));
HYPRE_Int i, ierr = 0;
hypre_ParCSRMatrix *Asym;
MPI_Comm comm;
HYPRE_Int global_num_rows;
HYPRE_Int global_num_cols;
HYPRE_Int *row_starts;
HYPRE_Int *col_starts;
HYPRE_Int num_cols_offd;
HYPRE_Int num_nonzeros_diag;
HYPRE_Int num_nonzeros_offd;
Asym = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
row_starts, col_starts, num_cols_offd,
num_nonzeros_diag, num_nonzeros_offd);
for (i=0; i < hypre_VectorSize(hypre_ParVectorLocalVector(x)); i++)
{
x_data[i] = y_data[i]/A_data[A_i[i]];
}
return ierr;
} */
|
force.h
|
#ifndef FORCE_H
#define FORCE_H
#include <functional>
#include <gsl/gsl_integration.h>
#include <gsl/gsl_errno.h>
#include <vector>
#include "beam.h"
#include "constants.h"
using std::vector;
enum class ForceFormula {PARKHOMCHUK, NONMAG_DERBENEV, NONMAG_MESHKOV, NONMAG_NUM1D, NONMAG_NUM3D, MESHKOV, DSM};
class FrictionForceSolver{
protected:
double time_cooler;
double mag_field = 0;
const double k_f = -4*k_pi*k_c*k_c*k_ke*k_ke*k_e*k_e*k_e/(k_me*1e6);
const double k_wp = 4*k_pi*k_c*k_c*k_e*k_ke/(k_me*1e6);
const double k_rho_min = k_e*k_ke*k_c*k_c/(k_me*1e6);
virtual void init(EBeam& ebeam){};
virtual void fin(){};
double max_impact_factor(double v_dlt, int charge_number,double density_e);
public:
void set_time_cooler(double t){time_cooler = t;}
void set_mag_field(double x){mag_field = x;}
double t_cooler(){return time_cooler;}
virtual void friction_force(int charge_number, int ion_number,
vector<double>& v_tr, vector<double>& v_l, vector<double>& density,
EBeam& ebeam, vector<double>& force_tr, vector<double>& force_long) = 0;
};
class ForcePark: public FrictionForceSolver {
private:
const double k_f = -4*k_c*k_c*k_ke*k_ke*k_e*k_e*k_e/(k_me*1e6);
double t_eff = 0; //Effective temperature.
double v_eff = 0; //Effective velocity.
void rho_lamor_dlt2_eff_e(double v2_eff_e, double mag_field, vector<double>& v_rms_l, vector<double>& v_rms_t, Temperature tpr,
int ion_number, vector<double>& dlt2_eff_e, vector<double>& rho_lamor);
double dlt(Temperature tpr, double v2, vector<double>& dlt2_eff_e, int i);
double lc(Temperature tpr, double rho_max, double rho_min, vector<double>& rho_lamor, int i);
public:
void set_t_eff(double x){t_eff = x; v_eff = sqrt(t_eff*k_c*k_c/(k_me*1e6));}
void set_v_eff(double v){v_eff = v; t_eff = v_eff*v_eff*k_me*1e6/(k_c*k_c);}
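// As implied by the two setters above (with k_me apparently the electron mass
// in MeV, so k_me*1e6 is m_e*c^2 in eV): t_eff [eV] = (v_eff/k_c)^2 * k_me*1e6,
// i.e. the effective temperature corresponds to m_e*v_eff^2 expressed in eV.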
virtual void friction_force(int charge_number, int ion_number,
vector<double>& v_tr, vector<double>& v_l, vector<double>& density,
EBeam& ebeam, vector<double>& force_tr, vector<double>& force_long);
};
class ForceNonMag: public FrictionForceSolver {
protected:
bool smooth_rho_max = false;
virtual double f_const(int charge_number){return charge_number*charge_number*k_f;}
double rho_min_const(int charge_number) {return charge_number*k_rho_min;}
double rho_max_1(int charge_number, double density_e){return pow(3*charge_number/density_e, 1.0/3);}
double rho_max_2(double v){return v*time_cooler;};
double rho_max(int charge_number, double v2, double ve2, double ne);
virtual void force(double v, double v_tr, double v_l, double v2, double ve_tr, double ve_l, double ve2,
double f_const,double rho_min_const, int charge_number, double ne,
double& force_tr, double& force_l) = 0;
public:
void set_smooth_rho_max(bool b){smooth_rho_max = b;}
virtual void friction_force(int charge_number, int ion_number,
vector<double>& v_tr, vector<double>& v_l, vector<double>& density,
EBeam& ebeam, vector<double>& force_tr, vector<double>& force_long);
};
class ForceNonMagDerbenev: public ForceNonMag {
private:
double rho_max_ve_tr(double ve_tr, double ne);
void force(double v, double v_tr, double v_l, double v2, double ve_tr, double ve_l, double ve2,
double f_const, double rho_min_const, int charge_number, double ne,
double& force_tr, double& force_l);
public:
};
class ForceNonMagMeshkov: public ForceNonMag {
private:
void force(double v, double v_tr, double v_l, double v2, double ve_tr, double ve_l, double ve2,
double f_const, double rho_min_const, int charge_number, double ne,
double& force_tr, double& force_l);
public:
};
class ForceNonMagNumeric1D: public ForceNonMag {
private:
double f_const(int charge_number){return 0.28209479177387814*charge_number*charge_number*k_f;} //Coef - 1/sqrt(4*pi)
const double k_f = 2*sqrt(2*k_pi)*k_pi*k_c*k_c*k_ke*k_ke*k_e*k_e*k_e/(k_me*1e6);
size_t limit = 100;
double espabs = 1e-6;
double esprel = 1e-6;
struct P{
double v_tr;
double v_l;
double ve_tr;
double ve_l;
int flag; //0: calculate B_tr; else: calculate B_l;
};
#ifdef _OPENMP
vector<gsl_integration_workspace*> gw;
vector<P> p;
#else
gsl_integration_workspace *gw = nullptr;
P p;
#endif // _OPENMP
double b(double q, void* params);
void force(double v, double v_tr, double v_l, double v2, double ve_tr, double ve_l, double ve2,
double f_const, double rho_min_const, int charge_number, double ne,
double& force_tr, double& force_l);
public:
void set_espabs(double x){espabs = x;}
void set_esprel(double x){esprel = x;}
ForceNonMagNumeric1D(int n=100);
~ForceNonMagNumeric1D();
};
//
class ForceNonMagNumeric3D: public ForceNonMag {
private:
size_t limit = 100;
double espabs = 1e-5;
double esprel = 1e-3;
struct P{
double v_tr;
double v_l;
double ve_tr;
double ve_l;
double vtr;
double vl;
double rho_max;
int charge_number;
int flag; //0: calculate B_tr; else: calculate B_l;
};
bool use_gsl = false;
bool use_mean_rho_min = false;
#ifdef _OPENMP
vector<gsl_integration_workspace*> giw;
vector<gsl_integration_workspace*> gmw;
vector<gsl_integration_workspace*> gow;
vector<P> p;
#else
gsl_integration_workspace *giw = nullptr;
gsl_integration_workspace *gmw = nullptr;
gsl_integration_workspace *gow = nullptr;
P p;
#endif // _OPENMP
#ifdef _OPENMP
static double mean_rho_min;
static double mean_lc;
#pragma omp threadprivate(mean_rho_min, mean_lc)
static bool first_run;
static vector<vector<double>> exp_vtr;
static vector<double> hlf_v2tr;
static vector<double> hlf_v2l;
static vector<vector<double>> vtr_cos;
static vector<double> vl;
static vector<double> vtr;
static vector<vector<double>> v2tr_sin2;
#pragma omp threadprivate(exp_vtr, hlf_v2tr, hlf_v2l, vtr_cos, vl, vtr, v2tr_sin2, first_run)
#else
double mean_rho_min = 0;
double mean_lc = 0;
vector<vector<double>> exp_vtr;
vector<double> hlf_v2tr;
vector<double> hlf_v2l;
vector<vector<double>> vtr_cos;
vector<double> vl;
vector<double> vtr;
vector<vector<double>> v2tr_sin2;
bool first_run = true;
#endif // _OPENMP
bool const_tmpr = true;
int n_tr = 20;
int n_l = 10;
int n_phi = 10;
double d;
double f_inv_norm;
void pre_int(double sgm_vtr, double sgm_vl);
void calc_exp_vtr(double sgm_vtr, double sgm_vl);
void init(EBeam& ebeam);
double inner_integrand(double phi, void* params);
double middle_integrand(double vl, void* params);
double outter_integrand(double vtr, void* params);
double inner_norm_integrand(double vl, void* params);
double outter_norm_integrand(double vtr, void* params);
void force_grid(double v, double v_tr, double v_l, double v2, double ve_tr, double ve_l, double ve2,
double f_const, double rho_min_const, int charge_number, double ne,
double& force_tr, double& force_l);
void force_gsl(double v, double v_tr, double v_l, double v2, double ve_tr, double ve_l, double ve2,
double f_const, double rho_min_const, int charge_number, double ne,
double& force_tr, double& force_l);
void force(double v, double v_tr, double v_l, double v2, double ve_tr, double ve_l, double ve2,
double f_const, double rho_min_const, int charge_number, double ne,
double& force_tr, double& force_l);
public:
void set_espabs(double x){espabs = x;}
void set_esprel(double x){esprel = x;}
void set_gsl(bool b) {use_gsl = b;}
void set_mean_rho_min(bool b) {use_mean_rho_min = b;}
void set_grid(int ntr, int nl, int nphi){n_tr = ntr; n_l = nl; n_phi = nphi; first_run = true;}
ForceNonMagNumeric3D(int n=100);
~ForceNonMagNumeric3D();
};
// gsl_function wrapper for member functions and functors of a class.
template< typename F >
class gsl_function_pp : public gsl_function {
public:
gsl_function_pp(const F& func) : _func(func) {
function = &gsl_function_pp::invoke;
params=this;
}
private:
const F& _func;
static double invoke(double x, void *params) {
return static_cast<gsl_function_pp*>(params)->_func(x);
}
};
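// Minimal usage sketch (illustrative only; the integrand, bounds and tolerances
// below are placeholders, not taken from this codebase):
//
//   auto f = [](double x) { return std::exp(-x * x); };
//   gsl_function_pp<decltype(f)> wrapper(f);
//   gsl_function* F = static_cast<gsl_function*>(&wrapper);
//   double result, error;
//   gsl_integration_workspace* w = gsl_integration_workspace_alloc(100);
//   gsl_integration_qags(F, 0.0, 1.0, 1e-6, 1e-6, 100, w, &result, &error);
//   gsl_integration_workspace_free(w);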
class ForceMeshkov: public FrictionForceSolver {
protected:
// A fudge factor to smooth the friction force shape, using the "Classical"
// default definition from the BETACOOL documentation.
double k = 2;
void force(double ve_tr, double ve_l, double ve2_tr, double ve2_l, double v_tr, double v_l, double v2,
double rho_min_const, int charge_number, double density, double f_const, double& force_tr,double& force_l);
public:
void set_smooth_factor(double x){k = x;}
virtual void friction_force(int charge_number, int ion_number,
vector<double>& v_tr, vector<double>& v_l, vector<double>& density,
EBeam& ebeam, vector<double>& force_tr, vector<double>& force_long);
};
class ForceDSM: public FrictionForceSolver { //Derbenev-Skrinsky-Meshkov formula for magnetized cooling.
bool mag_only = false;
// A fudge factor to smooth the friction force shape, using the "Classical"
// default definition from the BETACOOL documentation.
double k = 2;
int n_a = 100;
int n_ve = 100;
bool const_tpr = true;
#ifdef _OPENMP
static bool first_run;
static vector<double> a;
static vector<double> cos_a;
static vector<double> tan_a;
static vector<double> t2;
static vector<double> ve;
static vector<double> exp_ve2;
#pragma omp threadprivate(a, cos_a, tan_a, t2, ve, exp_ve2, first_run)
#else
bool first_run = true;
vector<double> a;
vector<double> cos_a;
vector<double> tan_a;
vector<double> t2;
vector<double> ve;
vector<double> exp_ve2;
#endif // _OPENMP
int n_tr = 20;
int n_l = 10;
int n_phi = 10;
#ifdef _OPENMP
static bool first_run_fa;
static double f_inv_norm;
static vector<vector<double>> exp_vtr;
static vector<double> hlf_v2tr;
static vector<double> hlf_v2l;
static vector<vector<double>> vtr_cos;
static vector<double> vl;
static vector<double> vtr;
static vector<vector<double>> v2tr_sin2;
#pragma omp threadprivate(exp_vtr, hlf_v2tr, hlf_v2l, vtr_cos, vl, vtr, v2tr_sin2, first_run_fa, f_inv_norm)
#else
bool first_run_fa = true;
double f_inv_norm;
vector<vector<double>> exp_vtr;
vector<double> hlf_v2tr;
vector<double> hlf_v2l;
vector<vector<double>> vtr_cos;
vector<double> vl;
vector<double> vtr;
vector<vector<double>> v2tr_sin2;
#endif // _OPENMP
protected:
void init(EBeam& ebeam);
void pre_int(double sgm_vtr, double sgm_vl);
void calc_exp_vtr(double sgm_vtr, double sgm_vl);
void calc_alpha();
void calc_ve();
void calc_exp_ve2(double ve2_l, vector<double>& t2);
void force(double ve_tr, double ve_l, double ve2_tr, double ve2_l, double v_tr, double v_l, double v2,
double rho_min_const, int charge_number, double density, double f_const, double& force_tr,double& force_l);
public:
void set_smooth_factor(double x){k = x;}
void set_steps(int i) {n_a = i; n_ve = i; first_run = true;}
void set_grid(int ntr, int nl, int nphi){n_tr = ntr; n_l = nl; n_phi = nphi; first_run_fa = true;}
void set_mag_only(bool b){mag_only = b;}
virtual void friction_force(int charge_number, int ion_number,
vector<double>& v_tr, vector<double>& v_l, vector<double>& density,
EBeam& ebeam, vector<double>& force_tr, vector<double>& force_long);
};
#endif // FORCE_H
|
deconvolution_pack4.h
|
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void deconvolution_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packed, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
const int maxk = kernel_w * kernel_h;
const float* bias_data_ptr = bias_data;
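// The loops below treat the deconvolution as a gather: for output pixel (i, j)
// and kernel tap (y, x), the contributing input row is sy = sys / stride_h with
// sys = i + y * dilation_h - (kernel_extent_h - 1), accepted only when sys is
// non-negative, divisible by the stride, and sy < h (and likewise for sx).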
// num_output
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m128 _sum = _mm_setzero_ps();
if (bias_data_ptr)
{
_sum = _mm_loadu_ps(bias_data_ptr + p * 4);
}
const float* kptr = weight_data_packed.channel(p);
// channels
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const float* sptr = m.row(sy) + sx * 4;
int k = (y * kernel_w + x) * 16;
__m128 _val0 = _mm_load1_ps(sptr);
__m128 _val1 = _mm_load1_ps(sptr + 1);
__m128 _val2 = _mm_load1_ps(sptr + 2);
__m128 _val3 = _mm_load1_ps(sptr + 3);
__m128 _w0 = _mm_load_ps(kptr + k);
__m128 _w1 = _mm_load_ps(kptr + k + 4);
__m128 _w2 = _mm_load_ps(kptr + k + 8);
__m128 _w3 = _mm_load_ps(kptr + k + 12);
_sum = _mm_comp_fmadd_ps(_val0, _w0, _sum);
_sum = _mm_comp_fmadd_ps(_val1, _w1, _sum);
_sum = _mm_comp_fmadd_ps(_val2, _w2, _sum);
_sum = _mm_comp_fmadd_ps(_val3, _w3, _sum);
}
}
kptr += maxk * 16;
}
_sum = activation_sse(_sum, activation_type, activation_params);
_mm_storeu_ps(outptr, _sum);
outptr += 4;
}
}
}
}
|
libperf.c
|
/**
* Copyright (C) Mellanox Technologies Ltd. 2001-2019. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015-2016. ALL RIGHTS RESERVED.
* Copyright (C) ARM Ltd. 2017. ALL RIGHTS RESERVED.
* See file LICENSE for terms.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <ucs/debug/log.h>
#include <ucs/arch/bitops.h>
#include <ucs/sys/module.h>
#include <ucs/sys/string.h>
#include <string.h>
#include <tools/perf/lib/libperf_int.h>
#include <unistd.h>
#if _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#define ATOMIC_OP_CONFIG(_size, _op32, _op64, _op, _msg, _params, _status) \
_status = __get_atomic_flag((_size), (_op32), (_op64), (_op)); \
if (_status != UCS_OK) { \
ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support atomic %s for " \
"message size %zu bytes", UCT_PERF_TEST_PARAMS_ARG(_params), \
(_msg)[_op], (_size)); \
return _status; \
}
#define ATOMIC_OP_CHECK(_size, _attr, _required, _params, _msg) \
if (!ucs_test_all_flags(_attr, _required)) { \
if ((_params)->flags & UCX_PERF_TEST_FLAG_VERBOSE) { \
ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support required " \
#_size"-bit atomic: %s", UCT_PERF_TEST_PARAMS_ARG(_params), \
(_msg)[ucs_ffs64(~(_attr) & (_required))]); \
} \
return UCS_ERR_UNSUPPORTED; \
}
typedef struct {
union {
struct {
size_t dev_addr_len;
size_t iface_addr_len;
size_t ep_addr_len;
} uct;
struct {
size_t worker_addr_len;
size_t total_wireup_len;
} ucp;
};
size_t rkey_size;
unsigned long recv_buffer;
} ucx_perf_ep_info_t;
const ucx_perf_allocator_t* ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_LAST];
static const char *perf_iface_ops[] = {
[ucs_ilog2(UCT_IFACE_FLAG_AM_SHORT)] = "am short",
[ucs_ilog2(UCT_IFACE_FLAG_AM_BCOPY)] = "am bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_AM_ZCOPY)] = "am zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_PUT_SHORT)] = "put short",
[ucs_ilog2(UCT_IFACE_FLAG_PUT_BCOPY)] = "put bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_PUT_ZCOPY)] = "put zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_GET_SHORT)] = "get short",
[ucs_ilog2(UCT_IFACE_FLAG_GET_BCOPY)] = "get bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_GET_ZCOPY)] = "get zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE)] = "peer failure handler",
[ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_IFACE)] = "connect to iface",
[ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_EP)] = "connect to ep",
[ucs_ilog2(UCT_IFACE_FLAG_AM_DUP)] = "full reliability",
[ucs_ilog2(UCT_IFACE_FLAG_CB_SYNC)] = "sync callback",
[ucs_ilog2(UCT_IFACE_FLAG_CB_ASYNC)] = "async callback",
[ucs_ilog2(UCT_IFACE_FLAG_PENDING)] = "pending",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_SHORT)] = "tag eager short",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_BCOPY)] = "tag eager bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_ZCOPY)] = "tag eager zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_RNDV_ZCOPY)] = "tag rndv zcopy"
};
static const char *perf_atomic_op[] = {
[UCT_ATOMIC_OP_ADD] = "add",
[UCT_ATOMIC_OP_AND] = "and",
[UCT_ATOMIC_OP_OR] = "or" ,
[UCT_ATOMIC_OP_XOR] = "xor"
};
static const char *perf_atomic_fop[] = {
[UCT_ATOMIC_OP_ADD] = "fetch-add",
[UCT_ATOMIC_OP_AND] = "fetch-and",
[UCT_ATOMIC_OP_OR] = "fetch-or",
[UCT_ATOMIC_OP_XOR] = "fetch-xor",
[UCT_ATOMIC_OP_SWAP] = "swap",
[UCT_ATOMIC_OP_CSWAP] = "cswap"
};
/*
* This Quickselect routine is based on the algorithm described in
* "Numerical recipes in C", Second Edition,
* Cambridge University Press, 1992, Section 8.5, ISBN 0-521-43108-5
* This code by Nicolas Devillard - 1998. Public domain.
*/
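/* Note: the routine below partially reorders arr[] in place while it searches;
   here it is used to extract the median latency sample from the timing queue. */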
static ucs_time_t __find_median_quick_select(ucs_time_t arr[], int n)
{
int low, high ;
int median;
int middle, ll, hh;
#define ELEM_SWAP(a,b) { register ucs_time_t t=(a);(a)=(b);(b)=t; }
low = 0 ; high = n-1 ; median = (low + high) / 2;
for (;;) {
if (high <= low) /* One element only */
return arr[median] ;
if (high == low + 1) { /* Two elements only */
if (arr[low] > arr[high])
ELEM_SWAP(arr[low], arr[high]) ;
return arr[median] ;
}
/* Find median of low, middle and high items; swap into position low */
middle = (low + high) / 2;
if (arr[middle] > arr[high]) ELEM_SWAP(arr[middle], arr[high]) ;
if (arr[low] > arr[high]) ELEM_SWAP(arr[low], arr[high]) ;
if (arr[middle] > arr[low]) ELEM_SWAP(arr[middle], arr[low]) ;
/* Swap low item (now in position middle) into position (low+1) */
ELEM_SWAP(arr[middle], arr[low+1]) ;
/* Nibble from each end towards middle, swapping items when stuck */
ll = low + 1;
hh = high;
for (;;) {
do ll++; while (arr[low] > arr[ll]) ;
do hh--; while (arr[hh] > arr[low]) ;
if (hh < ll)
break;
ELEM_SWAP(arr[ll], arr[hh]) ;
}
/* Swap middle item (in position low) back into correct position */
ELEM_SWAP(arr[low], arr[hh]) ;
/* Re-set active partition */
if (hh <= median)
low = ll;
if (hh >= median)
high = hh - 1;
}
}
static ucs_status_t
uct_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length,
unsigned flags, uct_allocated_memory_t *alloc_mem)
{
ucs_status_t status;
status = uct_iface_mem_alloc(perf->uct.iface, length,
flags, "perftest", alloc_mem);
if (status != UCS_OK) {
ucs_free(alloc_mem);
ucs_error("failed to allocate memory: %s", ucs_status_string(status));
return status;
}
ucs_assert(alloc_mem->md == perf->uct.md);
return UCS_OK;
}
static void uct_perf_test_free_host(const ucx_perf_context_t *perf,
uct_allocated_memory_t *alloc_mem)
{
uct_iface_mem_free(alloc_mem);
}
static void ucx_perf_test_memcpy_host(void *dst, ucs_memory_type_t dst_mem_type,
const void *src, ucs_memory_type_t src_mem_type,
size_t count)
{
if ((dst_mem_type != UCS_MEMORY_TYPE_HOST) ||
(src_mem_type != UCS_MEMORY_TYPE_HOST)) {
ucs_error("wrong memory type passed src - %d, dst - %d",
src_mem_type, dst_mem_type);
} else {
memcpy(dst, src, count);
}
}
static ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf)
{
ucx_perf_params_t *params = &perf->params;
ucs_status_t status;
unsigned flags;
size_t buffer_size;
if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && params->iov_stride) {
buffer_size = params->msg_size_cnt * params->iov_stride;
} else {
buffer_size = ucx_perf_get_message_size(params);
}
/* TODO use params->alignment */
flags = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ?
UCT_MD_MEM_FLAG_NONBLOCK : 0;
flags |= UCT_MD_MEM_ACCESS_ALL;
/* Allocate send buffer memory */
status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count,
flags, &perf->uct.send_mem);
if (status != UCS_OK) {
goto err;
}
perf->send_buffer = perf->uct.send_mem.address;
/* Allocate receive buffer memory */
status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count,
flags, &perf->uct.recv_mem);
if (status != UCS_OK) {
goto err_free_send;
}
perf->recv_buffer = perf->uct.recv_mem.address;
/* Allocate IOV datatype memory */
perf->params.msg_size_cnt = params->msg_size_cnt;
perf->uct.iov = malloc(sizeof(*perf->uct.iov) *
perf->params.msg_size_cnt *
params->thread_count);
if (NULL == perf->uct.iov) {
status = UCS_ERR_NO_MEMORY;
ucs_error("Failed allocate send IOV(%lu) buffer: %s",
perf->params.msg_size_cnt, ucs_status_string(status));
goto err_free_recv;
}
ucs_debug("allocated memory. Send buffer %p, Recv buffer %p",
perf->send_buffer, perf->recv_buffer);
return UCS_OK;
err_free_recv:
perf->allocator->uct_free(perf, &perf->uct.recv_mem);
err_free_send:
perf->allocator->uct_free(perf, &perf->uct.send_mem);
err:
return status;
}
static void uct_perf_test_free_mem(ucx_perf_context_t *perf)
{
perf->allocator->uct_free(perf, &perf->uct.send_mem);
perf->allocator->uct_free(perf, &perf->uct.recv_mem);
free(perf->uct.iov);
}
void ucx_perf_test_start_clock(ucx_perf_context_t *perf)
{
ucs_time_t start_time = ucs_get_time();
perf->start_time_acc = ucs_get_accurate_time();
perf->end_time = (perf->params.max_time == 0.0) ? UINT64_MAX :
ucs_time_from_sec(perf->params.max_time) + start_time;
perf->prev_time = start_time;
perf->prev.time = start_time;
perf->prev.time_acc = perf->start_time_acc;
perf->current.time_acc = perf->start_time_acc;
}
/* Initialize/reset all parameters that could be modified by the warm-up run */
static void ucx_perf_test_prepare_new_run(ucx_perf_context_t *perf,
const ucx_perf_params_t *params)
{
unsigned i;
perf->max_iter = (perf->params.max_iter == 0) ? UINT64_MAX :
perf->params.max_iter;
perf->report_interval = ucs_time_from_sec(perf->params.report_interval);
perf->current.time = 0;
perf->current.msgs = 0;
perf->current.bytes = 0;
perf->current.iters = 0;
perf->prev.msgs = 0;
perf->prev.bytes = 0;
perf->prev.iters = 0;
perf->timing_queue_head = 0;
for (i = 0; i < TIMING_QUEUE_SIZE; ++i) {
perf->timing_queue[i] = 0;
}
ucx_perf_test_start_clock(perf);
}
static void ucx_perf_test_init(ucx_perf_context_t *perf,
const ucx_perf_params_t *params)
{
unsigned group_index;
perf->params = *params;
group_index = rte_call(perf, group_index);
if (0 == group_index) {
perf->allocator = ucx_perf_mem_type_allocators[params->send_mem_type];
} else {
perf->allocator = ucx_perf_mem_type_allocators[params->recv_mem_type];
}
ucx_perf_test_prepare_new_run(perf, params);
}
void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result)
{
ucs_time_t median;
double factor;
if (perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG) {
factor = 2.0;
} else {
factor = 1.0;
}
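/* For ping-pong tests each iteration is a full round trip, so the factor of 2
   halves the reported latencies and doubles the byte/message rates below. */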
result->iters = perf->current.iters;
result->bytes = perf->current.bytes;
result->elapsed_time = perf->current.time_acc - perf->start_time_acc;
/* Latency */
median = __find_median_quick_select(perf->timing_queue, TIMING_QUEUE_SIZE);
result->latency.typical = ucs_time_to_sec(median) / factor;
result->latency.moment_average =
(perf->current.time_acc - perf->prev.time_acc)
/ (perf->current.iters - perf->prev.iters)
/ factor;
result->latency.total_average =
(perf->current.time_acc - perf->start_time_acc)
/ perf->current.iters
/ factor;
/* Bandwidth */
result->bandwidth.typical = 0.0; // Undefined
result->bandwidth.moment_average =
(perf->current.bytes - perf->prev.bytes) /
(perf->current.time_acc - perf->prev.time_acc) * factor;
result->bandwidth.total_average =
perf->current.bytes /
(perf->current.time_acc - perf->start_time_acc) * factor;
/* Packet rate */
result->msgrate.typical = 0.0; // Undefined
result->msgrate.moment_average =
(perf->current.msgs - perf->prev.msgs) /
(perf->current.time_acc - perf->prev.time_acc) * factor;
result->msgrate.total_average =
perf->current.msgs /
(perf->current.time_acc - perf->start_time_acc) * factor;
}
static ucs_status_t ucx_perf_test_check_params(ucx_perf_params_t *params)
{
size_t it;
/* check if zero-size messages are requested and supported */
if ((/* they are not supported by: */
/* - UCT tests, except UCT AM Short/Bcopy */
(params->api == UCX_PERF_API_UCT) ||
(/* - UCP RMA and AMO tests */
(params->api == UCX_PERF_API_UCP) &&
(params->command != UCX_PERF_CMD_AM) &&
(params->command != UCX_PERF_CMD_TAG) &&
(params->command != UCX_PERF_CMD_TAG_SYNC) &&
(params->command != UCX_PERF_CMD_STREAM))) &&
ucx_perf_get_message_size(params) < 1) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Message size too small, need to be at least 1");
}
return UCS_ERR_INVALID_PARAM;
}
if ((params->api == UCX_PERF_API_UCP) &&
((params->send_mem_type != UCS_MEMORY_TYPE_HOST) ||
(params->recv_mem_type != UCS_MEMORY_TYPE_HOST)) &&
((params->command == UCX_PERF_CMD_PUT) ||
(params->command == UCX_PERF_CMD_GET) ||
(params->command == UCX_PERF_CMD_ADD) ||
(params->command == UCX_PERF_CMD_FADD) ||
(params->command == UCX_PERF_CMD_SWAP) ||
(params->command == UCX_PERF_CMD_CSWAP))) {
/* TODO: remove when support for non-HOST memory types will be added */
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("UCP doesn't support RMA/AMO for \"%s\"<->\"%s\" memory types",
ucs_memory_type_names[params->send_mem_type],
ucs_memory_type_names[params->recv_mem_type]);
}
return UCS_ERR_INVALID_PARAM;
}
if (params->max_outstanding < 1) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("max_outstanding, need to be at least 1");
}
return UCS_ERR_INVALID_PARAM;
}
/* check that each particular message size fits into the stride size */
if (params->iov_stride) {
for (it = 0; it < params->msg_size_cnt; ++it) {
if (params->msg_size_list[it] > params->iov_stride) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Buffer size %lu bigger than stride %lu",
params->msg_size_list[it], params->iov_stride);
}
return UCS_ERR_INVALID_PARAM;
}
}
}
return UCS_OK;
}
void uct_perf_ep_flush_b(ucx_perf_context_t *perf, int peer_index)
{
uct_ep_h ep = perf->uct.peers[peer_index].ep;
uct_completion_t comp;
ucs_status_t status;
int started;
started = 0;
comp.func = NULL;
comp.count = 2;
do {
if (!started) {
status = uct_ep_flush(ep, 0, &comp);
if (status == UCS_OK) {
--comp.count;
} else if (status == UCS_INPROGRESS) {
started = 1;
} else if (status != UCS_ERR_NO_RESOURCE) {
ucs_error("uct_ep_flush() failed: %s", ucs_status_string(status));
return;
}
}
uct_worker_progress(perf->uct.worker);
} while (comp.count > 1);
}
void uct_perf_iface_flush_b(ucx_perf_context_t *perf)
{
ucs_status_t status;
do {
status = uct_iface_flush(perf->uct.iface, 0, NULL);
uct_worker_progress(perf->uct.worker);
} while (status == UCS_INPROGRESS);
if (status != UCS_OK) {
ucs_error("uct_iface_flush() failed: %s", ucs_status_string(status));
}
}
static inline uint64_t __get_flag(uct_perf_data_layout_t layout, uint64_t short_f,
uint64_t bcopy_f, uint64_t zcopy_f)
{
return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_f :
(layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_f :
(layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_f :
0;
}
static inline ucs_status_t __get_atomic_flag(size_t size, uint64_t *op32,
uint64_t *op64, uint64_t op)
{
if (size == sizeof(uint32_t)) {
*op32 = UCS_BIT(op);
return UCS_OK;
} else if (size == sizeof(uint64_t)) {
*op64 = UCS_BIT(op);
return UCS_OK;
}
return UCS_ERR_UNSUPPORTED;
}
static inline size_t __get_max_size(uct_perf_data_layout_t layout, size_t short_m,
size_t bcopy_m, uint64_t zcopy_m)
{
return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_m :
(layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_m :
(layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_m :
0;
}
static ucs_status_t uct_perf_test_check_md_support(ucx_perf_params_t *params,
ucs_memory_type_t mem_type,
uct_md_attr_t *md_attr)
{
if (!(md_attr->cap.access_mem_types & UCS_BIT(mem_type)) &&
!(md_attr->cap.reg_mem_types & UCS_BIT(mem_type))) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Unsupported memory type %s by "UCT_PERF_TEST_PARAMS_FMT,
ucs_memory_type_names[mem_type],
UCT_PERF_TEST_PARAMS_ARG(params));
return UCS_ERR_INVALID_PARAM;
}
}
return UCS_OK;
}
static ucs_status_t uct_perf_test_check_capabilities(ucx_perf_params_t *params,
uct_iface_h iface, uct_md_h md)
{
uint64_t required_flags = 0;
uint64_t atomic_op32 = 0;
uint64_t atomic_op64 = 0;
uint64_t atomic_fop32 = 0;
uint64_t atomic_fop64 = 0;
uct_md_attr_t md_attr;
uct_iface_attr_t attr;
ucs_status_t status;
size_t min_size, max_size, max_iov, message_size;
status = uct_md_query(md, &md_attr);
if (status != UCS_OK) {
ucs_error("uct_md_query(%s) failed: %s",
params->uct.md_name, ucs_status_string(status));
return status;
}
status = uct_iface_query(iface, &attr);
if (status != UCS_OK) {
ucs_error("uct_iface_query("UCT_PERF_TEST_PARAMS_FMT") failed: %s",
UCT_PERF_TEST_PARAMS_ARG(params),
ucs_status_string(status));
return status;
}
min_size = 0;
max_iov = 1;
message_size = ucx_perf_get_message_size(params);
switch (params->command) {
case UCX_PERF_CMD_AM:
required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_AM_SHORT,
UCT_IFACE_FLAG_AM_BCOPY, UCT_IFACE_FLAG_AM_ZCOPY);
required_flags |= UCT_IFACE_FLAG_CB_SYNC;
min_size = __get_max_size(params->uct.data_layout, 0, 0,
attr.cap.am.min_zcopy);
max_size = __get_max_size(params->uct.data_layout, attr.cap.am.max_short,
attr.cap.am.max_bcopy, attr.cap.am.max_zcopy);
max_iov = attr.cap.am.max_iov;
break;
case UCX_PERF_CMD_PUT:
required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_PUT_SHORT,
UCT_IFACE_FLAG_PUT_BCOPY, UCT_IFACE_FLAG_PUT_ZCOPY);
min_size = __get_max_size(params->uct.data_layout, 0, 0,
attr.cap.put.min_zcopy);
max_size = __get_max_size(params->uct.data_layout, attr.cap.put.max_short,
attr.cap.put.max_bcopy, attr.cap.put.max_zcopy);
max_iov = attr.cap.put.max_iov;
break;
case UCX_PERF_CMD_GET:
required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_GET_SHORT,
UCT_IFACE_FLAG_GET_BCOPY, UCT_IFACE_FLAG_GET_ZCOPY);
min_size = __get_max_size(params->uct.data_layout, 0, 0,
attr.cap.get.min_zcopy);
max_size = __get_max_size(params->uct.data_layout, attr.cap.get.max_short,
attr.cap.get.max_bcopy, attr.cap.get.max_zcopy);
max_iov = attr.cap.get.max_iov;
break;
case UCX_PERF_CMD_ADD:
ATOMIC_OP_CONFIG(message_size, &atomic_op32, &atomic_op64, UCT_ATOMIC_OP_ADD,
perf_atomic_op, params, status);
max_size = 8;
break;
case UCX_PERF_CMD_FADD:
ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_ADD,
perf_atomic_fop, params, status);
max_size = 8;
break;
case UCX_PERF_CMD_SWAP:
ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_SWAP,
perf_atomic_fop, params, status);
max_size = 8;
break;
case UCX_PERF_CMD_CSWAP:
ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_CSWAP,
perf_atomic_fop, params, status);
max_size = 8;
break;
default:
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Invalid test command");
}
return UCS_ERR_INVALID_PARAM;
}
status = ucx_perf_test_check_params(params);
if (status != UCS_OK) {
return status;
}
/* check atomics first */
ATOMIC_OP_CHECK(32, attr.cap.atomic32.op_flags, atomic_op32, params, perf_atomic_op);
ATOMIC_OP_CHECK(64, attr.cap.atomic64.op_flags, atomic_op64, params, perf_atomic_op);
ATOMIC_OP_CHECK(32, attr.cap.atomic32.fop_flags, atomic_fop32, params, perf_atomic_fop);
ATOMIC_OP_CHECK(64, attr.cap.atomic64.fop_flags, atomic_fop64, params, perf_atomic_fop);
/* check iface flags */
if (!(atomic_op32 | atomic_op64 | atomic_fop32 | atomic_fop64) &&
(!ucs_test_all_flags(attr.cap.flags, required_flags) || !required_flags)) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support operation %s",
UCT_PERF_TEST_PARAMS_ARG(params),
perf_iface_ops[ucs_ffs64(~attr.cap.flags & required_flags)]);
}
return UCS_ERR_UNSUPPORTED;
}
if (message_size < min_size) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Message size (%zu) is smaller than min supported (%zu)",
message_size, min_size);
}
return UCS_ERR_UNSUPPORTED;
}
if (message_size > max_size) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Message size (%zu) is larger than max supported (%zu)",
message_size, max_size);
}
return UCS_ERR_UNSUPPORTED;
}
if (params->command == UCX_PERF_CMD_AM) {
if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_SHORT) &&
(params->am_hdr_size != sizeof(uint64_t)))
{
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Short AM header size must be 8 bytes");
}
return UCS_ERR_INVALID_PARAM;
}
if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_ZCOPY) &&
(params->am_hdr_size > attr.cap.am.max_hdr))
{
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM header size (%zu) is larger than max supported (%zu)",
params->am_hdr_size, attr.cap.am.max_hdr);
}
return UCS_ERR_UNSUPPORTED;
}
if (params->am_hdr_size > message_size) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM header size (%zu) is larger than message size (%zu)",
params->am_hdr_size, message_size);
}
return UCS_ERR_INVALID_PARAM;
}
if (params->uct.fc_window > UCT_PERF_TEST_MAX_FC_WINDOW) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM flow-control window (%d) too large (should be <= %d)",
params->uct.fc_window, UCT_PERF_TEST_MAX_FC_WINDOW);
}
return UCS_ERR_INVALID_PARAM;
}
if ((params->flags & UCX_PERF_TEST_FLAG_ONE_SIDED) &&
(params->flags & UCX_PERF_TEST_FLAG_VERBOSE))
{
ucs_warn("Running active-message test with on-sided progress");
}
}
if (UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) {
if (params->msg_size_cnt > max_iov) {
if ((params->flags & UCX_PERF_TEST_FLAG_VERBOSE) ||
!params->msg_size_cnt) {
ucs_error("Wrong number of IOV entries. Requested is %lu, "
"should be in the range 1...%lu", params->msg_size_cnt,
max_iov);
}
return UCS_ERR_UNSUPPORTED;
}
/* if msg_size_cnt == 1, the message size is checked above */
if ((UCX_PERF_CMD_AM == params->command) && (params->msg_size_cnt > 1)) {
if (params->am_hdr_size > params->msg_size_list[0]) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM header size (%lu) larger than the first IOV "
"message size (%lu)", params->am_hdr_size,
params->msg_size_list[0]);
}
return UCS_ERR_INVALID_PARAM;
}
}
}
status = uct_perf_test_check_md_support(params, params->send_mem_type, &md_attr);
if (status != UCS_OK) {
return status;
}
status = uct_perf_test_check_md_support(params, params->recv_mem_type, &md_attr);
if (status != UCS_OK) {
return status;
}
return UCS_OK;
}
static ucs_status_t uct_perf_test_setup_endpoints(ucx_perf_context_t *perf)
{
const size_t buffer_size = ADDR_BUF_SIZE;
ucx_perf_ep_info_t info, *remote_info;
unsigned group_size, i, group_index;
uct_device_addr_t *dev_addr;
uct_iface_addr_t *iface_addr;
uct_ep_addr_t *ep_addr;
uct_iface_attr_t iface_attr;
uct_md_attr_t md_attr;
uct_ep_params_t ep_params;
void *rkey_buffer;
ucs_status_t status;
struct iovec vec[5];
void *buffer;
void *req;
buffer = malloc(buffer_size);
if (buffer == NULL) {
ucs_error("Failed to allocate RTE buffer");
status = UCS_ERR_NO_MEMORY;
goto err;
}
status = uct_iface_query(perf->uct.iface, &iface_attr);
if (status != UCS_OK) {
ucs_error("Failed to uct_iface_query: %s", ucs_status_string(status));
goto err_free;
}
status = uct_md_query(perf->uct.md, &md_attr);
if (status != UCS_OK) {
ucs_error("Failed to uct_md_query: %s", ucs_status_string(status));
goto err_free;
}
if (md_attr.cap.flags & (UCT_MD_FLAG_ALLOC|UCT_MD_FLAG_REG)) {
info.rkey_size = md_attr.rkey_packed_size;
} else {
info.rkey_size = 0;
}
info.uct.dev_addr_len = iface_attr.device_addr_len;
info.uct.iface_addr_len = iface_attr.iface_addr_len;
info.uct.ep_addr_len = iface_attr.ep_addr_len;
info.recv_buffer = (uintptr_t)perf->recv_buffer;
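/* Lay out the scratch buffer as [rkey | device addr | iface addr | ep addr];
 * each pointer below is an offset into this single allocation, and the peer's
 * blob is parsed with the same layout further down. */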
rkey_buffer = buffer;
dev_addr = UCS_PTR_BYTE_OFFSET(rkey_buffer, info.rkey_size);
iface_addr = UCS_PTR_BYTE_OFFSET(dev_addr, info.uct.dev_addr_len);
ep_addr = UCS_PTR_BYTE_OFFSET(iface_addr, info.uct.iface_addr_len);
ucs_assert_always(UCS_PTR_BYTE_OFFSET(ep_addr, info.uct.ep_addr_len) <=
UCS_PTR_BYTE_OFFSET(buffer, buffer_size));
status = uct_iface_get_device_address(perf->uct.iface, dev_addr);
if (status != UCS_OK) {
ucs_error("Failed to uct_iface_get_device_address: %s",
ucs_status_string(status));
goto err_free;
}
status = uct_iface_get_address(perf->uct.iface, iface_addr);
if (status != UCS_OK) {
ucs_error("Failed to uct_iface_get_address: %s", ucs_status_string(status));
goto err_free;
}
if (info.rkey_size > 0) {
memset(rkey_buffer, 0, info.rkey_size);
status = uct_md_mkey_pack(perf->uct.md, perf->uct.recv_mem.memh, rkey_buffer);
if (status != UCS_OK) {
ucs_error("Failed to uct_rkey_pack: %s", ucs_status_string(status));
goto err_free;
}
}
group_size = rte_call(perf, group_size);
group_index = rte_call(perf, group_index);
perf->uct.peers = calloc(group_size, sizeof(*perf->uct.peers));
if (perf->uct.peers == NULL) {
status = UCS_ERR_NO_MEMORY;
goto err_free;
}
ep_params.field_mask = UCT_EP_PARAM_FIELD_IFACE;
ep_params.iface = perf->uct.iface;
if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
for (i = 0; i < group_size; ++i) {
if (i == group_index) {
continue;
}
status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep);
if (status != UCS_OK) {
ucs_error("Failed to uct_ep_create: %s", ucs_status_string(status));
goto err_destroy_eps;
}
status = uct_ep_get_address(perf->uct.peers[i].ep, ep_addr);
if (status != UCS_OK) {
ucs_error("Failed to uct_ep_get_address: %s", ucs_status_string(status));
goto err_destroy_eps;
}
}
} else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
ep_params.field_mask |= UCT_EP_PARAM_FIELD_DEV_ADDR |
UCT_EP_PARAM_FIELD_IFACE_ADDR;
}
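/* Exchange with the peers: iovec entry 0 carries the fixed-size info header,
 * entry 1 carries the packed rkey and addresses laid out above. */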
vec[0].iov_base = &info;
vec[0].iov_len = sizeof(info);
vec[1].iov_base = buffer;
vec[1].iov_len = info.rkey_size + info.uct.dev_addr_len +
info.uct.iface_addr_len + info.uct.ep_addr_len;
rte_call(perf, post_vec, vec, 2, &req);
rte_call(perf, exchange_vec, req);
for (i = 0; i < group_size; ++i) {
if (i == group_index) {
continue;
}
rte_call(perf, recv, i, buffer, buffer_size, req);
remote_info = buffer;
rkey_buffer = remote_info + 1;
dev_addr = UCS_PTR_BYTE_OFFSET(rkey_buffer, remote_info->rkey_size);
iface_addr = UCS_PTR_BYTE_OFFSET(dev_addr, remote_info->uct.dev_addr_len);
ep_addr = UCS_PTR_BYTE_OFFSET(iface_addr, remote_info->uct.iface_addr_len);
perf->uct.peers[i].remote_addr = remote_info->recv_buffer;
if (!uct_iface_is_reachable(perf->uct.iface, dev_addr,
remote_info->uct.iface_addr_len ?
iface_addr : NULL)) {
ucs_error("Destination is unreachable");
status = UCS_ERR_UNREACHABLE;
goto err_destroy_eps;
}
if (remote_info->rkey_size > 0) {
status = uct_rkey_unpack(perf->uct.cmpt, rkey_buffer,
&perf->uct.peers[i].rkey);
if (status != UCS_OK) {
ucs_error("Failed to uct_rkey_unpack: %s", ucs_status_string(status));
goto err_destroy_eps;
}
} else {
perf->uct.peers[i].rkey.handle = NULL;
perf->uct.peers[i].rkey.rkey = UCT_INVALID_RKEY;
}
if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
status = uct_ep_connect_to_ep(perf->uct.peers[i].ep, dev_addr, ep_addr);
} else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
ep_params.dev_addr = dev_addr;
ep_params.iface_addr = iface_addr;
status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep);
} else {
status = UCS_ERR_UNSUPPORTED;
}
if (status != UCS_OK) {
ucs_error("Failed to connect endpoint: %s", ucs_status_string(status));
goto err_destroy_eps;
}
}
uct_perf_iface_flush_b(perf);
free(buffer);
uct_perf_barrier(perf);
return UCS_OK;
err_destroy_eps:
for (i = 0; i < group_size; ++i) {
if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) {
uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey);
}
if (perf->uct.peers[i].ep != NULL) {
uct_ep_destroy(perf->uct.peers[i].ep);
}
}
free(perf->uct.peers);
err_free:
free(buffer);
err:
return status;
}
static void uct_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
unsigned group_size, group_index, i;
uct_perf_barrier(perf);
uct_iface_set_am_handler(perf->uct.iface, UCT_PERF_TEST_AM_ID, NULL, NULL, 0);
group_size = rte_call(perf, group_size);
group_index = rte_call(perf, group_index);
for (i = 0; i < group_size; ++i) {
if (i != group_index) {
if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) {
uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey);
}
if (perf->uct.peers[i].ep) {
uct_ep_destroy(perf->uct.peers[i].ep);
}
}
}
free(perf->uct.peers);
}
static ucs_status_t ucp_perf_test_fill_params(ucx_perf_params_t *params,
ucp_params_t *ucp_params)
{
ucs_status_t status;
size_t message_size;
message_size = ucx_perf_get_message_size(params);
switch (params->command) {
case UCX_PERF_CMD_PUT:
case UCX_PERF_CMD_GET:
ucp_params->features |= UCP_FEATURE_RMA;
break;
case UCX_PERF_CMD_ADD:
case UCX_PERF_CMD_FADD:
case UCX_PERF_CMD_SWAP:
case UCX_PERF_CMD_CSWAP:
if (message_size == sizeof(uint32_t)) {
ucp_params->features |= UCP_FEATURE_AMO32;
} else if (message_size == sizeof(uint64_t)) {
ucp_params->features |= UCP_FEATURE_AMO64;
} else {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Atomic size should be either 32 or 64 bit");
}
return UCS_ERR_INVALID_PARAM;
}
break;
case UCX_PERF_CMD_TAG:
case UCX_PERF_CMD_TAG_SYNC:
ucp_params->features |= UCP_FEATURE_TAG;
break;
case UCX_PERF_CMD_STREAM:
ucp_params->features |= UCP_FEATURE_STREAM;
break;
default:
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Invalid test command");
}
return UCS_ERR_INVALID_PARAM;
}
status = ucx_perf_test_check_params(params);
if (status != UCS_OK) {
return status;
}
return UCS_OK;
}
static ucs_status_t ucp_perf_test_alloc_iov_mem(ucp_perf_datatype_t datatype,
size_t iovcnt, unsigned thread_count,
ucp_dt_iov_t **iov_p)
{
ucp_dt_iov_t *iov;
if (UCP_PERF_DATATYPE_IOV == datatype) {
iov = malloc(sizeof(*iov) * iovcnt * thread_count);
if (NULL == iov) {
ucs_error("Failed allocate IOV buffer with iovcnt=%lu", iovcnt);
return UCS_ERR_NO_MEMORY;
}
*iov_p = iov;
}
return UCS_OK;
}
static ucs_status_t
ucp_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length,
void **address_p, ucp_mem_h *memh, int non_blk_flag)
{
ucp_mem_map_params_t mem_map_params;
ucp_mem_attr_t mem_attr;
ucs_status_t status;
mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS |
UCP_MEM_MAP_PARAM_FIELD_LENGTH |
UCP_MEM_MAP_PARAM_FIELD_FLAGS;
mem_map_params.address = *address_p;
mem_map_params.length = length;
mem_map_params.flags = UCP_MEM_MAP_ALLOCATE;
if (perf->params.flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) {
mem_map_params.flags |= non_blk_flag;
}
status = ucp_mem_map(perf->ucp.context, &mem_map_params, memh);
if (status != UCS_OK) {
goto err;
}
mem_attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS;
status = ucp_mem_query(*memh, &mem_attr);
if (status != UCS_OK) {
goto err;
}
*address_p = mem_attr.address;
return UCS_OK;
err:
return status;
}
static void ucp_perf_test_free_host(const ucx_perf_context_t *perf,
void *address, ucp_mem_h memh)
{
ucs_status_t status;
status = ucp_mem_unmap(perf->ucp.context, memh);
if (status != UCS_OK) {
ucs_warn("ucp_mem_unmap() failed: %s", ucs_status_string(status));
}
}
static ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf)
{
ucx_perf_params_t *params = &perf->params;
ucs_status_t status;
size_t buffer_size;
if (params->iov_stride) {
buffer_size = params->msg_size_cnt * params->iov_stride;
} else {
buffer_size = ucx_perf_get_message_size(params);
}
/* Allocate send buffer memory */
perf->send_buffer = NULL;
status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count,
&perf->send_buffer, &perf->ucp.send_memh,
UCP_MEM_MAP_NONBLOCK);
if (status != UCS_OK) {
goto err;
}
/* Allocate receive buffer memory */
perf->recv_buffer = NULL;
status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count,
&perf->recv_buffer, &perf->ucp.recv_memh,
0);
if (status != UCS_OK) {
goto err_free_send_buffer;
}
/* Allocate IOV datatype memory */
perf->ucp.send_iov = NULL;
status = ucp_perf_test_alloc_iov_mem(params->ucp.send_datatype,
perf->params.msg_size_cnt,
params->thread_count,
&perf->ucp.send_iov);
if (UCS_OK != status) {
goto err_free_buffers;
}
perf->ucp.recv_iov = NULL;
status = ucp_perf_test_alloc_iov_mem(params->ucp.recv_datatype,
perf->params.msg_size_cnt,
params->thread_count,
&perf->ucp.recv_iov);
if (UCS_OK != status) {
goto err_free_send_iov_buffers;
}
return UCS_OK;
err_free_send_iov_buffers:
free(perf->ucp.send_iov);
err_free_buffers:
perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh);
err_free_send_buffer:
perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh);
err:
return UCS_ERR_NO_MEMORY;
}
static void ucp_perf_test_free_mem(ucx_perf_context_t *perf)
{
free(perf->ucp.recv_iov);
free(perf->ucp.send_iov);
perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh);
perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh);
}
static void ucp_perf_test_destroy_eps(ucx_perf_context_t* perf)
{
unsigned i, thread_count = perf->params.thread_count;
ucs_status_ptr_t *req;
ucs_status_t status;
for (i = 0; i < thread_count; ++i) {
if (perf->ucp.tctx[i].perf.ucp.rkey != NULL) {
ucp_rkey_destroy(perf->ucp.tctx[i].perf.ucp.rkey);
}
if (perf->ucp.tctx[i].perf.ucp.ep != NULL) {
req = ucp_ep_close_nb(perf->ucp.tctx[i].perf.ucp.ep,
UCP_EP_CLOSE_MODE_FLUSH);
if (UCS_PTR_IS_PTR(req)) {
do {
ucp_worker_progress(perf->ucp.tctx[i].perf.ucp.worker);
status = ucp_request_check_status(req);
} while (status == UCS_INPROGRESS);
ucp_request_release(req);
} else if (UCS_PTR_STATUS(req) != UCS_OK) {
ucs_warn("failed to close ep %p on thread %d: %s\n",
perf->ucp.tctx[i].perf.ucp.ep, i,
ucs_status_string(UCS_PTR_STATUS(req)));
}
}
}
}
static ucs_status_t ucp_perf_test_exchange_status(ucx_perf_context_t *perf,
ucs_status_t status)
{
unsigned group_size = rte_call(perf, group_size);
ucs_status_t collective_status = status;
struct iovec vec;
void *req = NULL;
unsigned i;
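/* Each process posts its local status; any non-OK status received from a
 * peer overrides the collective result. */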
vec.iov_base = &status;
vec.iov_len = sizeof(status);
rte_call(perf, post_vec, &vec, 1, &req);
rte_call(perf, exchange_vec, req);
for (i = 0; i < group_size; ++i) {
rte_call(perf, recv, i, &status, sizeof(status), req);
if (status != UCS_OK) {
collective_status = status;
}
}
return collective_status;
}
static ucs_status_t ucp_perf_test_receive_remote_data(ucx_perf_context_t *perf)
{
unsigned thread_count = perf->params.thread_count;
void *rkey_buffer = NULL;
void *req = NULL;
unsigned group_size, group_index, i;
ucx_perf_ep_info_t *remote_info;
ucp_ep_params_t ep_params;
ucp_address_t *address;
ucs_status_t status;
size_t buffer_size;
void *buffer;
group_size = rte_call(perf, group_size);
group_index = rte_call(perf, group_index);
if (group_size != 2) {
ucs_error("perftest requires group size to be exactly 2 "
"(actual group size: %u)", group_size);
return UCS_ERR_UNSUPPORTED;
}
buffer_size = ADDR_BUF_SIZE * thread_count;
buffer = malloc(buffer_size);
if (buffer == NULL) {
ucs_error("failed to allocate RTE receive buffer");
status = UCS_ERR_NO_MEMORY;
goto err;
}
/* Initialize all endpoints and rkeys to NULL to handle error flow */
for (i = 0; i < thread_count; i++) {
perf->ucp.tctx[i].perf.ucp.ep = NULL;
perf->ucp.tctx[i].perf.ucp.rkey = NULL;
}
/* receive the data from the remote peer, extract the address from it
* (along with additional wireup info) and create an endpoint to the peer */
rte_call(perf, recv, 1 - group_index, buffer, buffer_size, req);
remote_info = buffer;
for (i = 0; i < thread_count; i++) {
address = (ucp_address_t*)(remote_info + 1);
rkey_buffer = UCS_PTR_BYTE_OFFSET(address,
remote_info->ucp.worker_addr_len);
perf->ucp.tctx[i].perf.ucp.remote_addr = remote_info->recv_buffer;
ep_params.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS;
ep_params.address = address;
status = ucp_ep_create(perf->ucp.tctx[i].perf.ucp.worker, &ep_params,
&perf->ucp.tctx[i].perf.ucp.ep);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("ucp_ep_create() failed: %s", ucs_status_string(status));
}
goto err_free_eps_buffer;
}
if (remote_info->rkey_size > 0) {
status = ucp_ep_rkey_unpack(perf->ucp.tctx[i].perf.ucp.ep, rkey_buffer,
&perf->ucp.tctx[i].perf.ucp.rkey);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_fatal("ucp_rkey_unpack() failed: %s", ucs_status_string(status));
}
goto err_free_eps_buffer;
}
} else {
perf->ucp.tctx[i].perf.ucp.rkey = NULL;
}
remote_info = UCS_PTR_BYTE_OFFSET(remote_info,
remote_info->ucp.total_wireup_len);
}
free(buffer);
return UCS_OK;
err_free_eps_buffer:
ucp_perf_test_destroy_eps(perf);
free(buffer);
err:
return status;
}
static ucs_status_t ucp_perf_test_send_local_data(ucx_perf_context_t *perf,
uint64_t features)
{
unsigned i, j, thread_count = perf->params.thread_count;
size_t address_length = 0;
void *rkey_buffer = NULL;
void *req = NULL;
ucx_perf_ep_info_t *info;
ucp_address_t *address;
ucs_status_t status;
struct iovec *vec;
size_t rkey_size;
if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
status = ucp_rkey_pack(perf->ucp.context, perf->ucp.recv_memh,
&rkey_buffer, &rkey_size);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("ucp_rkey_pack() failed: %s", ucs_status_string(status));
}
goto err;
}
} else {
rkey_size = 0;
}
/* each thread has an iovec with 3 entries to send to the remote peer:
* ep_info, worker_address and rkey buffer */
vec = calloc(3 * thread_count, sizeof(struct iovec));
if (vec == NULL) {
ucs_error("failed to allocate iovec");
status = UCS_ERR_NO_MEMORY;
goto err_rkey_release;
}
/* get the worker address created for every thread and send it to the remote
* peer */
for (i = 0; i < thread_count; i++) {
status = ucp_worker_get_address(perf->ucp.tctx[i].perf.ucp.worker,
&address, &address_length);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("ucp_worker_get_address() failed: %s",
ucs_status_string(status));
}
goto err_free_workers_vec;
}
vec[i * 3].iov_base = malloc(sizeof(*info));
if (vec[i * 3].iov_base == NULL) {
ucs_error("failed to allocate vec entry for info");
status = UCS_ERR_NO_MEMORY;
ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker);
goto err_free_workers_vec;
}
info = vec[i * 3].iov_base;
info->ucp.worker_addr_len = address_length;
info->ucp.total_wireup_len = sizeof(*info) + address_length + rkey_size;
info->rkey_size = rkey_size;
info->recv_buffer = (uintptr_t)perf->ucp.tctx[i].perf.recv_buffer;
vec[(i * 3) + 0].iov_len = sizeof(*info);
vec[(i * 3) + 1].iov_base = address;
vec[(i * 3) + 1].iov_len = address_length;
vec[(i * 3) + 2].iov_base = rkey_buffer;
vec[(i * 3) + 2].iov_len = info->rkey_size;
address_length = 0;
}
/* send to the remote peer */
rte_call(perf, post_vec, vec, 3 * thread_count, &req);
rte_call(perf, exchange_vec, req);
if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
ucp_rkey_buffer_release(rkey_buffer);
}
for (i = 0; i < thread_count; i++) {
free(vec[i * 3].iov_base);
ucp_worker_release_address(perf->ucp.tctx[i].perf.ucp.worker,
vec[(i * 3) + 1].iov_base);
}
free(vec);
return UCS_OK;
err_free_workers_vec:
for (j = 0; j < i; j++) {
ucp_worker_destroy(perf->ucp.tctx[j].perf.ucp.worker);
}
free(vec);
err_rkey_release:
if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
ucp_rkey_buffer_release(rkey_buffer);
}
err:
return status;
}
static ucs_status_t ucp_perf_test_setup_endpoints(ucx_perf_context_t *perf,
uint64_t features)
{
ucs_status_t status;
unsigned i;
/* pack the local endpoints data and send to the remote peer */
status = ucp_perf_test_send_local_data(perf, features);
if (status != UCS_OK) {
goto err;
}
/* receive remote peer's endpoints' data and connect to them */
status = ucp_perf_test_receive_remote_data(perf);
if (status != UCS_OK) {
goto err;
}
/* sync status across all processes */
status = ucp_perf_test_exchange_status(perf, UCS_OK);
if (status != UCS_OK) {
goto err_destroy_eps;
}
/* force wireup completion */
for (i = 0; i < perf->params.thread_count; i++) {
status = ucp_worker_flush(perf->ucp.tctx[i].perf.ucp.worker);
if (status != UCS_OK) {
ucs_warn("ucp_worker_flush() failed on theread %d: %s",
i, ucs_status_string(status));
}
}
return status;
err_destroy_eps:
ucp_perf_test_destroy_eps(perf);
err:
(void)ucp_perf_test_exchange_status(perf, status);
return status;
}
static void ucp_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
ucp_perf_barrier(perf);
ucp_perf_test_destroy_eps(perf);
}
static void ucp_perf_test_destroy_workers(ucx_perf_context_t *perf)
{
unsigned i;
for (i = 0; i < perf->params.thread_count; i++) {
if (perf->ucp.tctx[i].perf.ucp.worker != NULL) {
ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker);
}
}
}
static void ucx_perf_set_warmup(ucx_perf_context_t* perf,
const ucx_perf_params_t* params)
{
perf->max_iter = ucs_min(params->warmup_iter,
ucs_div_round_up(params->max_iter, 10));
perf->report_interval = ULONG_MAX;
}
static ucs_status_t uct_perf_create_md(ucx_perf_context_t *perf)
{
uct_component_h *uct_components;
uct_component_attr_t component_attr;
uct_tl_resource_desc_t *tl_resources;
unsigned md_index, num_components;
unsigned tl_index, num_tl_resources;
unsigned cmpt_index;
ucs_status_t status;
uct_md_h md;
uct_md_config_t *md_config;
status = uct_query_components(&uct_components, &num_components);
if (status != UCS_OK) {
goto out;
}
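/* Scan every component, its memory domains, and each domain's transport
 * resources until the requested tl_name/dev_name pair is found. */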
for (cmpt_index = 0; cmpt_index < num_components; ++cmpt_index) {
component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCE_COUNT;
status = uct_component_query(uct_components[cmpt_index], &component_attr);
if (status != UCS_OK) {
goto out_release_components_list;
}
component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCES;
component_attr.md_resources = alloca(sizeof(*component_attr.md_resources) *
component_attr.md_resource_count);
status = uct_component_query(uct_components[cmpt_index], &component_attr);
if (status != UCS_OK) {
goto out_release_components_list;
}
for (md_index = 0; md_index < component_attr.md_resource_count; ++md_index) {
status = uct_md_config_read(uct_components[cmpt_index], NULL, NULL,
&md_config);
if (status != UCS_OK) {
goto out_release_components_list;
}
ucs_strncpy_zero(perf->params.uct.md_name,
component_attr.md_resources[md_index].md_name,
UCT_MD_NAME_MAX);
status = uct_md_open(uct_components[cmpt_index],
component_attr.md_resources[md_index].md_name,
md_config, &md);
uct_config_release(md_config);
if (status != UCS_OK) {
goto out_release_components_list;
}
status = uct_md_query_tl_resources(md, &tl_resources, &num_tl_resources);
if (status != UCS_OK) {
uct_md_close(md);
goto out_release_components_list;
}
for (tl_index = 0; tl_index < num_tl_resources; ++tl_index) {
if (!strcmp(perf->params.uct.tl_name, tl_resources[tl_index].tl_name) &&
!strcmp(perf->params.uct.dev_name, tl_resources[tl_index].dev_name))
{
uct_release_tl_resource_list(tl_resources);
perf->uct.cmpt = uct_components[cmpt_index];
perf->uct.md = md;
status = UCS_OK;
goto out_release_components_list;
}
}
uct_md_close(md);
uct_release_tl_resource_list(tl_resources);
}
}
ucs_error("Cannot use "UCT_PERF_TEST_PARAMS_FMT,
UCT_PERF_TEST_PARAMS_ARG(&perf->params));
status = UCS_ERR_NO_DEVICE;
out_release_components_list:
uct_release_component_list(uct_components);
out:
return status;
}
void uct_perf_barrier(ucx_perf_context_t *perf)
{
rte_call(perf, barrier, (void(*)(void*))uct_worker_progress,
(void*)perf->uct.worker);
}
void ucp_perf_barrier(ucx_perf_context_t *perf)
{
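/* Progress the worker that belongs to the calling thread while waiting on
 * the RTE barrier (thread 0's worker when OpenMP is not available). */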
rte_call(perf, barrier, (void(*)(void*))ucp_worker_progress,
#if _OPENMP
(void*)perf->ucp.tctx[omp_get_thread_num()].perf.ucp.worker);
#else
(void*)perf->ucp.tctx[0].perf.ucp.worker);
#endif
}
static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf)
{
ucx_perf_params_t *params = &perf->params;
uct_iface_config_t *iface_config;
ucs_status_t status;
uct_iface_params_t iface_params = {
.field_mask = UCT_IFACE_PARAM_FIELD_OPEN_MODE |
UCT_IFACE_PARAM_FIELD_STATS_ROOT |
UCT_IFACE_PARAM_FIELD_RX_HEADROOM |
UCT_IFACE_PARAM_FIELD_CPU_MASK,
.open_mode = UCT_IFACE_OPEN_MODE_DEVICE,
.mode.device.tl_name = params->uct.tl_name,
.mode.device.dev_name = params->uct.dev_name,
.stats_root = ucs_stats_get_root(),
.rx_headroom = 0
};
UCS_CPU_ZERO(&iface_params.cpu_mask);
status = ucs_async_context_init(&perf->uct.async, params->async_mode);
if (status != UCS_OK) {
goto out;
}
status = uct_worker_create(&perf->uct.async, params->thread_mode,
&perf->uct.worker);
if (status != UCS_OK) {
goto out_cleanup_async;
}
status = uct_perf_create_md(perf);
if (status != UCS_OK) {
goto out_destroy_worker;
}
status = uct_md_iface_config_read(perf->uct.md, params->uct.tl_name, NULL,
NULL, &iface_config);
if (status != UCS_OK) {
goto out_destroy_md;
}
status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params,
iface_config, &perf->uct.iface);
uct_config_release(iface_config);
if (status != UCS_OK) {
ucs_error("Failed to open iface: %s", ucs_status_string(status));
goto out_destroy_md;
}
status = uct_perf_test_check_capabilities(params, perf->uct.iface,
perf->uct.md);
/* sync status across all processes */
status = ucp_perf_test_exchange_status(perf, status);
if (status != UCS_OK) {
goto out_iface_close;
}
status = uct_perf_test_alloc_mem(perf);
if (status != UCS_OK) {
goto out_iface_close;
}
/* Enable progress before `uct_iface_flush` and `uct_worker_progress` are
 * called, to give some transports (ib/ud, tcp) a chance to finish their
 * connection setup. They may return UCS_INPROGRESS from `uct_iface_flush`
 * while connections are still in progress */
uct_iface_progress_enable(perf->uct.iface,
UCT_PROGRESS_SEND | UCT_PROGRESS_RECV);
status = uct_perf_test_setup_endpoints(perf);
if (status != UCS_OK) {
ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
goto out_free_mem;
}
return UCS_OK;
out_free_mem:
uct_perf_test_free_mem(perf);
out_iface_close:
uct_iface_close(perf->uct.iface);
out_destroy_md:
uct_md_close(perf->uct.md);
out_destroy_worker:
uct_worker_destroy(perf->uct.worker);
out_cleanup_async:
ucs_async_context_cleanup(&perf->uct.async);
out:
return status;
}
static void uct_perf_cleanup(ucx_perf_context_t *perf)
{
uct_perf_test_cleanup_endpoints(perf);
uct_perf_test_free_mem(perf);
uct_iface_close(perf->uct.iface);
uct_md_close(perf->uct.md);
uct_worker_destroy(perf->uct.worker);
ucs_async_context_cleanup(&perf->uct.async);
}
static void ucp_perf_request_init(void *req)
{
ucp_perf_request_t *request = req;
request->context = NULL;
}
static ucs_status_t ucp_perf_setup(ucx_perf_context_t *perf)
{
ucp_params_t ucp_params;
ucp_worker_params_t worker_params;
ucp_config_t *config;
ucs_status_t status;
unsigned i, thread_count;
size_t message_size;
ucp_params.field_mask = UCP_PARAM_FIELD_FEATURES |
UCP_PARAM_FIELD_REQUEST_SIZE |
UCP_PARAM_FIELD_REQUEST_INIT;
ucp_params.features = 0;
ucp_params.request_size = sizeof(ucp_perf_request_t);
ucp_params.request_init = ucp_perf_request_init;
if (perf->params.thread_count > 1) {
/* when there is more than one thread, a ucp_worker is created for each of
 * them; all workers share the same ucp_context */
ucp_params.field_mask |= UCP_PARAM_FIELD_MT_WORKERS_SHARED;
ucp_params.mt_workers_shared = 1;
}
status = ucp_perf_test_fill_params(&perf->params, &ucp_params);
if (status != UCS_OK) {
goto err;
}
status = ucp_config_read(NULL, NULL, &config);
if (status != UCS_OK) {
goto err;
}
status = ucp_init(&ucp_params, config, &perf->ucp.context);
ucp_config_release(config);
if (status != UCS_OK) {
goto err;
}
thread_count = perf->params.thread_count;
message_size = ucx_perf_get_message_size(&perf->params);
status = ucp_perf_test_alloc_mem(perf);
if (status != UCS_OK) {
ucs_warn("ucp test failed to allocate memory");
goto err_cleanup;
}
perf->ucp.tctx = calloc(thread_count, sizeof(ucx_perf_thread_context_t));
if (perf->ucp.tctx == NULL) {
ucs_warn("ucp test failed to allocate memory for thread contexts");
goto err_free_mem;
}
worker_params.field_mask = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
worker_params.thread_mode = perf->params.thread_mode;
for (i = 0; i < thread_count; i++) {
perf->ucp.tctx[i].tid = i;
perf->ucp.tctx[i].perf = *perf;
/* Doctor the src and dst buffers to make them thread specific */
perf->ucp.tctx[i].perf.send_buffer =
UCS_PTR_BYTE_OFFSET(perf->send_buffer, i * message_size);
perf->ucp.tctx[i].perf.recv_buffer =
UCS_PTR_BYTE_OFFSET(perf->recv_buffer, i * message_size);
status = ucp_worker_create(perf->ucp.context, &worker_params,
&perf->ucp.tctx[i].perf.ucp.worker);
if (status != UCS_OK) {
goto err_free_tctx_destroy_workers;
}
}
status = ucp_perf_test_setup_endpoints(perf, ucp_params.features);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
}
goto err_free_tctx_destroy_workers;
}
return UCS_OK;
err_free_tctx_destroy_workers:
ucp_perf_test_destroy_workers(perf);
free(perf->ucp.tctx);
err_free_mem:
ucp_perf_test_free_mem(perf);
err_cleanup:
ucp_cleanup(perf->ucp.context);
err:
return status;
}
static void ucp_perf_cleanup(ucx_perf_context_t *perf)
{
ucp_perf_test_cleanup_endpoints(perf);
ucp_perf_barrier(perf);
ucp_perf_test_free_mem(perf);
ucp_perf_test_destroy_workers(perf);
free(perf->ucp.tctx);
ucp_cleanup(perf->ucp.context);
}
static struct {
ucs_status_t (*setup)(ucx_perf_context_t *perf);
void (*cleanup)(ucx_perf_context_t *perf);
ucs_status_t (*run)(ucx_perf_context_t *perf);
void (*barrier)(ucx_perf_context_t *perf);
} ucx_perf_funcs[] = {
[UCX_PERF_API_UCT] = {uct_perf_setup, uct_perf_cleanup,
uct_perf_test_dispatch, uct_perf_barrier},
[UCX_PERF_API_UCP] = {ucp_perf_setup, ucp_perf_cleanup,
ucp_perf_test_dispatch, ucp_perf_barrier}
};
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result);
ucs_status_t ucx_perf_run(const ucx_perf_params_t *params,
ucx_perf_result_t *result)
{
ucx_perf_context_t *perf;
ucs_status_t status;
ucx_perf_global_init();
if (params->command == UCX_PERF_CMD_LAST) {
ucs_error("Test is not selected");
status = UCS_ERR_INVALID_PARAM;
goto out;
}
if ((params->api != UCX_PERF_API_UCT) && (params->api != UCX_PERF_API_UCP)) {
ucs_error("Invalid test API parameter (should be UCT or UCP)");
status = UCS_ERR_INVALID_PARAM;
goto out;
}
perf = malloc(sizeof(*perf));
if (perf == NULL) {
status = UCS_ERR_NO_MEMORY;
goto out;
}
ucx_perf_test_init(perf, params);
if (perf->allocator == NULL) {
ucs_error("Unsupported memory types %s<->%s",
ucs_memory_type_names[params->send_mem_type],
ucs_memory_type_names[params->recv_mem_type]);
status = UCS_ERR_UNSUPPORTED;
goto out_free;
}
if ((params->api == UCX_PERF_API_UCT) &&
(perf->allocator->mem_type != UCS_MEMORY_TYPE_HOST)) {
ucs_warn("UCT tests also copy 2-byte values from %s memory to "
"%s memory, which may impact performance results",
ucs_memory_type_names[perf->allocator->mem_type],
ucs_memory_type_names[UCS_MEMORY_TYPE_HOST]);
}
status = perf->allocator->init(perf);
if (status != UCS_OK) {
goto out_free;
}
status = ucx_perf_funcs[params->api].setup(perf);
if (status != UCS_OK) {
goto out_free;
}
if (params->thread_count == 1) {
if (params->api == UCX_PERF_API_UCP) {
perf->ucp.worker = perf->ucp.tctx[0].perf.ucp.worker;
perf->ucp.ep = perf->ucp.tctx[0].perf.ucp.ep;
perf->ucp.remote_addr = perf->ucp.tctx[0].perf.ucp.remote_addr;
perf->ucp.rkey = perf->ucp.tctx[0].perf.ucp.rkey;
}
if (params->warmup_iter > 0) {
ucx_perf_set_warmup(perf, params);
status = ucx_perf_funcs[params->api].run(perf);
if (status != UCS_OK) {
goto out_cleanup;
}
ucx_perf_funcs[params->api].barrier(perf);
ucx_perf_test_prepare_new_run(perf, params);
}
/* Run test */
status = ucx_perf_funcs[params->api].run(perf);
ucx_perf_funcs[params->api].barrier(perf);
if (status == UCS_OK) {
ucx_perf_calc_result(perf, result);
rte_call(perf, report, result, perf->params.report_arg, 1, 0);
}
} else {
status = ucx_perf_thread_spawn(perf, result);
}
out_cleanup:
ucx_perf_funcs[params->api].cleanup(perf);
out_free:
free(perf);
out:
return status;
}
#if _OPENMP
static ucs_status_t ucx_perf_thread_run_test(void* arg)
{
ucx_perf_thread_context_t* tctx = (ucx_perf_thread_context_t*) arg; /* a single thread context */
ucx_perf_result_t* result = &tctx->result;
ucx_perf_context_t* perf = &tctx->perf;
ucx_perf_params_t* params = &perf->params;
ucs_status_t status;
if (params->warmup_iter > 0) {
ucx_perf_set_warmup(perf, params);
status = ucx_perf_funcs[params->api].run(perf);
ucx_perf_funcs[params->api].barrier(perf);
if (UCS_OK != status) {
goto out;
}
ucx_perf_test_prepare_new_run(perf, params);
}
/* Run test */
#pragma omp barrier
status = ucx_perf_funcs[params->api].run(perf);
ucx_perf_funcs[params->api].barrier(perf);
if (UCS_OK != status) {
goto out;
}
ucx_perf_calc_result(perf, result);
out:
return status;
}
static void ucx_perf_thread_report_aggregated_results(ucx_perf_context_t *perf)
{
ucx_perf_thread_context_t* tctx = perf->ucp.tctx; /* all the thread contexts on perf */
unsigned i, thread_count = perf->params.thread_count;
double lat_sum_total_average = 0.0;
ucx_perf_result_t agg_result;
agg_result.iters = tctx[0].result.iters;
agg_result.bytes = tctx[0].result.bytes;
agg_result.elapsed_time = tctx[0].result.elapsed_time;
agg_result.bandwidth.total_average = 0.0;
agg_result.bandwidth.typical = 0.0; /* Undefined since used only for latency calculations */
agg_result.latency.total_average = 0.0;
agg_result.msgrate.total_average = 0.0;
agg_result.msgrate.typical = 0.0; /* Undefined since used only for latency calculations */
/* when running with multiple threads, the moment average value is
* undefined since we don't capture the values of the last iteration */
agg_result.msgrate.moment_average = 0.0;
agg_result.bandwidth.moment_average = 0.0;
agg_result.latency.moment_average = 0.0;
agg_result.latency.typical = 0.0;
/* in case of multiple threads, we have to aggregate the results so that the
* final output of the result would show the performance numbers that were
* collected from all the threads.
* BW and message rate values will be the sum of their values from all
* the threads, while the latency value is the average latency from the
* threads. */
for (i = 0; i < thread_count; i++) {
agg_result.bandwidth.total_average += tctx[i].result.bandwidth.total_average;
agg_result.msgrate.total_average += tctx[i].result.msgrate.total_average;
lat_sum_total_average += tctx[i].result.latency.total_average;
}
agg_result.latency.total_average = lat_sum_total_average / thread_count;
rte_call(perf, report, &agg_result, perf->params.report_arg, 1, 1);
}
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result)
{
ucx_perf_thread_context_t* tctx = perf->ucp.tctx; /* all the thread contexts on perf */
int ti, thread_count = perf->params.thread_count;
ucs_status_t* statuses;
ucs_status_t status;
omp_set_num_threads(thread_count);
statuses = calloc(thread_count, sizeof(ucs_status_t));
if (statuses == NULL) {
status = UCS_ERR_NO_MEMORY;
goto out;
}
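/* Every OpenMP thread runs the benchmark on its own per-thread perf context;
 * per-thread failures are collected and reported after the parallel region. */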
#pragma omp parallel private(ti)
{
ti = omp_get_thread_num();
tctx[ti].status = ucx_perf_thread_run_test((void*)&tctx[ti]);
}
status = UCS_OK;
for (ti = 0; ti < thread_count; ti++) {
if (UCS_OK != tctx[ti].status) {
ucs_error("Thread %d failed to run test: %s", tctx[ti].tid,
ucs_status_string(tctx[ti].status));
status = tctx[ti].status;
}
}
ucx_perf_thread_report_aggregated_results(perf);
free(statuses);
out:
return status;
}
#else
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result) {
ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)");
return UCS_ERR_INVALID_PARAM;
}
#endif /* _OPENMP */
void ucx_perf_global_init()
{
static ucx_perf_allocator_t host_allocator = {
.mem_type = UCS_MEMORY_TYPE_HOST,
.init = ucs_empty_function_return_success,
.ucp_alloc = ucp_perf_test_alloc_host,
.ucp_free = ucp_perf_test_free_host,
.uct_alloc = uct_perf_test_alloc_host,
.uct_free = uct_perf_test_free_host,
.memcpy = ucx_perf_test_memcpy_host,
.memset = memset
};
UCS_MODULE_FRAMEWORK_DECLARE(ucx_perftest);
ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_HOST] = &host_allocator;
/* FIXME Memtype allocator modules must be loaded to global scope, otherwise
* alloc hooks, which are using dlsym() to get pointer to original function,
* do not work. Need to use bistro for memtype hooks to fix it.
*/
UCS_MODULE_FRAMEWORK_LOAD(ucx_perftest, UCS_MODULE_LOAD_FLAG_GLOBAL);
}
|
suma.c
|
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main(int argc, const char * argv[]) {
int n = 100;
if (argc == 1)
{
printf("Use: %s <número de items>\n", argv[0]);
exit(0);
}
else {
n = atoi(argv[1]);
}
int suma = 0;
int i;
int numeros[n];
double t_inicial = 0, t_final = 0;
t_inicial = omp_get_wtime();
#pragma omp parallel for private(i) if (n > 100)
for (i = 0; i < n; ++i) {
numeros[i] = 1;
}
t_final = omp_get_wtime();
printf("La inicialización del vector demoró %.5f\n", t_final - t_inicial);
t_inicial = omp_get_wtime();
#pragma omp parallel private(i)
{
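/* Manual block partitioning: each thread sums a contiguous chunk [inicio, fin);
the last thread also takes the remainder, and the partial sums are combined
under a critical section. */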
int nhilos = omp_get_num_threads();
int id = omp_get_thread_num();
int num = n / nhilos;
int inicio = id * num;
int fin = inicio + num;
if (id == (nhilos - 1)) { fin = n; }
int suma_local = 0;
for (i = inicio; i < fin; ++i) {
suma_local += numeros[i];
}
//printf("La suma parcial del hilo %d = %d\n", id, suma_local);
#pragma omp critical
{
suma += suma_local;
}
printf("Soy el hilo %d de %d y proceso el elemento [%d]\n", id, nhilos, i);
}
t_final = omp_get_wtime();
printf("La suma del vector demoró %.5f\n", t_final - t_inicial);
//printf("La suma = %d\n", suma);
return 0;
}
|
dgesv.c
|
/**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgesv.c, normal z -> d, Fri Sep 28 17:38:05 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
******************************************************************************/
int plasma_dgesv(int n, int nrhs,
double *pA, int lda, int *ipiv,
double *pB, int ldb)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
if (n < 0) {
plasma_error("illegal value of n");
return -1;
}
if (nrhs < 0) {
plasma_error("illegal value of nrhs");
return -2;
}
if (lda < imax(1, n)) {
plasma_error("illegal value of lda");
return -4;
}
if (ldb < imax(1, n)) {
plasma_error("illegal value of ldb");
return -7;
}
// quick return
if (imin(n, nrhs) == 0)
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_getrf(plasma, PlasmaRealDouble, n, n);
// Set tiling parameters.
int nb = plasma->nb;
// Initialize barrier.
plasma_barrier_init(&plasma->barrier);
// Create tile matrix.
plasma_desc_t A;
plasma_desc_t B;
int retval;
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
n, n, 0, 0, n, n, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
n, nrhs, 0, 0, n, nrhs, &B);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
plasma_omp_dge2desc(pB, ldb, B, &sequence, &request);
// Call the tile async function.
plasma_omp_dgesv(A, ipiv, B, &sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_ddesc2ge(A, pA, lda, &sequence, &request);
plasma_omp_ddesc2ge(B, pB, ldb, &sequence, &request);
}
// Free matrix A in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
// Return status.
int status = sequence.status;
return status;
}
/***************************************************************************//**
*
******************************************************************************/
void plasma_omp_dgesv(plasma_desc_t A, int *ipiv,
plasma_desc_t B,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
plasma_error("invalid A");
return;
}
if (plasma_desc_check(B) != PlasmaSuccess) {
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
plasma_error("invalid B");
return;
}
if (sequence == NULL) {
plasma_fatal_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_fatal_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
if (A.n == 0 || B.n == 0)
return;
// Call the parallel functions.
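// LU factorization with partial pivoting, then apply the row swaps to B and
// solve the lower (unit) and upper triangular systems to obtain X.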
plasma_pdgetrf(A, ipiv, sequence, request);
plasma_pdgeswp(PlasmaRowwise, B, ipiv, 1, sequence, request);
plasma_pdtrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
1.0, A,
B,
sequence, request);
plasma_pdtrsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
1.0, A,
B,
sequence, request);
}
|
pr70550-2.c
|
/* PR middle-end/70550 */
/* { dg-do compile } */
/* { dg-additional-options "-Wuninitialized" } */
void bar (int);
void
foo (void)
{
int i, j, k, l, m, n, o, p, q;
#pragma omp task /* { dg-bogus "is used uninitialized in this function" } */
{
i = 2;
bar (i);
}
#pragma omp taskloop /* { dg-bogus "is used uninitialized in this function" } */
for (j = 0; j < 10; j++)
{
k = 7;
bar (k);
}
#pragma omp task firstprivate (l) /* { dg-warning "is used uninitialized in this function" } */
{
l = 2;
bar (l);
}
#pragma omp taskloop firstprivate (m) /* { dg-warning "is used uninitialized in this function" } */
for (j = 0; j < 10; j++)
{
m = 7;
bar (m);
}
#pragma omp task shared (n) /* { dg-bogus "is used uninitialized in this function" } */
{
n = 2;
bar (n);
}
#pragma omp taskloop shared (o) /* { dg-bogus "is used uninitialized in this function" } */
for (j = 0; j < 10; j++)
{
o = 7;
bar (o);
}
#pragma omp task private (p) /* { dg-bogus "is used uninitialized in this function" } */
{
p = 2;
bar (p);
}
#pragma omp taskloop shared (q) /* { dg-bogus "is used uninitialized in this function" } */
for (j = 0; j < 10; j++)
{
q = 7;
bar (q);
}
}
|
DRB090-static-local-orig-yes.c
|
/*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: [email protected], [email protected], [email protected],
[email protected], [email protected])
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
For a variable declared in a scope inside an OpenMP construct:
* private if the variable has an automatic storage duration
* shared if the variable has a static storage duration.
Dependence pairs:
tmp@73:5 vs. tmp@73:5
tmp@73:5 vs. tmp@74:12
*/
#include<stdio.h>
int main(int argc, char * argv[])
{
int i;
int len = 100;
int a[len], b[len];
int _ret_val_0;
#pragma cetus private(i)
#pragma loop name main#0
#pragma cetus parallel
#pragma omp parallel for private(i)
for (i=0; i<len; i ++ )
{
a[i]=i;
b[i]=i;
}
/* static storage for a local variable */
{
static int tmp;
#pragma cetus private(i, tmp)
#pragma loop name main#1
#pragma cetus parallel
#pragma omp parallel for private(i, tmp)
for (i=0; i<len; i ++ )
{
tmp=(a[i]+i);
a[i]=tmp;
}
}
/* automatic storage for a local variable */
{
int tmp;
#pragma cetus private(i, tmp)
#pragma loop name main#2
#pragma cetus parallel
#pragma omp parallel for private(i, tmp)
for (i=0; i<len; i ++ )
{
tmp=(b[i]+i);
b[i]=tmp;
}
}
printf("a[50]=%d b[50]=%d\n", a[50], b[50]);
_ret_val_0=0;
return _ret_val_0;
}
|
cpl_imagelist_io-test.c
|
/*
* This file is part of the ESO Common Pipeline Library
* Copyright (C) 2001-2017 European Southern Observatory
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*-----------------------------------------------------------------------------
Includes
-----------------------------------------------------------------------------*/
#include "cpl_imagelist_io.h"
#include "cpl_test.h"
#include "cpl_fits.h"
#include "cpl_tools.h"
/*-----------------------------------------------------------------------------
Define
-----------------------------------------------------------------------------*/
#ifndef IMAGESZ
#define IMAGESZ 127
#endif
#ifndef NIMAGES
#define NIMAGES 10
#endif
#define FILENAME "cpl_imagelist_test.fits"
/*-----------------------------------------------------------------------------
Static functions
-----------------------------------------------------------------------------*/
static void cpl_imagelist_save_compression_test(void);
static void cpl_imagelist_save_compression_bench(cpl_size, cpl_size,
cpl_size, cpl_size);
/*-----------------------------------------------------------------------------
Main
-----------------------------------------------------------------------------*/
int main(void)
{
const char * file = FILENAME;
cpl_imagelist * imlist;
cpl_imagelist * imlist2;
cpl_imagelist * imlista;
cpl_imagelist * nulllist;
cpl_image * image;
cpl_image * nullimg;
double flags[NIMAGES];
cpl_vector * eraser;
cpl_size i;
FILE * stream;
int next;
#if IMAGESZ > 127
const cpl_size boxsize = 5;
#elif IMAGESZ < 25
/* FIXME: Will the tests pass with this ? */
const cpl_size boxsize = 1;
#else
const cpl_size boxsize = IMAGESZ / 25;
#endif
cpl_error_code error;
cpl_test_init(PACKAGE_BUGREPORT, CPL_MSG_WARNING);
stream = cpl_msg_get_level() > CPL_MSG_INFO
? fopen("/dev/null", "a") : stdout;
/* Insert tests below */
cpl_test_nonnull(stream);
eraser = cpl_vector_wrap(NIMAGES, flags);
cpl_msg_info("", "Checking various failures");
image = cpl_image_fill_test_create(IMAGESZ, IMAGESZ);
cpl_test_nonnull( image );
/* Test cpl_imagelist_new() */
imlist = cpl_imagelist_new();
cpl_test_nonnull( imlist );
cpl_test_zero( cpl_imagelist_get_size(imlist) );
cpl_test_eq( cpl_imagelist_is_uniform(imlist), 1);
/* Failure tests involving NULL and empty lists */
i = cpl_imagelist_get_size(NULL);
cpl_test_error( CPL_ERROR_NULL_INPUT );
cpl_test_eq( i, -1 );
i = cpl_imagelist_is_uniform(NULL);
cpl_test_error( CPL_ERROR_NULL_INPUT );
cpl_test( i < 0);
error = cpl_imagelist_set(NULL, image, 0);
cpl_test_eq_error(error, CPL_ERROR_NULL_INPUT );
error = cpl_imagelist_set(imlist, NULL, 0);
cpl_test_eq_error(error, CPL_ERROR_NULL_INPUT );
error = cpl_imagelist_set(imlist, image, -1);
cpl_test_eq_error(error, CPL_ERROR_ILLEGAL_INPUT );
error = cpl_imagelist_set(imlist, image, 1);
cpl_test_eq_error( error, CPL_ERROR_ACCESS_OUT_OF_RANGE );
nullimg = cpl_imagelist_get(NULL, 0);
cpl_test_error( CPL_ERROR_NULL_INPUT );
cpl_test_null( nullimg );
nullimg = cpl_imagelist_get(imlist, -1);
cpl_test_error( CPL_ERROR_ILLEGAL_INPUT );
cpl_test_null( nullimg );
nullimg = cpl_imagelist_get(imlist, 0);
cpl_test_error( CPL_ERROR_ACCESS_OUT_OF_RANGE );
cpl_test_null( nullimg );
error = cpl_imagelist_erase(imlist, NULL);
cpl_test_eq_error( error, CPL_ERROR_NULL_INPUT );
/* The elements of eraser are not initialized at this point,
but they are also not supposed to be accessed */
error = cpl_imagelist_erase(NULL, eraser);
cpl_test_eq_error( error, CPL_ERROR_NULL_INPUT );
error = cpl_imagelist_erase(imlist, eraser);
cpl_test_eq_error( error, CPL_ERROR_INCOMPATIBLE_INPUT );
nulllist = cpl_imagelist_duplicate(NULL);
cpl_test_error( CPL_ERROR_NULL_INPUT );
cpl_test_null( nulllist );
error = cpl_imagelist_save(imlist, NULL, CPL_TYPE_FLOAT, NULL,
CPL_IO_CREATE);
cpl_test_eq_error( error, CPL_ERROR_NULL_INPUT );
error = cpl_imagelist_save(NULL, file, CPL_TYPE_FLOAT, NULL,
CPL_IO_CREATE);
cpl_test_eq_error( error, CPL_ERROR_NULL_INPUT );
error = cpl_imagelist_save(imlist, file, CPL_TYPE_FLOAT, NULL,
CPL_IO_CREATE);
cpl_test_eq_error( error, CPL_ERROR_ILLEGAL_INPUT );
nulllist = cpl_imagelist_load(NULL, CPL_TYPE_INT, 0);
cpl_test_error( CPL_ERROR_NULL_INPUT );
cpl_test_null( nulllist );
nulllist = cpl_imagelist_load(".", CPL_TYPE_INT, 0);
cpl_test_error( CPL_ERROR_FILE_IO );
cpl_test_null( nulllist );
nulllist = cpl_imagelist_load_window(NULL, CPL_TYPE_INT, 0, 1, 1, 2, 2);
cpl_test_error( CPL_ERROR_NULL_INPUT );
cpl_test_null( nulllist );
nulllist = cpl_imagelist_load_window(file, CPL_TYPE_INT, -1, 1, 1, 2, 2);
cpl_test_error( CPL_ERROR_ILLEGAL_INPUT );
cpl_test_null( nulllist );
nulllist = cpl_imagelist_load_window(".", CPL_TYPE_INT, 0, 1, 1, 2, 2);
cpl_test_error( CPL_ERROR_FILE_IO );
cpl_test_null( nulllist );
/* Test cpl_imagelist_duplicate() of empty list */
imlist2 = cpl_imagelist_duplicate(imlist);
cpl_test_nonnull( imlist2 );
cpl_test_zero( cpl_imagelist_get_size(imlist2) );
cpl_test_eq( cpl_imagelist_is_uniform(imlist2), 1);
cpl_imagelist_empty(imlist2); /* Test cpl_imagelist_unset() */
cpl_imagelist_delete(imlist2);
cpl_msg_info("", "Create an image list of %d images", NIMAGES);
/* Test cpl_imagelist_set() */
for (i=0; i < NIMAGES; i++) {
cpl_image * copy = cpl_image_fill_test_create(IMAGESZ, IMAGESZ);
flags[i] = i % 2 ? 1.0 : -1.0;
error = cpl_imagelist_set(imlist, copy, i);
cpl_test_eq_error(error, CPL_ERROR_NONE);
cpl_test_zero( cpl_imagelist_is_uniform(imlist) );
cpl_test_eq( cpl_imagelist_get_size(imlist), i+1 );
cpl_test_eq_ptr( cpl_imagelist_get(imlist, i), copy );
/* Insert image twice, last image will remain twice in list */
error = cpl_imagelist_set(imlist, copy, i + 1);
cpl_test_eq_error(error, CPL_ERROR_NONE);
}
cpl_image_delete(image);
(void)cpl_imagelist_unset(imlist, NIMAGES);
/* No real testing, but at least make use of
cpl_imagelist_dump_{structure, window} */
cpl_imagelist_dump_structure(imlist, stream);
cpl_imagelist_dump_window(imlist,
(IMAGESZ / 2) - boxsize,
(IMAGESZ / 2) - boxsize,
(IMAGESZ / 2) + boxsize,
(IMAGESZ / 2) + boxsize,
stream);
cpl_msg_info("", "Cast the image list");
error = cpl_imagelist_cast(NULL, NULL, CPL_TYPE_UNSPECIFIED);
cpl_test_eq_error(error, CPL_ERROR_NULL_INPUT);
error = cpl_imagelist_cast(NULL, imlist, CPL_TYPE_UNSPECIFIED);
cpl_test_eq_error(error, CPL_ERROR_NULL_INPUT);
error = cpl_imagelist_cast(imlist, imlist, CPL_TYPE_UNSPECIFIED);
cpl_test_eq_error(error, CPL_ERROR_INCOMPATIBLE_INPUT);
imlist2 = cpl_imagelist_new();
error = cpl_imagelist_cast(imlist2, imlist, CPL_TYPE_INVALID);
cpl_test_eq_error(error, CPL_ERROR_ILLEGAL_INPUT);
cpl_test_zero( cpl_imagelist_get_size(imlist2));
error = cpl_imagelist_cast(imlist2, imlist, CPL_TYPE_UNSPECIFIED);
cpl_test_eq_error(error, CPL_ERROR_INVALID_TYPE);
cpl_test_zero( cpl_imagelist_get_size(imlist2));
error = cpl_imagelist_cast(imlist2, imlist, CPL_TYPE_FLOAT);
cpl_test_eq_error(error, CPL_ERROR_NONE);
cpl_test_zero( cpl_imagelist_is_uniform(imlist2));
cpl_test_eq( cpl_imagelist_get_size(imlist2), NIMAGES );
error = cpl_imagelist_cast(imlist2, NULL, CPL_TYPE_INT);
cpl_test_eq_error(error, CPL_ERROR_NONE);
cpl_test_zero( cpl_imagelist_is_uniform(imlist2));
cpl_test_eq( cpl_imagelist_get_size(imlist2), NIMAGES );
error = cpl_imagelist_cast(imlist2, imlist, CPL_TYPE_UNSPECIFIED);
cpl_test_eq_error(error, CPL_ERROR_NONE);
cpl_test_zero( cpl_imagelist_is_uniform(imlist2));
cpl_test_eq( cpl_imagelist_get_size(imlist2), 2 * NIMAGES );
/* Test a cast with a shared image */
error = cpl_imagelist_set(imlist2, cpl_imagelist_get(imlist2, 0),
cpl_imagelist_get_size(imlist2));
cpl_test_eq( cpl_imagelist_get_size(imlist2), 2 * NIMAGES + 1);
cpl_test_eq_error(error, CPL_ERROR_NONE);
error = cpl_imagelist_cast(imlist2, NULL, CPL_TYPE_FLOAT_COMPLEX);
cpl_test_eq_error(error, CPL_ERROR_NONE);
cpl_test_zero( cpl_imagelist_is_uniform(imlist2));
cpl_test_eq( cpl_imagelist_get_size(imlist2), 2 * NIMAGES + 1);
error = cpl_imagelist_cast(imlist2, NULL, CPL_TYPE_FLOAT);
cpl_test_eq_error(error, CPL_ERROR_TYPE_MISMATCH);
cpl_test_zero( cpl_imagelist_is_uniform(imlist2));
cpl_test_eq( cpl_imagelist_get_size(imlist2), 2 * NIMAGES + 1);
cpl_imagelist_delete(imlist2);
cpl_msg_info("", "Duplicate the image list");
imlist2 = cpl_imagelist_duplicate(imlist);
cpl_test_nonnull( imlist2 );
cpl_test_zero( cpl_imagelist_is_uniform(imlist2));
cpl_test_eq( cpl_imagelist_get_size(imlist2), NIMAGES );
/* Failure tests involving non-empty, valid list */
error = cpl_imagelist_save(imlist2, ".", CPL_TYPE_FLOAT, NULL,
CPL_IO_CREATE);
cpl_test_eq_error( error, CPL_ERROR_FILE_IO );
remove(file);
error = cpl_imagelist_save(imlist2, file, CPL_TYPE_FLOAT, NULL,
CPL_IO_APPEND);
cpl_test_eq_error( error, CPL_ERROR_FILE_IO );
error = cpl_imagelist_save(imlist2, file, CPL_TYPE_FLOAT, NULL,
CPL_IO_CREATE | CPL_IO_EXTEND);
cpl_test_eq_error( error, CPL_ERROR_ILLEGAL_INPUT );
error = cpl_imagelist_save(imlist2, file, CPL_TYPE_FLOAT, NULL,
CPL_IO_CREATE | CPL_IO_EXTEND | CPL_IO_APPEND);
cpl_test_eq_error( error, CPL_ERROR_ILLEGAL_INPUT );
image = cpl_image_new(IMAGESZ, IMAGESZ, CPL_TYPE_INT);
cpl_test_nonnull( image );
error = cpl_imagelist_set(imlist2, image, 0);
cpl_test_eq_error( error, CPL_ERROR_TYPE_MISMATCH );
cpl_test_zero( cpl_imagelist_is_uniform(imlist2) );
cpl_test_eq( cpl_imagelist_get_size(imlist2), NIMAGES );
cpl_image_delete(image);
image = cpl_image_new(IMAGESZ/2, IMAGESZ, CPL_TYPE_INT);
cpl_test_nonnull( image );
error = cpl_imagelist_set(imlist2, image, 0);
cpl_test_eq_error( error, CPL_ERROR_INCOMPATIBLE_INPUT );
cpl_test_zero( cpl_imagelist_is_uniform(imlist2) );
cpl_test_eq( cpl_imagelist_get_size(imlist2), NIMAGES );
nullimg = cpl_imagelist_get(imlist2, NIMAGES+1);
cpl_test_error( CPL_ERROR_ACCESS_OUT_OF_RANGE );
cpl_test_null( nullimg );
cpl_test_zero( cpl_imagelist_is_uniform(imlist2) );
cpl_test_eq( cpl_imagelist_get_size(imlist2), NIMAGES );
cpl_imagelist_empty(imlist2); /* Test cpl_imagelist_unset() */
/* Imagelist with 1 image */
error = cpl_imagelist_set(imlist2, image, 0);
cpl_test_eq_error( error, CPL_ERROR_NONE );
/* Must be allowed to replace it with an image of different size/type */
image = cpl_image_new(IMAGESZ, IMAGESZ/2, CPL_TYPE_DOUBLE);
cpl_test_nonnull( image );
error = cpl_imagelist_set(imlist2, image, 0);
cpl_test_eq_error( error, CPL_ERROR_NONE );
cpl_imagelist_delete(imlist2);
/* Normal conditions */
/* Create double-length list, with images inserted twice */
imlista = cpl_imagelist_duplicate(imlist);
for (i = 0; i < NIMAGES; i++) {
image = cpl_imagelist_get(imlista, i);
error = cpl_imagelist_set(imlista, image, i + NIMAGES);
cpl_test_eq_error(error, CPL_ERROR_NONE);
cpl_test_zero( cpl_imagelist_is_uniform(imlista) );
cpl_test_eq( cpl_imagelist_get_size(imlista), i+1 + NIMAGES );
cpl_test_eq_ptr( cpl_imagelist_get_const(imlista, i + NIMAGES),
cpl_imagelist_get_const(imlista, i) );
}
/* Test cpl_imagelist_save() */
cpl_msg_info("", "Save the image list to %s", file);
error = cpl_imagelist_save(imlist, file, CPL_TYPE_FLOAT, NULL,
CPL_IO_CREATE);
cpl_test_eq_error(error, CPL_ERROR_NONE);
cpl_test_fits(file);
cpl_test_zero( cpl_fits_count_extensions(file));
next = 0;
error = cpl_imagelist_save(imlist, file, CPL_TYPE_FLOAT, NULL,
CPL_IO_EXTEND);
cpl_test_eq_error(error, CPL_ERROR_NONE);
cpl_test_fits(file);
cpl_test_eq( cpl_fits_count_extensions(file), ++next);
/* cpl_image_load(): Test loading of NAXIS=3 data (DFS09929) */
image = cpl_image_load(file, CPL_TYPE_UNSPECIFIED, 0, 0);
cpl_test_error(CPL_ERROR_NONE);
cpl_test_image_abs(image, cpl_imagelist_get_const(imlist, 0),
15.0 * FLT_EPSILON);
cpl_image_delete(image);
/* Test cpl_imagelist_load() handling of negative extension */
nulllist = cpl_imagelist_load(file, CPL_TYPE_INT, -1);
cpl_test_error( CPL_ERROR_ILLEGAL_INPUT );
cpl_test_null( nulllist );
/* Append error test */
error = cpl_imagelist_save(imlist, file, CPL_TYPE_DOUBLE, NULL,
CPL_IO_APPEND);
cpl_test_eq_error(error, CPL_ERROR_ILLEGAL_INPUT);
image = cpl_image_new(IMAGESZ+1,IMAGESZ+1, CPL_TYPE_FLOAT);
imlist2 = cpl_imagelist_new();
cpl_imagelist_set(imlist2, image, 0);
error = cpl_imagelist_save(imlist2, file, CPL_TYPE_FLOAT, NULL,
CPL_IO_APPEND);
cpl_test_eq_error(error, CPL_ERROR_ILLEGAL_INPUT);
cpl_imagelist_delete(imlist2);
/* Tests cpl_imagelist_load() for no-casting type */
imlist2 = cpl_imagelist_load(file, CPL_TYPE_UNSPECIFIED, 0);
cpl_test_error(CPL_ERROR_NONE);
cpl_test_nonnull(imlist2);
cpl_test_imagelist_abs(imlist, imlist2, 10.0 * FLT_EPSILON);
cpl_imagelist_delete(imlist2);
error = cpl_imagelist_save(imlist, file, CPL_TYPE_FLOAT, NULL,
CPL_IO_APPEND);
cpl_test_eq_error(error, CPL_ERROR_NONE);
cpl_test_fits(file);
cpl_test_eq( cpl_fits_count_extensions(file), next);
imlist2 = cpl_imagelist_load(file, CPL_TYPE_UNSPECIFIED, 1);
cpl_test_error(CPL_ERROR_NONE);
cpl_test_nonnull(imlist2);
cpl_test_imagelist_abs(imlista, imlist2, 10.0 * FLT_EPSILON);
cpl_imagelist_delete(imlista);
error = cpl_imagelist_erase(imlist, eraser);
cpl_test_eq_error(error, CPL_ERROR_NONE);
cpl_test_zero( cpl_imagelist_is_uniform(imlist) );
cpl_test_eq( 2*cpl_imagelist_get_size(imlist), NIMAGES );
cpl_imagelist_empty(imlist); /* Test cpl_imagelist_unset() */
cpl_imagelist_delete(imlist);
/* Tests cpl_imagelist_load() for no-casting type */
imlist = cpl_imagelist_load(file, CPL_TYPE_UNSPECIFIED, 0);
cpl_test_error(CPL_ERROR_NONE);
cpl_test_nonnull(imlist);
image = cpl_imagelist_get(imlist, 0);
cpl_test_eq(cpl_image_get_type(image), CPL_TYPE_FLOAT);
cpl_imagelist_delete(imlist);
imlist = cpl_imagelist_load_window(file, CPL_TYPE_UNSPECIFIED, 0,
IMAGESZ/4, IMAGESZ/4,
3*IMAGESZ/4, 3*IMAGESZ/4);
cpl_test_error(CPL_ERROR_NONE);
cpl_test_nonnull(imlist);
image = cpl_imagelist_get(imlist, 0);
cpl_test_eq(cpl_image_get_type(image), CPL_TYPE_FLOAT);
cpl_imagelist_delete(imlist);
/* Test cpl_imagelist_load() */
cpl_msg_info("", "Load image list as type DOUBLE");
imlist = cpl_imagelist_load(file, CPL_TYPE_DOUBLE, 0);
cpl_test_nonnull( imlist );
cpl_test_zero( cpl_imagelist_is_uniform(imlist) );
cpl_test_eq( cpl_imagelist_get_size(imlist), NIMAGES );
error = cpl_imagelist_set(imlist, cpl_imagelist_get(imlist, 0), 2);
cpl_test_eq_error(error, CPL_ERROR_NONE);
error = cpl_imagelist_erase(imlist, eraser);
cpl_test_eq_error(error, CPL_ERROR_NONE);
cpl_test_zero( cpl_imagelist_is_uniform(imlist) );
cpl_test_eq( 2*cpl_imagelist_get_size(imlist), NIMAGES );
cpl_imagelist_empty(imlist); /* Test cpl_imagelist_unset() */
cpl_imagelist_delete(imlist);
/* Test cpl_imagelist_load() */
cpl_msg_info("", "Load image list as type FLOAT");
imlist = cpl_imagelist_load(file, CPL_TYPE_FLOAT, 0);
cpl_test_nonnull( imlist );
cpl_test_zero( cpl_imagelist_is_uniform(imlist) );
cpl_test_eq( cpl_imagelist_get_size(imlist), NIMAGES );
error = cpl_imagelist_erase(imlist, eraser);
cpl_test_eq_error(error, CPL_ERROR_NONE);
cpl_test_zero( cpl_imagelist_is_uniform(imlist) );
cpl_test_eq( 2*cpl_imagelist_get_size(imlist), NIMAGES );
cpl_imagelist_empty(imlist); /* Test cpl_imagelist_unset() */
cpl_imagelist_delete(imlist);
/* Test cpl_imagelist_load() */
cpl_msg_info("", "Load image list as type INTEGER");
imlist = cpl_imagelist_load(file, CPL_TYPE_INT, 0);
cpl_test_nonnull( imlist );
cpl_test_zero( cpl_imagelist_is_uniform(imlist) );
cpl_test_eq( cpl_imagelist_get_size(imlist), NIMAGES );
error = cpl_imagelist_erase(imlist, eraser);
cpl_test_eq_error(error, CPL_ERROR_NONE);
cpl_test_zero( cpl_imagelist_is_uniform(imlist) );
cpl_test_eq( 2*cpl_imagelist_get_size(imlist), NIMAGES );
cpl_imagelist_empty(imlist); /* Test cpl_imagelist_unset() */
cpl_imagelist_delete(imlist);
/* Test cpl_imagelist_load_window() */
cpl_msg_info("", "Load image list as type DOUBLE from a window");
imlist = cpl_imagelist_load_window(file, CPL_TYPE_DOUBLE, 0, IMAGESZ/4,
IMAGESZ/4, 3*IMAGESZ/4, 3*IMAGESZ/4);
cpl_test_nonnull( imlist );
cpl_test_zero( cpl_imagelist_is_uniform(imlist) );
cpl_test_eq( cpl_imagelist_get_size(imlist), NIMAGES );
error = cpl_imagelist_erase(imlist, eraser);
cpl_test_eq_error(error, CPL_ERROR_NONE);
cpl_test_zero( cpl_imagelist_is_uniform(imlist) );
cpl_test_eq( 2*cpl_imagelist_get_size(imlist), NIMAGES );
cpl_imagelist_empty(imlist); /* Test cpl_imagelist_unset() */
cpl_imagelist_delete(imlist);
/* Test cpl_imagelist_load_window() */
cpl_msg_info("", "Load image list as type FLOAT from a window");
imlist = cpl_imagelist_load_window(file, CPL_TYPE_FLOAT, 0, IMAGESZ/4,
IMAGESZ/4, 3*IMAGESZ/4, 3*IMAGESZ/4);
cpl_test_nonnull( imlist );
cpl_test_zero( cpl_imagelist_is_uniform(imlist) );
cpl_test_eq( cpl_imagelist_get_size(imlist), NIMAGES );
error = cpl_imagelist_erase(imlist, eraser);
cpl_test_eq_error(error, CPL_ERROR_NONE);
cpl_test_zero( cpl_imagelist_is_uniform(imlist) );
cpl_test_eq( 2*cpl_imagelist_get_size(imlist), NIMAGES );
cpl_imagelist_empty(imlist); /* Test cpl_imagelist_unset() */
cpl_imagelist_delete(imlist);
/* Test cpl_imagelist_load_window() */
cpl_msg_info("", "Load image list as type INTEGER from a window");
imlist = cpl_imagelist_load_window(file, CPL_TYPE_INT, 0, IMAGESZ/4,
IMAGESZ/4, 3*IMAGESZ/4, 3*IMAGESZ/4);
cpl_test_nonnull( imlist );
cpl_test_zero( cpl_imagelist_is_uniform(imlist) );
cpl_test_eq( cpl_imagelist_get_size(imlist), NIMAGES );
error = cpl_imagelist_erase(imlist, eraser);
cpl_test_eq_error(error, CPL_ERROR_NONE);
cpl_test_zero( cpl_imagelist_is_uniform(imlist) );
cpl_test_eq( 2*cpl_imagelist_get_size(imlist), NIMAGES );
/* Clean up */
cpl_imagelist_empty(imlist); /* Test cpl_imagelist_unset() */
cpl_imagelist_delete(imlist);
cpl_imagelist_empty(imlist2); /* Test cpl_imagelist_unset() */
cpl_imagelist_delete(imlist2);
cpl_vector_unwrap(eraser);
cpl_imagelist_save_compression_test();
if (cpl_msg_get_level() <= CPL_MSG_INFO) {
cpl_imagelist_save_compression_bench(256, 256, 12, 256);
} else {
cpl_imagelist_save_compression_bench(IMAGESZ, IMAGESZ, 32, 3);
}
cpl_test_zero( remove(FILENAME) );
if (stream != stdout) cpl_test_zero( fclose(stream) );
/* End of tests */
return cpl_test_end(0);
}
/*----------------------------------------------------------------------------*/
/**
@brief Test saving with compression
@return void
@see cpl_imagelist_save()
*/
/*----------------------------------------------------------------------------*/
static void cpl_imagelist_save_compression_test(void)
{
/* Default CFITSIO quantization parameter for lossy floating point
compression is q = 1, reducing precision to 4.08 % */
const double prec = 0.0408;
const double maxval = 255.0;
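    /* prec * maxval is later used as the absolute tolerance when comparing a
       lossily compressed floating-point image against the original; integer
       data is expected to round-trip exactly */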
const cpl_type imtypes[] = {CPL_TYPE_DOUBLE, CPL_TYPE_FLOAT,
CPL_TYPE_INT};
const size_t ntyp = sizeof(imtypes)/sizeof(imtypes[0]);
size_t ityp;
/* Saving with different image types, bitpix and compression types */
for (ityp = 0; ityp < ntyp; ityp++)
{
const cpl_type bpps[] = {CPL_TYPE_UCHAR, CPL_TYPE_SHORT,
CPL_TYPE_USHORT, CPL_TYPE_INT,
CPL_TYPE_FLOAT, CPL_TYPE_DOUBLE,
CPL_TYPE_UNSPECIFIED};
const size_t nbpp = sizeof(bpps)/sizeof(bpps[0]);
size_t ibpp;
const cpl_type imtype = imtypes[ityp];
cpl_image * img = cpl_image_new(IMAGESZ, IMAGESZ, imtype);
cpl_imagelist * imglist = cpl_imagelist_new();
cpl_error_code error;
error = cpl_image_fill_noise_uniform(img, 0.0, maxval);
cpl_test_eq_error(error, CPL_ERROR_NONE);
error = cpl_imagelist_set(imglist, img, 0);
cpl_test_eq_error(error, CPL_ERROR_NONE);
for (ibpp = 0; ibpp < nbpp; ibpp++)
{
const cpl_io_type comp_meths[] = {CPL_IO_COMPRESS_GZIP,
CPL_IO_COMPRESS_RICE,
CPL_IO_COMPRESS_HCOMPRESS,
CPL_IO_COMPRESS_PLIO};
const size_t ncomp = sizeof(comp_meths)/sizeof(comp_meths[0]);
size_t icomp;
const int bitpix =
cpl_tools_get_bpp(bpps[ibpp] == CPL_TYPE_UNSPECIFIED
? imtype : bpps[ibpp]);
int ext = 0;
error = cpl_imagelist_save(imglist, FILENAME, bpps[ibpp], NULL,
CPL_IO_CREATE);
cpl_test_eq_error(error, CPL_ERROR_NONE);
/* Tests with compression */
for(icomp = 0; icomp < ncomp; icomp++)
{
const cpl_io_type comp_method = comp_meths[icomp];
/* The compression method flag must be non-zero */
cpl_test(comp_method);
                /* Saving with compression in unsupported combinations */
error = cpl_imagelist_save(NULL, FILENAME, bpps[ibpp], NULL,
CPL_IO_EXTEND | comp_method);
cpl_test_eq_error(error, CPL_ERROR_NULL_INPUT);
                /* Compression is only supported when adding new extensions,
                 * not when creating a new file and saving data in the primary
                 * (main) header unit */
error = cpl_imagelist_save(imglist, FILENAME, bpps[ibpp], NULL,
CPL_IO_CREATE | comp_method);
cpl_test_eq_error(error, CPL_ERROR_ILLEGAL_INPUT);
error = cpl_imagelist_save(imglist, FILENAME, bpps[ibpp], NULL,
CPL_IO_APPEND | comp_method);
cpl_test_eq_error(error, CPL_ERROR_ILLEGAL_INPUT);
for(size_t icomp2 = 0; icomp2 < icomp; icomp2++) {
const cpl_io_type comp_method2 = comp_meths[icomp2];
error = cpl_imagelist_save(imglist, FILENAME, bpps[ibpp], NULL,
CPL_IO_EXTEND | comp_method
| comp_method2);
cpl_test_eq_error(error, CPL_ERROR_ILLEGAL_INPUT);
error = cpl_imagelist_save(imglist, FILENAME, bpps[ibpp], NULL,
CPL_IO_APPEND | comp_method
| comp_method2);
cpl_test_eq_error(error, CPL_ERROR_ILLEGAL_INPUT);
for(size_t icomp3 = 0; icomp3 < icomp2; icomp3++) {
const cpl_io_type comp_method3 = comp_meths[icomp3];
error = cpl_imagelist_save(imglist, FILENAME, bpps[ibpp], NULL,
CPL_IO_EXTEND | comp_method
| comp_method2 | comp_method3);
cpl_test_eq_error(error, CPL_ERROR_ILLEGAL_INPUT);
error = cpl_imagelist_save(imglist, FILENAME, bpps[ibpp], NULL,
CPL_IO_APPEND | comp_method
| comp_method2 | comp_method3);
cpl_test_eq_error(error, CPL_ERROR_ILLEGAL_INPUT);
}
}
error = cpl_imagelist_save(imglist, FILENAME, bpps[ibpp], NULL,
CPL_IO_EXTEND | CPL_IO_COMPRESS_GZIP |
CPL_IO_COMPRESS_RICE|
CPL_IO_COMPRESS_HCOMPRESS |
CPL_IO_COMPRESS_PLIO);
cpl_test_eq_error(error, CPL_ERROR_ILLEGAL_INPUT);
error = cpl_imagelist_save(imglist, FILENAME, bpps[ibpp], NULL,
CPL_IO_APPEND | CPL_IO_COMPRESS_GZIP |
CPL_IO_COMPRESS_RICE|
CPL_IO_COMPRESS_HCOMPRESS |
CPL_IO_COMPRESS_PLIO);
cpl_test_eq_error(error, CPL_ERROR_ILLEGAL_INPUT);
error = cpl_imagelist_save(imglist, FILENAME, bpps[ibpp], NULL,
CPL_IO_EXTEND | comp_method);
if (
#ifndef CPL_IO_COMPRESSION_LOSSY
/* Currently, compression only allowed with integer data */
bitpix < 0 ||
#endif
                /* Compression is only supported for pixel types of at least
                   32 bits, i.e. int, float or double - this holds for RICE
                   as well as for the other compression methods */
abs(bitpix) < 32) {
cpl_test_eq_error(error, CPL_ERROR_UNSUPPORTED_MODE);
} else {
cpl_image * img_load;
cpl_test_eq_error(error, CPL_ERROR_NONE);
cpl_test_fits(FILENAME);
ext++;
cpl_test_eq( cpl_fits_count_extensions(FILENAME), ext);
img_load = cpl_image_load(FILENAME, CPL_TYPE_UNSPECIFIED,
0, ext);
cpl_test_error(CPL_ERROR_NONE);
cpl_test_eq(cpl_image_get_size_x(img_load), IMAGESZ);
cpl_test_eq(cpl_image_get_size_y(img_load), IMAGESZ);
if (imtype == CPL_TYPE_INT) {
cpl_test_image_abs(img, img_load, 0.0);
} else if (cpl_image_get_type(img_load) != CPL_TYPE_INT) {
cpl_test_image_abs(img, img_load, prec * maxval);
}
cpl_image_delete(img_load);
                /* It is (currently) not possible to insert images into an
                   existing, compressed cube */
error = cpl_imagelist_save(imglist, FILENAME, bpps[ibpp],
NULL, CPL_IO_APPEND);
cpl_test_eq_error(error, CPL_ERROR_UNSUPPORTED_MODE);
}
}
}
cpl_imagelist_delete(imglist);
}
}
/*----------------------------------------------------------------------------*/
/**
@brief Benchmark saving with compression
@param nx NX
@param ny NY
@param nz NZ
@param nr Number of repetitions
@return void
@see cpl_image_save()
*/
/*----------------------------------------------------------------------------*/
static void cpl_imagelist_save_compression_bench(cpl_size nx, cpl_size ny,
cpl_size nz, cpl_size nr)
{
cpl_image * img = cpl_image_new(nx, ny, CPL_TYPE_INT);
cpl_imagelist * imglist = cpl_imagelist_new();
cpl_error_code error;
cpl_size i;
double tsum = 0.0;
error = cpl_image_fill_noise_uniform(img, 0.0, 255.0);
cpl_test_eq_error(error, CPL_ERROR_NONE);
for (i = 0; i < nz; i++) {
error = cpl_imagelist_set(imglist, img, i);
cpl_test_eq_error(error, CPL_ERROR_NONE);
}
error = cpl_image_save(NULL, FILENAME, CPL_TYPE_UNSPECIFIED, NULL,
CPL_IO_CREATE);
cpl_test_eq_error(error, CPL_ERROR_NONE);
error = cpl_imagelist_save(imglist, FILENAME, CPL_TYPE_UNSPECIFIED,
NULL, CPL_IO_EXTEND | CPL_IO_COMPRESS_RICE);
cpl_test_eq_error(error, CPL_ERROR_NONE);
cpl_test_fits(FILENAME);
cpl_test_eq( cpl_fits_count_extensions(FILENAME), 1);
#ifdef _OPENMP
#pragma omp parallel for private(i) reduction(+ : tsum)
#endif
for (i = 0; i < nr; i++) {
const double t0 = cpl_test_get_walltime();
cpl_imagelist * imgload = cpl_imagelist_load(FILENAME,
CPL_TYPE_UNSPECIFIED, 1);
const double t1 = cpl_test_get_walltime() - t0;
int j;
cpl_test_error(CPL_ERROR_NONE);
cpl_test_eq(cpl_imagelist_get_size(imgload), nz);
tsum += t1;
for (j = 0; j < nz; j++) {
cpl_test_image_abs(cpl_imagelist_get_const(imgload, j), img, 0.0);
}
cpl_imagelist_delete(imgload);
}
cpl_msg_info(cpl_func, "Time to load Rice-compressed image list %d X %d "
"X %d %d times [s]: %g", (int)nx, (int)ny, (int)nz, (int)nr,
tsum);
cpl_imagelist_unwrap(imglist);
cpl_image_delete(img);
}
convolution_3x3_pack4to1.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd64_transform_kernel_pack4to1_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
    // winograd F(6x6,3x3) kernel transform (8x8 = 64 coefficients per 3x3 kernel)
Mat kernel_tm;
kernel_tm.create(8 * 8, inch, outch);
const float ktm[8][3] = {
{1.0f, 0.0f, 0.0f},
{-2.0f / 9, -2.0f / 9, -2.0f / 9},
{-2.0f / 9, 2.0f / 9, -2.0f / 9},
{1.0f / 90, 1.0f / 45, 2.0f / 45},
{1.0f / 90, -1.0f / 45, 2.0f / 45},
{1.0f / 45, 1.0f / 90, 1.0f / 180},
{1.0f / 45, -1.0f / 90, 1.0f / 180},
{0.0f, 0.0f, 1.0f}
};
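    // ktm is the 8x3 Winograd F(6x6,3x3) kernel-transform matrix G: each 3x3
    // kernel g is expanded into the 8x8 block U = G * g * G^T (kept transposed,
    // as noted below), computed as a row pass ("h") followed by a column pass ("v")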
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel, transposed
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[8][3];
for (int i = 0; i < 8; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// v
for (int j = 0; j < 8; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 8; i++)
{
kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 64-inch-outch
// dst = 4a-inch/4a-64-outch;
#if __aarch64__
kernel_tm_pack4.create(8 * inch / 4, 64, outch / 8 + (outch % 8) / 4 + outch % 4, (size_t)4u * 4, 4);
#else
kernel_tm_pack4.create(4 * inch / 4, 64, outch / 4 + outch % 4, (size_t)4u * 4, 4);
#endif
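    // the loops below regroup kernel_tm so that one destination channel holds
    // 8 output channels at a time on aarch64 (then 4, then 1 for remainders),
    // or 4 (then 1) otherwise; within a group, the values for one of the 64
    // Winograd coefficients are stored contiguously, 4 input channels at a time,
    // matching the load order of the inner GEMM-style kernels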
int p = 0;
#if __aarch64__
for (; p + 7 < outch; p += 8)
{
const Mat k0 = kernel_tm.channel(p);
const Mat k1 = kernel_tm.channel(p + 1);
const Mat k2 = kernel_tm.channel(p + 2);
const Mat k3 = kernel_tm.channel(p + 3);
const Mat k4 = kernel_tm.channel(p + 4);
const Mat k5 = kernel_tm.channel(p + 5);
const Mat k6 = kernel_tm.channel(p + 6);
const Mat k7 = kernel_tm.channel(p + 7);
Mat g0 = kernel_tm_pack4.channel(p / 8);
for (int k = 0; k < 64; k++)
{
float* g00 = g0.row(k);
for (int q = 0; q + 3 < inch; q += 4)
{
const float* k00 = k0.row(q);
const float* k01 = k0.row(q + 1);
const float* k02 = k0.row(q + 2);
const float* k03 = k0.row(q + 3);
const float* k10 = k1.row(q);
const float* k11 = k1.row(q + 1);
const float* k12 = k1.row(q + 2);
const float* k13 = k1.row(q + 3);
const float* k20 = k2.row(q);
const float* k21 = k2.row(q + 1);
const float* k22 = k2.row(q + 2);
const float* k23 = k2.row(q + 3);
const float* k30 = k3.row(q);
const float* k31 = k3.row(q + 1);
const float* k32 = k3.row(q + 2);
const float* k33 = k3.row(q + 3);
const float* k40 = k4.row(q);
const float* k41 = k4.row(q + 1);
const float* k42 = k4.row(q + 2);
const float* k43 = k4.row(q + 3);
const float* k50 = k5.row(q);
const float* k51 = k5.row(q + 1);
const float* k52 = k5.row(q + 2);
const float* k53 = k5.row(q + 3);
const float* k60 = k6.row(q);
const float* k61 = k6.row(q + 1);
const float* k62 = k6.row(q + 2);
const float* k63 = k6.row(q + 3);
const float* k70 = k7.row(q);
const float* k71 = k7.row(q + 1);
const float* k72 = k7.row(q + 2);
const float* k73 = k7.row(q + 3);
g00[0] = k00[k];
g00[1] = k10[k];
g00[2] = k20[k];
g00[3] = k30[k];
g00[4] = k40[k];
g00[5] = k50[k];
g00[6] = k60[k];
g00[7] = k70[k];
g00[8] = k01[k];
g00[9] = k11[k];
g00[10] = k21[k];
g00[11] = k31[k];
g00[12] = k41[k];
g00[13] = k51[k];
g00[14] = k61[k];
g00[15] = k71[k];
g00[16] = k02[k];
g00[17] = k12[k];
g00[18] = k22[k];
g00[19] = k32[k];
g00[20] = k42[k];
g00[21] = k52[k];
g00[22] = k62[k];
g00[23] = k72[k];
g00[24] = k03[k];
g00[25] = k13[k];
g00[26] = k23[k];
g00[27] = k33[k];
g00[28] = k43[k];
g00[29] = k53[k];
g00[30] = k63[k];
g00[31] = k73[k];
g00 += 32;
}
}
}
#endif // __aarch64__
for (; p + 3 < outch; p += 4)
{
const Mat k0 = kernel_tm.channel(p);
const Mat k1 = kernel_tm.channel(p + 1);
const Mat k2 = kernel_tm.channel(p + 2);
const Mat k3 = kernel_tm.channel(p + 3);
#if __aarch64__
Mat g0 = kernel_tm_pack4.channel(p / 8 + (p % 8) / 4);
#else
Mat g0 = kernel_tm_pack4.channel(p / 4);
#endif
for (int k = 0; k < 64; k++)
{
float* g00 = g0.row(k);
for (int q = 0; q + 3 < inch; q += 4)
{
const float* k00 = k0.row(q);
const float* k01 = k0.row(q + 1);
const float* k02 = k0.row(q + 2);
const float* k03 = k0.row(q + 3);
const float* k10 = k1.row(q);
const float* k11 = k1.row(q + 1);
const float* k12 = k1.row(q + 2);
const float* k13 = k1.row(q + 3);
const float* k20 = k2.row(q);
const float* k21 = k2.row(q + 1);
const float* k22 = k2.row(q + 2);
const float* k23 = k2.row(q + 3);
const float* k30 = k3.row(q);
const float* k31 = k3.row(q + 1);
const float* k32 = k3.row(q + 2);
const float* k33 = k3.row(q + 3);
g00[0] = k00[k];
g00[1] = k10[k];
g00[2] = k20[k];
g00[3] = k30[k];
g00[4] = k01[k];
g00[5] = k11[k];
g00[6] = k21[k];
g00[7] = k31[k];
g00[8] = k02[k];
g00[9] = k12[k];
g00[10] = k22[k];
g00[11] = k32[k];
g00[12] = k03[k];
g00[13] = k13[k];
g00[14] = k23[k];
g00[15] = k33[k];
g00 += 16;
}
}
}
for (; p < outch; p++)
{
const Mat k0 = kernel_tm.channel(p);
#if __aarch64__
Mat g0 = kernel_tm_pack4.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
Mat g0 = kernel_tm_pack4.channel(p / 4 + p % 4);
#endif
for (int k = 0; k < 64; k++)
{
float* g00 = g0.row(k);
for (int q = 0; q + 3 < inch; q += 4)
{
const float* k00 = k0.row(q);
const float* k01 = k0.row(q + 1);
const float* k02 = k0.row(q + 2);
const float* k03 = k0.row(q + 3);
g00[0] = k00[k];
g00[1] = k01[k];
g00[2] = k02[k];
g00[3] = k03[k];
g00 += 4;
}
}
}
}
static void conv3x3s1_winograd64_pack4to1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
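    // with F(6x6,3x3) each 6x6 output tile is computed from an 8x8 input tile
    // (adjacent input tiles overlap by 2 pixels), so the input is padded to a
    // multiple of 6 plus a 2-pixel border in each dimension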
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 6;
int h_tiles = outh / 6;
int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
conv3x3s1_winograd64_transform_input_pack4_neon(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
// permute
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
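        // regroup the transformed input into blocks of 12/8/4/1 tiles (aarch64)
        // or 8/4/1 tiles (armv7) so each block can be processed with its tile
        // data resident in NEON registers; aarch64 has 32 vector registers
        // versus 16 on armv7, which is why the 12-tile block exists only there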
Mat bottom_blob_tm2;
#if __aarch64__
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + tiles % 12 % 4, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, elemsize, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator);
#else
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, elemsize, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
#if __aarch64__
for (; i + 11 < tiles; i += 12)
{
float* tm2p = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0] \n"
"sub %0, %0, #128 \n"
"st1 {v0.4s}, [%1], #16 \n"
"st1 {v4.4s}, [%1], #16 \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v1.4s}, [%1], #16 \n"
"st1 {v5.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"st1 {v2.4s}, [%1], #16 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v18.4s}, [%1], #16 \n"
"st1 {v3.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
"st1 {v19.4s}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19");
r0 += bottom_blob_tm.cstep * 4;
}
}
#endif
for (; i + 7 < tiles; i += 8)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8);
#else
float* tm2p = tm2.row(i / 8);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n"
"sub %0, %0, #64 \n"
"st1 {v0.4s}, [%1], #16 \n"
"st1 {v4.4s}, [%1], #16 \n"
"st1 {v1.4s}, [%1], #16 \n"
"st1 {v5.4s}, [%1], #16 \n"
"st1 {v2.4s}, [%1], #16 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v3.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
#else
asm volatile(
"pld [%0, #256] \n"
"vld4.f32 {d0-d3}, [%0 :128]! \n"
"pld [%0, #256] \n"
"vld4.f32 {d4-d7}, [%0 :128]! \n"
"pld [%0, #256] \n"
"vld4.f32 {d16-d19}, [%0 :128]! \n"
"pld [%0, #256] \n"
"vld4.f32 {d20-d23}, [%0 :128] \n"
"sub %0, %0, #96 \n"
"vswp d1, d4 \n"
"vswp d3, d6 \n"
"vswp d17, d20 \n"
"vswp d19, d22 \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
"vst1.f32 {d16-d17}, [%1 :128]! \n"
"vst1.f32 {d4-d5}, [%1 :128]! \n"
"vst1.f32 {d20-d21}, [%1 :128]! \n"
"vst1.f32 {d2-d3}, [%1 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"vst1.f32 {d6-d7}, [%1 :128]! \n"
"vst1.f32 {d22-d23}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11");
#endif
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
float* tm2p = tm2.row(i / 8 + (i % 8) / 4);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3");
#else
asm volatile(
"pld [%0, #256] \n"
"vld4.f32 {d0-d3}, [%0 :128]! \n"
"pld [%0, #256] \n"
"vld4.f32 {d4-d7}, [%0 :128] \n"
"sub %0, %0, #32 \n"
"vswp d1, d4 \n"
"vswp d3, d6 \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
"vst1.f32 {d4-d5}, [%1 :128]! \n"
"vst1.f32 {d2-d3}, [%1 :128]! \n"
"vst1.f32 {d6-d7}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1", "q2", "q3");
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i < tiles; i++)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
#else
float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + i % 4);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.4s}, [%0] \n"
"st1 {v0.4s}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d0-d1}, [%0 :128] \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0");
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 4;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, 4u, 1, opt.workspace_allocator);
int nn_outch = 0;
int remain_outch_start = 0;
#if __aarch64__
nn_outch = outch >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
float* output2_tm = top_blob_tm.channel(p + 2);
float* output3_tm = top_blob_tm.channel(p + 3);
float* output4_tm = top_blob_tm.channel(p + 4);
float* output5_tm = top_blob_tm.channel(p + 5);
float* output6_tm = top_blob_tm.channel(p + 6);
float* output7_tm = top_blob_tm.channel(p + 7);
const Mat kernel01_tm = kernel_tm.channel(p / 8);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
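                // inner kernel: accumulates a 12-tile x 8-output-channel block for
                // one of the 64 Winograd coefficients; each loop iteration consumes
                // one pack of 4 input channels (48 floats of transformed input from
                // r0, 32 floats of packed kernel from kptr) and accumulates with
                // lane-broadcast fmla into the 24 accumulator registers v8-v31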
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v11.4s, v0.4s, v4.s[1] \n"
"fmla v14.4s, v0.4s, v4.s[2] \n"
"fmla v17.4s, v0.4s, v4.s[3] \n"
"fmla v20.4s, v0.4s, v5.s[0] \n"
"fmla v23.4s, v0.4s, v5.s[1] \n"
"fmla v26.4s, v0.4s, v5.s[2] \n"
"fmla v29.4s, v0.4s, v5.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v12.4s, v1.4s, v4.s[1] \n"
"fmla v15.4s, v1.4s, v4.s[2] \n"
"fmla v18.4s, v1.4s, v4.s[3] \n"
"fmla v21.4s, v1.4s, v5.s[0] \n"
"fmla v24.4s, v1.4s, v5.s[1] \n"
"fmla v27.4s, v1.4s, v5.s[2] \n"
"fmla v30.4s, v1.4s, v5.s[3] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"fmla v13.4s, v2.4s, v4.s[1] \n"
"fmla v16.4s, v2.4s, v4.s[2] \n"
"fmla v19.4s, v2.4s, v4.s[3] \n"
"fmla v22.4s, v2.4s, v5.s[0] \n"
"fmla v25.4s, v2.4s, v5.s[1] \n"
"fmla v28.4s, v2.4s, v5.s[2] \n"
"fmla v31.4s, v2.4s, v5.s[3] \n"
"fmla v8.4s, v3.4s, v6.s[0] \n"
"fmla v11.4s, v3.4s, v6.s[1] \n"
"fmla v14.4s, v3.4s, v6.s[2] \n"
"fmla v17.4s, v3.4s, v6.s[3] \n"
"fmla v20.4s, v3.4s, v7.s[0] \n"
"fmla v23.4s, v3.4s, v7.s[1] \n"
"fmla v26.4s, v3.4s, v7.s[2] \n"
"fmla v29.4s, v3.4s, v7.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v9.4s, v0.4s, v6.s[0] \n"
"fmla v12.4s, v0.4s, v6.s[1] \n"
"fmla v15.4s, v0.4s, v6.s[2] \n"
"fmla v18.4s, v0.4s, v6.s[3] \n"
"fmla v21.4s, v0.4s, v7.s[0] \n"
"fmla v24.4s, v0.4s, v7.s[1] \n"
"fmla v27.4s, v0.4s, v7.s[2] \n"
"fmla v30.4s, v0.4s, v7.s[3] \n"
"fmla v10.4s, v1.4s, v6.s[0] \n"
"fmla v13.4s, v1.4s, v6.s[1] \n"
"fmla v16.4s, v1.4s, v6.s[2] \n"
"fmla v19.4s, v1.4s, v6.s[3] \n"
"fmla v22.4s, v1.4s, v7.s[0] \n"
"fmla v25.4s, v1.4s, v7.s[1] \n"
"fmla v28.4s, v1.4s, v7.s[2] \n"
"fmla v31.4s, v1.4s, v7.s[3] \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"fmla v8.4s, v2.4s, v4.s[0] \n"
"fmla v11.4s, v2.4s, v4.s[1] \n"
"fmla v14.4s, v2.4s, v4.s[2] \n"
"fmla v17.4s, v2.4s, v4.s[3] \n"
"fmla v20.4s, v2.4s, v5.s[0] \n"
"fmla v23.4s, v2.4s, v5.s[1] \n"
"fmla v26.4s, v2.4s, v5.s[2] \n"
"fmla v29.4s, v2.4s, v5.s[3] \n"
"fmla v9.4s, v3.4s, v4.s[0] \n"
"fmla v12.4s, v3.4s, v4.s[1] \n"
"fmla v15.4s, v3.4s, v4.s[2] \n"
"fmla v18.4s, v3.4s, v4.s[3] \n"
"fmla v21.4s, v3.4s, v5.s[0] \n"
"fmla v24.4s, v3.4s, v5.s[1] \n"
"fmla v27.4s, v3.4s, v5.s[2] \n"
"fmla v30.4s, v3.4s, v5.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v10.4s, v0.4s, v4.s[0] \n"
"fmla v13.4s, v0.4s, v4.s[1] \n"
"fmla v16.4s, v0.4s, v4.s[2] \n"
"fmla v19.4s, v0.4s, v4.s[3] \n"
"fmla v22.4s, v0.4s, v5.s[0] \n"
"fmla v25.4s, v0.4s, v5.s[1] \n"
"fmla v28.4s, v0.4s, v5.s[2] \n"
"fmla v31.4s, v0.4s, v5.s[3] \n"
"fmla v8.4s, v1.4s, v6.s[0] \n"
"fmla v11.4s, v1.4s, v6.s[1] \n"
"fmla v14.4s, v1.4s, v6.s[2] \n"
"fmla v17.4s, v1.4s, v6.s[3] \n"
"fmla v20.4s, v1.4s, v7.s[0] \n"
"fmla v23.4s, v1.4s, v7.s[1] \n"
"fmla v26.4s, v1.4s, v7.s[2] \n"
"fmla v29.4s, v1.4s, v7.s[3] \n"
"fmla v9.4s, v2.4s, v6.s[0] \n"
"fmla v12.4s, v2.4s, v6.s[1] \n"
"fmla v15.4s, v2.4s, v6.s[2] \n"
"fmla v18.4s, v2.4s, v6.s[3] \n"
"fmla v21.4s, v2.4s, v7.s[0] \n"
"fmla v24.4s, v2.4s, v7.s[1] \n"
"fmla v27.4s, v2.4s, v7.s[2] \n"
"fmla v30.4s, v2.4s, v7.s[3] \n"
"fmla v10.4s, v3.4s, v6.s[0] \n"
"fmla v13.4s, v3.4s, v6.s[1] \n"
"fmla v16.4s, v3.4s, v6.s[2] \n"
"fmla v19.4s, v3.4s, v6.s[3] \n"
"fmla v22.4s, v3.4s, v7.s[0] \n"
"fmla v25.4s, v3.4s, v7.s[1] \n"
"fmla v28.4s, v3.4s, v7.s[2] \n"
"fmla v31.4s, v3.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n"
"st1 {v11.4s, v12.4s, v13.4s}, [%2], #48 \n"
"st1 {v14.4s, v15.4s, v16.4s}, [%3], #48 \n"
"st1 {v17.4s, v18.4s, v19.4s}, [%4], #48 \n"
"st1 {v20.4s, v21.4s, v22.4s}, [%5], #48 \n"
"st1 {v23.4s, v24.4s, v25.4s}, [%6], #48 \n"
"st1 {v26.4s, v27.4s, v28.4s}, [%7], #48 \n"
"st1 {v29.4s, v30.4s, v31.4s}, [%8], #48 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(output4_tm), // %5
"=r"(output5_tm), // %6
"=r"(output6_tm), // %7
"=r"(output7_tm), // %8
"=r"(r0), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(output4_tm),
"6"(output5_tm),
"7"(output6_tm),
"8"(output7_tm),
"9"(r0),
"10"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v0.4s, v4.s[0] \n"
"fmla v18.4s, v0.4s, v4.s[1] \n"
"fmla v20.4s, v0.4s, v4.s[2] \n"
"fmla v22.4s, v0.4s, v4.s[3] \n"
"fmla v24.4s, v0.4s, v5.s[0] \n"
"fmla v26.4s, v0.4s, v5.s[1] \n"
"fmla v28.4s, v0.4s, v5.s[2] \n"
"fmla v30.4s, v0.4s, v5.s[3] \n"
"fmla v17.4s, v1.4s, v4.s[0] \n"
"fmla v19.4s, v1.4s, v4.s[1] \n"
"fmla v21.4s, v1.4s, v4.s[2] \n"
"fmla v23.4s, v1.4s, v4.s[3] \n"
"fmla v25.4s, v1.4s, v5.s[0] \n"
"fmla v27.4s, v1.4s, v5.s[1] \n"
"fmla v29.4s, v1.4s, v5.s[2] \n"
"fmla v31.4s, v1.4s, v5.s[3] \n"
"fmla v16.4s, v2.4s, v6.s[0] \n"
"fmla v18.4s, v2.4s, v6.s[1] \n"
"fmla v20.4s, v2.4s, v6.s[2] \n"
"fmla v22.4s, v2.4s, v6.s[3] \n"
"fmla v24.4s, v2.4s, v7.s[0] \n"
"fmla v26.4s, v2.4s, v7.s[1] \n"
"fmla v28.4s, v2.4s, v7.s[2] \n"
"fmla v30.4s, v2.4s, v7.s[3] \n"
"fmla v17.4s, v3.4s, v6.s[0] \n"
"fmla v19.4s, v3.4s, v6.s[1] \n"
"fmla v21.4s, v3.4s, v6.s[2] \n"
"fmla v23.4s, v3.4s, v6.s[3] \n"
"fmla v25.4s, v3.4s, v7.s[0] \n"
"fmla v27.4s, v3.4s, v7.s[1] \n"
"fmla v29.4s, v3.4s, v7.s[2] \n"
"fmla v31.4s, v3.4s, v7.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%9], #64 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n"
"fmla v16.4s, v12.4s, v8.s[0] \n"
"fmla v18.4s, v12.4s, v8.s[1] \n"
"fmla v20.4s, v12.4s, v8.s[2] \n"
"fmla v22.4s, v12.4s, v8.s[3] \n"
"fmla v24.4s, v12.4s, v9.s[0] \n"
"fmla v26.4s, v12.4s, v9.s[1] \n"
"fmla v28.4s, v12.4s, v9.s[2] \n"
"fmla v30.4s, v12.4s, v9.s[3] \n"
"fmla v17.4s, v13.4s, v8.s[0] \n"
"fmla v19.4s, v13.4s, v8.s[1] \n"
"fmla v21.4s, v13.4s, v8.s[2] \n"
"fmla v23.4s, v13.4s, v8.s[3] \n"
"fmla v25.4s, v13.4s, v9.s[0] \n"
"fmla v27.4s, v13.4s, v9.s[1] \n"
"fmla v29.4s, v13.4s, v9.s[2] \n"
"fmla v31.4s, v13.4s, v9.s[3] \n"
"fmla v16.4s, v14.4s, v10.s[0] \n"
"fmla v18.4s, v14.4s, v10.s[1] \n"
"fmla v20.4s, v14.4s, v10.s[2] \n"
"fmla v22.4s, v14.4s, v10.s[3] \n"
"fmla v24.4s, v14.4s, v11.s[0] \n"
"fmla v26.4s, v14.4s, v11.s[1] \n"
"fmla v28.4s, v14.4s, v11.s[2] \n"
"fmla v30.4s, v14.4s, v11.s[3] \n"
"fmla v17.4s, v15.4s, v10.s[0] \n"
"fmla v19.4s, v15.4s, v10.s[1] \n"
"fmla v21.4s, v15.4s, v10.s[2] \n"
"fmla v23.4s, v15.4s, v10.s[3] \n"
"fmla v25.4s, v15.4s, v11.s[0] \n"
"fmla v27.4s, v15.4s, v11.s[1] \n"
"fmla v29.4s, v15.4s, v11.s[2] \n"
"fmla v31.4s, v15.4s, v11.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
"st1 {v18.4s, v19.4s}, [%2], #32 \n"
"st1 {v20.4s, v21.4s}, [%3], #32 \n"
"st1 {v22.4s, v23.4s}, [%4], #32 \n"
"st1 {v24.4s, v25.4s}, [%5], #32 \n"
"st1 {v26.4s, v27.4s}, [%6], #32 \n"
"st1 {v28.4s, v29.4s}, [%7], #32 \n"
"st1 {v30.4s, v31.4s}, [%8], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(output4_tm), // %5
"=r"(output5_tm), // %6
"=r"(output6_tm), // %7
"=r"(output7_tm), // %8
"=r"(r0), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(output4_tm),
"6"(output5_tm),
"7"(output6_tm),
"8"(output7_tm),
"9"(r0),
"10"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v0.4s, v4.s[0] \n"
"fmla v17.4s, v0.4s, v4.s[1] \n"
"fmla v18.4s, v0.4s, v4.s[2] \n"
"fmla v19.4s, v0.4s, v4.s[3] \n"
"fmla v20.4s, v0.4s, v5.s[0] \n"
"fmla v21.4s, v0.4s, v5.s[1] \n"
"fmla v22.4s, v0.4s, v5.s[2] \n"
"fmla v23.4s, v0.4s, v5.s[3] \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n"
"fmla v16.4s, v1.4s, v6.s[0] \n"
"fmla v17.4s, v1.4s, v6.s[1] \n"
"fmla v18.4s, v1.4s, v6.s[2] \n"
"fmla v19.4s, v1.4s, v6.s[3] \n"
"fmla v20.4s, v1.4s, v7.s[0] \n"
"fmla v21.4s, v1.4s, v7.s[1] \n"
"fmla v22.4s, v1.4s, v7.s[2] \n"
"fmla v23.4s, v1.4s, v7.s[3] \n"
"fmla v16.4s, v2.4s, v8.s[0] \n"
"fmla v17.4s, v2.4s, v8.s[1] \n"
"fmla v18.4s, v2.4s, v8.s[2] \n"
"fmla v19.4s, v2.4s, v8.s[3] \n"
"fmla v20.4s, v2.4s, v9.s[0] \n"
"fmla v21.4s, v2.4s, v9.s[1] \n"
"fmla v22.4s, v2.4s, v9.s[2] \n"
"fmla v23.4s, v2.4s, v9.s[3] \n"
"fmla v16.4s, v3.4s, v10.s[0] \n"
"fmla v17.4s, v3.4s, v10.s[1] \n"
"fmla v18.4s, v3.4s, v10.s[2] \n"
"fmla v19.4s, v3.4s, v10.s[3] \n"
"fmla v20.4s, v3.4s, v11.s[0] \n"
"fmla v21.4s, v3.4s, v11.s[1] \n"
"fmla v22.4s, v3.4s, v11.s[2] \n"
"fmla v23.4s, v3.4s, v11.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%2], #16 \n"
"st1 {v18.4s}, [%3], #16 \n"
"st1 {v19.4s}, [%4], #16 \n"
"st1 {v20.4s}, [%5], #16 \n"
"st1 {v21.4s}, [%6], #16 \n"
"st1 {v22.4s}, [%7], #16 \n"
"st1 {v23.4s}, [%8], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(output4_tm), // %5
"=r"(output5_tm), // %6
"=r"(output6_tm), // %7
"=r"(output7_tm), // %8
"=r"(r0), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(output4_tm),
"6"(output5_tm),
"7"(output6_tm),
"8"(output7_tm),
"9"(r0),
"10"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #128] \n"
"ld1 {v0.4s}, [%9], #16 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v4.4s, v0.s[0] \n"
"fmla v17.4s, v5.4s, v0.s[0] \n"
"fmla v18.4s, v6.4s, v0.s[1] \n"
"fmla v19.4s, v7.4s, v0.s[1] \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n"
"fmla v16.4s, v8.4s, v0.s[2] \n"
"fmla v17.4s, v9.4s, v0.s[2] \n"
"fmla v18.4s, v10.4s, v0.s[3] \n"
"fmla v19.4s, v11.4s, v0.s[3] \n"
"bne 0b \n"
"fadd v16.4s, v16.4s, v18.4s \n"
"fadd v17.4s, v17.4s, v19.4s \n"
"st1 {v16.s}[0], [%1], #4 \n"
"st1 {v16.s}[1], [%2], #4 \n"
"st1 {v16.s}[2], [%3], #4 \n"
"st1 {v16.s}[3], [%4], #4 \n"
"st1 {v17.s}[0], [%5], #4 \n"
"st1 {v17.s}[1], [%6], #4 \n"
"st1 {v17.s}[2], [%7], #4 \n"
"st1 {v17.s}[3], [%8], #4 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(output4_tm), // %5
"=r"(output5_tm), // %6
"=r"(output6_tm), // %7
"=r"(output7_tm), // %8
"=r"(r0), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(output4_tm),
"6"(output5_tm),
"7"(output6_tm),
"8"(output7_tm),
"9"(r0),
"10"(kptr)
: "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19");
}
}
}
remain_outch_start += nn_outch << 3;
nn_outch = (outch - remain_outch_start) >> 2;
#else // __aarch64__
nn_outch = outch >> 2;
#endif // __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
float* output2_tm = top_blob_tm.channel(p + 2);
float* output3_tm = top_blob_tm.channel(p + 3);
#if __aarch64__
const Mat kernel01_tm = kernel_tm.channel(p / 8 + (p % 8) / 4);
#else
const Mat kernel01_tm = kernel_tm.channel(p / 4);
#endif
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __aarch64__
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v11.4s, v0.4s, v4.s[1] \n"
"fmla v14.4s, v0.4s, v4.s[2] \n"
"fmla v17.4s, v0.4s, v4.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v12.4s, v1.4s, v4.s[1] \n"
"fmla v15.4s, v1.4s, v4.s[2] \n"
"fmla v18.4s, v1.4s, v4.s[3] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"fmla v13.4s, v2.4s, v4.s[1] \n"
"fmla v16.4s, v2.4s, v4.s[2] \n"
"fmla v19.4s, v2.4s, v4.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%5], #64 \n"
"fmla v8.4s, v3.4s, v5.s[0] \n"
"fmla v11.4s, v3.4s, v5.s[1] \n"
"fmla v14.4s, v3.4s, v5.s[2] \n"
"fmla v17.4s, v3.4s, v5.s[3] \n"
"fmla v9.4s, v20.4s, v5.s[0] \n"
"fmla v12.4s, v20.4s, v5.s[1] \n"
"fmla v15.4s, v20.4s, v5.s[2] \n"
"fmla v18.4s, v20.4s, v5.s[3] \n"
"fmla v10.4s, v21.4s, v5.s[0] \n"
"fmla v13.4s, v21.4s, v5.s[1] \n"
"fmla v16.4s, v21.4s, v5.s[2] \n"
"fmla v19.4s, v21.4s, v5.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%5], #64 \n"
"fmla v8.4s, v22.4s, v6.s[0] \n"
"fmla v11.4s, v22.4s, v6.s[1] \n"
"fmla v14.4s, v22.4s, v6.s[2] \n"
"fmla v17.4s, v22.4s, v6.s[3] \n"
"fmla v9.4s, v23.4s, v6.s[0] \n"
"fmla v12.4s, v23.4s, v6.s[1] \n"
"fmla v15.4s, v23.4s, v6.s[2] \n"
"fmla v18.4s, v23.4s, v6.s[3] \n"
"fmla v10.4s, v24.4s, v6.s[0] \n"
"fmla v13.4s, v24.4s, v6.s[1] \n"
"fmla v16.4s, v24.4s, v6.s[2] \n"
"fmla v19.4s, v24.4s, v6.s[3] \n"
"fmla v8.4s, v25.4s, v7.s[0] \n"
"fmla v11.4s, v25.4s, v7.s[1] \n"
"fmla v14.4s, v25.4s, v7.s[2] \n"
"fmla v17.4s, v25.4s, v7.s[3] \n"
"fmla v9.4s, v26.4s, v7.s[0] \n"
"fmla v12.4s, v26.4s, v7.s[1] \n"
"fmla v15.4s, v26.4s, v7.s[2] \n"
"fmla v18.4s, v26.4s, v7.s[3] \n"
"fmla v10.4s, v27.4s, v7.s[0] \n"
"fmla v13.4s, v27.4s, v7.s[1] \n"
"fmla v16.4s, v27.4s, v7.s[2] \n"
"fmla v19.4s, v27.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n"
"st1 {v11.4s, v12.4s, v13.4s}, [%2], #48 \n"
"st1 {v14.4s, v15.4s, v16.4s}, [%3], #48 \n"
"st1 {v17.4s, v18.4s, v19.4s}, [%4], #48 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
}
#endif // __aarch64__
for (; i + 7 < tiles; i += 8)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
#else
const float* r0 = bb2.row(i / 8);
#endif
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"0: \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v10.4s, v0.4s, v4.s[1] \n"
"fmla v12.4s, v0.4s, v4.s[2] \n"
"fmla v14.4s, v0.4s, v4.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v11.4s, v1.4s, v4.s[1] \n"
"fmla v13.4s, v1.4s, v4.s[2] \n"
"fmla v15.4s, v1.4s, v4.s[3] \n"
"fmla v8.4s, v2.4s, v5.s[0] \n"
"fmla v10.4s, v2.4s, v5.s[1] \n"
"fmla v12.4s, v2.4s, v5.s[2] \n"
"fmla v14.4s, v2.4s, v5.s[3] \n"
"fmla v9.4s, v3.4s, v5.s[0] \n"
"fmla v11.4s, v3.4s, v5.s[1] \n"
"fmla v13.4s, v3.4s, v5.s[2] \n"
"fmla v15.4s, v3.4s, v5.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%5], #64 \n"
"fmla v8.4s, v16.4s, v6.s[0] \n"
"fmla v10.4s, v16.4s, v6.s[1] \n"
"fmla v12.4s, v16.4s, v6.s[2] \n"
"fmla v14.4s, v16.4s, v6.s[3] \n"
"fmla v9.4s, v17.4s, v6.s[0] \n"
"fmla v11.4s, v17.4s, v6.s[1] \n"
"fmla v13.4s, v17.4s, v6.s[2] \n"
"fmla v15.4s, v17.4s, v6.s[3] \n"
"fmla v8.4s, v18.4s, v7.s[0] \n"
"fmla v10.4s, v18.4s, v7.s[1] \n"
"fmla v12.4s, v18.4s, v7.s[2] \n"
"fmla v14.4s, v18.4s, v7.s[3] \n"
"fmla v9.4s, v19.4s, v7.s[0] \n"
"fmla v11.4s, v19.4s, v7.s[1] \n"
"fmla v13.4s, v19.4s, v7.s[2] \n"
"fmla v15.4s, v19.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s}, [%1], #32 \n"
"st1 {v10.4s, v11.4s}, [%2], #32 \n"
"st1 {v12.4s, v13.4s}, [%3], #32 \n"
"st1 {v14.4s, v15.4s}, [%4], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
#else // __aarch64__
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"veor q12, q12 \n"
"veor q13, q13 \n"
"veor q14, q14 \n"
"veor q15, q15 \n"
"0: \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
"pld [%6, #512] \n"
"vldm %6!, {d8-d15} \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q10, q0, d8[1] \n"
"vmla.f32 q12, q0, d9[0] \n"
"vmla.f32 q14, q0, d9[1] \n"
"vmla.f32 q9, q1, d8[0] \n"
"vmla.f32 q11, q1, d8[1] \n"
"vmla.f32 q13, q1, d9[0] \n"
"vmla.f32 q15, q1, d9[1] \n"
"vmla.f32 q8, q2, d10[0] \n"
"vmla.f32 q10, q2, d10[1] \n"
"vmla.f32 q12, q2, d11[0] \n"
"vmla.f32 q14, q2, d11[1] \n"
"vmla.f32 q9, q3, d10[0] \n"
"vmla.f32 q11, q3, d10[1] \n"
"vmla.f32 q13, q3, d11[0] \n"
"vmla.f32 q15, q3, d11[1] \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
"vmla.f32 q8, q0, d12[0] \n"
"vmla.f32 q10, q0, d12[1] \n"
"vmla.f32 q12, q0, d13[0] \n"
"vmla.f32 q14, q0, d13[1] \n"
"vmla.f32 q9, q1, d12[0] \n"
"vmla.f32 q11, q1, d12[1] \n"
"vmla.f32 q13, q1, d13[0] \n"
"vmla.f32 q15, q1, d13[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q2, d14[0] \n"
"vmla.f32 q10, q2, d14[1] \n"
"vmla.f32 q12, q2, d15[0] \n"
"vmla.f32 q14, q2, d15[1] \n"
"vmla.f32 q9, q3, d14[0] \n"
"vmla.f32 q11, q3, d14[1] \n"
"vmla.f32 q13, q3, d15[0] \n"
"vmla.f32 q15, q3, d15[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d19}, [%1]! \n"
"vst1.f32 {d20-d23}, [%2]! \n"
"vst1.f32 {d24-d27}, [%3]! \n"
"vst1.f32 {d28-d31}, [%4]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4);
#endif
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v0.4s, v4.s[1] \n"
"fmla v10.4s, v0.4s, v4.s[2] \n"
"fmla v11.4s, v0.4s, v4.s[3] \n"
"fmla v8.4s, v1.4s, v5.s[0] \n"
"fmla v9.4s, v1.4s, v5.s[1] \n"
"fmla v10.4s, v1.4s, v5.s[2] \n"
"fmla v11.4s, v1.4s, v5.s[3] \n"
"fmla v8.4s, v2.4s, v6.s[0] \n"
"fmla v9.4s, v2.4s, v6.s[1] \n"
"fmla v10.4s, v2.4s, v6.s[2] \n"
"fmla v11.4s, v2.4s, v6.s[3] \n"
"fmla v8.4s, v3.4s, v7.s[0] \n"
"fmla v9.4s, v3.4s, v7.s[1] \n"
"fmla v10.4s, v3.4s, v7.s[2] \n"
"fmla v11.4s, v3.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v8.4s}, [%1], #16 \n"
"st1 {v9.4s}, [%2], #16 \n"
"st1 {v10.4s}, [%3], #16 \n"
"st1 {v11.4s}, [%4], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
#else // __aarch64__
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
"pld [%6, #512] \n"
"vldm %6!, {d8-d15} \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q0, d8[1] \n"
"vmla.f32 q10, q0, d9[0] \n"
"vmla.f32 q11, q0, d9[1] \n"
"vmla.f32 q8, q1, d10[0] \n"
"vmla.f32 q9, q1, d10[1] \n"
"vmla.f32 q10, q1, d11[0] \n"
"vmla.f32 q11, q1, d11[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q2, d12[0] \n"
"vmla.f32 q9, q2, d12[1] \n"
"vmla.f32 q10, q2, d13[0] \n"
"vmla.f32 q11, q2, d13[1] \n"
"vmla.f32 q8, q3, d14[0] \n"
"vmla.f32 q9, q3, d14[1] \n"
"vmla.f32 q10, q3, d15[0] \n"
"vmla.f32 q11, q3, d15[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d17}, [%1]! \n"
"vst1.f32 {d18-d19}, [%2]! \n"
"vst1.f32 {d20-d21}, [%3]! \n"
"vst1.f32 {d22-d23}, [%4]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif // __aarch64__
}
for (; i < tiles; i++)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4);
#endif
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[1] \n"
"fmla v10.4s, v6.4s, v0.s[2] \n"
"fmla v11.4s, v7.4s, v0.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"st1 {v8.s}[0], [%1], #4 \n"
"st1 {v8.s}[1], [%2], #4 \n"
"st1 {v8.s}[2], [%3], #4 \n"
"st1 {v8.s}[3], [%4], #4 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
#else // __aarch64__
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5]! \n"
"pld [%6, #512] \n"
"vldm %6!, {d8-d15} \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[1] \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q7, d1[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vst1.f32 {d16[0]}, [%1]! \n"
"vst1.f32 {d16[1]}, [%2]! \n"
"vst1.f32 {d17[0]}, [%3]! \n"
"vst1.f32 {d17[1]}, [%4]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif // __aarch64__
}
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
#if __aarch64__
const Mat kernel0_tm = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const Mat kernel0_tm = kernel_tm.channel(p / 4 + p % 4);
#endif
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __aarch64__
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* kptr = kernel0_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v4.4s}, [%3], #16 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n"
"fmla v5.4s, v3.4s, v4.s[1] \n"
"fmla v6.4s, v12.4s, v4.s[1] \n"
"fmla v7.4s, v13.4s, v4.s[1] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%2], #64 \n"
"fmla v8.4s, v14.4s, v4.s[2] \n"
"fmla v9.4s, v15.4s, v4.s[2] \n"
"fmla v10.4s, v16.4s, v4.s[2] \n"
"fmla v5.4s, v17.4s, v4.s[3] \n"
"fmla v6.4s, v18.4s, v4.s[3] \n"
"fmla v7.4s, v19.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v5.4s \n"
"fadd v9.4s, v9.4s, v6.4s \n"
"fadd v10.4s, v10.4s, v7.4s \n"
"st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
#endif
for (; i + 7 < tiles; i += 8)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
#else
const float* r0 = bb2.row(i / 8);
#endif
const float* kptr = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v4.4s}, [%3], #16 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v10.4s, v2.4s, v4.s[1] \n"
"fmla v11.4s, v3.4s, v4.s[1] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n"
"fmla v8.4s, v12.4s, v4.s[2] \n"
"fmla v9.4s, v13.4s, v4.s[2] \n"
"fmla v10.4s, v14.4s, v4.s[3] \n"
"fmla v11.4s, v15.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"fadd v9.4s, v9.4s, v11.4s \n"
"st1 {v8.4s, v9.4s}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
#else // __aarch64__
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #128] \n"
"vld1.f32 {d8-d9}, [%3]! \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[0] \n"
"vmla.f32 q10, q2, d8[1] \n"
"vmla.f32 q11, q3, d8[1] \n"
"pld [%2, #512] \n"
"vldm %2!, {d24-d31} \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q12, d9[0] \n"
"vmla.f32 q9, q13, d9[0] \n"
"vmla.f32 q10, q14, d9[1] \n"
"vmla.f32 q11, q15, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q10 \n"
"vadd.f32 q9, q9, q11 \n"
"vst1.f32 {d16-d19}, [%1]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4);
#endif
const float* kptr = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v4.4s}, [%3], #16 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[1] \n"
"fmla v10.4s, v2.4s, v4.s[2] \n"
"fmla v11.4s, v3.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"st1 {v8.4s}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11");
#else // __aarch64__
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #128] \n"
"vld1.f32 {d8-d9}, [%3]! \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[1] \n"
"vmla.f32 q10, q2, d9[0] \n"
"vmla.f32 q11, q3, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vst1.f32 {d16-d17}, [%1]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11");
#endif // __aarch64__
}
for (; i < tiles; i++)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4);
#endif
const float* kptr = kernel0_tm.row(r);
float32x4_t _sum0 = vdupq_n_f32(0.f);
for (int q = 0; q < inch; q++)
{
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _k0 = vld1q_f32(kptr);
_sum0 = vmlaq_f32(_sum0, _r0, _k0);
kptr += 4;
r0 += 4;
}
#if __aarch64__
float sum0 = vaddvq_f32(_sum0);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss2 = vpadd_f32(_ss, _ss);
float sum0 = vget_lane_f32(_ss2, 0);
#endif
output0_tm[0] = sum0;
output0_tm++;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, 4u, 1, opt.workspace_allocator);
}
{
conv3x3s1_winograd64_transform_output_neon(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_pack4to1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
int nn_outch = 0;
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p + 1);
const float bias0 = bias ? bias[p] : 0.f;
const float bias1 = bias ? bias[p + 1] : 0.f;
out0.fill(bias0);
out1.fill(bias1);
const float* k0 = kernel.channel(p);
const float* k1 = kernel.channel(p + 1);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
float32x4_t _k00_0 = vld1q_f32(k0);
float32x4_t _k01_0 = vld1q_f32(k0 + 4);
float32x4_t _k02_0 = vld1q_f32(k0 + 8);
float32x4_t _k10_0 = vld1q_f32(k0 + 12);
float32x4_t _k11_0 = vld1q_f32(k0 + 16);
float32x4_t _k12_0 = vld1q_f32(k0 + 20);
float32x4_t _k20_0 = vld1q_f32(k0 + 24);
float32x4_t _k21_0 = vld1q_f32(k0 + 28);
float32x4_t _k22_0 = vld1q_f32(k0 + 32);
float32x4_t _k00_1 = vld1q_f32(k1);
float32x4_t _k01_1 = vld1q_f32(k1 + 4);
float32x4_t _k02_1 = vld1q_f32(k1 + 8);
float32x4_t _k10_1 = vld1q_f32(k1 + 12);
float32x4_t _k11_1 = vld1q_f32(k1 + 16);
float32x4_t _k12_1 = vld1q_f32(k1 + 20);
float32x4_t _k20_1 = vld1q_f32(k1 + 24);
float32x4_t _k21_1 = vld1q_f32(k1 + 28);
float32x4_t _k22_1 = vld1q_f32(k1 + 32);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r00 r01 r02 r03
"fmul v16.4s, %10.4s, v0.4s \n"
"fmul v17.4s, %19.4s, v0.4s \n"
"fmul v18.4s, %10.4s, v1.4s \n"
"fmul v19.4s, %19.4s, v1.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v4.4s, v5.4s}, [%2] \n" // r04 r05
"fmul v6.4s, %10.4s, v2.4s \n"
"fmul v7.4s, %19.4s, v2.4s \n"
"fmul v8.4s, %10.4s, v3.4s \n"
"fmul v9.4s, %19.4s, v3.4s \n"
"fmla v16.4s, %11.4s, v1.4s \n"
"fmla v17.4s, %20.4s, v1.4s \n"
"fmla v18.4s, %11.4s, v2.4s \n"
"fmla v19.4s, %20.4s, v2.4s \n"
"fmla v6.4s, %11.4s, v3.4s \n"
"fmla v7.4s, %20.4s, v3.4s \n"
"fmla v8.4s, %11.4s, v4.4s \n"
"fmla v9.4s, %20.4s, v4.4s \n"
"fmla v16.4s, %12.4s, v2.4s \n"
"fmla v17.4s, %21.4s, v2.4s \n"
"fmla v18.4s, %12.4s, v3.4s \n"
"fmla v19.4s, %21.4s, v3.4s \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r10 r11 r12 r12
"fmla v6.4s, %12.4s, v4.4s \n"
"fmla v7.4s, %21.4s, v4.4s \n"
"fmla v8.4s, %12.4s, v5.4s \n"
"fmla v9.4s, %21.4s, v5.4s \n"
"fmla v16.4s, %13.4s, v0.4s \n"
"fmla v17.4s, %22.4s, v0.4s \n"
"fmla v18.4s, %13.4s, v1.4s \n"
"fmla v19.4s, %22.4s, v1.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4s, v5.4s}, [%3] \n" // r14 r15
"fmla v6.4s, %13.4s, v2.4s \n"
"fmla v7.4s, %22.4s, v2.4s \n"
"fmla v8.4s, %13.4s, v3.4s \n"
"fmla v9.4s, %22.4s, v3.4s \n"
"fmla v16.4s, %14.4s, v1.4s \n"
"fmla v17.4s, %23.4s, v1.4s \n"
"fmla v18.4s, %14.4s, v2.4s \n"
"fmla v19.4s, %23.4s, v2.4s \n"
"fmla v6.4s, %14.4s, v3.4s \n"
"fmla v7.4s, %23.4s, v3.4s \n"
"fmla v8.4s, %14.4s, v4.4s \n"
"fmla v9.4s, %23.4s, v4.4s \n"
"fmla v16.4s, %15.4s, v2.4s \n"
"fmla v17.4s, %24.4s, v2.4s \n"
"fmla v18.4s, %15.4s, v3.4s \n"
"fmla v19.4s, %24.4s, v3.4s \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%4], #64 \n" // r20 r21 r22 r22
"fmla v6.4s, %15.4s, v4.4s \n"
"fmla v7.4s, %24.4s, v4.4s \n"
"fmla v8.4s, %15.4s, v5.4s \n"
"fmla v9.4s, %24.4s, v5.4s \n"
"fmla v16.4s, %16.4s, v0.4s \n"
"fmla v17.4s, %25.4s, v0.4s \n"
"fmla v18.4s, %16.4s, v1.4s \n"
"fmla v19.4s, %25.4s, v1.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4s, v5.4s}, [%4] \n" // r24 r25
"fmla v6.4s, %16.4s, v2.4s \n"
"fmla v7.4s, %25.4s, v2.4s \n"
"fmla v8.4s, %16.4s, v3.4s \n"
"fmla v9.4s, %25.4s, v3.4s \n"
"fmla v16.4s, %17.4s, v1.4s \n"
"fmla v17.4s, %26.4s, v1.4s \n"
"fmla v18.4s, %17.4s, v2.4s \n"
"fmla v19.4s, %26.4s, v2.4s \n"
"fmla v6.4s, %17.4s, v3.4s \n"
"fmla v7.4s, %26.4s, v3.4s \n"
"fmla v8.4s, %17.4s, v4.4s \n"
"fmla v9.4s, %26.4s, v4.4s \n"
"fmla v16.4s, %18.4s, v2.4s \n"
"fmla v17.4s, %27.4s, v2.4s \n"
"fmla v18.4s, %18.4s, v3.4s \n"
"fmla v19.4s, %27.4s, v3.4s \n"
"fmla v6.4s, %18.4s, v4.4s \n"
"fmla v7.4s, %27.4s, v4.4s \n"
"fmla v8.4s, %18.4s, v5.4s \n"
"fmla v9.4s, %27.4s, v5.4s \n"
"ld1 {v0.4s}, [%0] \n" // sum00 sum01 sum02 sum03
"ld1 {v1.4s}, [%1] \n" // sum10 sum11 sum12 sum13
"faddp v16.4s, v16.4s, v16.4s \n"
"faddp v17.4s, v17.4s, v17.4s \n"
"faddp v18.4s, v18.4s, v18.4s \n"
"faddp v19.4s, v19.4s, v19.4s \n"
"faddp v6.4s, v6.4s, v6.4s \n"
"faddp v7.4s, v7.4s, v7.4s \n"
"faddp v8.4s, v8.4s, v8.4s \n"
"faddp v9.4s, v9.4s, v9.4s \n"
"faddp v16.2s, v16.2s, v18.2s \n"
"faddp v17.2s, v17.2s, v19.2s \n"
"faddp v6.2s, v6.2s, v8.2s \n"
"faddp v7.2s, v7.2s, v9.2s \n"
"trn1 v16.2d, v16.2d, v6.2d \n"
"trn1 v17.2d, v17.2d, v7.2d \n"
"fadd v0.4s, v0.4s, v16.4s \n"
"fadd v1.4s, v1.4s, v17.4s \n"
"st1 {v0.4s}, [%0], #16 \n"
"st1 {v1.4s}, [%1], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00_0), // %10
"w"(_k01_0), // %11
"w"(_k02_0), // %12
"w"(_k10_0), // %13
"w"(_k11_0), // %14
"w"(_k12_0), // %15
"w"(_k20_0), // %16
"w"(_k21_0), // %17
"w"(_k22_0), // %18
"w"(_k00_1), // %19
"w"(_k01_1), // %20
"w"(_k02_1), // %21
"w"(_k10_1), // %22
"w"(_k11_1), // %23
"w"(_k12_1), // %24
"w"(_k20_1), // %25
"w"(_k21_1), // %26
"w"(_k22_1) // %27
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2] \n" // r00 r01 r02 r03
"fmul v16.4s, %10.4s, v0.4s \n"
"fmul v17.4s, %19.4s, v0.4s \n"
"fmul v18.4s, %10.4s, v1.4s \n"
"fmul v19.4s, %19.4s, v1.4s \n"
"fmla v16.4s, %11.4s, v1.4s \n"
"fmla v17.4s, %20.4s, v1.4s \n"
"fmla v18.4s, %11.4s, v2.4s \n"
"fmla v19.4s, %20.4s, v2.4s \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3] \n" // r10 r11 r12 r12
"fmla v16.4s, %12.4s, v2.4s \n"
"fmla v17.4s, %21.4s, v2.4s \n"
"fmla v18.4s, %12.4s, v3.4s \n"
"fmla v19.4s, %21.4s, v3.4s \n"
"fmla v16.4s, %13.4s, v4.4s \n"
"fmla v17.4s, %22.4s, v4.4s \n"
"fmla v18.4s, %13.4s, v5.4s \n"
"fmla v19.4s, %22.4s, v5.4s \n"
"fmla v16.4s, %14.4s, v5.4s \n"
"fmla v17.4s, %23.4s, v5.4s \n"
"fmla v18.4s, %14.4s, v6.4s \n"
"fmla v19.4s, %23.4s, v6.4s \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%4] \n" // r20 r21 r22 r22
"fmla v16.4s, %15.4s, v6.4s \n"
"fmla v17.4s, %24.4s, v6.4s \n"
"fmla v18.4s, %15.4s, v7.4s \n"
"fmla v19.4s, %24.4s, v7.4s \n"
"fmla v16.4s, %16.4s, v0.4s \n"
"fmla v17.4s, %25.4s, v0.4s \n"
"fmla v18.4s, %16.4s, v1.4s \n"
"fmla v19.4s, %25.4s, v1.4s \n"
"fmla v16.4s, %17.4s, v1.4s \n"
"fmla v17.4s, %26.4s, v1.4s \n"
"fmla v18.4s, %17.4s, v2.4s \n"
"fmla v19.4s, %26.4s, v2.4s \n"
"fmla v16.4s, %18.4s, v2.4s \n"
"fmla v17.4s, %27.4s, v2.4s \n"
"fmla v18.4s, %18.4s, v3.4s \n"
"fmla v19.4s, %27.4s, v3.4s \n"
"ld1 {v4.2s}, [%0] \n" // sum00 sum01
"ld1 {v5.2s}, [%1] \n" // sum10 sum11
"faddp v16.4s, v16.4s, v16.4s \n"
"faddp v17.4s, v17.4s, v17.4s \n"
"faddp v18.4s, v18.4s, v18.4s \n"
"faddp v19.4s, v19.4s, v19.4s \n"
"add %2, %2, #32 \n"
"faddp v16.2s, v16.2s, v18.2s \n"
"faddp v17.2s, v17.2s, v19.2s \n"
"add %3, %3, #32 \n"
"fadd v4.2s, v4.2s, v16.2s \n"
"fadd v5.2s, v5.2s, v17.2s \n"
"add %4, %4, #32 \n"
"st1 {v4.2s}, [%0], #8 \n"
"st1 {v5.2s}, [%1], #8 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00_0), // %10
"w"(_k01_0), // %11
"w"(_k02_0), // %12
"w"(_k10_0), // %13
"w"(_k11_0), // %14
"w"(_k12_0), // %15
"w"(_k20_0), // %16
"w"(_k21_0), // %17
"w"(_k22_0), // %18
"w"(_k00_1), // %19
"w"(_k01_1), // %20
"w"(_k02_1), // %21
"w"(_k10_1), // %22
"w"(_k11_1), // %23
"w"(_k12_1), // %24
"w"(_k20_1), // %25
"w"(_k21_1), // %26
"w"(_k22_1) // %27
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19");
}
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v0.4s, v1.4s, v2.4s}, [%2] \n" // r00 r01 r02
"fmul v16.4s, %10.4s, v0.4s \n"
"fmul v17.4s, %19.4s, v0.4s \n"
"fmul v18.4s, %11.4s, v1.4s \n"
"fmul v19.4s, %20.4s, v1.4s \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v3.4s, v4.4s, v5.4s}, [%3] \n" // r10 r11 r12
"fmla v16.4s, %12.4s, v2.4s \n"
"fmla v17.4s, %21.4s, v2.4s \n"
"fmla v18.4s, %13.4s, v3.4s \n"
"fmla v19.4s, %22.4s, v3.4s \n"
"fmla v16.4s, %14.4s, v4.4s \n"
"fmla v17.4s, %23.4s, v4.4s \n"
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v0.4s, v1.4s, v2.4s}, [%4] \n" // r20 r21 r22
"fmla v18.4s, %15.4s, v5.4s \n"
"fmla v19.4s, %24.4s, v5.4s \n"
"fmla v16.4s, %16.4s, v0.4s \n"
"fmla v17.4s, %25.4s, v0.4s \n"
"fmla v18.4s, %17.4s, v1.4s \n"
"fmla v19.4s, %26.4s, v1.4s \n"
"fmla v16.4s, %18.4s, v2.4s \n"
"fmla v17.4s, %27.4s, v2.4s \n"
"ld1 {v3.s}[0], [%0] \n" // sum00
"ld1 {v4.s}[0], [%1] \n" // sum10
"fadd v16.4s, v16.4s, v18.4s \n"
"fadd v17.4s, v17.4s, v19.4s \n"
"add %2, %2, #16 \n"
"faddp v16.4s, v16.4s, v16.4s \n"
"faddp v17.4s, v17.4s, v17.4s \n"
"add %3, %3, #16 \n"
"faddp v16.2s, v16.2s, v16.2s \n"
"faddp v17.2s, v17.2s, v17.2s \n"
"add %4, %4, #16 \n"
"fadd v3.2s, v3.2s, v16.2s \n"
"fadd v4.2s, v4.2s, v17.2s \n"
"st1 {v3.s}[0], [%0], #4 \n"
"st1 {v4.s}[0], [%1], #4 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00_0), // %10
"w"(_k01_0), // %11
"w"(_k02_0), // %12
"w"(_k10_0), // %13
"w"(_k11_0), // %14
"w"(_k12_0), // %15
"w"(_k20_0), // %16
"w"(_k21_0), // %17
"w"(_k22_0), // %18
"w"(_k00_1), // %19
"w"(_k01_1), // %20
"w"(_k02_1), // %21
"w"(_k10_1), // %22
"w"(_k11_1), // %23
"w"(_k12_1), // %24
"w"(_k20_1), // %25
"w"(_k21_1), // %26
"w"(_k22_1) // %27
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19");
}
r0 += 2 * 4;
r1 += 2 * 4;
r2 += 2 * 4;
}
k0 += 9 * 4;
k1 += 9 * 4;
}
}
#endif // __ARM_NEON && __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out0.fill(bias0);
const float* k0 = kernel.channel(p);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0 + 4);
float32x4_t _k02 = vld1q_f32(k0 + 8);
float32x4_t _k10 = vld1q_f32(k0 + 12);
float32x4_t _k11 = vld1q_f32(k0 + 16);
float32x4_t _k12 = vld1q_f32(k0 + 20);
float32x4_t _k20 = vld1q_f32(k0 + 24);
float32x4_t _k21 = vld1q_f32(k0 + 28);
float32x4_t _k22 = vld1q_f32(k0 + 32);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
#if __aarch64__
for (; j + 7 < outw; j += 8)
{
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" // r04 r05 r06 r07
"fmul v16.4s, %8.4s, v0.4s \n"
"fmul v17.4s, %8.4s, v1.4s \n"
"fmul v18.4s, %8.4s, v2.4s \n"
"fmul v19.4s, %8.4s, v3.4s \n"
"fmul v20.4s, %8.4s, v4.4s \n"
"fmul v21.4s, %8.4s, v5.4s \n"
"fmul v22.4s, %8.4s, v6.4s \n"
"fmul v23.4s, %8.4s, v7.4s \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v8.4s, v9.4s}, [%1] \n" // r08 r09
"fmla v16.4s, %9.4s, v1.4s \n"
"fmla v17.4s, %9.4s, v2.4s \n"
"fmla v18.4s, %9.4s, v3.4s \n"
"fmla v19.4s, %9.4s, v4.4s \n"
"fmla v20.4s, %9.4s, v5.4s \n"
"fmla v21.4s, %9.4s, v6.4s \n"
"fmla v22.4s, %9.4s, v7.4s \n"
"fmla v23.4s, %9.4s, v8.4s \n"
"fmla v16.4s, %10.4s, v2.4s \n"
"fmla v17.4s, %10.4s, v3.4s \n"
"fmla v18.4s, %10.4s, v4.4s \n"
"fmla v19.4s, %10.4s, v5.4s \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r10 r11 r12 r13
"fmla v20.4s, %10.4s, v6.4s \n"
"fmla v21.4s, %10.4s, v7.4s \n"
"fmla v22.4s, %10.4s, v8.4s \n"
"fmla v23.4s, %10.4s, v9.4s \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r14 r15 r16 r17
"fmla v16.4s, %11.4s, v0.4s \n"
"fmla v17.4s, %11.4s, v1.4s \n"
"fmla v18.4s, %11.4s, v2.4s \n"
"fmla v19.4s, %11.4s, v3.4s \n"
"fmla v20.4s, %11.4s, v4.4s \n"
"fmla v21.4s, %11.4s, v5.4s \n"
"fmla v22.4s, %11.4s, v6.4s \n"
"fmla v23.4s, %11.4s, v7.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v8.4s, v9.4s}, [%2] \n" // r18 r19
"fmla v16.4s, %12.4s, v1.4s \n"
"fmla v17.4s, %12.4s, v2.4s \n"
"fmla v18.4s, %12.4s, v3.4s \n"
"fmla v19.4s, %12.4s, v4.4s \n"
"fmla v20.4s, %12.4s, v5.4s \n"
"fmla v21.4s, %12.4s, v6.4s \n"
"fmla v22.4s, %12.4s, v7.4s \n"
"fmla v23.4s, %12.4s, v8.4s \n"
"fmla v16.4s, %13.4s, v2.4s \n"
"fmla v17.4s, %13.4s, v3.4s \n"
"fmla v18.4s, %13.4s, v4.4s \n"
"fmla v19.4s, %13.4s, v5.4s \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r20 r21 r22 r23
"fmla v20.4s, %13.4s, v6.4s \n"
"fmla v21.4s, %13.4s, v7.4s \n"
"fmla v22.4s, %13.4s, v8.4s \n"
"fmla v23.4s, %13.4s, v9.4s \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r24 r25 r26 r27
"fmla v16.4s, %14.4s, v0.4s \n"
"fmla v17.4s, %14.4s, v1.4s \n"
"fmla v18.4s, %14.4s, v2.4s \n"
"fmla v19.4s, %14.4s, v3.4s \n"
"fmla v20.4s, %14.4s, v4.4s \n"
"fmla v21.4s, %14.4s, v5.4s \n"
"fmla v22.4s, %14.4s, v6.4s \n"
"fmla v23.4s, %14.4s, v7.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4s, v9.4s}, [%3] \n" // r28 r29
"fmla v16.4s, %15.4s, v1.4s \n"
"fmla v17.4s, %15.4s, v2.4s \n"
"fmla v18.4s, %15.4s, v3.4s \n"
"fmla v19.4s, %15.4s, v4.4s \n"
"fmla v20.4s, %15.4s, v5.4s \n"
"fmla v21.4s, %15.4s, v6.4s \n"
"fmla v22.4s, %15.4s, v7.4s \n"
"fmla v23.4s, %15.4s, v8.4s \n"
"fmla v16.4s, %16.4s, v2.4s \n"
"fmla v17.4s, %16.4s, v3.4s \n"
"fmla v18.4s, %16.4s, v4.4s \n"
"fmla v19.4s, %16.4s, v5.4s \n"
"fmla v20.4s, %16.4s, v6.4s \n"
"fmla v21.4s, %16.4s, v7.4s \n"
"fmla v22.4s, %16.4s, v8.4s \n"
"fmla v23.4s, %16.4s, v9.4s \n"
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.4s, v1.4s}, [%0] \n" // sum0 sum1 sum2 sum3 sum4 sum5 sum6 sum7
"faddp v16.4s, v16.4s, v17.4s \n"
"faddp v18.4s, v18.4s, v19.4s \n"
"faddp v20.4s, v20.4s, v21.4s \n"
"faddp v22.4s, v22.4s, v23.4s \n"
"faddp v16.4s, v16.4s, v18.4s \n"
"faddp v20.4s, v20.4s, v22.4s \n"
"fadd v0.4s, v0.4s, v16.4s \n"
"fadd v1.4s, v1.4s, v20.4s \n"
"st1 {v0.4s, v1.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
#endif // __aarch64__
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v8.4s, v9.4s}, [%1] \n" // r04 r05
"fmul v16.4s, %8.4s, v0.4s \n"
"fmul v17.4s, %8.4s, v1.4s \n"
"fmul v18.4s, %8.4s, v2.4s \n"
"fmul v19.4s, %8.4s, v3.4s \n"
"fmla v16.4s, %9.4s, v1.4s \n"
"fmla v17.4s, %9.4s, v2.4s \n"
"fmla v18.4s, %9.4s, v3.4s \n"
"fmla v19.4s, %9.4s, v8.4s \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r10 r11 r12 r13
"fmla v16.4s, %10.4s, v2.4s \n"
"fmla v17.4s, %10.4s, v3.4s \n"
"fmla v18.4s, %10.4s, v8.4s \n"
"fmla v19.4s, %10.4s, v9.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v8.4s, v9.4s}, [%2] \n" // r14 r15
"fmla v16.4s, %11.4s, v4.4s \n"
"fmla v17.4s, %11.4s, v5.4s \n"
"fmla v18.4s, %11.4s, v6.4s \n"
"fmla v19.4s, %11.4s, v7.4s \n"
"fmla v16.4s, %12.4s, v5.4s \n"
"fmla v17.4s, %12.4s, v6.4s \n"
"fmla v18.4s, %12.4s, v7.4s \n"
"fmla v19.4s, %12.4s, v8.4s \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r20 r21 r22 r23
"fmla v16.4s, %13.4s, v6.4s \n"
"fmla v17.4s, %13.4s, v7.4s \n"
"fmla v18.4s, %13.4s, v8.4s \n"
"fmla v19.4s, %13.4s, v9.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4s, v9.4s}, [%3] \n" // r24 r25
"fmla v16.4s, %14.4s, v0.4s \n"
"fmla v17.4s, %14.4s, v1.4s \n"
"fmla v18.4s, %14.4s, v2.4s \n"
"fmla v19.4s, %14.4s, v3.4s \n"
"fmla v16.4s, %15.4s, v1.4s \n"
"fmla v17.4s, %15.4s, v2.4s \n"
"fmla v18.4s, %15.4s, v3.4s \n"
"fmla v19.4s, %15.4s, v8.4s \n"
"fmla v16.4s, %16.4s, v2.4s \n"
"fmla v17.4s, %16.4s, v3.4s \n"
"fmla v18.4s, %16.4s, v8.4s \n"
"fmla v19.4s, %16.4s, v9.4s \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.4s}, [%0] \n" // sum0 sum1 sum2 sum3
"faddp v16.4s, v16.4s, v17.4s \n"
"faddp v18.4s, v18.4s, v19.4s \n"
"faddp v16.4s, v16.4s, v18.4s \n"
"fadd v0.4s, v0.4s, v16.4s \n"
"st1 {v0.4s}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19");
#else // __aarch64__
asm volatile(
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n" // r00 r01
"vmul.f32 q3, %q8, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d4-d5}, [%1 :128]! \n" // r02
"vmul.f32 q4, %q8, q1 \n"
"vmla.f32 q3, %q9, q1 \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n" // r03 r04
"vmul.f32 q5, %q8, q2 \n"
"vmla.f32 q4, %q9, q2 \n"
"vmla.f32 q3, %q10, q2 \n"
"vmul.f32 q6, %q8, q0 \n"
"vmla.f32 q5, %q9, q0 \n"
"vmla.f32 q4, %q10, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d4-d5}, [%1 :128] \n" // r05
"vmla.f32 q6, %q9, q1 \n"
"vmla.f32 q5, %q10, q1 \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%2 :128]! \n" // r10 r11
"vmla.f32 q6, %q10, q2 \n"
"vmla.f32 q3, %q11, q0 \n"
"pld [%2, #128] \n"
"vld1.f32 {d4-d5}, [%2 :128]! \n" // r12
"vmla.f32 q4, %q11, q1 \n"
"vmla.f32 q3, %q12, q1 \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%2 :128]! \n" // r13 r14
"vmla.f32 q5, %q11, q2 \n"
"vmla.f32 q4, %q12, q2 \n"
"vmla.f32 q3, %q13, q2 \n"
"vmla.f32 q6, %q11, q0 \n"
"vmla.f32 q5, %q12, q0 \n"
"vmla.f32 q4, %q13, q0 \n"
"pld [%2, #128] \n"
"vld1.f32 {d4-d5}, [%2 :128] \n" // r15
"vmla.f32 q6, %q12, q1 \n"
"vmla.f32 q5, %q13, q1 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n" // r20 r21
"vmla.f32 q6, %q13, q2 \n"
"vmla.f32 q3, %q14, q0 \n"
"pld [%3, #128] \n"
"vld1.f32 {d4-d5}, [%3 :128]! \n" // r22
"vmla.f32 q4, %q14, q1 \n"
"vmla.f32 q3, %q15, q1 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n" // r23 r24
"vmla.f32 q5, %q14, q2 \n"
"vmla.f32 q4, %q15, q2 \n"
"vmla.f32 q3, %q16, q2 \n"
"vmla.f32 q6, %q14, q0 \n"
"vmla.f32 q5, %q15, q0 \n"
"vmla.f32 q4, %q16, q0 \n"
"pld [%3, #128] \n"
"vld1.f32 {d4-d5}, [%3 :128] \n" // r25
"vmla.f32 q6, %q15, q1 \n"
"vmla.f32 q5, %q16, q1 \n"
"vld1.f32 {d0-d1}, [%0] \n" // sum0 sum1 sum2 sum3
"vmla.f32 q6, %q16, q2 \n"
"vadd.f32 d6, d6, d7 \n"
"vadd.f32 d8, d8, d9 \n"
"vadd.f32 d10, d10, d11 \n"
"vadd.f32 d12, d12, d13 \n"
"sub %1, %1, #16 \n"
"vpadd.f32 d6, d6, d8 \n"
"vpadd.f32 d7, d10, d12 \n"
"sub %2, %2, #16 \n"
"vadd.f32 q0, q0, q3 \n"
"sub %3, %3, #16 \n"
"vst1.f32 {d0-d1}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6");
#endif // __aarch64__
}
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1] \n" // r00 r01 r02 r03
"fmul v16.4s, %8.4s, v0.4s \n"
"fmul v17.4s, %8.4s, v1.4s \n"
"fmul v18.4s, %9.4s, v1.4s \n"
"fmul v19.4s, %9.4s, v2.4s \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2] \n" // r10 r11 r12 r13
"fmla v16.4s, %10.4s, v2.4s \n"
"fmla v17.4s, %10.4s, v3.4s \n"
"fmla v18.4s, %11.4s, v4.4s \n"
"fmla v19.4s, %11.4s, v5.4s \n"
"fmla v16.4s, %12.4s, v5.4s \n"
"fmla v17.4s, %12.4s, v6.4s \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3] \n" // r20 r21 r22 r23
"fmla v18.4s, %13.4s, v6.4s \n"
"fmla v19.4s, %13.4s, v7.4s \n"
"fmla v16.4s, %14.4s, v0.4s \n"
"fmla v17.4s, %14.4s, v1.4s \n"
"fmla v18.4s, %15.4s, v1.4s \n"
"fmla v19.4s, %15.4s, v2.4s \n"
"fmla v16.4s, %16.4s, v2.4s \n"
"fmla v17.4s, %16.4s, v3.4s \n"
"ld1 {v0.2s}, [%0] \n" // sum0 sum1
"fadd v16.4s, v16.4s, v18.4s \n"
"fadd v17.4s, v17.4s, v19.4s \n"
"add %1, %1, #32 \n"
"faddp v16.4s, v16.4s, v17.4s \n"
"add %2, %2, #32 \n"
"faddp v16.4s, v16.4s, v16.4s \n"
"add %3, %3, #32 \n"
"fadd v0.2s, v0.2s, v16.2s \n"
"st1 {v0.2s}, [%0], #8 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19");
#else // __aarch64__
asm volatile(
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n" // r00 r01
"vmul.f32 q5, %q8, q0 \n"
"vmul.f32 q6, %q8, q1 \n"
"vmul.f32 q2, %q9, q1 \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128] \n" // r02 r03
"vmul.f32 q3, %q9, q0 \n"
"vmla.f32 q5, %q10, q0 \n"
"vmla.f32 q6, %q10, q1 \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%2 :128]! \n" // r10 r11
"vmla.f32 q2, %q11, q0 \n"
"vmla.f32 q3, %q11, q1 \n"
"vmla.f32 q5, %q12, q1 \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%2 :128] \n" // r12 r13
"vmla.f32 q6, %q12, q0 \n"
"vmla.f32 q2, %q13, q0 \n"
"vmla.f32 q3, %q13, q1 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n" // r20 r21
"vmla.f32 q5, %q14, q0 \n"
"vmla.f32 q6, %q14, q1 \n"
"vmla.f32 q2, %q15, q1 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128] \n" // r22 r23
"vmla.f32 q3, %q15, q0 \n"
"vmla.f32 q5, %q16, q0 \n"
"vmla.f32 q6, %q16, q1 \n"
"vld1.f32 {d8}, [%0] \n" // sum0 sum1
"vadd.f32 q5, q5, q2 \n"
"vadd.f32 q6, q6, q3 \n"
"vadd.f32 d10, d10, d11 \n"
"vadd.f32 d12, d12, d13 \n"
"vpadd.f32 d10, d10, d12 \n"
"vadd.f32 d8, d8, d10 \n"
"vst1.f32 {d8}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6");
#endif // __aarch64__
}
for (; j < outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v0.4s, v1.4s, v2.4s}, [%1] \n" // r00 r01 r02
"eor v16.16b, v16.16b, v16.16b \n"
"ld1 {v16.s}[0], [%0] \n" // sum0
"fmul v17.4s, %8.4s, v0.4s \n"
"fmul v18.4s, %9.4s, v1.4s \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v3.4s, v4.4s, v5.4s}, [%2] \n" // r10 r11 r12
"fmla v16.4s, %10.4s, v2.4s \n"
"fmla v17.4s, %11.4s, v3.4s \n"
"fmla v18.4s, %12.4s, v4.4s \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v0.4s, v1.4s, v2.4s}, [%3] \n" // r20 r21 r22
"fmla v16.4s, %13.4s, v5.4s \n"
"fmla v17.4s, %14.4s, v0.4s \n"
"fmla v18.4s, %15.4s, v1.4s \n"
"fmla v16.4s, %16.4s, v2.4s \n"
"fadd v17.4s, v17.4s, v18.4s \n"
"fadd v16.4s, v16.4s, v17.4s \n"
"add %1, %1, #16 \n"
"faddp v16.4s, v16.4s, v16.4s \n"
"add %2, %2, #16 \n"
"faddp v16.2s, v16.2s, v16.2s \n"
"add %3, %3, #16 \n"
"st1 {v16.s}[0], [%0], #4 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18");
#else // __aarch64__
asm volatile(
"pld [%1, #384] \n"
"vldm %1, {d0-d5} \n" // r00 r01 r02
"veor q3, q3 \n"
"vld1.f32 {d6[0]}, [%0] \n" // sum0
"vmul.f32 q4, %q8, q0 \n"
"vmul.f32 q5, %q9, q1 \n"
"vmla.f32 q3, %q10, q2 \n"
"pld [%2, #384] \n"
"vldm %2, {d0-d5} \n" // r10 r11 r12
"vmla.f32 q4, %q11, q0 \n"
"vmla.f32 q5, %q12, q1 \n"
"vmla.f32 q3, %q13, q2 \n"
"pld [%3, #384] \n"
"vldm %3, {d0-d5} \n" // r20 r21 r22
"vmla.f32 q4, %q14, q0 \n"
"vmla.f32 q5, %q15, q1 \n"
"vmla.f32 q3, %q16, q2 \n"
"vadd.f32 q4, q4, q5 \n"
"vadd.f32 q3, q3, q4 \n"
"add %1, %1, #16 \n"
"vadd.f32 d6, d6, d7 \n"
"add %2, %2, #16 \n"
"vpadd.f32 d6, d6, d6 \n"
"add %3, %3, #16 \n"
"vst1.f32 {d6[0]}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "q0", "q1", "q2", "q3", "q4", "q5");
#endif // __aarch64__
}
r0 += 2 * 4;
r1 += 2 * 4;
r2 += 2 * 4;
}
k0 += 9 * 4;
}
}
}
|
jacobi-ompacc-opt1.c
|
// An optimization on top of naive coding:
// promoting data handling outside the while loop
#include <stdio.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// Add timing support
#include <sys/time.h>
double time_stamp()
{
struct timeval t;
double time;
gettimeofday(&t, NULL);
time = t.tv_sec + 1.0e-6*t.tv_usec;
return time;
}
double time1, time2;
void driver(void);
void initialize(void);
void jacobi(void);
void error_check(void);
/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This c version program is translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
* m - grid dimension in y direction
* alpha - Helmholtz constant (always greater than 0.0)
* tol - error tolerance for iterative solver
* relax - Successive over-relaxation parameter
* mits - Maximum iterations for iterative solver
*
* On output
* : u(n,m) - Dependent variable (solutions)
* : f(n,m) - Right hand side function
*************************************************************/
#define MSIZE 512
int n,m,mits;
#define REAL float // flexible between float and double
REAL tol,relax=1.0,alpha=0.0543;
REAL u[MSIZE][MSIZE],f[MSIZE][MSIZE],uold[MSIZE][MSIZE];
REAL dx,dy;
int main (void)
{
// float toler;
/* printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE);
scanf ("%d",&n);
scanf ("%d",&m);
printf("Input tol - error tolerance for iterative solver\n");
scanf("%f",&toler);
tol=(double)toler;
printf("Input mits - Maximum iterations for solver\n");
scanf("%d",&mits);
*/
n=MSIZE;
m=MSIZE;
tol=0.0000000001;
mits=5000;
#if 0 // Not yet support concurrent CPU and GPU threads
#ifdef _OPENMP
#pragma omp parallel
{
#pragma omp single
printf("Running using %d threads...\n",omp_get_num_threads());
}
#endif
#endif
driver ( ) ;
return 0;
}
/*************************************************************
* Subroutine driver ()
* This is where the arrays are allocated and initialized.
*
* Working variables/arrays
* dx - grid spacing in x direction
* dy - grid spacing in y direction
*************************************************************/
void driver( )
{
initialize();
time1 = time_stamp();
/* Solve Helmholtz equation */
jacobi ();
time2 = time_stamp();
printf("------------------------\n");
printf("Execution time = %f\n",time2-time1);
/* error_check (n,m,alpha,dx,dy,u,f)*/
error_check ( );
}
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
void initialize( )
{
int i,j, xx,yy;
//double PI=3.1415926;
dx = 2.0 / (n-1);
dy = 2.0 / (m-1);
/* Initialize initial condition and RHS */
//#pragma omp parallel for private(xx,yy,j,i)
for (i=0;i<n;i++)
for (j=0;j<m;j++)
{
xx =(int)( -1.0 + dx * (i-1));
yy = (int)(-1.0 + dy * (j-1)) ;
u[i][j] = 0.0;
f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\
- 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy);
}
}
/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves the Helmholtz equation on a rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlet boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
void jacobi( )
{
REAL omega;
int i,j,k;
REAL error,resid,ax,ay,b;
// double error_local;
// float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2;
// float te1,te2;
// float second;
omega=relax;
/* Initialize coefficients */
ax = 1.0/(dx*dx); /* X-direction coef */
ay = 1.0/(dy*dy); /* Y-direction coef */
b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */
error = 10.0 * tol;
k = 1;
// An optimization on top of naive coding: promoting data handling outside the while loop
// note that the map clauses may need to change, since the mapped data region now spans the whole iteration loop:
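// For comparison, a naive version (sketch only, not the code as shipped) would map the
// arrays on every device region inside the while loop, roughly:
//   #pragma omp target map(in: n, m, u[0:n][0:m]) map(out: uold[0:n][0:m])
//     ... copy u into uold ...
//   #pragma omp target map(in: n, m, omega, ax, ay, b, f[0:n][0:m], uold[0:n][0:m]) map(inout: u[0:n][0:m])
//     ... stencil update + residual reduction ...
// so u, uold and f travel between host and device on every iteration. The single
// "target data" region below keeps them resident on the device across the whole loop.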
#pragma omp target data map(in:n, m, omega, ax, ay, b, f[0:n][0:m]) map(inout:u[0:n][0:m]) map(alloc:uold[0:n][0:m])
while ((k<=mits)&&(error>tol))
{
error = 0.0;
/* Copy new solution into old */
//#pragma omp parallel
// {
#pragma omp target //map(in:n, m, u[0:n][0:m]) map(out:uold[0:n][0:m])
#pragma omp parallel for private(j,i)
for(i=0;i<n;i++)
for(j=0;j<m;j++)
uold[i][j] = u[i][j];
#pragma omp target //map(in:n, m, omega, ax, ay, b, f[0:n][0:m], uold[0:n][0:m]) map(out:u[0:n][0:m])
#pragma omp parallel for private(resid,j,i) reduction(+:error) // nowait
for (i=1;i<(n-1);i++)
for (j=1;j<(m-1);j++)
{
resid = (ax*(uold[i-1][j] + uold[i+1][j])\
+ ay*(uold[i][j-1] + uold[i][j+1])+ b * uold[i][j] - f[i][j])/b;
u[i][j] = uold[i][j] - omega * resid;
error = error + resid*resid ;
}
// }
/* omp end parallel */
/* Error check */
if (k%500==0)
printf("Finished %d iteration with error =%f\n",k, error);
error = sqrt(error)/(n*m);
k = k + 1;
} /* End iteration loop */
printf("Total Number of Iterations:%d\n",k);
printf("Residual:%E\n", error);
}
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
void error_check ( )
{
int i,j;
REAL xx,yy,temp,error;
dx = 2.0 / (n-1);
dy = 2.0 / (m-1);
error = 0.0 ;
//#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error)
for (i=0;i<n;i++)
for (j=0;j<m;j++)
{
xx = -1.0 + dx * (i-1);
yy = -1.0 + dy * (j-1);
temp = u[i][j] - (1.0-xx*xx)*(1.0-yy*yy);
error = error + temp*temp;
}
error = sqrt(error)/(n*m);
printf("Solution Error :%E \n",error);
}
|
utils.h
|
#ifndef _UTILS_H_
#define _UTILS_H_
#include <glog/logging.h>
#include <mkldnn.hpp>
#include <iostream>
#include "op_param.h"
#include "omp.h"
using namespace mkldnn;
#define GET_PTR(t, p, offset) reinterpret_cast<t*>( reinterpret_cast<size_t>(p) +static_cast<size_t>(offset) )
memory::format get_desired_format(int channel);
memory::format get_desired_format_weight(int channel0, int channel1);
template<typename T>
void eltwise_multiply(T* x1, T* x2, T* y, size_t n) {
#pragma omp parallel for schedule(static)
for (size_t i = 0; i < n; ++i) {
y[i] = x1[i] * x2[i];
}
}
//
// map C types to mkldnn's data types
//   float   -> memory::data_type::f32
//   int     -> memory::data_type::s32
//   int16_t -> memory::data_type::s16
//   int8_t  -> memory::data_type::s8
//   uint8_t -> memory::data_type::u8
//
template<typename T>
static inline mkldnn::memory::data_type memory_data_type() {
if (typeid(T) == typeid(float))
return mkldnn::memory::data_type::f32;
else if (typeid(T) == typeid(int))
return mkldnn::memory::data_type::s32;
else if (typeid(T) == typeid(int16_t))
return mkldnn::memory::data_type::s16;
else if (typeid(T) == typeid(int8_t))
return mkldnn::memory::data_type::s8;
else if (typeid(T) == typeid(uint8_t))
return mkldnn::memory::data_type::u8;
LOG(ERROR) << "Not support type";
return mkldnn::memory::data_type::data_undef;
}
// Utility functions converting int/double/bool/dims to string
static inline std::string int_to_string(int value) {
std::ostringstream os;
os << std::hex << "I" << value << "_";
return os.str();
}
static inline std::string double_to_string(double value) {
std::ostringstream os;
os << "D" << value << "_";
return os.str();
}
static inline std::string float_to_string(float value) {
std::ostringstream os;
os << "F" << value << "_";
return os.str();
}
static inline std::string bool_to_string(bool value) {
std::ostringstream os;
os << "B" << value << "_";
return os.str();
}
static inline std::string dims_to_string(mkldnn::memory::dims dims) {
std::ostringstream os;
os << "DIMS:";
for (unsigned int i = 0; i < dims.size(); i++)
os << dims[i] << ",";
os << ";";
return os.str();
}
static inline std::string long_to_string(size_t value) {
std::ostringstream os;
os << std::hex << "L" << value << "_";
return os.str();
}
static inline mkldnn::algorithm pooling_algo_convert(pooling_param_t::algorithm input) {
switch(input) {
case pooling_param_t::algorithm::pooling_max:
return mkldnn::pooling_max;
case pooling_param_t::algorithm::pooling_avg:
return mkldnn::pooling_avg;
case pooling_param_t::algorithm::pooling_avg_include_padding:
return mkldnn::pooling_avg_include_padding;
case pooling_param_t::algorithm::pooling_avg_exclude_padding:
return mkldnn::pooling_avg_exclude_padding;
default:
LOG(ERROR) << "Not a valid pooling algo";
return mkldnn::pooling_max;
}
}
static inline mkldnn::algorithm lrn_algo_convert(lrn_param_t::algorithm input) {
switch(input) {
case lrn_param_t::algorithm::lrn_across_channels:
return mkldnn::lrn_across_channels;
case lrn_param_t::algorithm::lrn_within_channel:
return mkldnn::lrn_within_channel;
default:
LOG(ERROR) << "Not a valid lrn algo";
return mkldnn::lrn_across_channels;
}
}
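// Integer ceiling division; for example div_up(10, 4) == 3.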
template<typename T, typename U>
inline T div_up(const T a, const U b) {
assert(b);
return(a + b - 1) / b;
}
template <typename T, typename U>
inline void balance211(T n, U team, U tid, T &n_start, T &n_end) {
T n_min = 1;
T &n_my = n_end;
if (team <= 1 || n == 0) {
n_start = 0;
n_my = n;
} else if (n_min == 1) {
// team = T1 + T2
// n = T1*n1 + T2*n2 (n1 - n2 = 1)
T n1 = div_up(n, (T)team);
T n2 = n1 - 1;
T T1 = n - n2 * (T)team;
n_my = (T)tid < T1 ? n1 : n2;
n_start = (T)tid <= T1 ? tid * n1 : T1 * n1 + ((T)tid - T1) * n2;
}
n_end += n_start;
}
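// Illustrative split (numbers are examples only): for n = 10 work items and a team of 4,
// div_up gives n1 = 3, n2 = 2 and T1 = 2, so the [n_start, n_end) ranges come out as
// [0,3), [3,6), [6,8), [8,10) -- the first T1 threads take n1 items, the rest take n2.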
inline void fast_memcpy(char* data_o, char *data_i, size_t len)
{
size_t nelems_float = len / 4;
size_t nelems_char = len % 4;
const int block_size = 16;
const auto num_blocks_float = nelems_float / block_size;
const auto rem_elems_float = nelems_float % block_size;
float* output_f = (float*)data_o;
float* input_f = (float*) data_i;
char* output_c = (char*) data_o;
char* input_c = (char*) data_i;
# pragma omp parallel
{
const int ithr = omp_get_thread_num();
const int nthr = omp_get_num_threads();
size_t start{0}, end{0};
balance211(num_blocks_float, nthr, ithr, start, end);
start = start * block_size;
end = end * block_size;
# pragma omp simd
for (size_t e = start; e < end; ++e) {
output_f[e] = input_f[e];
}
if (rem_elems_float != 0 && ithr == nthr -1 ) {
for (auto e = nelems_float - rem_elems_float; e < nelems_float; ++e) {
output_f[e] = input_f[e];
}
}
if (nelems_char != 0 && ithr == nthr -1){
for (auto e = nelems_float*4; e < len; ++e) {
output_c[e] = input_c[e];
}
}
}
return;
}
#endif // _UTILS_H_
|
app.c
|
/**
* Christina Giannoula
* cgiannoula: [email protected]
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <dpu.h>
#include <dpu_log.h>
#include <unistd.h>
#include <getopt.h>
#include <assert.h>
#include <omp.h>
#include "../support/common.h"
#include "../support/matrix.h"
#include "../support/params.h"
#include "../support/timer.h"
#include "../support/utils.h"
// Define the DPU Binary path as DPU_BINARY here.
#ifndef DPU_BINARY
#define DPU_BINARY "./bin/spmv_dpu"
#endif
#define DPU_CAPACITY (64 << 20) // A DPU's capacity is 64 MB
/*
* Main Structures:
* 1. Matrices
* 2. Input vector
* 3. Output vector
*/
static struct DCOOMatrix* A;
static struct COOMatrix* B;
static val_dt* x;
static val_dt* y;
/**
* @brief Specific information for each DPU
*/
struct dpu_info_t {
uint32_t rows_per_dpu;
uint32_t rows_per_dpu_pad;
uint32_t prev_rows_dpu;
uint32_t prev_nnz_dpu;
uint32_t nnz;
uint32_t nnz_pad;
};
struct dpu_info_t *dpu_info;
/**
 * @brief Compute the number of horizontal partitions (DPUs per vertical partition)
 * @param n total number of DPUs
 * @param horz_partitions output: resulting number of horizontal partitions
 * @param vert_partitions number of vertical partitions
*/
void find_partitions(uint32_t n, uint32_t *horz_partitions, uint32_t vert_partitions) {
uint32_t dpus_per_vert_partition = n / vert_partitions;
*horz_partitions = dpus_per_vert_partition;
}
/**
* @brief initialize input vector
 * @param vec pointer to the input vector
 * @param size number of elements to initialize
*/
void init_vector(val_dt* vec, uint32_t size) {
for(unsigned int i = 0; i < size; ++i) {
vec[i] = (val_dt) (i%4+1);
}
}
/**
* @brief compute output in the host
*/
static void spmv_host(val_dt* y, struct DCOOMatrix *A, val_dt* x) {
uint64_t total_nnzs = 0;
for (uint32_t r = 0; r < A->horz_partitions; r++) {
for (uint32_t c = 0; c < A->vert_partitions; c++) {
uint32_t p = r * A->vert_partitions + c;
for(uint32_t n = 0; n < A->nnzs_per_partition[p]; n++) {
uint32_t rowIndx = A->nnzs[total_nnzs].rowind;
uint32_t colIndx = A->nnzs[total_nnzs].colind;
val_dt value = A->nnzs[total_nnzs++].val;
y[r * A->tile_height + rowIndx] += (value * x[c * A->tile_width + colIndx]);
}
}
}
}
/**
* @brief main of the host application
*/
int main(int argc, char **argv) {
struct Params p = input_params(argc, argv);
struct dpu_set_t dpu_set, dpu;
uint32_t nr_of_dpus;
// Allocate DPUs and load binary
DPU_ASSERT(dpu_alloc(NR_DPUS, NULL, &dpu_set));
DPU_ASSERT(dpu_load(dpu_set, DPU_BINARY, NULL));
DPU_ASSERT(dpu_get_nr_dpus(dpu_set, &nr_of_dpus));
printf("[INFO] Allocated %d DPU(s)\n", nr_of_dpus);
printf("[INFO] Allocated %d TASKLET(s) per DPU\n", NR_TASKLETS);
unsigned int i;
// Initialize input data
B = readCOOMatrix(p.fileName);
sortCOOMatrix(B);
uint32_t horz_partitions = 0;
uint32_t vert_partitions = p.vert_partitions;
find_partitions(nr_of_dpus, &horz_partitions, p.vert_partitions);
printf("[INFO] %dx%d Matrix Partitioning\n\n", horz_partitions, vert_partitions);
A = coo2dcoo(B, horz_partitions, vert_partitions);
freeCOOMatrix(B);
// Initialize help data - Padding needed
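// Note: the rounding below pads each size up to a multiple of (8 / byte_dt) elements,
// i.e. to an 8-byte boundary, assuming byte_dt is sizeof(val_dt) in bytes; e.g. with
// 4-byte values a length of 1021 is padded to 1022 so DPU transfers stay 8-byte aligned.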
uint32_t ncols_pad = A->vert_partitions * A->tile_width;
uint32_t tile_width_pad = A->tile_width;
uint32_t nrows_pad = A->vert_partitions * A->tile_width;
if (ncols_pad % (8 / byte_dt) != 0)
ncols_pad = ncols_pad + ((8 / byte_dt) - (ncols_pad % (8 / byte_dt)));
if (tile_width_pad % (8 / byte_dt) != 0)
tile_width_pad = tile_width_pad + ((8 / byte_dt) - (tile_width_pad % (8 / byte_dt)));
if (nrows_pad % (8 / byte_dt) != 0)
nrows_pad = nrows_pad + ((8 / byte_dt) - (nrows_pad % (8 / byte_dt)));
// Allocate input vector
x = (val_dt *) malloc(ncols_pad * sizeof(val_dt));
// Initialize input vector with arbitrary data
init_vector(x, ncols_pad);
// Initialize help data
dpu_info = (struct dpu_info_t *) malloc(nr_of_dpus * sizeof(struct dpu_info_t));
dpu_arguments_t *input_args = (dpu_arguments_t *) malloc(nr_of_dpus * sizeof(dpu_arguments_t));
// Max limits for parallel transfers
uint64_t max_rows_per_dpu = 0;
uint64_t max_nnz_per_dpu = 0;
// Timer for measurements
Timer timer;
uint64_t total_nnzs = 0;
i = 0;
DPU_FOREACH(dpu_set, dpu, i) {
// Find padding for rows and non-zero elements needed for CPU-DPU transfers
uint32_t tile_row_indx = i / A->vert_partitions;
uint32_t tile_col_indx = i % A->vert_partitions;
uint32_t rows_per_dpu = A->tile_height;
uint32_t prev_rows_dpu = tile_row_indx * A->tile_height;
if (rows_per_dpu > max_rows_per_dpu)
max_rows_per_dpu = rows_per_dpu;
unsigned int nnz=0, nnz_pad;
nnz = A->nnzs_per_partition[i];
if (nnz % (8 / byte_dt) != 0)
nnz_pad = nnz + ((8 / byte_dt) - (nnz % (8 / byte_dt)));
else
nnz_pad = nnz;
if (nnz_pad > max_nnz_per_dpu)
max_nnz_per_dpu = nnz_pad;
uint32_t prev_nnz_dpu = total_nnzs;
total_nnzs += nnz;
// Keep information per DPU
dpu_info[i].rows_per_dpu = rows_per_dpu;
dpu_info[i].prev_rows_dpu = prev_rows_dpu;
dpu_info[i].prev_nnz_dpu = prev_nnz_dpu;
dpu_info[i].nnz = nnz;
dpu_info[i].nnz_pad = nnz_pad;
// Find input arguments per DPU
input_args[i].tcols = tile_width_pad;
// Distribute NNZ among tasklets within DPU
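// Illustrative example only: with nnz = 10 and NR_TASKLETS = 4 this yields
// nnz_chunks = 2, rest_nnzs = 2, so tasklets 0..3 get 3, 3, 2, 2 non-zeros
// starting at offsets 0, 3, 6, 8 respectively.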
for(unsigned int tasklet_id=0; tasklet_id < NR_TASKLETS; tasklet_id++) {
uint32_t nnz_chunks = nnz / NR_TASKLETS;
uint32_t rest_nnzs = nnz % NR_TASKLETS;
uint32_t nnz_per_tasklet = nnz_chunks;
uint32_t prev_nnz;
if (tasklet_id < rest_nnzs)
nnz_per_tasklet++;
if (rest_nnzs > 0) {
if (tasklet_id >= rest_nnzs)
prev_nnz = rest_nnzs * (nnz_chunks + 1) + (tasklet_id - rest_nnzs) * nnz_chunks;
else
prev_nnz = tasklet_id * (nnz_chunks + 1);
} else {
prev_nnz = tasklet_id * nnz_chunks;
}
input_args[i].start_nnz[tasklet_id] = prev_nnz;
input_args[i].nnz_per_tasklet[tasklet_id] = nnz_per_tasklet;
}
}
// Initializations for parallel transfers with padding needed
if (max_rows_per_dpu % (8 / byte_dt) != 0)
max_rows_per_dpu += ((8 / byte_dt) - (max_rows_per_dpu % (8 / byte_dt)));
if (max_nnz_per_dpu % (8 / byte_dt) != 0)
max_nnz_per_dpu += ((8 / byte_dt) - (max_nnz_per_dpu % (8 / byte_dt)));
// Re-allocations for padding needed
A->nnzs = (struct elem_t *) realloc(A->nnzs, (dpu_info[nr_of_dpus-1].prev_nnz_dpu + max_nnz_per_dpu) * sizeof(struct elem_t));
y = (val_dt *) calloc((uint64_t) ((uint64_t) nr_of_dpus) * ((uint64_t) max_rows_per_dpu), sizeof(val_dt));
// Count total number of bytes to be transferred to the MRAM of each DPU
unsigned long int total_bytes;
total_bytes = ((max_nnz_per_dpu) * sizeof(struct elem_t)) + (tile_width_pad * sizeof(val_dt)) + (max_rows_per_dpu * sizeof(val_dt));
assert(total_bytes <= DPU_CAPACITY && "Bytes needed exceeded MRAM size");
// Copy input arguments to DPUs
i = 0;
DPU_FOREACH(dpu_set, dpu, i) {
input_args[i].max_rows_per_dpu = max_rows_per_dpu;
DPU_ASSERT(dpu_prepare_xfer(dpu, input_args + i));
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, "DPU_INPUT_ARGUMENTS", 0, sizeof(dpu_arguments_t), DPU_XFER_DEFAULT));
// Copy input matrix to DPUs
startTimer(&timer, 0);
i = 0;
DPU_FOREACH(dpu_set, dpu, i) {
DPU_ASSERT(dpu_prepare_xfer(dpu, A->nnzs + dpu_info[i].prev_nnz_dpu));
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_rows_per_dpu * sizeof(val_dt) + tile_width_pad * sizeof(val_dt), max_nnz_per_dpu * sizeof(struct elem_t), DPU_XFER_DEFAULT));
stopTimer(&timer, 0);
// Copy input vector to DPUs
startTimer(&timer, 1);
i = 0;
DPU_FOREACH(dpu_set, dpu, i) {
uint32_t tile_vert_indx = i % A->vert_partitions;
DPU_ASSERT(dpu_prepare_xfer(dpu, x + tile_vert_indx * A->tile_width));
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_rows_per_dpu * sizeof(val_dt), tile_width_pad * sizeof(val_dt), DPU_XFER_DEFAULT));
stopTimer(&timer, 1);
// Run kernel on DPUs
startTimer(&timer, 2);
DPU_ASSERT(dpu_launch(dpu_set, DPU_SYNCHRONOUS));
stopTimer(&timer, 2);
#if LOG
// Display DPU Log (default: disabled)
DPU_FOREACH(dpu_set, dpu) {
DPU_ASSERT(dpulog_read_for_dpu(dpu.dpu, stdout));
}
#endif
// Retrieve results for output vector from DPUs
startTimer(&timer, 3);
i = 0;
DPU_FOREACH(dpu_set, dpu, i) {
DPU_ASSERT(dpu_prepare_xfer(dpu, y + i * max_rows_per_dpu));
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, max_rows_per_dpu * sizeof(val_dt), DPU_XFER_DEFAULT));
stopTimer(&timer, 3);
// Merge partial results to the host CPU
startTimer(&timer, 4);
uint32_t r, c, t;
#pragma omp parallel for num_threads(p.nthreads) shared(A, y, max_rows_per_dpu) private(r,c,t) collapse(2)
for (r = 0; r < A->horz_partitions; r++) {
for (t = 0; t < A->tile_height; t++) {
for (c = 1; c < A->vert_partitions; c++) {
y[r * A->vert_partitions * max_rows_per_dpu + t] += y[r * A->vert_partitions * max_rows_per_dpu + c * max_rows_per_dpu + t];
}
}
}
stopTimer(&timer, 4);
// Print timing results
printf("\n");
printf("Load Matrix ");
printTimer(&timer, 0);
printf("Load Input Vector ");
printTimer(&timer, 1);
printf("Kernel ");
printTimer(&timer, 2);
printf("Retrieve Output Vector ");
printTimer(&timer, 3);
printf("Merge Partial Results ");
printTimer(&timer, 4);
printf("\n\n");
#if CHECK_CORR
// Check output
val_dt *y_host = (val_dt *) calloc(nrows_pad, sizeof(val_dt));
spmv_host(y_host, A, x);
bool status = true;
i = 0;
for (uint32_t r = 0; r < A->horz_partitions; r++) {
for (uint32_t t = 0; t < A->tile_height; t++) {
if((r * A->tile_height + t < A->nrows) && y_host[i] != y[r * A->vert_partitions * max_rows_per_dpu + t]) {
status = false;
}
i++;
}
}
if (status) {
printf("[" ANSI_COLOR_GREEN "OK" ANSI_COLOR_RESET "] Outputs are equal\n");
} else {
printf("[" ANSI_COLOR_RED "ERROR" ANSI_COLOR_RESET "] Outputs differ!\n");
}
free(y_host);
#endif
// Deallocation
freeDCOOMatrix(A);
free(x);
free(y);
DPU_ASSERT(dpu_free(dpu_set));
return 0;
}
|
IJVector_parcsr.c
|
/*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
/******************************************************************************
*
* IJVector_Par interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJVectorCreatePar
*
* creates ParVector if necessary, and leaves a pointer to it as the
* hypre_IJVector object
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorCreatePar(hypre_IJVector *vector,
HYPRE_Int *IJpartitioning)
{
MPI_Comm comm = hypre_IJVectorComm(vector);
HYPRE_Int num_procs, jmin, global_n, *partitioning, j;
hypre_MPI_Comm_size(comm, &num_procs);
#ifdef HYPRE_NO_GLOBAL_PARTITION
jmin = hypre_IJVectorGlobalFirstRow(vector);
global_n = hypre_IJVectorGlobalNumRows(vector);
partitioning = hypre_CTAlloc(HYPRE_Int, 2);
/* Shift to zero-based partitioning for ParVector object */
for (j = 0; j < 2; j++)
partitioning[j] = IJpartitioning[j] - jmin;
#else
jmin = IJpartitioning[0];
global_n = IJpartitioning[num_procs] - jmin;
partitioning = hypre_CTAlloc(HYPRE_Int, num_procs+1);
/* Shift to zero-based partitioning for ParVector object */
for (j = 0; j < num_procs+1; j++)
partitioning[j] = IJpartitioning[j] - jmin;
#endif
hypre_IJVectorObject(vector) =
hypre_ParVectorCreate(comm, global_n, (HYPRE_Int *) partitioning);
return hypre_error_flag;
}
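/* Usage sketch (illustrative only, not part of this file): these Par routines are
   normally reached through the public IJ interface, roughly

       HYPRE_IJVector v;
       HYPRE_IJVectorCreate(comm, jlower, jupper, &v);
       HYPRE_IJVectorSetObjectType(v, HYPRE_PARCSR);
       HYPRE_IJVectorInitialize(v);                           (-> hypre_IJVectorInitializePar)
       HYPRE_IJVectorSetValues(v, nvalues, indices, values);  (-> hypre_IJVectorSetValuesPar)
       HYPRE_IJVectorAssemble(v);

   with jlower/jupper, nvalues, indices and values supplied by the caller. */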
/******************************************************************************
*
* hypre_IJVectorDestroyPar
*
* frees ParVector local storage of an IJVectorPar
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorDestroyPar(hypre_IJVector *vector)
{
return hypre_ParVectorDestroy((hypre_ParVector*)hypre_IJVectorObject(vector));
}
/******************************************************************************
*
* hypre_IJVectorInitializePar
*
* initializes ParVector of IJVectorPar
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorInitializePar(hypre_IJVector *vector)
{
hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
hypre_AuxParVector *aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
HYPRE_Int *partitioning = hypre_ParVectorPartitioning(par_vector);
hypre_Vector *local_vector = hypre_ParVectorLocalVector(par_vector);
HYPRE_Int my_id;
HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);
MPI_Comm comm = hypre_IJVectorComm(vector);
hypre_MPI_Comm_rank(comm,&my_id);
if (!partitioning)
{
if (print_level)
{
hypre_printf("No ParVector partitioning for initialization -- ");
hypre_printf("hypre_IJVectorInitializePar\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
#ifdef HYPRE_NO_GLOBAL_PARTITION
hypre_VectorSize(local_vector) = partitioning[1] - partitioning[0];
#else
hypre_VectorSize(local_vector) = partitioning[my_id+1] - partitioning[my_id];
#endif
hypre_ParVectorInitialize(par_vector);
if (!aux_vector)
{
hypre_AuxParVectorCreate(&aux_vector);
hypre_IJVectorTranslator(vector) = aux_vector;
}
hypre_AuxParVectorInitialize(aux_vector);
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorSetMaxOffProcElmtsPar
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorSetMaxOffProcElmtsPar(hypre_IJVector *vector,
HYPRE_Int max_off_proc_elmts)
{
hypre_AuxParVector *aux_vector;
aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
if (!aux_vector)
{
hypre_AuxParVectorCreate(&aux_vector);
hypre_IJVectorTranslator(vector) = aux_vector;
}
hypre_AuxParVectorMaxOffProcElmts(aux_vector) = max_off_proc_elmts;
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorDistributePar
*
* takes an IJVector generated for one processor and distributes it
* across many processors according to vec_starts,
* if vec_starts is NULL, it distributes them evenly?
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorDistributePar(hypre_IJVector *vector,
const HYPRE_Int *vec_starts)
{
hypre_ParVector *old_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
hypre_ParVector *par_vector;
HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);
if (!old_vector)
{
if (print_level)
{
hypre_printf("old_vector == NULL -- ");
hypre_printf("hypre_IJVectorDistributePar\n");
hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
par_vector = hypre_VectorToParVector(hypre_ParVectorComm(old_vector),
hypre_ParVectorLocalVector(old_vector),
(HYPRE_Int *)vec_starts);
if (!par_vector)
{
if (print_level)
{
hypre_printf("par_vector == NULL -- ");
hypre_printf("hypre_IJVectorDistributePar\n");
hypre_printf("**** Vector storage is unallocated ****\n");
}
hypre_error_in_arg(1);
}
hypre_ParVectorDestroy(old_vector);
hypre_IJVectorObject(vector) = par_vector;
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorZeroValuesPar
*
* zeroes all local components of an IJVectorPar
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorZeroValuesPar(hypre_IJVector *vector)
{
HYPRE_Int my_id;
HYPRE_Int i, vec_start, vec_stop;
HYPRE_Complex *data;
hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
MPI_Comm comm = hypre_IJVectorComm(vector);
HYPRE_Int *partitioning;
hypre_Vector *local_vector;
HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);
hypre_MPI_Comm_rank(comm, &my_id);
/* If par_vector == NULL or partitioning == NULL or local_vector == NULL
let user know of catastrophe and exit */
if (!par_vector)
{
if (print_level)
{
hypre_printf("par_vector == NULL -- ");
hypre_printf("hypre_IJVectorZeroValuesPar\n");
hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
partitioning = hypre_ParVectorPartitioning(par_vector);
local_vector = hypre_ParVectorLocalVector(par_vector);
if (!partitioning)
{
if (print_level)
{
hypre_printf("partitioning == NULL -- ");
hypre_printf("hypre_IJVectorZeroValuesPar\n");
hypre_printf("**** Vector partitioning is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (!local_vector)
{
if (print_level)
{
hypre_printf("local_vector == NULL -- ");
hypre_printf("hypre_IJVectorZeroValuesPar\n");
hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
#ifdef HYPRE_NO_GLOBAL_PARTITION
vec_start = partitioning[0];
vec_stop = partitioning[1];
#else
vec_start = partitioning[my_id];
vec_stop = partitioning[my_id+1];
#endif
if (vec_start > vec_stop)
{
if (print_level)
{
hypre_printf("vec_start > vec_stop -- ");
hypre_printf("hypre_IJVectorZeroValuesPar\n");
hypre_printf("**** This vector partitioning should not occur ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
data = hypre_VectorData( local_vector );
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < vec_stop - vec_start; i++)
data[i] = 0.;
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorSetValuesPar
*
* sets a potentially noncontiguous set of components of an IJVectorPar
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorSetValuesPar(hypre_IJVector *vector,
HYPRE_Int num_values,
const HYPRE_Int *indices,
const HYPRE_Complex *values)
{
HYPRE_Int my_id;
HYPRE_Int i, j, vec_start, vec_stop;
HYPRE_Complex *data;
HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);
HYPRE_Int *IJpartitioning = hypre_IJVectorPartitioning(vector);
hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
hypre_AuxParVector *aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
MPI_Comm comm = hypre_IJVectorComm(vector);
hypre_Vector *local_vector;
/* If no components are to be set, perform no checking and return */
if (num_values < 1) return 0;
hypre_MPI_Comm_rank(comm, &my_id);
/* If par_vector == NULL or partitioning == NULL or local_vector == NULL
let user know of catastrophe and exit */
if (!par_vector)
{
if (print_level)
{
hypre_printf("par_vector == NULL -- ");
hypre_printf("hypre_IJVectorSetValuesPar\n");
hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
local_vector = hypre_ParVectorLocalVector(par_vector);
if (!IJpartitioning)
{
if (print_level)
{
hypre_printf("IJpartitioning == NULL -- ");
hypre_printf("hypre_IJVectorSetValuesPar\n");
hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (!local_vector)
{
if (print_level)
{
hypre_printf("local_vector == NULL -- ");
hypre_printf("hypre_IJVectorSetValuesPar\n");
hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
#ifdef HYPRE_NO_GLOBAL_PARTITION
vec_start = IJpartitioning[0];
vec_stop = IJpartitioning[1]-1;
#else
vec_start = IJpartitioning[my_id];
vec_stop = IJpartitioning[my_id+1]-1;
#endif
if (vec_start > vec_stop)
{
if (print_level)
{
hypre_printf("vec_start > vec_stop -- ");
hypre_printf("hypre_IJVectorSetValuesPar\n");
hypre_printf("**** This vector partitioning should not occur ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
/* Determine whether indices points to local indices only, and if not, store
indices and values in auxiliary vector structure. If indices == NULL,
assume that num_values components are to be set in a block starting at
vec_start. NOTE: If indices == NULL off proc values are ignored!!! */
data = hypre_VectorData(local_vector);
if (indices)
{
HYPRE_Int current_num_elmts
= hypre_AuxParVectorCurrentNumElmts(aux_vector);
HYPRE_Int *off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);
HYPRE_Int cancel_indx = hypre_AuxParVectorCancelIndx(aux_vector);
HYPRE_Int ii;
for (j = 0; j < num_values; j++)
{
i = indices[j];
if (i < vec_start || i > vec_stop)
{
for (ii = 0; ii < current_num_elmts; ii++)
{
if (i == off_proc_i[ii])
{
off_proc_i[ii] = -1;
cancel_indx++;
}
}
hypre_AuxParVectorCancelIndx(aux_vector) = cancel_indx;
}
else /* local values are inserted into the vector */
{
i -= vec_start;
data[i] = values[j];
}
}
}
else
{
if (num_values > vec_stop - vec_start + 1)
{
if (print_level)
{
hypre_printf("Warning! Indices beyond local range not identified!\n ");
hypre_printf("Off processor values have been ignored!\n");
}
num_values = vec_stop - vec_start +1;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_values; j++)
data[j] = values[j];
}
return hypre_error_flag;
}
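/******************************************************************************
 * Illustrative sketch (kept out of the build with #if 0) of the two calling
 * conventions accepted by hypre_IJVectorSetValuesPar above: with explicit
 * global indices, out-of-range entries only cancel previously stashed
 * off-processor additions, while with indices == NULL the values are written
 * contiguously from the start of the local range and anything beyond it is
 * ignored with a warning.  Here "vec" and "first_local_index" are
 * placeholders for the caller's hypre_IJVector handle and the first index it
 * owns.
 *****************************************************************************/
#if 0
{
   HYPRE_Int     idx[2] = { first_local_index, first_local_index + 1 };
   HYPRE_Complex val[2] = { 1.0, 2.0 };
   hypre_IJVectorSetValuesPar(vec, 2, idx, val);   /* scattered local writes */
   hypre_IJVectorSetValuesPar(vec, 2, NULL, val);  /* contiguous block write */
}
#endif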
/******************************************************************************
*
* hypre_IJVectorAddToValuesPar
*
* adds to a potentially noncontiguous set of IJVectorPar components
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorAddToValuesPar(hypre_IJVector *vector,
HYPRE_Int num_values,
const HYPRE_Int *indices,
const HYPRE_Complex *values)
{
HYPRE_Int my_id;
HYPRE_Int i, j, vec_start, vec_stop;
HYPRE_Complex *data;
HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);
HYPRE_Int *IJpartitioning = hypre_IJVectorPartitioning(vector);
hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
hypre_AuxParVector *aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
MPI_Comm comm = hypre_IJVectorComm(vector);
hypre_Vector *local_vector;
/* If no components are to be retrieved, perform no checking and return */
if (num_values < 1) return 0;
hypre_MPI_Comm_rank(comm, &my_id);
/* If par_vector == NULL or partitioning == NULL or local_vector == NULL
let user know of catastrophe and exit */
if (!par_vector)
{
if (print_level)
{
hypre_printf("par_vector == NULL -- ");
hypre_printf("hypre_IJVectorAddToValuesPar\n");
hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
local_vector = hypre_ParVectorLocalVector(par_vector);
if (!IJpartitioning)
{
if (print_level)
{
hypre_printf("IJpartitioning == NULL -- ");
hypre_printf("hypre_IJVectorAddToValuesPar\n");
hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (!local_vector)
{
if (print_level)
{
hypre_printf("local_vector == NULL -- ");
hypre_printf("hypre_IJVectorAddToValuesPar\n");
hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
#ifdef HYPRE_NO_GLOBAL_PARTITION
vec_start = IJpartitioning[0];
vec_stop = IJpartitioning[1]-1;
#else
vec_start = IJpartitioning[my_id];
vec_stop = IJpartitioning[my_id+1]-1;
#endif
if (vec_start > vec_stop)
{
if (print_level)
{
hypre_printf("vec_start > vec_stop -- ");
hypre_printf("hypre_IJVectorAddToValuesPar\n");
hypre_printf("**** This vector partitioning should not occur ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
data = hypre_VectorData(local_vector);
if (indices)
{
HYPRE_Int current_num_elmts
= hypre_AuxParVectorCurrentNumElmts(aux_vector);
HYPRE_Int max_off_proc_elmts
= hypre_AuxParVectorMaxOffProcElmts(aux_vector);
HYPRE_Int *off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);
HYPRE_Complex *off_proc_data = hypre_AuxParVectorOffProcData(aux_vector);
for (j = 0; j < num_values; j++)
{
i = indices[j];
if (i < vec_start || i > vec_stop)
{
/* if elements outside processor boundaries, store in off processor
stash */
if (!max_off_proc_elmts)
{
max_off_proc_elmts = 100;
hypre_AuxParVectorMaxOffProcElmts(aux_vector) =
max_off_proc_elmts;
hypre_AuxParVectorOffProcI(aux_vector)
= hypre_CTAlloc(HYPRE_Int,max_off_proc_elmts);
hypre_AuxParVectorOffProcData(aux_vector)
= hypre_CTAlloc(HYPRE_Complex,max_off_proc_elmts);
off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);
off_proc_data = hypre_AuxParVectorOffProcData(aux_vector);
}
else if (current_num_elmts + 1 > max_off_proc_elmts)
{
max_off_proc_elmts += 10;
off_proc_i = hypre_TReAlloc(off_proc_i,HYPRE_Int,max_off_proc_elmts);
off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
max_off_proc_elmts);
hypre_AuxParVectorMaxOffProcElmts(aux_vector)
= max_off_proc_elmts;
hypre_AuxParVectorOffProcI(aux_vector) = off_proc_i;
hypre_AuxParVectorOffProcData(aux_vector) = off_proc_data;
}
off_proc_i[current_num_elmts] = i;
off_proc_data[current_num_elmts++] = values[j];
hypre_AuxParVectorCurrentNumElmts(aux_vector)=current_num_elmts;
}
else /* local values are added to the vector */
{
i -= vec_start;
data[i] += values[j];
}
}
}
else
{
if (num_values > vec_stop - vec_start + 1)
{
if (print_level)
{
hypre_printf("Warning! Indices beyond local range not identified!\n ");
hypre_printf("Off processor values have been ignored!\n");
}
num_values = vec_stop - vec_start +1;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_values; j++)
data[j] += values[j];
}
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorAssemblePar
*
 * tests the existence of the ParVector object and its partitionings and
 * assembles any off-processor values cached by the SetValues/AddToValues calls
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorAssemblePar(hypre_IJVector *vector)
{
HYPRE_Int *IJpartitioning = hypre_IJVectorPartitioning(vector);
hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
hypre_AuxParVector *aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
HYPRE_Int *partitioning;
MPI_Comm comm = hypre_IJVectorComm(vector);
HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);
if (!par_vector)
{
if (print_level)
{
hypre_printf("par_vector == NULL -- ");
hypre_printf("hypre_IJVectorAssemblePar\n");
hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
}
partitioning = hypre_ParVectorPartitioning(par_vector);
if (!IJpartitioning)
{
if (print_level)
{
hypre_printf("IJpartitioning == NULL -- ");
hypre_printf("hypre_IJVectorAssemblePar\n");
hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
}
if (!partitioning)
{
if (print_level)
{
hypre_printf("partitioning == NULL -- ");
hypre_printf("hypre_IJVectorAssemblePar\n");
hypre_printf("**** ParVector partitioning is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
}
if (aux_vector)
{
HYPRE_Int off_proc_elmts, current_num_elmts;
HYPRE_Int max_off_proc_elmts;
HYPRE_Int *off_proc_i;
HYPRE_Complex *off_proc_data;
HYPRE_Int cancel_indx = hypre_AuxParVectorCancelIndx(aux_vector);
HYPRE_Int current_i, ii;
current_num_elmts = hypre_AuxParVectorCurrentNumElmts(aux_vector);
if (cancel_indx)
{
off_proc_i=hypre_AuxParVectorOffProcI(aux_vector);
off_proc_data=hypre_AuxParVectorOffProcData(aux_vector);
current_i = 0;
for (ii=0; ii < current_num_elmts; ii++)
{
if (off_proc_i[ii] != -1)
{
off_proc_i[current_i] = off_proc_i[ii];
off_proc_data[current_i++] = off_proc_data[ii];
}
}
hypre_AuxParVectorCurrentNumElmts(aux_vector) = current_i;
current_num_elmts = current_i;
}
      hypre_MPI_Allreduce(&current_num_elmts,&off_proc_elmts,1,HYPRE_MPI_INT,
hypre_MPI_SUM,comm);
if (off_proc_elmts)
{
max_off_proc_elmts=hypre_AuxParVectorMaxOffProcElmts(aux_vector);
off_proc_i=hypre_AuxParVectorOffProcI(aux_vector);
off_proc_data=hypre_AuxParVectorOffProcData(aux_vector);
hypre_IJVectorAssembleOffProcValsPar(vector, max_off_proc_elmts,
current_num_elmts, off_proc_i, off_proc_data);
hypre_TFree(hypre_AuxParVectorOffProcI(aux_vector));
hypre_TFree(hypre_AuxParVectorOffProcData(aux_vector));
hypre_AuxParVectorMaxOffProcElmts(aux_vector) = 0;
hypre_AuxParVectorCurrentNumElmts(aux_vector) = 0;
}
}
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorGetValuesPar
*
* get a potentially noncontiguous set of IJVectorPar components
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorGetValuesPar(hypre_IJVector *vector,
HYPRE_Int num_values,
const HYPRE_Int *indices,
HYPRE_Complex *values)
{
HYPRE_Int my_id;
HYPRE_Int i, j, vec_start, vec_stop;
HYPRE_Complex *data;
HYPRE_Int ierr = 0;
HYPRE_Int *IJpartitioning = hypre_IJVectorPartitioning(vector);
hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
MPI_Comm comm = hypre_IJVectorComm(vector);
hypre_Vector *local_vector;
HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);
/* If no components are to be retrieved, perform no checking and return */
if (num_values < 1) return 0;
hypre_MPI_Comm_rank(comm, &my_id);
/* If par_vector == NULL or partitioning == NULL or local_vector == NULL
let user know of catastrophe and exit */
if (!par_vector)
{
if (print_level)
{
hypre_printf("par_vector == NULL -- ");
hypre_printf("hypre_IJVectorGetValuesPar\n");
hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
local_vector = hypre_ParVectorLocalVector(par_vector);
if (!IJpartitioning)
{
if (print_level)
{
hypre_printf("IJpartitioning == NULL -- ");
hypre_printf("hypre_IJVectorGetValuesPar\n");
hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (!local_vector)
{
if (print_level)
{
hypre_printf("local_vector == NULL -- ");
hypre_printf("hypre_IJVectorGetValuesPar\n");
hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
#ifdef HYPRE_NO_GLOBAL_PARTITION
vec_start = IJpartitioning[0];
vec_stop = IJpartitioning[1];
#else
vec_start = IJpartitioning[my_id];
vec_stop = IJpartitioning[my_id+1];
#endif
if (vec_start > vec_stop)
{
if (print_level)
{
hypre_printf("vec_start > vec_stop -- ");
hypre_printf("hypre_IJVectorGetValuesPar\n");
hypre_printf("**** This vector partitioning should not occur ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
/* Determine whether indices points to local indices only, and if not, let
user know of catastrophe and exit. If indices == NULL, assume that
num_values components are to be retrieved from block starting at
vec_start */
if (indices)
{
for (i = 0; i < num_values; i++)
{
ierr += (indices[i] < vec_start);
ierr += (indices[i] >= vec_stop);
}
}
if (ierr)
{
if (print_level)
{
hypre_printf("indices beyond local range -- ");
hypre_printf("hypre_IJVectorGetValuesPar\n");
hypre_printf("**** Indices specified are unusable ****\n");
}
hypre_error_in_arg(3);
return hypre_error_flag;
}
data = hypre_VectorData(local_vector);
if (indices)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_values; j++)
{
i = indices[j] - vec_start;
values[j] = data[i];
}
}
else
{
if (num_values > (vec_stop-vec_start))
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_values; j++)
values[j] = data[j];
}
return hypre_error_flag;
}
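/******************************************************************************
 * Illustrative sketch (kept out of the build with #if 0) of retrieving
 * values with hypre_IJVectorGetValuesPar above.  Unlike SetValues and
 * AddToValues, every index passed here must be owned by the calling process;
 * off-processor reads are rejected with an error on argument 3.  "vec" and
 * "first_local_index" are again placeholders for the caller's handle and the
 * first locally owned index.
 *****************************************************************************/
#if 0
{
   HYPRE_Int     idx[2] = { first_local_index, first_local_index + 1 };
   HYPRE_Complex out[2];
   hypre_IJVectorGetValuesPar(vec, 2, idx, out);   /* scattered local reads  */
   hypre_IJVectorGetValuesPar(vec, 2, NULL, out);  /* contiguous block read  */
}
#endif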
/******************************************************************************
* hypre_IJVectorAssembleOffProcValsPar
*
* This is for handling set and get values calls to off-proc. entries - it is
* called from assemble. There is an alternate version for when the assumed
* partition is being used.
*****************************************************************************/
#ifndef HYPRE_NO_GLOBAL_PARTITION
HYPRE_Int
hypre_IJVectorAssembleOffProcValsPar( hypre_IJVector *vector,
HYPRE_Int max_off_proc_elmts,
HYPRE_Int current_num_elmts,
HYPRE_Int *off_proc_i,
HYPRE_Complex *off_proc_data)
{
MPI_Comm comm = hypre_IJVectorComm(vector);
hypre_ParVector *par_vector = hypre_IJVectorObject(vector);
hypre_MPI_Request *requests = NULL;
hypre_MPI_Status *status = NULL;
HYPRE_Int i, j, j2, row;
HYPRE_Int iii, indx, ip, first_index;
HYPRE_Int proc_id, num_procs, my_id;
HYPRE_Int num_sends, num_sends2;
HYPRE_Int num_recvs;
HYPRE_Int num_requests;
HYPRE_Int vec_start, vec_len;
HYPRE_Int *send_procs;
HYPRE_Int *send_i;
HYPRE_Int *send_map_starts;
HYPRE_Int *recv_procs;
HYPRE_Int *recv_i;
HYPRE_Int *recv_vec_starts;
HYPRE_Int *info;
HYPRE_Int *int_buffer;
HYPRE_Int *proc_id_mem;
HYPRE_Int *partitioning;
HYPRE_Int *displs;
HYPRE_Int *recv_buf;
HYPRE_Complex *send_data;
HYPRE_Complex *recv_data;
HYPRE_Complex *data = hypre_VectorData(hypre_ParVectorLocalVector(par_vector));
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
partitioning = hypre_IJVectorPartitioning(vector);
first_index = partitioning[my_id];
info = hypre_CTAlloc(HYPRE_Int,num_procs);
proc_id_mem = hypre_CTAlloc(HYPRE_Int,current_num_elmts);
for (i=0; i < current_num_elmts; i++)
{
row = off_proc_i[i];
proc_id = hypre_FindProc(partitioning,row,num_procs);
proc_id_mem[i] = proc_id;
info[proc_id]++;
}
/* determine send_procs and amount of data to be sent */
num_sends = 0;
for (i=0; i < num_procs; i++)
{
if (info[i])
{
num_sends++;
}
}
num_sends2 = 2*num_sends;
send_procs = hypre_CTAlloc(HYPRE_Int,num_sends);
send_map_starts = hypre_CTAlloc(HYPRE_Int,num_sends+1);
int_buffer = hypre_CTAlloc(HYPRE_Int,num_sends2);
j = 0;
j2 = 0;
send_map_starts[0] = 0;
for (i=0; i < num_procs; i++)
{
if (info[i])
{
send_procs[j++] = i;
send_map_starts[j] = send_map_starts[j-1]+info[i];
int_buffer[j2++] = i;
int_buffer[j2++] = info[i];
}
}
hypre_MPI_Allgather(&num_sends2,1,HYPRE_MPI_INT,info,1,HYPRE_MPI_INT,comm);
displs = hypre_CTAlloc(HYPRE_Int, num_procs+1);
displs[0] = 0;
for (i=1; i < num_procs+1; i++)
displs[i] = displs[i-1]+info[i-1];
recv_buf = hypre_CTAlloc(HYPRE_Int, displs[num_procs]);
hypre_MPI_Allgatherv(int_buffer,num_sends2,HYPRE_MPI_INT,recv_buf,info,displs,
HYPRE_MPI_INT,comm);
hypre_TFree(int_buffer);
hypre_TFree(info);
/* determine recv procs and amount of data to be received */
num_recvs = 0;
for (j=0; j < displs[num_procs]; j+=2)
{
if (recv_buf[j] == my_id)
num_recvs++;
}
recv_procs = hypre_CTAlloc(HYPRE_Int,num_recvs);
recv_vec_starts = hypre_CTAlloc(HYPRE_Int,num_recvs+1);
j2 = 0;
recv_vec_starts[0] = 0;
for (i=0; i < num_procs; i++)
{
for (j=displs[i]; j < displs[i+1]; j+=2)
{
if (recv_buf[j] == my_id)
{
recv_procs[j2++] = i;
recv_vec_starts[j2] = recv_vec_starts[j2-1]+recv_buf[j+1];
}
if (j2 == num_recvs) break;
}
}
hypre_TFree(recv_buf);
hypre_TFree(displs);
/* set up data to be sent to send procs */
/* send_i contains for each send proc
indices, send_data contains corresponding values */
send_i = hypre_CTAlloc(HYPRE_Int,send_map_starts[num_sends]);
send_data = hypre_CTAlloc(HYPRE_Complex,send_map_starts[num_sends]);
recv_i = hypre_CTAlloc(HYPRE_Int,recv_vec_starts[num_recvs]);
recv_data = hypre_CTAlloc(HYPRE_Complex,recv_vec_starts[num_recvs]);
for (i=0; i < current_num_elmts; i++)
{
proc_id = proc_id_mem[i];
indx = hypre_BinarySearch(send_procs,proc_id,num_sends);
iii = send_map_starts[indx];
send_i[iii] = off_proc_i[i];
send_data[iii] = off_proc_data[i];
send_map_starts[indx]++;
}
hypre_TFree(proc_id_mem);
for (i=num_sends; i > 0; i--)
{
send_map_starts[i] = send_map_starts[i-1];
}
send_map_starts[0] = 0;
num_requests = num_recvs+num_sends;
requests = hypre_CTAlloc(hypre_MPI_Request, num_requests);
status = hypre_CTAlloc(hypre_MPI_Status, num_requests);
j=0;
for (i=0; i < num_recvs; i++)
{
vec_start = recv_vec_starts[i];
vec_len = recv_vec_starts[i+1] - vec_start;
ip = recv_procs[i];
hypre_MPI_Irecv(&recv_i[vec_start], vec_len, HYPRE_MPI_INT,
ip, 0, comm, &requests[j++]);
}
for (i=0; i < num_sends; i++)
{
vec_start = send_map_starts[i];
vec_len = send_map_starts[i+1] - vec_start;
ip = send_procs[i];
hypre_MPI_Isend(&send_i[vec_start], vec_len, HYPRE_MPI_INT,
ip, 0, comm, &requests[j++]);
}
if (num_requests)
{
hypre_MPI_Waitall(num_requests, requests, status);
}
j=0;
for (i=0; i < num_recvs; i++)
{
vec_start = recv_vec_starts[i];
vec_len = recv_vec_starts[i+1] - vec_start;
ip = recv_procs[i];
hypre_MPI_Irecv(&recv_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,
ip, 0, comm, &requests[j++]);
}
for (i=0; i < num_sends; i++)
{
vec_start = send_map_starts[i];
vec_len = send_map_starts[i+1] - vec_start;
ip = send_procs[i];
hypre_MPI_Isend(&send_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,
ip, 0, comm, &requests[j++]);
}
if (num_requests)
{
hypre_MPI_Waitall(num_requests, requests, status);
}
hypre_TFree(requests);
hypre_TFree(status);
hypre_TFree(send_i);
hypre_TFree(send_data);
hypre_TFree(send_procs);
hypre_TFree(send_map_starts);
hypre_TFree(recv_procs);
for (i=0; i < recv_vec_starts[num_recvs]; i++)
{
row = recv_i[i];
j = row - first_index;
data[j] += recv_data[i];
}
hypre_TFree(recv_vec_starts);
hypre_TFree(recv_i);
hypre_TFree(recv_data);
return hypre_error_flag;
}
#else
/* assumed partition version */
HYPRE_Int
hypre_IJVectorAssembleOffProcValsPar( hypre_IJVector *vector,
HYPRE_Int max_off_proc_elmts,
HYPRE_Int current_num_elmts,
HYPRE_Int *off_proc_i,
HYPRE_Complex *off_proc_data)
{
HYPRE_Int myid, global_num_rows;
HYPRE_Int global_first_row;
HYPRE_Int i, j, in, k;
HYPRE_Int proc_id, last_proc, prev_id, tmp_id;
HYPRE_Int max_response_size;
HYPRE_Int ex_num_contacts = 0;
HYPRE_Int range_start, range_end;
HYPRE_Int storage;
HYPRE_Int indx;
HYPRE_Int row, num_ranges, row_count;
HYPRE_Int num_recvs;
HYPRE_Int counter, upper_bound;
HYPRE_Int num_real_procs;
HYPRE_Int *row_list=NULL;
HYPRE_Int *a_proc_id=NULL, *orig_order=NULL;
HYPRE_Int *real_proc_id = NULL, *us_real_proc_id = NULL;
HYPRE_Int *ex_contact_procs = NULL, *ex_contact_vec_starts = NULL;
HYPRE_Int *recv_starts=NULL;
HYPRE_Int *response_buf = NULL, *response_buf_starts=NULL;
HYPRE_Int *num_rows_per_proc = NULL;
HYPRE_Int tmp_int;
HYPRE_Int obj_size_bytes, int_size, complex_size;
HYPRE_Int first_index;
void *void_contact_buf = NULL;
void *index_ptr;
void *recv_data_ptr;
HYPRE_Complex tmp_complex;
HYPRE_Int *ex_contact_buf=NULL;
HYPRE_Complex *vector_data;
HYPRE_Complex value;
hypre_DataExchangeResponse response_obj1, response_obj2;
hypre_ProcListElements send_proc_obj;
MPI_Comm comm = hypre_IJVectorComm(vector);
hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
hypre_IJAssumedPart *apart;
hypre_MPI_Comm_rank(comm, &myid);
global_num_rows = hypre_IJVectorGlobalNumRows(vector);
global_first_row = hypre_IJVectorGlobalFirstRow(vector);
/* verify that we have created the assumed partition */
if (hypre_IJVectorAssumedPart(vector) == NULL)
{
hypre_IJVectorCreateAssumedPartition(vector);
}
apart = (hypre_IJAssumedPart*) hypre_IJVectorAssumedPart(vector);
/* get the assumed processor id for each row */
a_proc_id = hypre_CTAlloc(HYPRE_Int, current_num_elmts);
orig_order = hypre_CTAlloc(HYPRE_Int, current_num_elmts);
real_proc_id = hypre_CTAlloc(HYPRE_Int, current_num_elmts);
row_list = hypre_CTAlloc(HYPRE_Int, current_num_elmts);
if (current_num_elmts > 0)
{
for (i=0; i < current_num_elmts; i++)
{
row = off_proc_i[i];
row_list[i] = row;
hypre_GetAssumedPartitionProcFromRow(comm, row, global_first_row,
global_num_rows, &proc_id);
a_proc_id[i] = proc_id;
orig_order[i] = i;
}
/* now we need to find the actual order of each row - sort on row -
this will result in proc ids sorted also...*/
hypre_qsort3i(row_list, a_proc_id, orig_order, 0, current_num_elmts -1);
/* calculate the number of contacts */
ex_num_contacts = 1;
last_proc = a_proc_id[0];
for (i=1; i < current_num_elmts; i++)
{
if (a_proc_id[i] > last_proc)
{
ex_num_contacts++;
last_proc = a_proc_id[i];
}
}
}
   /* now we will go through and create a contact list - need to contact
assumed processors and find out who the actual row owner is - we
will contact with a range (2 numbers) */
ex_contact_procs = hypre_CTAlloc(HYPRE_Int, ex_num_contacts);
ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, ex_num_contacts+1);
ex_contact_buf = hypre_CTAlloc(HYPRE_Int, ex_num_contacts*2);
counter = 0;
range_end = -1;
for (i=0; i< current_num_elmts; i++)
{
if (row_list[i] > range_end)
{
/* assumed proc */
proc_id = a_proc_id[i];
/* end of prev. range */
if (counter > 0) ex_contact_buf[counter*2 - 1] = row_list[i-1];
/*start new range*/
ex_contact_procs[counter] = proc_id;
ex_contact_vec_starts[counter] = counter*2;
ex_contact_buf[counter*2] = row_list[i];
counter++;
hypre_GetAssumedPartitionRowRange(comm, proc_id, global_first_row,
global_num_rows, &range_start, &range_end);
}
}
/*finish the starts*/
ex_contact_vec_starts[counter] = counter*2;
/*finish the last range*/
if (counter > 0)
ex_contact_buf[counter*2 - 1] = row_list[current_num_elmts - 1];
/* create response object - can use same fill response as used in the commpkg
routine */
response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs;
response_obj1.data1 = apart; /* this is necessary so we can fill responses*/
response_obj1.data2 = NULL;
max_response_size = 6; /* 6 means we can fit 3 ranges*/
hypre_DataExchangeList(ex_num_contacts, ex_contact_procs,
ex_contact_buf, ex_contact_vec_starts, sizeof(HYPRE_Int),
sizeof(HYPRE_Int), &response_obj1, max_response_size, 4,
comm, (void**) &response_buf, &response_buf_starts);
/* now response_buf contains a proc_id followed by an upper bound for the
range. */
hypre_TFree(ex_contact_procs);
hypre_TFree(ex_contact_buf);
hypre_TFree(ex_contact_vec_starts);
hypre_TFree(a_proc_id);
a_proc_id = NULL;
/*how many ranges were returned?*/
num_ranges = response_buf_starts[ex_num_contacts];
num_ranges = num_ranges/2;
prev_id = -1;
j = 0;
counter = 0;
num_real_procs = 0;
/* loop through ranges - create a list of actual processor ids*/
for (i=0; i<num_ranges; i++)
{
upper_bound = response_buf[i*2+1];
counter = 0;
tmp_id = response_buf[i*2];
/* loop through row_list entries - counting how many are in the range */
while (j < current_num_elmts && row_list[j] <= upper_bound)
{
real_proc_id[j] = tmp_id;
j++;
counter++;
}
if (counter > 0 && tmp_id != prev_id)
{
num_real_procs++;
}
prev_id = tmp_id;
}
   /* now we have the list of real processor ids (real_proc_id) - and the number
of distinct ones - so now we can set up data to be sent - we have
HYPRE_Int and HYPRE_Complex data. (row number and value) - we will send
everything as a void since we may not know the rel sizes of ints and
doubles */
   /* first find out how many elements to send per proc - so we can allocate
      the storage */
int_size = sizeof(HYPRE_Int);
complex_size = sizeof(HYPRE_Complex);
obj_size_bytes = hypre_max(int_size, complex_size);
ex_contact_procs = hypre_CTAlloc(HYPRE_Int, num_real_procs);
num_rows_per_proc = hypre_CTAlloc(HYPRE_Int, num_real_procs);
counter = 0;
if (num_real_procs > 0 )
{
ex_contact_procs[0] = real_proc_id[0];
num_rows_per_proc[0] = 1;
/* loop through real procs - these are sorted (row_list is sorted also)*/
for (i=1; i < current_num_elmts; i++)
{
if (real_proc_id[i] == ex_contact_procs[counter]) /* same processor */
{
num_rows_per_proc[counter] += 1; /*another row */
}
else /* new processor */
{
counter++;
ex_contact_procs[counter] = real_proc_id[i];
num_rows_per_proc[counter] = 1;
}
}
}
/* calculate total storage and make vec_starts arrays */
storage = 0;
ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, num_real_procs + 1);
ex_contact_vec_starts[0] = -1;
for (i=0; i < num_real_procs; i++)
{
storage += 1 + 2* num_rows_per_proc[i];
ex_contact_vec_starts[i+1] = -storage-1; /* need negative for next loop */
}
void_contact_buf = hypre_MAlloc(storage*obj_size_bytes);
index_ptr = void_contact_buf; /* step through with this index */
/* set up data to be sent to send procs */
   /* for each proc, void_contact_buf contains #rows, row #, data, etc. */
/* un-sort real_proc_id - we want to access data arrays in order */
us_real_proc_id = hypre_CTAlloc(HYPRE_Int, current_num_elmts);
for (i=0; i < current_num_elmts; i++)
{
us_real_proc_id[orig_order[i]] = real_proc_id[i];
}
hypre_TFree(real_proc_id);
prev_id = -1;
for (i=0; i < current_num_elmts; i++)
{
proc_id = us_real_proc_id[i];
      /* can't use row_list[i] - you lose the negative signs that differentiate
add/set values */
row = off_proc_i[i];
/* find position of this processor */
indx = hypre_BinarySearch(ex_contact_procs, proc_id, num_real_procs);
in = ex_contact_vec_starts[indx];
index_ptr = (void *) ((char *) void_contact_buf + in*obj_size_bytes);
/* first time for this processor - add the number of rows to the buffer */
if (in < 0)
{
in = -in - 1;
/* re-calc. index_ptr since in_i was negative */
index_ptr = (void *) ((char *) void_contact_buf + in*obj_size_bytes);
tmp_int = num_rows_per_proc[indx];
memcpy( index_ptr, &tmp_int, int_size);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in++;
}
/* add row # */
memcpy( index_ptr, &row, int_size);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in++;
/* add value */
tmp_complex = off_proc_data[i];
memcpy( index_ptr, &tmp_complex, complex_size);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in++;
      /* increment the indexes to keep track of where we are - the vec_starts
         are shifted back into place after this loop */
ex_contact_vec_starts[indx] = in;
}
/* some clean up */
hypre_TFree(response_buf);
hypre_TFree(response_buf_starts);
hypre_TFree(us_real_proc_id);
hypre_TFree(orig_order);
hypre_TFree(row_list);
hypre_TFree(num_rows_per_proc);
for (i=num_real_procs; i > 0; i--)
{
ex_contact_vec_starts[i] = ex_contact_vec_starts[i-1];
}
ex_contact_vec_starts[0] = 0;
/* now send the data */
/***********************************/
   /* now get the info in send_proc_obj */
/* the response we expect is just a confirmation*/
response_buf = NULL;
response_buf_starts = NULL;
/*build the response object*/
/* use the send_proc_obj for the info kept from contacts */
   /* estimate initial storage allocation */
send_proc_obj.length = 0;
send_proc_obj.storage_length = num_real_procs + 5;
send_proc_obj.id = NULL; /* don't care who sent it to us */
send_proc_obj.vec_starts =
hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1);
send_proc_obj.vec_starts[0] = 0;
send_proc_obj.element_storage_length = storage + 20;
send_proc_obj.v_elements =
hypre_MAlloc(obj_size_bytes*send_proc_obj.element_storage_length);
response_obj2.fill_response = hypre_FillResponseIJOffProcVals;
response_obj2.data1 = NULL;
response_obj2.data2 = &send_proc_obj;
max_response_size = 0;
hypre_DataExchangeList(num_real_procs, ex_contact_procs,
void_contact_buf, ex_contact_vec_starts, obj_size_bytes,
0, &response_obj2, max_response_size, 5,
comm, (void **) &response_buf, &response_buf_starts);
/***********************************/
hypre_TFree(response_buf);
hypre_TFree(response_buf_starts);
hypre_TFree(ex_contact_procs);
hypre_TFree(void_contact_buf);
hypre_TFree(ex_contact_vec_starts);
/* Now we can unpack the send_proc_objects and either set or add to the
vector data */
num_recvs = send_proc_obj.length;
/* alias */
recv_data_ptr = send_proc_obj.v_elements;
recv_starts = send_proc_obj.vec_starts;
vector_data = hypre_VectorData(hypre_ParVectorLocalVector(par_vector));
first_index = hypre_ParVectorFirstIndex(par_vector);
for (i=0; i < num_recvs; i++)
{
indx = recv_starts[i];
/* get the number of rows for this recv */
memcpy( &row_count, recv_data_ptr, int_size);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
for (j=0; j < row_count; j++) /* for each row: unpack info */
{
/* row # */
memcpy( &row, recv_data_ptr, int_size);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
/* value */
memcpy( &value, recv_data_ptr, complex_size);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
k = row - first_index - global_first_row;
vector_data[k] += value;
}
}
hypre_TFree(send_proc_obj.v_elements);
hypre_TFree(send_proc_obj.vec_starts);
return hypre_error_flag;
}
#endif
|
phase_trans.c
|
/*
This source file is part of the atmostracers library, which is released under the MIT license.
Github repository: https://github.com/OpenNWP/atmostracers
*/
/*
This file contains functions calculating everything related to phase transition rates.
*/
#include "../include/atmostracers.h"
#include <math.h>
#include <stdlib.h>
int calc_h2otracers_source_rates(double mass_source_rates[], double heat_source_rates[], double densities[],
double tracer_temperature_densities[], double temperature[], int number_of_scalars, double delta_t, int assume_lte, int is_land[], int number_of_layers,
double z_scalar[], double z_vector[], double v_squared[], int no_of_vector_per_layer, double roughness_length[], double temperature_soil[],
double power_flux_density_sensible[], double power_flux_density_latent[], int soil_on)
{
/*
This function calculates phase transition rates and associated heat source rates.
It assumes the following order for the constituents:
precipitating ice - precipitating liquid water - cloud ice - liquid cloud water - dry air - water vapour
*/
double diff_density, phase_trans_density, saturation_pressure, water_vapour_pressure, solid_temperature, liquid_temperature,
flux_resistance, layer_thickness, diff_density_sfc, saturation_pressure_sfc;
// maximum cloud water content in (kg cloud)/(kg dry air).
double maximum_cloud_water_content = 0.2e-3;
int number_of_scalars_h = number_of_scalars/number_of_layers;
// loop over all grid boxes
int layer_index, h_index;
#pragma omp parallel for private(diff_density, phase_trans_density, saturation_pressure, water_vapour_pressure, solid_temperature, liquid_temperature, layer_index, h_index, flux_resistance, layer_thickness, diff_density_sfc, saturation_pressure_sfc)
for (int i = 0; i < number_of_scalars; ++i)
{
layer_index = i/number_of_scalars_h;
// determining the temperature of the cloud ice
if (densities[2*number_of_scalars + i] < EPSILON_SECURITY)
{
solid_temperature = T_0;
}
else if (assume_lte == 1)
{
solid_temperature = temperature[i];
}
else
{
solid_temperature = tracer_temperature_densities[2*number_of_scalars + i]/densities[2*number_of_scalars + i];
}
// determining the temperature of the liquid cloud water
if (densities[3*number_of_scalars + i] < EPSILON_SECURITY)
{
liquid_temperature = T_0;
}
else if (assume_lte == 1)
{
liquid_temperature = temperature[i];
}
else
{
liquid_temperature = tracer_temperature_densities[3*number_of_scalars + i]/densities[3*number_of_scalars + i];
}
// determining the saturation pressure
// "positive" temperatures (the saturation pressure is different over water compared to over ice)
if (temperature[i] >= T_0)
{
saturation_pressure = saturation_pressure_over_water(temperature[i]);
}
// "negative" temperatures
else
{
saturation_pressure = saturation_pressure_over_ice(temperature[i]);
}
// determining the water vapour pressure (using the EOS)
water_vapour_pressure = densities[5*number_of_scalars + i]*specific_gas_constants_lookup(1)*temperature[i];
// the amount of water vapour that the air can still take
diff_density = (saturation_pressure - water_vapour_pressure)/(specific_gas_constants_lookup(1)*temperature[i]);
// the case where the air is not over-saturated
if (diff_density >= 0)
{
// temperature >= 0 °C
if (temperature[i] >= T_0)
{
// It is assumed that the still present ice vanishes within one time step.
mass_source_rates[2*number_of_scalars + i] = -densities[2*number_of_scalars + i]/delta_t;
/*
The amount of liquid water per volume that will evaporate.
In case the air cannot take all the water, not everything will evaporate.
*/
phase_trans_density = fmin(densities[3*number_of_scalars + i], diff_density);
/*
The source rate for the liquid water consists of two terms:
1.) the evaporation
2.) the melting of ice
*/
mass_source_rates[3*number_of_scalars + i] = (densities[2*number_of_scalars + i] - phase_trans_density)/delta_t;
// the tendency for the water vapour
mass_source_rates[4*number_of_scalars + i] = phase_trans_density/delta_t;
// the heat source rates acting on the ice
heat_source_rates[2*number_of_scalars + i] = mass_source_rates[2*number_of_scalars + i]*phase_trans_heat(2, solid_temperature);
// the heat source rates acting on the liquid water
heat_source_rates[3*number_of_scalars + i] =
// the evaporation
-phase_trans_density*phase_trans_heat(0, T_0)/delta_t;
}
// temperature < 0 °C
else
{
// Everything that can sublimate will sublimate.
phase_trans_density = fmin(densities[2*number_of_scalars + i], diff_density);
/*
the tendency for the ice contains two terms:
1.) the freezing
2.) the phase transition through sublimation
*/
mass_source_rates[2*number_of_scalars + i] = (densities[3*number_of_scalars + i] - phase_trans_density)/delta_t;
// It is assumed that the still present liquid water vanishes within one time step.
mass_source_rates[3*number_of_scalars + i] = -densities[3*number_of_scalars + i]/delta_t;
// the tendency for the water vapour
mass_source_rates[4*number_of_scalars + i] = phase_trans_density/delta_t;
// the heat source rates acting on the ice
heat_source_rates[2*number_of_scalars + i] = (
// the freezing
densities[3*number_of_scalars + i]*phase_trans_heat(2, solid_temperature)
// the sublimation
- phase_trans_density*phase_trans_heat(1, solid_temperature))/delta_t;
// the heat source rates acting on the liquid water
heat_source_rates[3*number_of_scalars + i] = 0;
}
}
// the case where the air is over-saturated
else
{
// the vanishing of water vapour through the phase transition
mass_source_rates[4*number_of_scalars + i] = diff_density/delta_t;
// temperature >= 0 °C
if (temperature[i] >= T_0)
{
// It is assumed that the still present ice vanishes within one time step.
mass_source_rates[2*number_of_scalars + i] = -densities[2*number_of_scalars + i]/delta_t;
/*
The source rate for the liquid water consists of two terms:
1.) the condensation
2.) the melting of ice
*/
mass_source_rates[3*number_of_scalars + i] = (-diff_density + densities[2*number_of_scalars + i])/delta_t;
// the heat source rates acting on the ice
heat_source_rates[2*number_of_scalars + i] = -densities[2*number_of_scalars + i]*phase_trans_heat(2, solid_temperature)/delta_t;
// the heat source rates acting on the liquid water
heat_source_rates[3*number_of_scalars + i] =
// it is only affected by the condensation
-diff_density*phase_trans_heat(0, liquid_temperature)/delta_t;
}
// temperature < 0 °C
else
{
/*
The source rate for the ice consists of two terms:
1.) the resublimation
2.) the melting of ice
*/
mass_source_rates[2*number_of_scalars + i] = (-diff_density + densities[3*number_of_scalars + i])/delta_t;
// It is assumed that the liquid water disappears within one time step.
mass_source_rates[3*number_of_scalars + i] = -densities[3*number_of_scalars + i]/delta_t;
// the heat source rates acting on the ice
heat_source_rates[2*number_of_scalars + i] =
// the component through the resublimation
(-diff_density*phase_trans_heat(1, solid_temperature)
// the component through freezing
+ densities[3*number_of_scalars + i]*phase_trans_heat(2, solid_temperature))/delta_t;
// the heat source rates acting on the liquid water
heat_source_rates[3*number_of_scalars + i] = 0;
}
}
// creation of precipitation
// snow
mass_source_rates[i] = fmax(densities[2*number_of_scalars + i] - maximum_cloud_water_content*densities[4*number_of_scalars + i], 0)/delta_t;
// the snow creation comes at the cost of cloud ice particles
mass_source_rates[2*number_of_scalars + i] -= mass_source_rates[i];
// rain
mass_source_rates[number_of_scalars + i] = fmax(densities[3*number_of_scalars + i] - maximum_cloud_water_content*densities[4*number_of_scalars + i], 0)/delta_t;
// the rain creation comes at the cost of cloud water particles
mass_source_rates[3*number_of_scalars + i] -= mass_source_rates[number_of_scalars + i];
// turning of snow to rain
if (temperature[i] > T_0 && densities[i] > 0)
{
mass_source_rates[i] = -densities[i]/delta_t;
mass_source_rates[number_of_scalars + i] -= mass_source_rates[i];
}
// surface effects
if (layer_index == number_of_layers - 1 && soil_on == 1)
{
h_index = i - layer_index*number_of_scalars_h;
// flux resistance
flux_resistance = sfc_flux_resistance(pow(v_squared[i], 0.5), z_scalar[i] - z_vector[(layer_index + 1)*no_of_vector_per_layer + h_index], roughness_length[h_index]);
// sensible heat flux density through the surface (towards the surface)
power_flux_density_sensible[h_index] = densities[4*number_of_scalars + i]
*spec_heat_capacities_v_gas_lookup(0)*(temperature[i] - temperature_soil[h_index])/flux_resistance;
// evaporation and latent heat rates
if (is_land[h_index] == 0)
{
// saturation pressure at surface temperature
if (temperature_soil[h_index] >= T_0)
{
saturation_pressure_sfc = saturation_pressure_over_water(temperature_soil[h_index]);
}
else
{
saturation_pressure_sfc = saturation_pressure_over_ice(temperature_soil[h_index]);
}
            // difference in water vapour density between saturation at the ground temperature and the actual absolute humidity in the lowest model layer
diff_density_sfc = saturation_pressure_sfc/(specific_gas_constants_lookup(1)*temperature_soil[h_index])
- water_vapour_pressure/(specific_gas_constants_lookup(1)*temperature[i]);
            // the thickness of the lowest model layer (needed when applying Gauss's theorem)
layer_thickness = z_vector[layer_index*no_of_vector_per_layer + h_index] - z_vector[(layer_index + 1)*no_of_vector_per_layer + h_index];
mass_source_rates[4*number_of_scalars + i] += fmax(0, diff_density_sfc/flux_resistance)/layer_thickness;
// calculating the latent heat flux density affecting the surface
if (temperature_soil[h_index] >= T_0)
{
power_flux_density_latent[h_index] = -phase_trans_heat(0, temperature_soil[h_index])*fmax(0, diff_density_sfc/flux_resistance);
}
else
{
power_flux_density_latent[h_index] = -phase_trans_heat(1, temperature_soil[h_index])*fmax(0, diff_density_sfc/flux_resistance);
}
}
}
}
return 0;
}
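/*
Worked example of the saturation-deficit computation used above (a sketch,
assuming specific_gas_constants_lookup(1) returns R_v of about 461.5 J/(kg*K)):
at T = 293.15 K, saturation_pressure_over_water(T) is roughly 2339 Pa; with a
water vapour density of 0.010 kg/m^3 the vapour pressure is
0.010*461.5*293.15 = about 1353 Pa, so
diff_density = (2339 - 1353)/(461.5*293.15) = about 7.3e-3 kg/m^3
of additional vapour that the air can still take before it saturates.
*/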
double phase_trans_heat(int direction, double temperature)
{
/*
directions:
0: gas to liquid
1: gas to solid
2: liquid to solid
*/
    double result = 0.0; // initialised so that an invalid direction cannot return an undefined value
if (direction == 0)
{
result = 2257000;
}
if (direction == 1)
{
result = 2257000 + 333500;
}
if (direction == 2)
{
result = 333500;
}
return result;
}
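/*
Usage sketch: the latent heat released per unit volume when a vapour excess
condenses is the condensing density times the transition enthalpy, for example
(rho_cond being a hypothetical condensing density in kg/m^3):
    double heating = rho_cond*phase_trans_heat(0, temperature); // J/m^3
For rho_cond = 7.3e-3 kg/m^3 this is roughly 7.3e-3*2.257e6, i.e. about
1.6e4 J/m^3, which divided by delta_t gives the corresponding heat source rate.
*/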
double sink_velocity(int solid_or_liquid, double radius, double air_density)
{
double dry_air_kinematic_viscosity = 14.8e-6;
double reynolds_crit = 10;
double drag_coeff = 1;
    // First of all, a laminar sink velocity is calculated from Stokes' law.
double laminar_velocity_candidate = 0;
// The solid case.
if (solid_or_liquid == 0)
{
laminar_velocity_candidate = 2*M_PI*pow(radius, 2)*DENSITY_WATER*GRAVITY/(9*M_PI*air_density*dry_air_kinematic_viscosity);
}
// The liquid case.
if (solid_or_liquid == 1)
{
laminar_velocity_candidate = 2*M_PI*pow(radius, 2)*DENSITY_WATER*GRAVITY/(9*M_PI*air_density*dry_air_kinematic_viscosity);
}
// calculating the Reynolds number resulting from the laminar velocity
double reynolds_from_laminar;
reynolds_from_laminar = laminar_velocity_candidate*radius/dry_air_kinematic_viscosity;
// calculating the resulting sink velocity
double result;
// the laminar case
if (reynolds_from_laminar <= reynolds_crit)
{
result = laminar_velocity_candidate;
}
// the turbulent case
else
{
result = pow(8*radius*DENSITY_WATER*GRAVITY/(3*air_density*drag_coeff), 0.5);
}
return result;
}
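/*
Worked example for sink_velocity (a sketch, taking DENSITY_WATER of about
1000 kg/m^3, GRAVITY of about 9.81 m/s^2 and air_density of about 1.2 kg/m^3):
for a cloud droplet with radius 10e-6 m the Stokes candidate is about
0.012 m/s and the resulting Reynolds number (~0.008) stays below
reynolds_crit, so the laminar branch is returned; for a raindrop with radius
1e-3 m the Stokes candidate (~120 m/s) yields a Reynolds number in the
thousands, so the turbulent branch applies and the returned velocity is
sqrt(8*radius*DENSITY_WATER*GRAVITY/(3*air_density*drag_coeff)), roughly
4.7 m/s.
*/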
|
pi_par.c
|
/*
This program will numerically compute the integral of
4/(1+x*x)
from 0 to 1. The value of this integral is pi -- which
is great since it gives us an easy way to check the answer.
The program was parallelized using OpenMP by adding just
four lines
(1) A line to include omp.h -- the include file that
contains OpenMP's function prototypes and constants.
(2) A pragma that tells OpenMP to create a team of threads
(3) A pragma to cause one of the threads to print the
number of threads being used by the program.
(4) A pragma to split up loop iterations among the team
of threads. This pragma includes 2 clauses to (1) create a
private variable and (2) to cause the threads to compute their
sums locally and then combine their local sums into a
single global value.
History: Written by Tim Mattson, 11/99.
#---------------------------------------------------------------
Modified by JGG to use threads equal to the number of processors.
SoC 2015.
#-----------------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
static long num_steps = 100000000;
double step;
int main (int argc, char ** argv)
{
int i, nprocs;
double x, pi, sum = 0.0;
double start_time, run_time;
long number_of_steps = argc >=3 && atol(argv[2]) > 0 ? atol(argv[2]) : num_steps;
step = 1.0/(double) number_of_steps;
    /* Use twice the number of system processors as threads if there's no positional argument */
nprocs = argc >=2 && atol(argv[1]) > 0 ? atol(argv[1]) : 2*omp_get_num_procs();
    /* Compute pi for each thread count in the sweep 1, 11, 21, ... up to nprocs */
for (i=1;i<=nprocs;i+=10){
sum = 0.0;
omp_set_num_threads(i);
start_time = omp_get_wtime();
#pragma omp parallel
{
#pragma omp single
printf(" num_threads = %d",omp_get_num_threads());
#pragma omp for reduction(+:sum) private(x)
for (i=1;i<= number_of_steps; i++){
x = (i-0.5)*step;
sum = sum + 4.0/(1.0+x*x);
}
}
pi = step * sum;
run_time = omp_get_wtime() - start_time;
printf("\n pi is %f in %f seconds and %d threads\n",pi,run_time,i);
if(argc>1)
printf("Threads: %d Seconds: %f \n", i, run_time);
}
}
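/*
Build/run sketch (assuming a compiler with OpenMP support, e.g. gcc):
    gcc -O2 -fopenmp pi_par.c -o pi_par
    ./pi_par 16 100000000    # optional: max thread count, number of steps
With no arguments the sweep runs thread counts 1, 11, 21, ... up to twice
omp_get_num_procs(), using the default of 1e8 integration steps.
*/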
|
otfft_avxdif16omp.h
|
/******************************************************************************
* OTFFT AVXDIF(Radix-16) of OpenMP Version 6.5
*
* Copyright (c) 2015 OK Ojisan(Takuya OKAHISA)
* Released under the MIT license
* http://opensource.org/licenses/mit-license.php
******************************************************************************/
#ifndef otfft_avxdif16omp_h
#define otfft_avxdif16omp_h
//#include "otfft/otfft_misc.h"
//#include "otfft_avxdif8.h"
namespace OTFFT_AVXDIF16omp { /////////////////////////////////////////////////
using namespace OTFFT_MISC;
///////////////////////////////////////////////////////////////////////////////
// Forward butterfly operation
///////////////////////////////////////////////////////////////////////////////
template <int n, int s> struct fwdcore
{
static const int n1 = n/16;
static const int N = n*s;
static const int N0 = 0;
static const int N1 = N/16;
static const int N2 = N1*2;
static const int N3 = N1*3;
static const int N4 = N1*4;
static const int N5 = N1*5;
static const int N6 = N1*6;
static const int N7 = N1*7;
static const int N8 = N1*8;
static const int N9 = N1*9;
static const int Na = N1*10;
static const int Nb = N1*11;
static const int Nc = N1*12;
static const int Nd = N1*13;
static const int Ne = N1*14;
static const int Nf = N1*15;
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
#ifdef _OPENMP
#pragma omp for schedule(static)
#endif
for (int i = 0; i < N/32; i++) {
const int p = i / (s/2);
const int q = i % (s/2) * 2;
const int sp = s*p;
const int s16p = 16*sp;
const ymm w1p = duppz3(W[1*sp]);
const ymm w2p = duppz3(W[2*sp]);
const ymm w3p = duppz3(W[3*sp]);
const ymm w4p = mulpz2(w2p, w2p);
const ymm w5p = mulpz2(w2p, w3p);
const ymm w6p = mulpz2(w3p, w3p);
const ymm w7p = mulpz2(w3p, w4p);
const ymm w8p = mulpz2(w4p, w4p);
const ymm w9p = mulpz2(w4p, w5p);
const ymm wap = mulpz2(w5p, w5p);
const ymm wbp = mulpz2(w5p, w6p);
const ymm wcp = mulpz2(w6p, w6p);
const ymm wdp = mulpz2(w6p, w7p);
const ymm wep = mulpz2(w7p, w7p);
const ymm wfp = mulpz2(w7p, w8p);
complex_vector xq_sp = x + q + sp;
complex_vector yq_s16p = y + q + s16p;
const ymm x0 = getpz2(xq_sp+N0);
const ymm x1 = getpz2(xq_sp+N1);
const ymm x2 = getpz2(xq_sp+N2);
const ymm x3 = getpz2(xq_sp+N3);
const ymm x4 = getpz2(xq_sp+N4);
const ymm x5 = getpz2(xq_sp+N5);
const ymm x6 = getpz2(xq_sp+N6);
const ymm x7 = getpz2(xq_sp+N7);
const ymm x8 = getpz2(xq_sp+N8);
const ymm x9 = getpz2(xq_sp+N9);
const ymm xa = getpz2(xq_sp+Na);
const ymm xb = getpz2(xq_sp+Nb);
const ymm xc = getpz2(xq_sp+Nc);
const ymm xd = getpz2(xq_sp+Nd);
const ymm xe = getpz2(xq_sp+Ne);
const ymm xf = getpz2(xq_sp+Nf);
const ymm a08 = addpz2(x0, x8); const ymm s08 = subpz2(x0, x8);
const ymm a4c = addpz2(x4, xc); const ymm s4c = subpz2(x4, xc);
const ymm a2a = addpz2(x2, xa); const ymm s2a = subpz2(x2, xa);
const ymm a6e = addpz2(x6, xe); const ymm s6e = subpz2(x6, xe);
const ymm a19 = addpz2(x1, x9); const ymm s19 = subpz2(x1, x9);
const ymm a5d = addpz2(x5, xd); const ymm s5d = subpz2(x5, xd);
const ymm a3b = addpz2(x3, xb); const ymm s3b = subpz2(x3, xb);
const ymm a7f = addpz2(x7, xf); const ymm s7f = subpz2(x7, xf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(yq_s16p+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(yq_s16p+s*0x1, mulpz2(w1p, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f)));
setpz2(yq_s16p+s*0x2, mulpz2(w2p, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f)));
setpz2(yq_s16p+s*0x3, mulpz2(w3p, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f)));
setpz2(yq_s16p+s*0x4, mulpz2(w4p, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f)));
setpz2(yq_s16p+s*0x5, mulpz2(w5p, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f)));
setpz2(yq_s16p+s*0x6, mulpz2(w6p, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f)));
setpz2(yq_s16p+s*0x7, mulpz2(w7p, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f)));
setpz2(yq_s16p+s*0x8, mulpz2(w8p, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f)));
setpz2(yq_s16p+s*0x9, mulpz2(w9p, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f)));
setpz2(yq_s16p+s*0xa, mulpz2(wap, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f)));
setpz2(yq_s16p+s*0xb, mulpz2(wbp, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f)));
setpz2(yq_s16p+s*0xc, mulpz2(wcp, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f)));
setpz2(yq_s16p+s*0xd, mulpz2(wdp, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f)));
setpz2(yq_s16p+s*0xe, mulpz2(wep, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f)));
setpz2(yq_s16p+s*0xf, mulpz2(wfp, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f)));
}
}
};
template <int N> struct fwdcore<N,1>
{
static const int N0 = 0;
static const int N1 = N/16;
static const int N2 = N1*2;
static const int N3 = N1*3;
static const int N4 = N1*4;
static const int N5 = N1*5;
static const int N6 = N1*6;
static const int N7 = N1*7;
static const int N8 = N1*8;
static const int N9 = N1*9;
static const int Na = N1*10;
static const int Nb = N1*11;
static const int Nc = N1*12;
static const int Nd = N1*13;
static const int Ne = N1*14;
static const int Nf = N1*15;
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
#ifdef _OPENMP
#pragma omp for schedule(static)
#endif
for (int p = 0; p < N1; p += 2) {
complex_vector x_p = x + p;
complex_vector y_16p = y + 16*p;
const ymm w1p = getpz2(W+p);
const ymm w2p = mulpz2(w1p, w1p);
const ymm w3p = mulpz2(w1p, w2p);
const ymm w4p = mulpz2(w2p, w2p);
const ymm w5p = mulpz2(w2p, w3p);
const ymm w6p = mulpz2(w3p, w3p);
const ymm w7p = mulpz2(w3p, w4p);
const ymm w8p = mulpz2(w4p, w4p);
const ymm w9p = mulpz2(w4p, w5p);
const ymm wap = mulpz2(w5p, w5p);
const ymm wbp = mulpz2(w5p, w6p);
const ymm wcp = mulpz2(w6p, w6p);
const ymm wdp = mulpz2(w6p, w7p);
const ymm wep = mulpz2(w7p, w7p);
const ymm wfp = mulpz2(w7p, w8p);
const ymm x0 = getpz2(x_p+N0);
const ymm x1 = getpz2(x_p+N1);
const ymm x2 = getpz2(x_p+N2);
const ymm x3 = getpz2(x_p+N3);
const ymm x4 = getpz2(x_p+N4);
const ymm x5 = getpz2(x_p+N5);
const ymm x6 = getpz2(x_p+N6);
const ymm x7 = getpz2(x_p+N7);
const ymm x8 = getpz2(x_p+N8);
const ymm x9 = getpz2(x_p+N9);
const ymm xa = getpz2(x_p+Na);
const ymm xb = getpz2(x_p+Nb);
const ymm xc = getpz2(x_p+Nc);
const ymm xd = getpz2(x_p+Nd);
const ymm xe = getpz2(x_p+Ne);
const ymm xf = getpz2(x_p+Nf);
const ymm a08 = addpz2(x0, x8); const ymm s08 = subpz2(x0, x8);
const ymm a4c = addpz2(x4, xc); const ymm s4c = subpz2(x4, xc);
const ymm a2a = addpz2(x2, xa); const ymm s2a = subpz2(x2, xa);
const ymm a6e = addpz2(x6, xe); const ymm s6e = subpz2(x6, xe);
const ymm a19 = addpz2(x1, x9); const ymm s19 = subpz2(x1, x9);
const ymm a5d = addpz2(x5, xd); const ymm s5d = subpz2(x5, xd);
const ymm a3b = addpz2(x3, xb); const ymm s3b = subpz2(x3, xb);
const ymm a7f = addpz2(x7, xf); const ymm s7f = subpz2(x7, xf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
#if 0
setpz3<16>(y_16p+0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz3<16>(y_16p+0x1, mulpz2(w1p, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f)));
setpz3<16>(y_16p+0x2, mulpz2(w2p, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f)));
setpz3<16>(y_16p+0x3, mulpz2(w3p, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f)));
setpz3<16>(y_16p+0x4, mulpz2(w4p, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f)));
setpz3<16>(y_16p+0x5, mulpz2(w5p, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f)));
setpz3<16>(y_16p+0x6, mulpz2(w6p, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f)));
setpz3<16>(y_16p+0x7, mulpz2(w7p, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f)));
setpz3<16>(y_16p+0x8, mulpz2(w8p, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f)));
setpz3<16>(y_16p+0x9, mulpz2(w9p, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f)));
setpz3<16>(y_16p+0xa, mulpz2(wap, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f)));
setpz3<16>(y_16p+0xb, mulpz2(wbp, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f)));
setpz3<16>(y_16p+0xc, mulpz2(wcp, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f)));
setpz3<16>(y_16p+0xd, mulpz2(wdp, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f)));
setpz3<16>(y_16p+0xe, mulpz2(wep, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f)));
setpz3<16>(y_16p+0xf, mulpz2(wfp, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f)));
#else
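// The branch below computes the same 16 butterfly outputs as the disabled
// #if 0 branch above, then transposes adjacent pairs with catlo/cathi so the
// results can be flushed with contiguous setpz2 stores instead of the strided
// setpz3<16> stores.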
const ymm aA = addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f);
const ymm bB = mulpz2(w1p, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
const ymm cC = mulpz2(w2p, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
const ymm dD = mulpz2(w3p, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
const ymm eE = mulpz2(w4p, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
const ymm fF = mulpz2(w5p, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
const ymm gG = mulpz2(w6p, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
const ymm hH = mulpz2(w7p, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
const ymm iI = mulpz2(w8p, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
const ymm jJ = mulpz2(w9p, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
const ymm kK = mulpz2(wap, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
const ymm lL = mulpz2(wbp, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
const ymm mM = mulpz2(wcp, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
const ymm nN = mulpz2(wdp, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
const ymm oO = mulpz2(wep, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
const ymm pP = mulpz2(wfp, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
const ymm ab = catlo(aA, bB);
const ymm AB = cathi(aA, bB);
const ymm cd = catlo(cC, dD);
const ymm CD = cathi(cC, dD);
const ymm ef = catlo(eE, fF);
const ymm EF = cathi(eE, fF);
const ymm gh = catlo(gG, hH);
const ymm GH = cathi(gG, hH);
const ymm ij = catlo(iI, jJ);
const ymm IJ = cathi(iI, jJ);
const ymm kl = catlo(kK, lL);
const ymm KL = cathi(kK, lL);
const ymm mn = catlo(mM, nN);
const ymm MN = cathi(mM, nN);
const ymm op = catlo(oO, pP);
const ymm OP = cathi(oO, pP);
setpz2(y_16p+0x00, ab);
setpz2(y_16p+0x02, cd);
setpz2(y_16p+0x04, ef);
setpz2(y_16p+0x06, gh);
setpz2(y_16p+0x08, ij);
setpz2(y_16p+0x0a, kl);
setpz2(y_16p+0x0c, mn);
setpz2(y_16p+0x0e, op);
setpz2(y_16p+0x10, AB);
setpz2(y_16p+0x12, CD);
setpz2(y_16p+0x14, EF);
setpz2(y_16p+0x16, GH);
setpz2(y_16p+0x18, IJ);
setpz2(y_16p+0x1a, KL);
setpz2(y_16p+0x1c, MN);
setpz2(y_16p+0x1e, OP);
#endif
}
}
};
///////////////////////////////////////////////////////////////////////////////
template <int n, int s, bool eo> struct fwd0end;
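// fwd0end<16,s,eo>: terminal radix-16 stage of the unscaled forward transform.
// The eo flag selects where the result lands: the eo==1 specializations write
// the butterfly output to y, the eo==0 specializations write it back into x in
// place. The s==1 specializations drop to xmm (single-complex) arithmetic,
// since a ymm register holds two columns.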
//-----------------------------------------------------------------------------
template <int s> struct fwd0end<16,s,1>
{
void operator()(complex_vector x, complex_vector y) const noexcept
{
#ifdef _OPENMP
#pragma omp for schedule(static) nowait
#endif
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
complex_vector yq = y + q;
const ymm x0 = getpz2(xq+s*0x0);
const ymm x1 = getpz2(xq+s*0x1);
const ymm x2 = getpz2(xq+s*0x2);
const ymm x3 = getpz2(xq+s*0x3);
const ymm x4 = getpz2(xq+s*0x4);
const ymm x5 = getpz2(xq+s*0x5);
const ymm x6 = getpz2(xq+s*0x6);
const ymm x7 = getpz2(xq+s*0x7);
const ymm x8 = getpz2(xq+s*0x8);
const ymm x9 = getpz2(xq+s*0x9);
const ymm xa = getpz2(xq+s*0xa);
const ymm xb = getpz2(xq+s*0xb);
const ymm xc = getpz2(xq+s*0xc);
const ymm xd = getpz2(xq+s*0xd);
const ymm xe = getpz2(xq+s*0xe);
const ymm xf = getpz2(xq+s*0xf);
const ymm a08 = addpz2(x0, x8); const ymm s08 = subpz2(x0, x8);
const ymm a4c = addpz2(x4, xc); const ymm s4c = subpz2(x4, xc);
const ymm a2a = addpz2(x2, xa); const ymm s2a = subpz2(x2, xa);
const ymm a6e = addpz2(x6, xe); const ymm s6e = subpz2(x6, xe);
const ymm a19 = addpz2(x1, x9); const ymm s19 = subpz2(x1, x9);
const ymm a5d = addpz2(x5, xd); const ymm s5d = subpz2(x5, xd);
const ymm a3b = addpz2(x3, xb); const ymm s3b = subpz2(x3, xb);
const ymm a7f = addpz2(x7, xf); const ymm s7f = subpz2(x7, xf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(yq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(yq+s*0x1, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(yq+s*0x2, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(yq+s*0x3, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(yq+s*0x4, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(yq+s*0x5, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(yq+s*0x6, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(yq+s*0x7, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(yq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(yq+s*0x9, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(yq+s*0xa, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(yq+s*0xb, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(yq+s*0xc, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(yq+s*0xd, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(yq+s*0xe, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(yq+s*0xf, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
template <> struct fwd0end<16,1,1>
{
inline void operator()(complex_vector x, complex_vector y) const noexcept
{
#ifdef _OPENMP
#pragma omp single
#endif
{
zeroupper();
const xmm x0 = getpz(x[0x0]);
const xmm x1 = getpz(x[0x1]);
const xmm x2 = getpz(x[0x2]);
const xmm x3 = getpz(x[0x3]);
const xmm x4 = getpz(x[0x4]);
const xmm x5 = getpz(x[0x5]);
const xmm x6 = getpz(x[0x6]);
const xmm x7 = getpz(x[0x7]);
const xmm x8 = getpz(x[0x8]);
const xmm x9 = getpz(x[0x9]);
const xmm xa = getpz(x[0xa]);
const xmm xb = getpz(x[0xb]);
const xmm xc = getpz(x[0xc]);
const xmm xd = getpz(x[0xd]);
const xmm xe = getpz(x[0xe]);
const xmm xf = getpz(x[0xf]);
const xmm a08 = addpz(x0, x8); const xmm s08 = subpz(x0, x8);
const xmm a4c = addpz(x4, xc); const xmm s4c = subpz(x4, xc);
const xmm a2a = addpz(x2, xa); const xmm s2a = subpz(x2, xa);
const xmm a6e = addpz(x6, xe); const xmm s6e = subpz(x6, xe);
const xmm a19 = addpz(x1, x9); const xmm s19 = subpz(x1, x9);
const xmm a5d = addpz(x5, xd); const xmm s5d = subpz(x5, xd);
const xmm a3b = addpz(x3, xb); const xmm s3b = subpz(x3, xb);
const xmm a7f = addpz(x7, xf); const xmm s7f = subpz(x7, xf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
setpz(y[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(y[0x1], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(y[0x2], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(y[0x3], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(y[0x4], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(y[0x5], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(y[0x6], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(y[0x7], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(y[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(y[0x9], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(y[0xa], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(y[0xb], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(y[0xc], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(y[0xd], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(y[0xe], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(y[0xf], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
//-----------------------------------------------------------------------------
template <int s> struct fwd0end<16,s,0>
{
void operator()(complex_vector x, complex_vector) const noexcept
{
#ifdef _OPENMP
#pragma omp for schedule(static) nowait
#endif
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
const ymm x0 = getpz2(xq+s*0x0);
const ymm x1 = getpz2(xq+s*0x1);
const ymm x2 = getpz2(xq+s*0x2);
const ymm x3 = getpz2(xq+s*0x3);
const ymm x4 = getpz2(xq+s*0x4);
const ymm x5 = getpz2(xq+s*0x5);
const ymm x6 = getpz2(xq+s*0x6);
const ymm x7 = getpz2(xq+s*0x7);
const ymm x8 = getpz2(xq+s*0x8);
const ymm x9 = getpz2(xq+s*0x9);
const ymm xa = getpz2(xq+s*0xa);
const ymm xb = getpz2(xq+s*0xb);
const ymm xc = getpz2(xq+s*0xc);
const ymm xd = getpz2(xq+s*0xd);
const ymm xe = getpz2(xq+s*0xe);
const ymm xf = getpz2(xq+s*0xf);
const ymm a08 = addpz2(x0, x8); const ymm s08 = subpz2(x0, x8);
const ymm a4c = addpz2(x4, xc); const ymm s4c = subpz2(x4, xc);
const ymm a2a = addpz2(x2, xa); const ymm s2a = subpz2(x2, xa);
const ymm a6e = addpz2(x6, xe); const ymm s6e = subpz2(x6, xe);
const ymm a19 = addpz2(x1, x9); const ymm s19 = subpz2(x1, x9);
const ymm a5d = addpz2(x5, xd); const ymm s5d = subpz2(x5, xd);
const ymm a3b = addpz2(x3, xb); const ymm s3b = subpz2(x3, xb);
const ymm a7f = addpz2(x7, xf); const ymm s7f = subpz2(x7, xf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(xq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x1, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0x2, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0x3, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0x4, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0x5, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0x6, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0x7, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x9, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0xa, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0xb, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0xc, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0xd, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0xe, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0xf, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
template <> struct fwd0end<16,1,0>
{
inline void operator()(complex_vector x, complex_vector) const noexcept
{
#ifdef _OPENMP
#pragma omp single
#endif
{
zeroupper();
const xmm x0 = getpz(x[0x0]);
const xmm x1 = getpz(x[0x1]);
const xmm x2 = getpz(x[0x2]);
const xmm x3 = getpz(x[0x3]);
const xmm x4 = getpz(x[0x4]);
const xmm x5 = getpz(x[0x5]);
const xmm x6 = getpz(x[0x6]);
const xmm x7 = getpz(x[0x7]);
const xmm x8 = getpz(x[0x8]);
const xmm x9 = getpz(x[0x9]);
const xmm xa = getpz(x[0xa]);
const xmm xb = getpz(x[0xb]);
const xmm xc = getpz(x[0xc]);
const xmm xd = getpz(x[0xd]);
const xmm xe = getpz(x[0xe]);
const xmm xf = getpz(x[0xf]);
const xmm a08 = addpz(x0, x8); const xmm s08 = subpz(x0, x8);
const xmm a4c = addpz(x4, xc); const xmm s4c = subpz(x4, xc);
const xmm a2a = addpz(x2, xa); const xmm s2a = subpz(x2, xa);
const xmm a6e = addpz(x6, xe); const xmm s6e = subpz(x6, xe);
const xmm a19 = addpz(x1, x9); const xmm s19 = subpz(x1, x9);
const xmm a5d = addpz(x5, xd); const xmm s5d = subpz(x5, xd);
const xmm a3b = addpz(x3, xb); const xmm s3b = subpz(x3, xb);
const xmm a7f = addpz(x7, xf); const xmm s7f = subpz(x7, xf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
setpz(x[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x1], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0x2], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0x3], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0x4], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0x5], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0x6], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0x7], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x9], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0xa], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0xb], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0xc], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0xd], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0xe], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0xf], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
///////////////////////////////////////////////////////////////////////////////
template <int n, int s, bool eo> struct fwdnend;
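// fwdnend<16,s,eo>: same terminal radix-16 stage as fwd0end, except every input
// is pre-multiplied by rN = 1/N (N = 16*s), producing the 1/N-normalized
// forward transform. The eo flag again chooses out-of-place (y) versus in-place
// (x) output.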
//-----------------------------------------------------------------------------
template <int s> struct fwdnend<16,s,1>
{
static const int N = 16*s;
void operator()(complex_vector x, complex_vector y) const noexcept
{
static const ymm rN = { 1.0/N, 1.0/N, 1.0/N, 1.0/N };
#ifdef _OPENMP
#pragma omp for schedule(static) nowait
#endif
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
complex_vector yq = y + q;
const ymm x0 = mulpd2(rN, getpz2(xq+s*0x0));
const ymm x1 = mulpd2(rN, getpz2(xq+s*0x1));
const ymm x2 = mulpd2(rN, getpz2(xq+s*0x2));
const ymm x3 = mulpd2(rN, getpz2(xq+s*0x3));
const ymm x4 = mulpd2(rN, getpz2(xq+s*0x4));
const ymm x5 = mulpd2(rN, getpz2(xq+s*0x5));
const ymm x6 = mulpd2(rN, getpz2(xq+s*0x6));
const ymm x7 = mulpd2(rN, getpz2(xq+s*0x7));
const ymm x8 = mulpd2(rN, getpz2(xq+s*0x8));
const ymm x9 = mulpd2(rN, getpz2(xq+s*0x9));
const ymm xa = mulpd2(rN, getpz2(xq+s*0xa));
const ymm xb = mulpd2(rN, getpz2(xq+s*0xb));
const ymm xc = mulpd2(rN, getpz2(xq+s*0xc));
const ymm xd = mulpd2(rN, getpz2(xq+s*0xd));
const ymm xe = mulpd2(rN, getpz2(xq+s*0xe));
const ymm xf = mulpd2(rN, getpz2(xq+s*0xf));
const ymm a08 = addpz2(x0, x8); const ymm s08 = subpz2(x0, x8);
const ymm a4c = addpz2(x4, xc); const ymm s4c = subpz2(x4, xc);
const ymm a2a = addpz2(x2, xa); const ymm s2a = subpz2(x2, xa);
const ymm a6e = addpz2(x6, xe); const ymm s6e = subpz2(x6, xe);
const ymm a19 = addpz2(x1, x9); const ymm s19 = subpz2(x1, x9);
const ymm a5d = addpz2(x5, xd); const ymm s5d = subpz2(x5, xd);
const ymm a3b = addpz2(x3, xb); const ymm s3b = subpz2(x3, xb);
const ymm a7f = addpz2(x7, xf); const ymm s7f = subpz2(x7, xf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(yq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(yq+s*0x1, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(yq+s*0x2, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(yq+s*0x3, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(yq+s*0x4, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(yq+s*0x5, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(yq+s*0x6, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(yq+s*0x7, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(yq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(yq+s*0x9, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(yq+s*0xa, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(yq+s*0xb, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(yq+s*0xc, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(yq+s*0xd, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(yq+s*0xe, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(yq+s*0xf, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
template <> struct fwdnend<16,1,1>
{
inline void operator()(complex_vector x, complex_vector y) const noexcept
{
static const xmm rN = { 1.0/16, 1.0/16 };
#ifdef _OPENMP
#pragma omp single
#endif
{
zeroupper();
const xmm x0 = mulpd(rN, getpz(x[0x0]));
const xmm x1 = mulpd(rN, getpz(x[0x1]));
const xmm x2 = mulpd(rN, getpz(x[0x2]));
const xmm x3 = mulpd(rN, getpz(x[0x3]));
const xmm x4 = mulpd(rN, getpz(x[0x4]));
const xmm x5 = mulpd(rN, getpz(x[0x5]));
const xmm x6 = mulpd(rN, getpz(x[0x6]));
const xmm x7 = mulpd(rN, getpz(x[0x7]));
const xmm x8 = mulpd(rN, getpz(x[0x8]));
const xmm x9 = mulpd(rN, getpz(x[0x9]));
const xmm xa = mulpd(rN, getpz(x[0xa]));
const xmm xb = mulpd(rN, getpz(x[0xb]));
const xmm xc = mulpd(rN, getpz(x[0xc]));
const xmm xd = mulpd(rN, getpz(x[0xd]));
const xmm xe = mulpd(rN, getpz(x[0xe]));
const xmm xf = mulpd(rN, getpz(x[0xf]));
const xmm a08 = addpz(x0, x8); const xmm s08 = subpz(x0, x8);
const xmm a4c = addpz(x4, xc); const xmm s4c = subpz(x4, xc);
const xmm a2a = addpz(x2, xa); const xmm s2a = subpz(x2, xa);
const xmm a6e = addpz(x6, xe); const xmm s6e = subpz(x6, xe);
const xmm a19 = addpz(x1, x9); const xmm s19 = subpz(x1, x9);
const xmm a5d = addpz(x5, xd); const xmm s5d = subpz(x5, xd);
const xmm a3b = addpz(x3, xb); const xmm s3b = subpz(x3, xb);
const xmm a7f = addpz(x7, xf); const xmm s7f = subpz(x7, xf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
setpz(y[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(y[0x1], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(y[0x2], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(y[0x3], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(y[0x4], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(y[0x5], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(y[0x6], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(y[0x7], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(y[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(y[0x9], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(y[0xa], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(y[0xb], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(y[0xc], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(y[0xd], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(y[0xe], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(y[0xf], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
//-----------------------------------------------------------------------------
template <int s> struct fwdnend<16,s,0>
{
static const int N = 16*s;
void operator()(complex_vector x, complex_vector) const noexcept
{
static const ymm rN = { 1.0/N, 1.0/N, 1.0/N, 1.0/N };
#ifdef _OPENMP
#pragma omp for schedule(static) nowait
#endif
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
const ymm x0 = mulpd2(rN, getpz2(xq+s*0x0));
const ymm x1 = mulpd2(rN, getpz2(xq+s*0x1));
const ymm x2 = mulpd2(rN, getpz2(xq+s*0x2));
const ymm x3 = mulpd2(rN, getpz2(xq+s*0x3));
const ymm x4 = mulpd2(rN, getpz2(xq+s*0x4));
const ymm x5 = mulpd2(rN, getpz2(xq+s*0x5));
const ymm x6 = mulpd2(rN, getpz2(xq+s*0x6));
const ymm x7 = mulpd2(rN, getpz2(xq+s*0x7));
const ymm x8 = mulpd2(rN, getpz2(xq+s*0x8));
const ymm x9 = mulpd2(rN, getpz2(xq+s*0x9));
const ymm xa = mulpd2(rN, getpz2(xq+s*0xa));
const ymm xb = mulpd2(rN, getpz2(xq+s*0xb));
const ymm xc = mulpd2(rN, getpz2(xq+s*0xc));
const ymm xd = mulpd2(rN, getpz2(xq+s*0xd));
const ymm xe = mulpd2(rN, getpz2(xq+s*0xe));
const ymm xf = mulpd2(rN, getpz2(xq+s*0xf));
const ymm a08 = addpz2(x0, x8); const ymm s08 = subpz2(x0, x8);
const ymm a4c = addpz2(x4, xc); const ymm s4c = subpz2(x4, xc);
const ymm a2a = addpz2(x2, xa); const ymm s2a = subpz2(x2, xa);
const ymm a6e = addpz2(x6, xe); const ymm s6e = subpz2(x6, xe);
const ymm a19 = addpz2(x1, x9); const ymm s19 = subpz2(x1, x9);
const ymm a5d = addpz2(x5, xd); const ymm s5d = subpz2(x5, xd);
const ymm a3b = addpz2(x3, xb); const ymm s3b = subpz2(x3, xb);
const ymm a7f = addpz2(x7, xf); const ymm s7f = subpz2(x7, xf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(xq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x1, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0x2, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0x3, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0x4, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0x5, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0x6, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0x7, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x9, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0xa, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0xb, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0xc, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0xd, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0xe, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0xf, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
template <> struct fwdnend<16,1,0>
{
inline void operator()(complex_vector x, complex_vector) const noexcept
{
static const xmm rN = { 1.0/16, 1.0/16 };
#ifdef _OPENMP
#pragma omp single
#endif
{
zeroupper();
const xmm x0 = mulpd(rN, getpz(x[0x0]));
const xmm x1 = mulpd(rN, getpz(x[0x1]));
const xmm x2 = mulpd(rN, getpz(x[0x2]));
const xmm x3 = mulpd(rN, getpz(x[0x3]));
const xmm x4 = mulpd(rN, getpz(x[0x4]));
const xmm x5 = mulpd(rN, getpz(x[0x5]));
const xmm x6 = mulpd(rN, getpz(x[0x6]));
const xmm x7 = mulpd(rN, getpz(x[0x7]));
const xmm x8 = mulpd(rN, getpz(x[0x8]));
const xmm x9 = mulpd(rN, getpz(x[0x9]));
const xmm xa = mulpd(rN, getpz(x[0xa]));
const xmm xb = mulpd(rN, getpz(x[0xb]));
const xmm xc = mulpd(rN, getpz(x[0xc]));
const xmm xd = mulpd(rN, getpz(x[0xd]));
const xmm xe = mulpd(rN, getpz(x[0xe]));
const xmm xf = mulpd(rN, getpz(x[0xf]));
const xmm a08 = addpz(x0, x8); const xmm s08 = subpz(x0, x8);
const xmm a4c = addpz(x4, xc); const xmm s4c = subpz(x4, xc);
const xmm a2a = addpz(x2, xa); const xmm s2a = subpz(x2, xa);
const xmm a6e = addpz(x6, xe); const xmm s6e = subpz(x6, xe);
const xmm a19 = addpz(x1, x9); const xmm s19 = subpz(x1, x9);
const xmm a5d = addpz(x5, xd); const xmm s5d = subpz(x5, xd);
const xmm a3b = addpz(x3, xb); const xmm s3b = subpz(x3, xb);
const xmm a7f = addpz(x7, xf); const xmm s7f = subpz(x7, xf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
setpz(x[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x1], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0x2], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0x3], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0x4], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0x5], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0x6], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0x7], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x9], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0xa], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0xb], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0xc], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0xd], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0xe], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0xf], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
///////////////////////////////////////////////////////////////////////////////
// Forward FFT
///////////////////////////////////////////////////////////////////////////////
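// fwd0fft<n,s,eo> peels off one radix-16 DIF stage with fwdcore<n,s> and then
// recurses on fwd0fft<n/16,16*s,!eo>, ping-ponging between the x and y buffers.
// The specializations for n = 16, 8, 4, 2 terminate the recursion: n == 16 uses
// the fwd0end kernel above, while n == 8 delegates to OTFFT_AVXDIF8omp and
// n == 4, 2 to OTFFT_AVXDIF4omp. fwdnfft below is identical except that it
// terminates in the scaled fwdnend kernels.
//
// Rough usage sketch (hypothetical driver code, not part of this header): for a
// length-256 unscaled forward transform one would prepare the twiddle table W
// and invoke something like
//     fwd0fft<256,1,0>()(x, y, W);
// from inside the enclosing OpenMP parallel region that the omp for / omp
// single pragmas in these kernels assume.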
template <int n, int s, bool eo> struct fwd0fft
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
fwdcore<n,s>()(x, y, W);
fwd0fft<n/16,16*s,!eo>()(y, x, W);
}
};
template <int s, bool eo> struct fwd0fft<16,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
fwd0end<16,s,eo>()(x, y);
}
};
template <int s, bool eo> struct fwd0fft<8,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIF8omp::fwd0end<8,s,eo>()(x, y);
}
};
template <int s, bool eo> struct fwd0fft<4,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIF4omp::fwd0end<4,s,eo>()(x, y);
}
};
template <int s, bool eo> struct fwd0fft<2,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIF4omp::fwd0end<2,s,eo>()(x, y);
}
};
//-----------------------------------------------------------------------------
template <int n, int s, bool eo> struct fwdnfft
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
fwdcore<n,s>()(x, y, W);
fwdnfft<n/16,16*s,!eo>()(y, x, W);
}
};
template <int s, bool eo> struct fwdnfft<16,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
fwdnend<16,s,eo>()(x, y);
}
};
template <int s, bool eo> struct fwdnfft<8,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIF8omp::fwdnend<8,s,eo>()(x, y);
}
};
template <int s, bool eo> struct fwdnfft<4,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIF4omp::fwdnend<4,s,eo>()(x, y);
}
};
template <int s, bool eo> struct fwdnfft<2,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIF4omp::fwdnend<2,s,eo>()(x, y);
}
};
///////////////////////////////////////////////////////////////////////////////
// Inverse butterfly operation
///////////////////////////////////////////////////////////////////////////////
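// invcore<n,s> mirrors fwdcore: it performs the same radix-16 butterfly but
// with conjugated twiddle factors (W[N - k*sp] in the strided case, cnjpz2 of
// the table entry when s == 1), and the twiddled outputs 0x1..0xf are emitted
// in index-reversed order relative to fwdcore, as the inverse
// decimation-in-frequency decomposition requires.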
template <int n, int s> struct invcore
{
static const int n1 = n/16;
static const int N = n*s;
static const int N0 = 0;
static const int N1 = N/16;
static const int N2 = N1*2;
static const int N3 = N1*3;
static const int N4 = N1*4;
static const int N5 = N1*5;
static const int N6 = N1*6;
static const int N7 = N1*7;
static const int N8 = N1*8;
static const int N9 = N1*9;
static const int Na = N1*10;
static const int Nb = N1*11;
static const int Nc = N1*12;
static const int Nd = N1*13;
static const int Ne = N1*14;
static const int Nf = N1*15;
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
#ifdef _OPENMP
#pragma omp for schedule(static)
#endif
for (int i = 0; i < N/32; i++) {
const int p = i / (s/2);
const int q = i % (s/2) * 2;
const int sp = s*p;
const int s16p = 16*sp;
const ymm w1p = duppz3(W[N-1*sp]);
const ymm w2p = duppz3(W[N-2*sp]);
const ymm w3p = duppz3(W[N-3*sp]);
const ymm w4p = mulpz2(w2p, w2p);
const ymm w5p = mulpz2(w2p, w3p);
const ymm w6p = mulpz2(w3p, w3p);
const ymm w7p = mulpz2(w3p, w4p);
const ymm w8p = mulpz2(w4p, w4p);
const ymm w9p = mulpz2(w4p, w5p);
const ymm wap = mulpz2(w5p, w5p);
const ymm wbp = mulpz2(w5p, w6p);
const ymm wcp = mulpz2(w6p, w6p);
const ymm wdp = mulpz2(w6p, w7p);
const ymm wep = mulpz2(w7p, w7p);
const ymm wfp = mulpz2(w7p, w8p);
complex_vector xq_sp = x + q + sp;
complex_vector yq_s16p = y + q + s16p;
const ymm x0 = getpz2(xq_sp+N0);
const ymm x1 = getpz2(xq_sp+N1);
const ymm x2 = getpz2(xq_sp+N2);
const ymm x3 = getpz2(xq_sp+N3);
const ymm x4 = getpz2(xq_sp+N4);
const ymm x5 = getpz2(xq_sp+N5);
const ymm x6 = getpz2(xq_sp+N6);
const ymm x7 = getpz2(xq_sp+N7);
const ymm x8 = getpz2(xq_sp+N8);
const ymm x9 = getpz2(xq_sp+N9);
const ymm xa = getpz2(xq_sp+Na);
const ymm xb = getpz2(xq_sp+Nb);
const ymm xc = getpz2(xq_sp+Nc);
const ymm xd = getpz2(xq_sp+Nd);
const ymm xe = getpz2(xq_sp+Ne);
const ymm xf = getpz2(xq_sp+Nf);
const ymm a08 = addpz2(x0, x8); const ymm s08 = subpz2(x0, x8);
const ymm a4c = addpz2(x4, xc); const ymm s4c = subpz2(x4, xc);
const ymm a2a = addpz2(x2, xa); const ymm s2a = subpz2(x2, xa);
const ymm a6e = addpz2(x6, xe); const ymm s6e = subpz2(x6, xe);
const ymm a19 = addpz2(x1, x9); const ymm s19 = subpz2(x1, x9);
const ymm a5d = addpz2(x5, xd); const ymm s5d = subpz2(x5, xd);
const ymm a3b = addpz2(x3, xb); const ymm s3b = subpz2(x3, xb);
const ymm a7f = addpz2(x7, xf); const ymm s7f = subpz2(x7, xf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(yq_s16p+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(yq_s16p+s*0x1, mulpz2(w1p, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f)));
setpz2(yq_s16p+s*0x2, mulpz2(w2p, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f)));
setpz2(yq_s16p+s*0x3, mulpz2(w3p, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f)));
setpz2(yq_s16p+s*0x4, mulpz2(w4p, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f)));
setpz2(yq_s16p+s*0x5, mulpz2(w5p, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f)));
setpz2(yq_s16p+s*0x6, mulpz2(w6p, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f)));
setpz2(yq_s16p+s*0x7, mulpz2(w7p, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f)));
setpz2(yq_s16p+s*0x8, mulpz2(w8p, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f)));
setpz2(yq_s16p+s*0x9, mulpz2(w9p, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f)));
setpz2(yq_s16p+s*0xa, mulpz2(wap, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f)));
setpz2(yq_s16p+s*0xb, mulpz2(wbp, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f)));
setpz2(yq_s16p+s*0xc, mulpz2(wcp, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f)));
setpz2(yq_s16p+s*0xd, mulpz2(wdp, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f)));
setpz2(yq_s16p+s*0xe, mulpz2(wep, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f)));
setpz2(yq_s16p+s*0xf, mulpz2(wfp, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f)));
}
}
};
template <int N> struct invcore<N,1>
{
static const int N0 = 0;
static const int N1 = N/16;
static const int N2 = N1*2;
static const int N3 = N1*3;
static const int N4 = N1*4;
static const int N5 = N1*5;
static const int N6 = N1*6;
static const int N7 = N1*7;
static const int N8 = N1*8;
static const int N9 = N1*9;
static const int Na = N1*10;
static const int Nb = N1*11;
static const int Nc = N1*12;
static const int Nd = N1*13;
static const int Ne = N1*14;
static const int Nf = N1*15;
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
#ifdef _OPENMP
#pragma omp for schedule(static)
#endif
for (int p = 0; p < N1; p += 2) {
complex_vector x_p = x + p;
complex_vector y_16p = y + 16*p;
const ymm w1p = cnjpz2(getpz2(W+p));
const ymm w2p = mulpz2(w1p, w1p);
const ymm w3p = mulpz2(w1p, w2p);
const ymm w4p = mulpz2(w2p, w2p);
const ymm w5p = mulpz2(w2p, w3p);
const ymm w6p = mulpz2(w3p, w3p);
const ymm w7p = mulpz2(w3p, w4p);
const ymm w8p = mulpz2(w4p, w4p);
const ymm w9p = mulpz2(w4p, w5p);
const ymm wap = mulpz2(w5p, w5p);
const ymm wbp = mulpz2(w5p, w6p);
const ymm wcp = mulpz2(w6p, w6p);
const ymm wdp = mulpz2(w6p, w7p);
const ymm wep = mulpz2(w7p, w7p);
const ymm wfp = mulpz2(w7p, w8p);
const ymm x0 = getpz2(x_p+N0);
const ymm x1 = getpz2(x_p+N1);
const ymm x2 = getpz2(x_p+N2);
const ymm x3 = getpz2(x_p+N3);
const ymm x4 = getpz2(x_p+N4);
const ymm x5 = getpz2(x_p+N5);
const ymm x6 = getpz2(x_p+N6);
const ymm x7 = getpz2(x_p+N7);
const ymm x8 = getpz2(x_p+N8);
const ymm x9 = getpz2(x_p+N9);
const ymm xa = getpz2(x_p+Na);
const ymm xb = getpz2(x_p+Nb);
const ymm xc = getpz2(x_p+Nc);
const ymm xd = getpz2(x_p+Nd);
const ymm xe = getpz2(x_p+Ne);
const ymm xf = getpz2(x_p+Nf);
const ymm a08 = addpz2(x0, x8); const ymm s08 = subpz2(x0, x8);
const ymm a4c = addpz2(x4, xc); const ymm s4c = subpz2(x4, xc);
const ymm a2a = addpz2(x2, xa); const ymm s2a = subpz2(x2, xa);
const ymm a6e = addpz2(x6, xe); const ymm s6e = subpz2(x6, xe);
const ymm a19 = addpz2(x1, x9); const ymm s19 = subpz2(x1, x9);
const ymm a5d = addpz2(x5, xd); const ymm s5d = subpz2(x5, xd);
const ymm a3b = addpz2(x3, xb); const ymm s3b = subpz2(x3, xb);
const ymm a7f = addpz2(x7, xf); const ymm s7f = subpz2(x7, xf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
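// The disabled branch below writes each result straight to its stride-16 slot with
// setpz3<16>; the live branch first regroups lane pairs with catlo/cathi so that the
// setpz2 stores which follow hit contiguous addresses.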
#if 0
setpz3<16>(y_16p+0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz3<16>(y_16p+0x1, mulpz2(w1p, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f)));
setpz3<16>(y_16p+0x2, mulpz2(w2p, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f)));
setpz3<16>(y_16p+0x3, mulpz2(w3p, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f)));
setpz3<16>(y_16p+0x4, mulpz2(w4p, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f)));
setpz3<16>(y_16p+0x5, mulpz2(w5p, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f)));
setpz3<16>(y_16p+0x6, mulpz2(w6p, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f)));
setpz3<16>(y_16p+0x7, mulpz2(w7p, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f)));
setpz3<16>(y_16p+0x8, mulpz2(w8p, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f)));
setpz3<16>(y_16p+0x9, mulpz2(w9p, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f)));
setpz3<16>(y_16p+0xa, mulpz2(wap, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f)));
setpz3<16>(y_16p+0xb, mulpz2(wbp, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f)));
setpz3<16>(y_16p+0xc, mulpz2(wcp, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f)));
setpz3<16>(y_16p+0xd, mulpz2(wdp, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f)));
setpz3<16>(y_16p+0xe, mulpz2(wep, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f)));
setpz3<16>(y_16p+0xf, mulpz2(wfp, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f)));
#else
const ymm aA = addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f);
const ymm bB = mulpz2(w1p, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
const ymm cC = mulpz2(w2p, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
const ymm dD = mulpz2(w3p, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
const ymm eE = mulpz2(w4p, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
const ymm fF = mulpz2(w5p, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
const ymm gG = mulpz2(w6p, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
const ymm hH = mulpz2(w7p, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
const ymm iI = mulpz2(w8p, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
const ymm jJ = mulpz2(w9p, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
const ymm kK = mulpz2(wap, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
const ymm lL = mulpz2(wbp, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
const ymm mM = mulpz2(wcp, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
const ymm nN = mulpz2(wdp, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
const ymm oO = mulpz2(wep, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
const ymm pP = mulpz2(wfp, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
const ymm ab = catlo(aA, bB);
const ymm AB = cathi(aA, bB);
const ymm cd = catlo(cC, dD);
const ymm CD = cathi(cC, dD);
const ymm ef = catlo(eE, fF);
const ymm EF = cathi(eE, fF);
const ymm gh = catlo(gG, hH);
const ymm GH = cathi(gG, hH);
const ymm ij = catlo(iI, jJ);
const ymm IJ = cathi(iI, jJ);
const ymm kl = catlo(kK, lL);
const ymm KL = cathi(kK, lL);
const ymm mn = catlo(mM, nN);
const ymm MN = cathi(mM, nN);
const ymm op = catlo(oO, pP);
const ymm OP = cathi(oO, pP);
setpz2(y_16p+0x00, ab);
setpz2(y_16p+0x02, cd);
setpz2(y_16p+0x04, ef);
setpz2(y_16p+0x06, gh);
setpz2(y_16p+0x08, ij);
setpz2(y_16p+0x0a, kl);
setpz2(y_16p+0x0c, mn);
setpz2(y_16p+0x0e, op);
setpz2(y_16p+0x10, AB);
setpz2(y_16p+0x12, CD);
setpz2(y_16p+0x14, EF);
setpz2(y_16p+0x16, GH);
setpz2(y_16p+0x18, IJ);
setpz2(y_16p+0x1a, KL);
setpz2(y_16p+0x1c, MN);
setpz2(y_16p+0x1e, OP);
#endif
}
}
};
///////////////////////////////////////////////////////////////////////////////
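// inv0end: terminal radix-16 butterfly of the unscaled inverse transform.
// With eo == 1 the result is written to y; with eo == 0 it is written back into x.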
template <int n, int s, bool eo> struct inv0end;
//-----------------------------------------------------------------------------
template <int s> struct inv0end<16,s,1>
{
void operator()(complex_vector x, complex_vector y) const noexcept
{
#ifdef _OPENMP
#pragma omp for schedule(static) nowait
#endif
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
complex_vector yq = y + q;
const ymm x0 = getpz2(xq+s*0x0);
const ymm x1 = getpz2(xq+s*0x1);
const ymm x2 = getpz2(xq+s*0x2);
const ymm x3 = getpz2(xq+s*0x3);
const ymm x4 = getpz2(xq+s*0x4);
const ymm x5 = getpz2(xq+s*0x5);
const ymm x6 = getpz2(xq+s*0x6);
const ymm x7 = getpz2(xq+s*0x7);
const ymm x8 = getpz2(xq+s*0x8);
const ymm x9 = getpz2(xq+s*0x9);
const ymm xa = getpz2(xq+s*0xa);
const ymm xb = getpz2(xq+s*0xb);
const ymm xc = getpz2(xq+s*0xc);
const ymm xd = getpz2(xq+s*0xd);
const ymm xe = getpz2(xq+s*0xe);
const ymm xf = getpz2(xq+s*0xf);
const ymm a08 = addpz2(x0, x8); const ymm s08 = subpz2(x0, x8);
const ymm a4c = addpz2(x4, xc); const ymm s4c = subpz2(x4, xc);
const ymm a2a = addpz2(x2, xa); const ymm s2a = subpz2(x2, xa);
const ymm a6e = addpz2(x6, xe); const ymm s6e = subpz2(x6, xe);
const ymm a19 = addpz2(x1, x9); const ymm s19 = subpz2(x1, x9);
const ymm a5d = addpz2(x5, xd); const ymm s5d = subpz2(x5, xd);
const ymm a3b = addpz2(x3, xb); const ymm s3b = subpz2(x3, xb);
const ymm a7f = addpz2(x7, xf); const ymm s7f = subpz2(x7, xf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(yq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(yq+s*0x1, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(yq+s*0x2, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(yq+s*0x3, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(yq+s*0x4, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(yq+s*0x5, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(yq+s*0x6, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(yq+s*0x7, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(yq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(yq+s*0x9, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(yq+s*0xa, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(yq+s*0xb, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(yq+s*0xc, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(yq+s*0xd, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(yq+s*0xe, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(yq+s*0xf, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
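// s == 1 specialization: a single column cannot be paired into a ymm, so the butterfly
// runs on scalar-complex xmm operations inside an omp single block; zeroupper() avoids
// AVX/SSE transition stalls.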
template <> struct inv0end<16,1,1>
{
inline void operator()(complex_vector x, complex_vector y) const noexcept
{
#ifdef _OPENMP
#pragma omp single
#endif
{
zeroupper();
const xmm x0 = getpz(x[0x0]);
const xmm x1 = getpz(x[0x1]);
const xmm x2 = getpz(x[0x2]);
const xmm x3 = getpz(x[0x3]);
const xmm x4 = getpz(x[0x4]);
const xmm x5 = getpz(x[0x5]);
const xmm x6 = getpz(x[0x6]);
const xmm x7 = getpz(x[0x7]);
const xmm x8 = getpz(x[0x8]);
const xmm x9 = getpz(x[0x9]);
const xmm xa = getpz(x[0xa]);
const xmm xb = getpz(x[0xb]);
const xmm xc = getpz(x[0xc]);
const xmm xd = getpz(x[0xd]);
const xmm xe = getpz(x[0xe]);
const xmm xf = getpz(x[0xf]);
const xmm a08 = addpz(x0, x8); const xmm s08 = subpz(x0, x8);
const xmm a4c = addpz(x4, xc); const xmm s4c = subpz(x4, xc);
const xmm a2a = addpz(x2, xa); const xmm s2a = subpz(x2, xa);
const xmm a6e = addpz(x6, xe); const xmm s6e = subpz(x6, xe);
const xmm a19 = addpz(x1, x9); const xmm s19 = subpz(x1, x9);
const xmm a5d = addpz(x5, xd); const xmm s5d = subpz(x5, xd);
const xmm a3b = addpz(x3, xb); const xmm s3b = subpz(x3, xb);
const xmm a7f = addpz(x7, xf); const xmm s7f = subpz(x7, xf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
setpz(y[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(y[0x1], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(y[0x2], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(y[0x3], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(y[0x4], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(y[0x5], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(y[0x6], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(y[0x7], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(y[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(y[0x9], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(y[0xa], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(y[0xb], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(y[0xc], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(y[0xd], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(y[0xe], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(y[0xf], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
//-----------------------------------------------------------------------------
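// eo == 0: in-place variant -- the result overwrites x and the second (y) argument is unused.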
template <int s> struct inv0end<16,s,0>
{
void operator()(complex_vector x, complex_vector) const noexcept
{
#ifdef _OPENMP
#pragma omp for schedule(static) nowait
#endif
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
const ymm x0 = getpz2(xq+s*0x0);
const ymm x1 = getpz2(xq+s*0x1);
const ymm x2 = getpz2(xq+s*0x2);
const ymm x3 = getpz2(xq+s*0x3);
const ymm x4 = getpz2(xq+s*0x4);
const ymm x5 = getpz2(xq+s*0x5);
const ymm x6 = getpz2(xq+s*0x6);
const ymm x7 = getpz2(xq+s*0x7);
const ymm x8 = getpz2(xq+s*0x8);
const ymm x9 = getpz2(xq+s*0x9);
const ymm xa = getpz2(xq+s*0xa);
const ymm xb = getpz2(xq+s*0xb);
const ymm xc = getpz2(xq+s*0xc);
const ymm xd = getpz2(xq+s*0xd);
const ymm xe = getpz2(xq+s*0xe);
const ymm xf = getpz2(xq+s*0xf);
const ymm a08 = addpz2(x0, x8); const ymm s08 = subpz2(x0, x8);
const ymm a4c = addpz2(x4, xc); const ymm s4c = subpz2(x4, xc);
const ymm a2a = addpz2(x2, xa); const ymm s2a = subpz2(x2, xa);
const ymm a6e = addpz2(x6, xe); const ymm s6e = subpz2(x6, xe);
const ymm a19 = addpz2(x1, x9); const ymm s19 = subpz2(x1, x9);
const ymm a5d = addpz2(x5, xd); const ymm s5d = subpz2(x5, xd);
const ymm a3b = addpz2(x3, xb); const ymm s3b = subpz2(x3, xb);
const ymm a7f = addpz2(x7, xf); const ymm s7f = subpz2(x7, xf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(xq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x1, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0x2, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0x3, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0x4, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0x5, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0x6, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0x7, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x9, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0xa, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0xb, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0xc, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0xd, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0xe, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0xf, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
template <> struct inv0end<16,1,0>
{
inline void operator()(complex_vector x, complex_vector) const noexcept
{
#ifdef _OPENMP
#pragma omp single
#endif
{
zeroupper();
const xmm x0 = getpz(x[0x0]);
const xmm x1 = getpz(x[0x1]);
const xmm x2 = getpz(x[0x2]);
const xmm x3 = getpz(x[0x3]);
const xmm x4 = getpz(x[0x4]);
const xmm x5 = getpz(x[0x5]);
const xmm x6 = getpz(x[0x6]);
const xmm x7 = getpz(x[0x7]);
const xmm x8 = getpz(x[0x8]);
const xmm x9 = getpz(x[0x9]);
const xmm xa = getpz(x[0xa]);
const xmm xb = getpz(x[0xb]);
const xmm xc = getpz(x[0xc]);
const xmm xd = getpz(x[0xd]);
const xmm xe = getpz(x[0xe]);
const xmm xf = getpz(x[0xf]);
const xmm a08 = addpz(x0, x8); const xmm s08 = subpz(x0, x8);
const xmm a4c = addpz(x4, xc); const xmm s4c = subpz(x4, xc);
const xmm a2a = addpz(x2, xa); const xmm s2a = subpz(x2, xa);
const xmm a6e = addpz(x6, xe); const xmm s6e = subpz(x6, xe);
const xmm a19 = addpz(x1, x9); const xmm s19 = subpz(x1, x9);
const xmm a5d = addpz(x5, xd); const xmm s5d = subpz(x5, xd);
const xmm a3b = addpz(x3, xb); const xmm s3b = subpz(x3, xb);
const xmm a7f = addpz(x7, xf); const xmm s7f = subpz(x7, xf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
setpz(x[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x1], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0x2], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0x3], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0x4], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0x5], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0x6], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0x7], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x9], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0xa], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0xb], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0xc], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0xd], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0xe], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0xf], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
///////////////////////////////////////////////////////////////////////////////
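// invnend: identical butterfly to inv0end, but each input is pre-multiplied by
// rN = 1/N so the inverse transform comes out normalized.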
template <int n, int s, bool eo> struct invnend;
//-----------------------------------------------------------------------------
template <int s> struct invnend<16,s,1>
{
static const int N = 16*s;
void operator()(complex_vector x, complex_vector y) const noexcept
{
static const ymm rN = { 1.0/N, 1.0/N, 1.0/N, 1.0/N };
#ifdef _OPENMP
#pragma omp for schedule(static) nowait
#endif
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
complex_vector yq = y + q;
const ymm x0 = mulpd2(rN, getpz2(xq+s*0x0));
const ymm x1 = mulpd2(rN, getpz2(xq+s*0x1));
const ymm x2 = mulpd2(rN, getpz2(xq+s*0x2));
const ymm x3 = mulpd2(rN, getpz2(xq+s*0x3));
const ymm x4 = mulpd2(rN, getpz2(xq+s*0x4));
const ymm x5 = mulpd2(rN, getpz2(xq+s*0x5));
const ymm x6 = mulpd2(rN, getpz2(xq+s*0x6));
const ymm x7 = mulpd2(rN, getpz2(xq+s*0x7));
const ymm x8 = mulpd2(rN, getpz2(xq+s*0x8));
const ymm x9 = mulpd2(rN, getpz2(xq+s*0x9));
const ymm xa = mulpd2(rN, getpz2(xq+s*0xa));
const ymm xb = mulpd2(rN, getpz2(xq+s*0xb));
const ymm xc = mulpd2(rN, getpz2(xq+s*0xc));
const ymm xd = mulpd2(rN, getpz2(xq+s*0xd));
const ymm xe = mulpd2(rN, getpz2(xq+s*0xe));
const ymm xf = mulpd2(rN, getpz2(xq+s*0xf));
const ymm a08 = addpz2(x0, x8); const ymm s08 = subpz2(x0, x8);
const ymm a4c = addpz2(x4, xc); const ymm s4c = subpz2(x4, xc);
const ymm a2a = addpz2(x2, xa); const ymm s2a = subpz2(x2, xa);
const ymm a6e = addpz2(x6, xe); const ymm s6e = subpz2(x6, xe);
const ymm a19 = addpz2(x1, x9); const ymm s19 = subpz2(x1, x9);
const ymm a5d = addpz2(x5, xd); const ymm s5d = subpz2(x5, xd);
const ymm a3b = addpz2(x3, xb); const ymm s3b = subpz2(x3, xb);
const ymm a7f = addpz2(x7, xf); const ymm s7f = subpz2(x7, xf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(yq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(yq+s*0x1, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(yq+s*0x2, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(yq+s*0x3, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(yq+s*0x4, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(yq+s*0x5, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(yq+s*0x6, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(yq+s*0x7, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(yq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(yq+s*0x9, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(yq+s*0xa, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(yq+s*0xb, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(yq+s*0xc, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(yq+s*0xd, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(yq+s*0xe, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(yq+s*0xf, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
template <> struct invnend<16,1,1>
{
inline void operator()(complex_vector x, complex_vector y) const noexcept
{
static const xmm rN = { 1.0/16, 1.0/16 };
#ifdef _OPENMP
#pragma omp single
#endif
{
zeroupper();
const xmm x0 = mulpd(rN, getpz(x[0x0]));
const xmm x1 = mulpd(rN, getpz(x[0x1]));
const xmm x2 = mulpd(rN, getpz(x[0x2]));
const xmm x3 = mulpd(rN, getpz(x[0x3]));
const xmm x4 = mulpd(rN, getpz(x[0x4]));
const xmm x5 = mulpd(rN, getpz(x[0x5]));
const xmm x6 = mulpd(rN, getpz(x[0x6]));
const xmm x7 = mulpd(rN, getpz(x[0x7]));
const xmm x8 = mulpd(rN, getpz(x[0x8]));
const xmm x9 = mulpd(rN, getpz(x[0x9]));
const xmm xa = mulpd(rN, getpz(x[0xa]));
const xmm xb = mulpd(rN, getpz(x[0xb]));
const xmm xc = mulpd(rN, getpz(x[0xc]));
const xmm xd = mulpd(rN, getpz(x[0xd]));
const xmm xe = mulpd(rN, getpz(x[0xe]));
const xmm xf = mulpd(rN, getpz(x[0xf]));
const xmm a08 = addpz(x0, x8); const xmm s08 = subpz(x0, x8);
const xmm a4c = addpz(x4, xc); const xmm s4c = subpz(x4, xc);
const xmm a2a = addpz(x2, xa); const xmm s2a = subpz(x2, xa);
const xmm a6e = addpz(x6, xe); const xmm s6e = subpz(x6, xe);
const xmm a19 = addpz(x1, x9); const xmm s19 = subpz(x1, x9);
const xmm a5d = addpz(x5, xd); const xmm s5d = subpz(x5, xd);
const xmm a3b = addpz(x3, xb); const xmm s3b = subpz(x3, xb);
const xmm a7f = addpz(x7, xf); const xmm s7f = subpz(x7, xf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
setpz(y[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(y[0x1], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(y[0x2], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(y[0x3], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(y[0x4], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(y[0x5], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(y[0x6], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(y[0x7], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(y[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(y[0x9], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(y[0xa], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(y[0xb], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(y[0xc], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(y[0xd], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(y[0xe], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(y[0xf], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
//-----------------------------------------------------------------------------
template <int s> struct invnend<16,s,0>
{
static const int N = 16*s;
void operator()(complex_vector x, complex_vector) const noexcept
{
static const ymm rN = { 1.0/N, 1.0/N, 1.0/N, 1.0/N };
#ifdef _OPENMP
#pragma omp for schedule(static) nowait
#endif
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
const ymm x0 = mulpd2(rN, getpz2(xq+s*0x0));
const ymm x1 = mulpd2(rN, getpz2(xq+s*0x1));
const ymm x2 = mulpd2(rN, getpz2(xq+s*0x2));
const ymm x3 = mulpd2(rN, getpz2(xq+s*0x3));
const ymm x4 = mulpd2(rN, getpz2(xq+s*0x4));
const ymm x5 = mulpd2(rN, getpz2(xq+s*0x5));
const ymm x6 = mulpd2(rN, getpz2(xq+s*0x6));
const ymm x7 = mulpd2(rN, getpz2(xq+s*0x7));
const ymm x8 = mulpd2(rN, getpz2(xq+s*0x8));
const ymm x9 = mulpd2(rN, getpz2(xq+s*0x9));
const ymm xa = mulpd2(rN, getpz2(xq+s*0xa));
const ymm xb = mulpd2(rN, getpz2(xq+s*0xb));
const ymm xc = mulpd2(rN, getpz2(xq+s*0xc));
const ymm xd = mulpd2(rN, getpz2(xq+s*0xd));
const ymm xe = mulpd2(rN, getpz2(xq+s*0xe));
const ymm xf = mulpd2(rN, getpz2(xq+s*0xf));
const ymm a08 = addpz2(x0, x8); const ymm s08 = subpz2(x0, x8);
const ymm a4c = addpz2(x4, xc); const ymm s4c = subpz2(x4, xc);
const ymm a2a = addpz2(x2, xa); const ymm s2a = subpz2(x2, xa);
const ymm a6e = addpz2(x6, xe); const ymm s6e = subpz2(x6, xe);
const ymm a19 = addpz2(x1, x9); const ymm s19 = subpz2(x1, x9);
const ymm a5d = addpz2(x5, xd); const ymm s5d = subpz2(x5, xd);
const ymm a3b = addpz2(x3, xb); const ymm s3b = subpz2(x3, xb);
const ymm a7f = addpz2(x7, xf); const ymm s7f = subpz2(x7, xf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(xq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x1, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0x2, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0x3, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0x4, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0x5, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0x6, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0x7, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x9, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0xa, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0xb, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0xc, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0xd, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0xe, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0xf, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
template <> struct invnend<16,1,0>
{
inline void operator()(complex_vector x, complex_vector) const noexcept
{
static const xmm rN = { 1.0/16, 1.0/16 };
#ifdef _OPENMP
#pragma omp single
#endif
{
zeroupper();
const xmm x0 = mulpd(rN, getpz(x[0x0]));
const xmm x1 = mulpd(rN, getpz(x[0x1]));
const xmm x2 = mulpd(rN, getpz(x[0x2]));
const xmm x3 = mulpd(rN, getpz(x[0x3]));
const xmm x4 = mulpd(rN, getpz(x[0x4]));
const xmm x5 = mulpd(rN, getpz(x[0x5]));
const xmm x6 = mulpd(rN, getpz(x[0x6]));
const xmm x7 = mulpd(rN, getpz(x[0x7]));
const xmm x8 = mulpd(rN, getpz(x[0x8]));
const xmm x9 = mulpd(rN, getpz(x[0x9]));
const xmm xa = mulpd(rN, getpz(x[0xa]));
const xmm xb = mulpd(rN, getpz(x[0xb]));
const xmm xc = mulpd(rN, getpz(x[0xc]));
const xmm xd = mulpd(rN, getpz(x[0xd]));
const xmm xe = mulpd(rN, getpz(x[0xe]));
const xmm xf = mulpd(rN, getpz(x[0xf]));
const xmm a08 = addpz(x0, x8); const xmm s08 = subpz(x0, x8);
const xmm a4c = addpz(x4, xc); const xmm s4c = subpz(x4, xc);
const xmm a2a = addpz(x2, xa); const xmm s2a = subpz(x2, xa);
const xmm a6e = addpz(x6, xe); const xmm s6e = subpz(x6, xe);
const xmm a19 = addpz(x1, x9); const xmm s19 = subpz(x1, x9);
const xmm a5d = addpz(x5, xd); const xmm s5d = subpz(x5, xd);
const xmm a3b = addpz(x3, xb); const xmm s3b = subpz(x3, xb);
const xmm a7f = addpz(x7, xf); const xmm s7f = subpz(x7, xf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
setpz(x[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x1], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0x2], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0x3], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0x4], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0x5], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0x6], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0x7], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x9], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0xa], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0xb], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0xc], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0xd], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0xe], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0xf], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
///////////////////////////////////////////////////////////////////////////////
// Inverse FFT
///////////////////////////////////////////////////////////////////////////////
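// inv0fft / invnfft: one invcore radix-16 DIF stage per level, then recurse on n/16
// with stride 16*s, swapping the roles of x and y and toggling eo; n = 16, 8, 4, 2
// terminate through the end kernels (the 8, 4 and 2 cases reuse the AVXDIF8 / AVXDIF4
// OpenMP variants).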
template <int n, int s, bool eo> struct inv0fft
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
invcore<n,s>()(x, y, W);
inv0fft<n/16,16*s,!eo>()(y, x, W);
}
};
template <int s, bool eo> struct inv0fft<16,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
inv0end<16,s,eo>()(x, y);
}
};
template <int s, bool eo> struct inv0fft<8,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIF8omp::inv0end<8,s,eo>()(x, y);
}
};
template <int s, bool eo> struct inv0fft<4,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIF4omp::inv0end<4,s,eo>()(x, y);
}
};
template <int s, bool eo> struct inv0fft<2,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIF4omp::inv0end<2,s,eo>()(x, y);
}
};
//-----------------------------------------------------------------------------
template <int n, int s, bool eo> struct invnfft
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
invcore<n,s>()(x, y, W);
invnfft<n/16,16*s,!eo>()(y, x, W);
}
};
template <int s, bool eo> struct invnfft<16,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
invnend<16,s,eo>()(x, y);
}
};
template <int s, bool eo> struct invnfft<8,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIF8omp::invnend<8,s,eo>()(x, y);
}
};
template <int s, bool eo> struct invnfft<4,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIF4omp::invnend<4,s,eo>()(x, y);
}
};
template <int s, bool eo> struct invnfft<2,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIF4omp::invnend<2,s,eo>()(x, y);
}
};
///////////////////////////////////////////////////////////////////////////////
// Power-of-two FFT routines
///////////////////////////////////////////////////////////////////////////////
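// Each entry point below opens a single OpenMP parallel region (firstprivate(x,y,W)
// gives every thread its own copies of the pointers) and dispatches on log_N; the
// omp for / omp single directives inside the kernels do the actual work sharing.
// Sizes from 2^1 up to 2^24 are instantiated.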
inline void fwd(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
#ifdef _OPENMP
#pragma omp parallel firstprivate(x,y,W)
#endif
switch (log_N) {
case 0: break;
case 1: fwdnfft<(1<< 1),1,0>()(x, y, W); break;
case 2: fwdnfft<(1<< 2),1,0>()(x, y, W); break;
case 3: fwdnfft<(1<< 3),1,0>()(x, y, W); break;
case 4: fwdnfft<(1<< 4),1,0>()(x, y, W); break;
case 5: fwdnfft<(1<< 5),1,0>()(x, y, W); break;
case 6: fwdnfft<(1<< 6),1,0>()(x, y, W); break;
case 7: fwdnfft<(1<< 7),1,0>()(x, y, W); break;
case 8: fwdnfft<(1<< 8),1,0>()(x, y, W); break;
case 9: fwdnfft<(1<< 9),1,0>()(x, y, W); break;
case 10: fwdnfft<(1<<10),1,0>()(x, y, W); break;
case 11: fwdnfft<(1<<11),1,0>()(x, y, W); break;
case 12: fwdnfft<(1<<12),1,0>()(x, y, W); break;
case 13: fwdnfft<(1<<13),1,0>()(x, y, W); break;
case 14: fwdnfft<(1<<14),1,0>()(x, y, W); break;
case 15: fwdnfft<(1<<15),1,0>()(x, y, W); break;
case 16: fwdnfft<(1<<16),1,0>()(x, y, W); break;
case 17: fwdnfft<(1<<17),1,0>()(x, y, W); break;
case 18: fwdnfft<(1<<18),1,0>()(x, y, W); break;
case 19: fwdnfft<(1<<19),1,0>()(x, y, W); break;
case 20: fwdnfft<(1<<20),1,0>()(x, y, W); break;
case 21: fwdnfft<(1<<21),1,0>()(x, y, W); break;
case 22: fwdnfft<(1<<22),1,0>()(x, y, W); break;
case 23: fwdnfft<(1<<23),1,0>()(x, y, W); break;
case 24: fwdnfft<(1<<24),1,0>()(x, y, W); break;
}
}
inline void fwd0(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
#ifdef _OPENMP
#pragma omp parallel firstprivate(x,y,W)
#endif
switch (log_N) {
case 0: break;
case 1: fwd0fft<(1<< 1),1,0>()(x, y, W); break;
case 2: fwd0fft<(1<< 2),1,0>()(x, y, W); break;
case 3: fwd0fft<(1<< 3),1,0>()(x, y, W); break;
case 4: fwd0fft<(1<< 4),1,0>()(x, y, W); break;
case 5: fwd0fft<(1<< 5),1,0>()(x, y, W); break;
case 6: fwd0fft<(1<< 6),1,0>()(x, y, W); break;
case 7: fwd0fft<(1<< 7),1,0>()(x, y, W); break;
case 8: fwd0fft<(1<< 8),1,0>()(x, y, W); break;
case 9: fwd0fft<(1<< 9),1,0>()(x, y, W); break;
case 10: fwd0fft<(1<<10),1,0>()(x, y, W); break;
case 11: fwd0fft<(1<<11),1,0>()(x, y, W); break;
case 12: fwd0fft<(1<<12),1,0>()(x, y, W); break;
case 13: fwd0fft<(1<<13),1,0>()(x, y, W); break;
case 14: fwd0fft<(1<<14),1,0>()(x, y, W); break;
case 15: fwd0fft<(1<<15),1,0>()(x, y, W); break;
case 16: fwd0fft<(1<<16),1,0>()(x, y, W); break;
case 17: fwd0fft<(1<<17),1,0>()(x, y, W); break;
case 18: fwd0fft<(1<<18),1,0>()(x, y, W); break;
case 19: fwd0fft<(1<<19),1,0>()(x, y, W); break;
case 20: fwd0fft<(1<<20),1,0>()(x, y, W); break;
case 21: fwd0fft<(1<<21),1,0>()(x, y, W); break;
case 22: fwd0fft<(1<<22),1,0>()(x, y, W); break;
case 23: fwd0fft<(1<<23),1,0>()(x, y, W); break;
case 24: fwd0fft<(1<<24),1,0>()(x, y, W); break;
}
}
inline void fwdn(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
fwd(log_N, x, y, W);
}
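// Naming scheme in this header: fwd0*/inv0* run unscaled, fwdn*/invn* scale by 1/N
// (fwd aliases the scaled forward transform, inv the unscaled inverse), and the
// trailing 'o' variants request the output in the y buffer (eo = 1).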
inline void fwd0o(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
#ifdef _OPENMP
#pragma omp parallel firstprivate(x,y,W)
#endif
switch (log_N) {
case 0: break;
case 1: fwd0fft<(1<< 1),1,1>()(x, y, W); break;
case 2: fwd0fft<(1<< 2),1,1>()(x, y, W); break;
case 3: fwd0fft<(1<< 3),1,1>()(x, y, W); break;
case 4: fwd0fft<(1<< 4),1,1>()(x, y, W); break;
case 5: fwd0fft<(1<< 5),1,1>()(x, y, W); break;
case 6: fwd0fft<(1<< 6),1,1>()(x, y, W); break;
case 7: fwd0fft<(1<< 7),1,1>()(x, y, W); break;
case 8: fwd0fft<(1<< 8),1,1>()(x, y, W); break;
case 9: fwd0fft<(1<< 9),1,1>()(x, y, W); break;
case 10: fwd0fft<(1<<10),1,1>()(x, y, W); break;
case 11: fwd0fft<(1<<11),1,1>()(x, y, W); break;
case 12: fwd0fft<(1<<12),1,1>()(x, y, W); break;
case 13: fwd0fft<(1<<13),1,1>()(x, y, W); break;
case 14: fwd0fft<(1<<14),1,1>()(x, y, W); break;
case 15: fwd0fft<(1<<15),1,1>()(x, y, W); break;
case 16: fwd0fft<(1<<16),1,1>()(x, y, W); break;
case 17: fwd0fft<(1<<17),1,1>()(x, y, W); break;
case 18: fwd0fft<(1<<18),1,1>()(x, y, W); break;
case 19: fwd0fft<(1<<19),1,1>()(x, y, W); break;
case 20: fwd0fft<(1<<20),1,1>()(x, y, W); break;
case 21: fwd0fft<(1<<21),1,1>()(x, y, W); break;
case 22: fwd0fft<(1<<22),1,1>()(x, y, W); break;
case 23: fwd0fft<(1<<23),1,1>()(x, y, W); break;
case 24: fwd0fft<(1<<24),1,1>()(x, y, W); break;
}
}
inline void fwdno(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
#ifdef _OPENMP
#pragma omp parallel firstprivate(x,y,W)
#endif
switch (log_N) {
case 0: break;
case 1: fwdnfft<(1<< 1),1,1>()(x, y, W); break;
case 2: fwdnfft<(1<< 2),1,1>()(x, y, W); break;
case 3: fwdnfft<(1<< 3),1,1>()(x, y, W); break;
case 4: fwdnfft<(1<< 4),1,1>()(x, y, W); break;
case 5: fwdnfft<(1<< 5),1,1>()(x, y, W); break;
case 6: fwdnfft<(1<< 6),1,1>()(x, y, W); break;
case 7: fwdnfft<(1<< 7),1,1>()(x, y, W); break;
case 8: fwdnfft<(1<< 8),1,1>()(x, y, W); break;
case 9: fwdnfft<(1<< 9),1,1>()(x, y, W); break;
case 10: fwdnfft<(1<<10),1,1>()(x, y, W); break;
case 11: fwdnfft<(1<<11),1,1>()(x, y, W); break;
case 12: fwdnfft<(1<<12),1,1>()(x, y, W); break;
case 13: fwdnfft<(1<<13),1,1>()(x, y, W); break;
case 14: fwdnfft<(1<<14),1,1>()(x, y, W); break;
case 15: fwdnfft<(1<<15),1,1>()(x, y, W); break;
case 16: fwdnfft<(1<<16),1,1>()(x, y, W); break;
case 17: fwdnfft<(1<<17),1,1>()(x, y, W); break;
case 18: fwdnfft<(1<<18),1,1>()(x, y, W); break;
case 19: fwdnfft<(1<<19),1,1>()(x, y, W); break;
case 20: fwdnfft<(1<<20),1,1>()(x, y, W); break;
case 21: fwdnfft<(1<<21),1,1>()(x, y, W); break;
case 22: fwdnfft<(1<<22),1,1>()(x, y, W); break;
case 23: fwdnfft<(1<<23),1,1>()(x, y, W); break;
case 24: fwdnfft<(1<<24),1,1>()(x, y, W); break;
}
}
///////////////////////////////////////////////////////////////////////////////
inline void inv(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
#ifdef _OPENMP
#pragma omp parallel firstprivate(x,y,W)
#endif
switch (log_N) {
case 0: break;
case 1: inv0fft<(1<< 1),1,0>()(x, y, W); break;
case 2: inv0fft<(1<< 2),1,0>()(x, y, W); break;
case 3: inv0fft<(1<< 3),1,0>()(x, y, W); break;
case 4: inv0fft<(1<< 4),1,0>()(x, y, W); break;
case 5: inv0fft<(1<< 5),1,0>()(x, y, W); break;
case 6: inv0fft<(1<< 6),1,0>()(x, y, W); break;
case 7: inv0fft<(1<< 7),1,0>()(x, y, W); break;
case 8: inv0fft<(1<< 8),1,0>()(x, y, W); break;
case 9: inv0fft<(1<< 9),1,0>()(x, y, W); break;
case 10: inv0fft<(1<<10),1,0>()(x, y, W); break;
case 11: inv0fft<(1<<11),1,0>()(x, y, W); break;
case 12: inv0fft<(1<<12),1,0>()(x, y, W); break;
case 13: inv0fft<(1<<13),1,0>()(x, y, W); break;
case 14: inv0fft<(1<<14),1,0>()(x, y, W); break;
case 15: inv0fft<(1<<15),1,0>()(x, y, W); break;
case 16: inv0fft<(1<<16),1,0>()(x, y, W); break;
case 17: inv0fft<(1<<17),1,0>()(x, y, W); break;
case 18: inv0fft<(1<<18),1,0>()(x, y, W); break;
case 19: inv0fft<(1<<19),1,0>()(x, y, W); break;
case 20: inv0fft<(1<<20),1,0>()(x, y, W); break;
case 21: inv0fft<(1<<21),1,0>()(x, y, W); break;
case 22: inv0fft<(1<<22),1,0>()(x, y, W); break;
case 23: inv0fft<(1<<23),1,0>()(x, y, W); break;
case 24: inv0fft<(1<<24),1,0>()(x, y, W); break;
}
}
inline void inv0(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
inv(log_N, x, y, W);
}
inline void invn(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
#ifdef _OPENMP
#pragma omp parallel firstprivate(x,y,W)
#endif
switch (log_N) {
case 0: break;
case 1: invnfft<(1<< 1),1,0>()(x, y, W); break;
case 2: invnfft<(1<< 2),1,0>()(x, y, W); break;
case 3: invnfft<(1<< 3),1,0>()(x, y, W); break;
case 4: invnfft<(1<< 4),1,0>()(x, y, W); break;
case 5: invnfft<(1<< 5),1,0>()(x, y, W); break;
case 6: invnfft<(1<< 6),1,0>()(x, y, W); break;
case 7: invnfft<(1<< 7),1,0>()(x, y, W); break;
case 8: invnfft<(1<< 8),1,0>()(x, y, W); break;
case 9: invnfft<(1<< 9),1,0>()(x, y, W); break;
case 10: invnfft<(1<<10),1,0>()(x, y, W); break;
case 11: invnfft<(1<<11),1,0>()(x, y, W); break;
case 12: invnfft<(1<<12),1,0>()(x, y, W); break;
case 13: invnfft<(1<<13),1,0>()(x, y, W); break;
case 14: invnfft<(1<<14),1,0>()(x, y, W); break;
case 15: invnfft<(1<<15),1,0>()(x, y, W); break;
case 16: invnfft<(1<<16),1,0>()(x, y, W); break;
case 17: invnfft<(1<<17),1,0>()(x, y, W); break;
case 18: invnfft<(1<<18),1,0>()(x, y, W); break;
case 19: invnfft<(1<<19),1,0>()(x, y, W); break;
case 20: invnfft<(1<<20),1,0>()(x, y, W); break;
case 21: invnfft<(1<<21),1,0>()(x, y, W); break;
case 22: invnfft<(1<<22),1,0>()(x, y, W); break;
case 23: invnfft<(1<<23),1,0>()(x, y, W); break;
case 24: invnfft<(1<<24),1,0>()(x, y, W); break;
}
}
inline void inv0o(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
#ifdef _OPENMP
#pragma omp parallel firstprivate(x,y,W)
#endif
switch (log_N) {
case 0: break;
case 1: inv0fft<(1<< 1),1,1>()(x, y, W); break;
case 2: inv0fft<(1<< 2),1,1>()(x, y, W); break;
case 3: inv0fft<(1<< 3),1,1>()(x, y, W); break;
case 4: inv0fft<(1<< 4),1,1>()(x, y, W); break;
case 5: inv0fft<(1<< 5),1,1>()(x, y, W); break;
case 6: inv0fft<(1<< 6),1,1>()(x, y, W); break;
case 7: inv0fft<(1<< 7),1,1>()(x, y, W); break;
case 8: inv0fft<(1<< 8),1,1>()(x, y, W); break;
case 9: inv0fft<(1<< 9),1,1>()(x, y, W); break;
case 10: inv0fft<(1<<10),1,1>()(x, y, W); break;
case 11: inv0fft<(1<<11),1,1>()(x, y, W); break;
case 12: inv0fft<(1<<12),1,1>()(x, y, W); break;
case 13: inv0fft<(1<<13),1,1>()(x, y, W); break;
case 14: inv0fft<(1<<14),1,1>()(x, y, W); break;
case 15: inv0fft<(1<<15),1,1>()(x, y, W); break;
case 16: inv0fft<(1<<16),1,1>()(x, y, W); break;
case 17: inv0fft<(1<<17),1,1>()(x, y, W); break;
case 18: inv0fft<(1<<18),1,1>()(x, y, W); break;
case 19: inv0fft<(1<<19),1,1>()(x, y, W); break;
case 20: inv0fft<(1<<20),1,1>()(x, y, W); break;
case 21: inv0fft<(1<<21),1,1>()(x, y, W); break;
case 22: inv0fft<(1<<22),1,1>()(x, y, W); break;
case 23: inv0fft<(1<<23),1,1>()(x, y, W); break;
case 24: inv0fft<(1<<24),1,1>()(x, y, W); break;
}
}
inline void invno(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
#ifdef _OPENMP
#pragma omp parallel firstprivate(x,y,W)
#endif
switch (log_N) {
case 0: break;
case 1: invnfft<(1<< 1),1,1>()(x, y, W); break;
case 2: invnfft<(1<< 2),1,1>()(x, y, W); break;
case 3: invnfft<(1<< 3),1,1>()(x, y, W); break;
case 4: invnfft<(1<< 4),1,1>()(x, y, W); break;
case 5: invnfft<(1<< 5),1,1>()(x, y, W); break;
case 6: invnfft<(1<< 6),1,1>()(x, y, W); break;
case 7: invnfft<(1<< 7),1,1>()(x, y, W); break;
case 8: invnfft<(1<< 8),1,1>()(x, y, W); break;
case 9: invnfft<(1<< 9),1,1>()(x, y, W); break;
case 10: invnfft<(1<<10),1,1>()(x, y, W); break;
case 11: invnfft<(1<<11),1,1>()(x, y, W); break;
case 12: invnfft<(1<<12),1,1>()(x, y, W); break;
case 13: invnfft<(1<<13),1,1>()(x, y, W); break;
case 14: invnfft<(1<<14),1,1>()(x, y, W); break;
case 15: invnfft<(1<<15),1,1>()(x, y, W); break;
case 16: invnfft<(1<<16),1,1>()(x, y, W); break;
case 17: invnfft<(1<<17),1,1>()(x, y, W); break;
case 18: invnfft<(1<<18),1,1>()(x, y, W); break;
case 19: invnfft<(1<<19),1,1>()(x, y, W); break;
case 20: invnfft<(1<<20),1,1>()(x, y, W); break;
case 21: invnfft<(1<<21),1,1>()(x, y, W); break;
case 22: invnfft<(1<<22),1,1>()(x, y, W); break;
case 23: invnfft<(1<<23),1,1>()(x, y, W); break;
case 24: invnfft<(1<<24),1,1>()(x, y, W); break;
}
}
} /////////////////////////////////////////////////////////////////////////////
#endif // otfft_avxdif16omp_h
|
phantom_g6.c
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "omp.h"
#include "avx_type.h"
#include "gravity.h"
#include "timeprof.h"
#include "gp6util.h"
static int flag_predict_j_particle;
static PrdPosVel prdposvel[NPIPES];
static NewAccJrk newaccjrk[NPIPES];
static double acccorr = - 0.125;
static double potcorr = 0.5;
// ----------------------------------------------------
void g6_open(int gpid)
{
int nthread;
nthread = omp_get_max_threads();
avx_open(nthread);
return;
}
void g6_open_(int *gpid)
{
g6_open(*gpid);
return;
}
// ----------------------------------------------------
void g6_set_tunit(int tunit)
{
// do nothing
return;
}
void g6_set_tunit_(int *tunit)
{
g6_set_tunit(*tunit);
return;
}
// ----------------------------------------------------
void g6_set_xunit(int xunit)
{
// do nothing
return;
}
void g6_set_xunit_(int *xunit)
{
g6_set_xunit(*xunit);
return;
}
// ----------------------------------------------------
void g6_reset(int gpid)
{
// do nothing
return;
}
void g6_reset_(int *gpid)
{
g6_reset(*gpid);
return;
}
// ----------------------------------------------------
void g6_reset_fofpga(int gpid)
{
// do nothing
return;
}
void g6_reset_fofpga_(int *gpid)
{
g6_reset_fofpga(*gpid);
return;
}
// ----------------------------------------------------
void g6_close(int gpid)
{
avx_close();
return;
}
void g6_close_(int *gpid)
{
g6_close(*gpid);
return;
}
// ----------------------------------------------------
int g6_npipes(void)
{
return NPIPES;
}
int g6_npipes_(void)
{
return g6_npipes();
}
// ----------------------------------------------------
void g6_set_j_particle(int gpid, int padr, int pidx,
double t, double dt, double mss,
double *snp, double *jrk, double *acc,
double *vel, double *pos)
{
avx_set_j_particle(padr, pidx, t, mss, pos, vel, acc, jrk);
return;
}
void g6_set_j_particle_(int *gpid, int *padr, int *pidx,
double *t, double *dt, double *mss,
double *snp, double *jrk, double *acc,
double *vel, double *pos)
{
g6_set_j_particle(*gpid, *padr, *pidx, *t, *dt, *mss, snp, jrk, acc, vel, pos);
return;
}
// ----------------------------------------------------
void g6_set_ti(int gpid, double time)
{
avx_set_ti(time);
flag_predict_j_particle = 1;
return;
}
void g6_set_ti_(int *gpid, double *time)
{
g6_set_ti(*gpid, *time);
return;
}
// ----------------------------------------------------
void g6calc_firsthalf(int gpid, int nj, int ni, int *pidx,
double (*pos)[3], double (*vel)[3],
double (*acc)[3], double (*jrk)[3], double *pot,
double eps2, double *h2)
{
if(flag_predict_j_particle == 1){
avx_predict_j_particle(nj);
flag_predict_j_particle = 0;
}
avx_initialize_neighbourlist();
return;
}
void g6calc_firsthalf_(int *gpid, int *nj, int *ni, int *pidx,
double (*pos)[3], double (*vel)[3],
double (*acc)[3], double (*jrk)[3], double *pot,
double *eps2, double *h2)
{
g6calc_firsthalf(*gpid, *nj, *ni, pidx, pos, vel, acc, jrk, pot, *eps2, h2);
return;
}
// ----------------------------------------------------
int g6calc_lasthalf(int gpid, int nj, int ni, int *pidx,
double (*pos)[3], double (*vel)[3], double eps2, double *h2,
double (*acc)[3], double (*jrk)[3], double *pot)
{
int i;
for(i = 0; i < ni; i++){
prdposvel[i].xpos = pos[i][0];
prdposvel[i].ypos = pos[i][1];
prdposvel[i].zpos = pos[i][2];
prdposvel[i].xvel = vel[i][0];
prdposvel[i].yvel = vel[i][1];
prdposvel[i].zvel = vel[i][2];
prdposvel[i].id = (float)pidx[i];
prdposvel[i].eps2 = (float)eps2;
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(i = 0; i < ni; i += 2)
gravity_kernel(nj, &prdposvel[i], &newaccjrk[i]);
for(i = 0; i < ni; i++){
acc[i][0] = acccorr * newaccjrk[i].xacc;
acc[i][1] = acccorr * newaccjrk[i].yacc;
acc[i][2] = acccorr * newaccjrk[i].zacc;
jrk[i][0] = acccorr * newaccjrk[i].xjrk;
jrk[i][1] = acccorr * newaccjrk[i].yjrk;
jrk[i][2] = acccorr * newaccjrk[i].zjrk;
pot[i] = potcorr * newaccjrk[i].pot;
}
return 0;
}
int g6calc_lasthalf_(int *gpid, int *nj, int *ni, int *pidx,
double (*pos)[3], double (*vel)[3], double *eps2, double *h2,
double (*acc)[3], double (*jrk)[3], double *pot)
{
return g6calc_lasthalf(*gpid, *nj, *ni, pidx, pos, vel, *eps2, h2, acc, jrk, pot);
}
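// ----------------------------------------------------
// Hedged usage sketch (not part of the original source): the guard macro
// G6_USAGE_EXAMPLE and all particle values are illustrative assumptions.
// The call order mirrors the usual GRAPE-6 convention implemented above:
// load j-particles, set the shared time, then issue the firsthalf/lasthalf
// pair for a block of i-particles.
#ifdef G6_USAGE_EXAMPLE
static void g6_usage_example(void)
{
int idx[2] = {0, 1};
double mss[2] = {0.5, 0.5};
double pos[2][3] = {{0.0, 0.0, 0.0}, {1.0, 0.0, 0.0}};
double vel[2][3] = {{0.0, 0.5, 0.0}, {0.0, -0.5, 0.0}};
double acc[2][3] = {{0.0}}, jrk[2][3] = {{0.0}}, snp[2][3] = {{0.0}};
double pot[2] = {0.0, 0.0}, h2[2] = {0.0, 0.0};
double eps2 = 1.0e-4;
int i;
g6_open(0);
// load both particles as j-particles at time 0
for(i = 0; i < 2; i++)
g6_set_j_particle(0, i, idx[i], 0.0, 0.0, mss[i],
snp[i], jrk[i], acc[i], vel[i], pos[i]);
g6_set_ti(0, 0.0); // predict j-particles to this time on the next firsthalf
g6calc_firsthalf(0, 2, 2, idx, pos, vel, acc, jrk, pot, eps2, h2);
g6calc_lasthalf(0, 2, 2, idx, pos, vel, eps2, h2, acc, jrk, pot);
g6_close(0);
}
#endif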
// ----------------------------------------------------
int g6calc_lasthalf2(int gpid, int nj, int ni, int *pidx,
double (*pos)[3], double (*vel)[3], double eps2, double *h2,
double (*acc)[3], double (*jrk)[3], double *pot, int *nnb)
{
int i;
for(i = 0; i < ni; i++){
prdposvel[i].xpos = pos[i][0];
prdposvel[i].ypos = pos[i][1];
prdposvel[i].zpos = pos[i][2];
prdposvel[i].xvel = vel[i][0];
prdposvel[i].yvel = vel[i][1];
prdposvel[i].zvel = vel[i][2];
prdposvel[i].id = (float)pidx[i];
prdposvel[i].eps2 = (float)eps2;
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(i = 0; i < ni; i += 2)
gravity_kernel2(nj, &prdposvel[i], &newaccjrk[i]);
for(i = 0; i < ni; i++){
acc[i][0] = acccorr * newaccjrk[i].xacc;
acc[i][1] = acccorr * newaccjrk[i].yacc;
acc[i][2] = acccorr * newaccjrk[i].zacc;
jrk[i][0] = acccorr * newaccjrk[i].xjrk;
jrk[i][1] = acccorr * newaccjrk[i].yjrk;
jrk[i][2] = acccorr * newaccjrk[i].zjrk;
pot[i] = potcorr * newaccjrk[i].pot;
nnb[i] = newaccjrk[i].nnb;
}
return 0;
}
int g6calc_lasthalf2_(int *gpid, int *nj, int *ni, int *pidx,
double (*pos)[3], double (*vel)[3], double *eps2, double *h2,
double (*acc)[3], double (*jrk)[3], double *pot, int *nnb)
{
return g6calc_lasthalf2(*gpid, *nj, *ni, pidx, pos, vel, *eps2, h2, acc, jrk, pot, nnb);
}
// ----------------------------------------------------
int g6calc_lasthalf2p(int gpid, int nj, int ni, int *pidx,
double (*pos)[3], double (*vel)[3], double eps2, double *h2,
double (*acc)[3], double (*jrk)[3], double *pot, int *nnb, float *rnnb)
{
int i;
for(i = 0; i < ni; i++){
prdposvel[i].xpos = pos[i][0];
prdposvel[i].ypos = pos[i][1];
prdposvel[i].zpos = pos[i][2];
prdposvel[i].xvel = vel[i][0];
prdposvel[i].yvel = vel[i][1];
prdposvel[i].zvel = vel[i][2];
prdposvel[i].id = (float)pidx[i];
prdposvel[i].eps2 = (float)eps2;
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(i = 0; i < ni; i += 2)
gravity_kernel2(nj, &prdposvel[i], &newaccjrk[i]);
for(i = 0; i < ni; i++){
acc[i][0] = acccorr * newaccjrk[i].xacc;
acc[i][1] = acccorr * newaccjrk[i].yacc;
acc[i][2] = acccorr * newaccjrk[i].zacc;
jrk[i][0] = acccorr * newaccjrk[i].xjrk;
jrk[i][1] = acccorr * newaccjrk[i].yjrk;
jrk[i][2] = acccorr * newaccjrk[i].zjrk;
pot[i] = potcorr * newaccjrk[i].pot;
nnb[i] = newaccjrk[i].nnb;
rnnb[i] = newaccjrk[i].rnnb;
}
return 0;
}
int g6calc_lasthalf2p_(int *gpid, int *nj, int *ni, int *pidx,
double (*pos)[3], double (*vel)[3], double *eps2, double *h2,
double (*acc)[3], double (*jrk)[3], double *pot, int *nnb, float *rnnb)
{
return g6calc_lasthalf2p(*gpid, *nj, *ni, pidx, pos, vel, *eps2, h2, acc, jrk, pot, nnb, rnnb);
}
// ----------------------------------------------------
int g6calc_lasthalfn(int gpid, int nj, int ni, int *pidx,
double (*pos)[3], double (*vel)[3], double eps2, double *h2,
double (*acc)[3], double (*jrk)[3], double *pot)
{
int i;
for(i = 0; i < ni; i++){
prdposvel[i].xpos = pos[i][0];
prdposvel[i].ypos = pos[i][1];
prdposvel[i].zpos = pos[i][2];
prdposvel[i].xvel = vel[i][0];
prdposvel[i].yvel = vel[i][1];
prdposvel[i].zvel = vel[i][2];
prdposvel[i].id = (float)pidx[i];
prdposvel[i].eps2 = (float)eps2;
prdposvel[i].h2 = (float)h2[i];
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(i = 0; i < ni; i += 2){
int ithread = omp_get_thread_num();
gravity_kerneln(nj, &prdposvel[i], &newaccjrk[i], i, ithread);
}
for(i = 0; i < ni; i++){
acc[i][0] = acccorr * newaccjrk[i].xacc;
acc[i][1] = acccorr * newaccjrk[i].yacc;
acc[i][2] = acccorr * newaccjrk[i].zacc;
jrk[i][0] = acccorr * newaccjrk[i].xjrk;
jrk[i][1] = acccorr * newaccjrk[i].yjrk;
jrk[i][2] = acccorr * newaccjrk[i].zjrk;
pot[i] = potcorr * newaccjrk[i].pot;
}
return 0;
}
int g6calc_lasthalfn_(int *gpid, int *nj, int *ni, int *pidx,
double (*pos)[3], double (*vel)[3], double *eps2, double *h2,
double (*acc)[3], double (*jrk)[3], double *pot)
{
return g6calc_lasthalfn(*gpid, *nj, *ni, pidx, pos, vel, *eps2, h2, acc, jrk, pot);
}
// ----------------------------------------------------
int g6calc_lasthalf2n(int gpid, int nj, int ni, int *pidx,
double (*pos)[3], double (*vel)[3], double eps2, double *h2,
double (*acc)[3], double (*jrk)[3], double *pot, int *nnb)
{
int i;
for(i = 0; i < ni; i++){
prdposvel[i].xpos = pos[i][0];
prdposvel[i].ypos = pos[i][1];
prdposvel[i].zpos = pos[i][2];
prdposvel[i].xvel = vel[i][0];
prdposvel[i].yvel = vel[i][1];
prdposvel[i].zvel = vel[i][2];
prdposvel[i].id = (float)pidx[i];
prdposvel[i].eps2 = (float)eps2;
prdposvel[i].h2 = (float)h2[i];
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(i = 0; i < ni; i += 2){
int ithread = omp_get_thread_num();
gravity_kernel2n(nj, &prdposvel[i], &newaccjrk[i], i, ithread);
}
for(i = 0; i < ni; i++){
acc[i][0] = acccorr * newaccjrk[i].xacc;
acc[i][1] = acccorr * newaccjrk[i].yacc;
acc[i][2] = acccorr * newaccjrk[i].zacc;
jrk[i][0] = acccorr * newaccjrk[i].xjrk;
jrk[i][1] = acccorr * newaccjrk[i].yjrk;
jrk[i][2] = acccorr * newaccjrk[i].zjrk;
pot[i] = potcorr * newaccjrk[i].pot;
nnb[i] = newaccjrk[i].nnb;
}
return 0;
}
int g6calc_lasthalf2n_(int *gpid, int *nj, int *ni, int *pidx,
double (*pos)[3], double (*vel)[3], double *eps2, double *h2,
double (*acc)[3], double (*jrk)[3], double *pot, int *nnb)
{
return g6calc_lasthalf2n(*gpid, *nj, *ni, pidx, pos, vel, *eps2, h2, acc, jrk, pot, nnb);
}
// ----------------------------------------------------
int g6calc_lasthalf2np(int gpid, int nj, int ni, int *pidx,
double (*pos)[3], double (*vel)[3], double eps2, double *h2,
double (*acc)[3], double (*jrk)[3], double *pot, int *nnb, float *rnnb)
{
int i;
for(i = 0; i < ni; i++){
prdposvel[i].xpos = pos[i][0];
prdposvel[i].ypos = pos[i][1];
prdposvel[i].zpos = pos[i][2];
prdposvel[i].xvel = vel[i][0];
prdposvel[i].yvel = vel[i][1];
prdposvel[i].zvel = vel[i][2];
prdposvel[i].id = (float)pidx[i];
prdposvel[i].eps2 = (float)eps2;
prdposvel[i].h2 = (float)h2[i];
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(i = 0; i < ni; i += 2){
int ithread = omp_get_thread_num();
gravity_kernel2n(nj, &prdposvel[i], &newaccjrk[i], i, ithread);
}
for(i = 0; i < ni; i++){
acc[i][0] = acccorr * newaccjrk[i].xacc;
acc[i][1] = acccorr * newaccjrk[i].yacc;
acc[i][2] = acccorr * newaccjrk[i].zacc;
jrk[i][0] = acccorr * newaccjrk[i].xjrk;
jrk[i][1] = acccorr * newaccjrk[i].yjrk;
jrk[i][2] = acccorr * newaccjrk[i].zjrk;
pot[i] = potcorr * newaccjrk[i].pot;
nnb[i] = newaccjrk[i].nnb;
rnnb[i] = newaccjrk[i].rnnb;
}
return 0;
}
int g6calc_lasthalf2np_(int *gpid, int *nj, int *ni, int *pidx,
double (*pos)[3], double (*vel)[3], double *eps2, double *h2,
double (*acc)[3], double (*jrk)[3], double *pot, int *nnb, float *rnnb)
{
return g6calc_lasthalf2np(*gpid, *nj, *ni, pidx, pos, vel, *eps2, h2, acc, jrk, pot, nnb, rnnb);
}
// ----------------------------------------------------
void g6_setup_njdata(int gpid, int nj)
{
// do nothing
return;
}
void g6_setup_njdata_(int *gpid, int *nj)
{
g6_setup_njdata(*gpid, *nj);
return;
}
// ----------------------------------------------------
int g6_read_neighbour_list(int gpid)
{
return avx_get_neighbourlist_error();
// 0: success, -1: hard err, 1: overflow of hardware memory
}
int g6_read_neighbour_list_(int *gpid)
{
return g6_read_neighbour_list(*gpid);
}
// ----------------------------------------------------
int g6_get_neighbour_list(int gpid, int ipipe, int maxlen, int *nblen, int *nbl)
{
return avx_get_neighbourlist(ipipe, maxlen, nblen, nbl);
// 0: success, 1: nb > maxlen
}
int g6_get_neighbour_list_(int *gpid, int *ipipe, int *maxlen, int *nblen, int *nbl)
{
return g6_get_neighbour_list(*gpid, *ipipe, *maxlen, nblen, nbl);
}
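// ----------------------------------------------------
// Hedged sketch (not part of the original source): how the two neighbour-list
// calls above are typically combined after a g6calc_lasthalfn()-style force
// loop. The guard macro and the caller-supplied maxlen/nbl buffer are
// illustrative assumptions; the return codes follow the comments above.
#ifdef G6_USAGE_EXAMPLE
static int g6_collect_neighbours_example(int gpid, int ni, int maxlen, int *nbl)
{
int ipipe, nblen;
if(g6_read_neighbour_list(gpid) != 0)
return -1; // hardware error or overflow of the neighbour memory
for(ipipe = 0; ipipe < ni; ipipe++){
if(g6_get_neighbour_list(gpid, ipipe, maxlen, &nblen, nbl) != 0)
return 1; // this pipe found more than maxlen neighbours
// ... consume the nblen indices stored in nbl for pipe ipipe ...
}
return 0;
}
#endif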
// ----------------------------------------------------
void g6_debugfunc(int tmp)
{
// avx_debugfunc();
FILE *fp;
fp = fopen("dump.dat", "a");
fprintf(fp, "%d\n", tmp);
fclose(fp);
return;
}
void g6_debugfunc_(int *tmp)
{
g6_debugfunc(*tmp);
return;
}
// ----------------------------------------------------
void g6_debugfunc_double(double tmp)
{
FILE *fp;
fp = fopen("dump.dat", "a");
fprintf(fp, "%+e\n", tmp);
fclose(fp);
return;
}
void g6_debugfunc_double_(double *tmp)
{
g6_debugfunc_double(*tmp);
return;
}
// ----------------------------------------------------
void g6_dump(double tim, int n, double *m, double (*x)[3], double (*v)[3])
{
int i;
char out[1024];
FILE *fp;
sprintf(out, "dump.dat.%05d", (int)tim);
fp = fopen(out, "w");
for(i = 0; i < n; i++){
fprintf(fp, "%+.16e", m[i]);
fprintf(fp, " %+.16e %+.16e %+.16e", x[i][0], x[i][1], x[i][2]);
fprintf(fp, " %+.16e %+.16e %+.16e", v[i][0], v[i][1], v[i][2]);
fprintf(fp, "\n");
}
fclose(fp);
return;
}
void g6_dump_(double *tim, int *n, double *m, double (*x)[3], double (*v)[3])
{
g6_dump(*tim, *n, m, x, v);
return;
}
|
Parser.h
|
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
struct OMPTraitProperty;
struct OMPTraitSelector;
struct OMPTraitSet;
class OMPTraitInfo;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class ParsingOpenMPDirectiveRAII;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This location is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
mutable IdentifierInfo *Ident_abstract;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool, Ident_Bool - cached IdentifierInfos for "vector"
/// and "bool" fast comparison. Only present if AltiVec or ZVector are
/// enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
IdentifierInfo *Ident_Bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> FloatControlHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFenvAccessHandler;
std::unique_ptr<PragmaHandler> STDCFenvRoundHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// Parsing OpenMP directive mode.
bool OpenMPDirectiveParsing = false;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp; it is a workaround
/// to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// Current kind of OpenMP clause
OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown;
/// RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
unsigned &Depth;
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
void operator++() {
++Depth;
++AddedLevels;
}
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
void setAddedDepth(unsigned D) {
Depth = Depth - AddedLevels + D;
AddedLevels = D;
}
unsigned getDepth() const { return Depth; }
unsigned getOriginalDepth() const { return Depth - AddedLevels; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
void MaybeDestroyTemplateIds() {
if (!TemplateIds.empty() &&
(Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens()))
DestroyTemplateIds();
}
void DestroyTemplateIds();
/// RAII object to destroy TemplateIdAnnotations where possible, from a
/// likely-good position during parsing.
struct DestroyTemplateIdAnnotationsRAIIObj {
Parser &Self;
DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {}
~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); }
};
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
/// Flags used to rank candidate template names when there is more than one
/// '<' in a scope.
enum Priority : unsigned short {
/// A non-dependent name that is a potential typo for a template name.
PotentialTypo = 0x0,
/// A dependent name that might instantiate to a template-name.
DependentName = 0x2,
/// A space appears before the '<' token.
SpaceBeforeLess = 0x0,
/// No space before the '<' token
NoSpaceBeforeLess = 0x1,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
};
struct Loc {
Expr *TemplateName;
SourceLocation LessLoc;
AngleBracketTracker::Priority Priority;
unsigned short ParenCount, BracketCount, BraceCount;
bool isActive(Parser &P) const {
return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
P.BraceCount == BraceCount;
}
bool isActiveOrNested(Parser &P) const {
return isActive(P) || P.ParenCount > ParenCount ||
P.BracketCount > BracketCount || P.BraceCount > BraceCount;
}
};
SmallVector<Loc, 8> Locs;
/// Add an expression that might have been intended to be a template name.
/// In the case of ambiguity, we arbitrarily select the innermost such
/// expression, for example in 'foo < bar < baz', 'bar' is the current
/// candidate. No attempt is made to track that 'foo' is also a candidate
/// for the case where we see a second suspicious '>' token.
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
Priority Prio) {
if (!Locs.empty() && Locs.back().isActive(P)) {
if (Locs.back().Priority <= Prio) {
Locs.back().TemplateName = TemplateName;
Locs.back().LessLoc = LessLoc;
Locs.back().Priority = Prio;
}
} else {
Locs.push_back({TemplateName, LessLoc, Prio,
P.ParenCount, P.BracketCount, P.BraceCount});
}
}
/// Mark the current potential missing template location as having been
/// handled (this happens if we pass a "corresponding" '>' or '>>' token
/// or leave a bracket scope).
void clear(Parser &P) {
while (!Locs.empty() && Locs.back().isActiveOrNested(P))
Locs.pop_back();
}
/// Get the current enclosing expression that might have been intended to be
/// a template name.
Loc *getCurrent(Parser &P) {
if (!Locs.empty() && Locs.back().isActive(P))
return &Locs.back();
return nullptr;
}
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
/// This context permits declarations in language modes where declarations
/// are not statements.
AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2,
/// This context is at the top level of a GNU statement expression.
InStmtExpr = 0x4,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
bool TryConsumeToken(tok::TokenKind Expected) {
if (Tok.isNot(Expected))
return false;
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return true;
}
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
if (Tok.isAnnotation())
return ConsumeAnnotationToken();
return ConsumeToken();
}
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.isOneOf(tok::l_paren, tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
Token Next = Tok;
PP.EnterToken(Consumed, /*IsReinject*/true);
PP.Lex(Tok);
PP.EnterToken(Next, /*IsReinject*/true);
}
SourceLocation ConsumeAnnotationToken() {
assert(Tok.isAnnotation() && "wrong consume method");
SourceLocation Loc = Tok.getLocation();
PrevTokLocation = Tok.getAnnotationEndLoc();
PP.Lex(Tok);
return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount) {
AngleBrackets.clear(*this);
--ParenCount; // Don't let unbalanced )'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount) {
AngleBrackets.clear(*this);
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount) {
AngleBrackets.clear(*this);
--BraceCount; // Don't let unbalanced }'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
if (PP.isCodeCompletionEnabled())
PP.setCodeCompletionReached();
// Cut off parsing by acting as if we reached the end-of-file.
Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ROUND...
void HandlePragmaFEnvRound();
/// Handle the annotation token produced for
/// #pragma float_control
void HandlePragmaFloatControl();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
if (N == 0 || Tok.is(tok::eof)) return Tok;
return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static TypeResult getTypeAnnotation(const Token &Tok) {
if (!Tok.getAnnotationValue())
return TypeError();
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
static void setTypeAnnotation(Token &Tok, TypeResult T) {
assert((T.isInvalid() || T.get()) &&
"produced a valid-but-null type annotation?");
Tok.setAnnotationValue(T.isInvalid() ? nullptr : T.get().getAsOpaquePtr());
}
static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
return static_cast<NamedDecl*>(Tok.getAnnotationValue());
}
static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
Tok.setAnnotationValue(ND);
}
static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
}
static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
Tok.setAnnotationValue(ND);
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
bool MightBeCXXScopeToken() {
return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
(Tok.is(tok::annot_template_id) &&
NextToken().is(tok::coloncolon)) ||
Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super);
}
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext);
}
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
Tok.getIdentifierInfo() != Ident_Bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC);
if (Tok.isAnnotation())
return false;
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
Parser &P;
PreferredTypeBuilder PrevPreferredType;
Token PrevTok;
size_t PrevTentativelyDeclaredIdentifierCount;
unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
bool isActive;
public:
explicit TentativeParsingAction(Parser &p)
: P(p), PrevPreferredType(P.PreferredType) {
PrevTok = P.Tok;
PrevTentativelyDeclaredIdentifierCount =
P.TentativelyDeclaredIdentifiers.size();
PrevParenCount = P.ParenCount;
PrevBracketCount = P.BracketCount;
PrevBraceCount = P.BraceCount;
P.PP.EnableBacktrackAtThisPos();
isActive = true;
}
void Commit() {
assert(isActive && "Parsing action was finished!");
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.PP.CommitBacktrackedTokens();
isActive = false;
}
void Revert() {
assert(isActive && "Parsing action was finished!");
P.PP.Backtrack();
P.PreferredType = PrevPreferredType;
P.Tok = PrevTok;
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.ParenCount = PrevParenCount;
P.BracketCount = PrevBracketCount;
P.BraceCount = PrevBraceCount;
isActive = false;
}
~TentativeParsingAction() {
assert(!isActive && "Forgot to call Commit or Revert!");
}
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
: private Parser::TentativeParsingAction {
public:
RevertingTentativeParsingAction(Parser &P)
: Parser::TentativeParsingAction(P) {}
~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
Decl *DC;
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
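///
/// For illustration only (not taken from this header), a caller that has just
/// parsed a parenthesized expression might require the closing parenthesis
/// like so:
/// \code
///   if (ExpectAndConsume(tok::r_paren))
///     return ExprError();
/// \endcode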
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
/// Kinds of compound pseudo-tokens formed by a sequence of two real tokens.
enum class CompoundToken {
/// A '(' '{' beginning a statement-expression.
StmtExprBegin,
/// A '}' ')' ending a statement-expression.
StmtExprEnd,
/// A '[' '[' beginning a C++11 or C2x attribute.
AttrBegin,
/// A ']' ']' ending a C++11 or C2x attribute.
AttrEnd,
/// A '::' '*' forming a C++ pointer-to-member declaration.
MemberPtr,
};
/// Check that a compound operator was written in a "sensible" way, and warn
/// if not.
void checkCompoundToken(SourceLocation FirstTokLoc,
tok::TokenKind FirstTokKind, CompoundToken Op);
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
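///
/// Illustrative use (not from this file); the flag names assume the Scope
/// interface declared in clang/Sema/Scope.h:
/// \code
///   ParseScope BodyScope(this, Scope::FnScope | Scope::DeclScope);
///   // ... parse the body; the scope is exited when BodyScope is destroyed.
/// \endcode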
class ParseScope {
Parser *Self;
ParseScope(const ParseScope &) = delete;
void operator=(const ParseScope &) = delete;
public:
// ParseScope - Construct a new object to manage a scope in the
// parser Self where the new Scope is created with the flags
// ScopeFlags, but only when we aren't about to enter a compound statement.
ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
bool BeforeCompoundStmt = false)
: Self(Self) {
if (EnteredScope && !BeforeCompoundStmt)
Self->EnterScope(ScopeFlags);
else {
if (BeforeCompoundStmt)
Self->incrementMSManglingNumber();
this->Self = nullptr;
}
}
// Exit - Exit the scope associated with this object now, rather
// than waiting until the object is destroyed.
void Exit() {
if (Self) {
Self->ExitScope();
Self = nullptr;
}
}
~ParseScope() {
Exit();
}
};
/// Introduces zero or more scopes for parsing. The scopes will all be exited
/// when the object is destroyed.
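///
/// Illustrative use (not from this file): re-entering the template scopes of
/// a possibly-templated declaration D before late-parsing one of its members:
/// \code
///   MultiParseScope Scopes(*this);
///   ReenterTemplateScopes(Scopes, D);
///   // ... parse ...; every entered scope is exited when Scopes is destroyed.
/// \endcode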
class MultiParseScope {
Parser &Self;
unsigned NumScopes = 0;
MultiParseScope(const MultiParseScope&) = delete;
public:
MultiParseScope(Parser &Self) : Self(Self) {}
void Enter(unsigned ScopeFlags) {
Self.EnterScope(ScopeFlags);
++NumScopes;
}
void Exit() {
while (NumScopes) {
Self.ExitScope();
--NumScopes;
}
}
~MultiParseScope() {
Exit();
}
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
/// Re-enter the template scopes for a declaration that might be a template.
unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character. Balances (), [], and {} delimiter tokens while
/// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
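///
/// For illustration (not from the original header), recovering to the
/// matching ')' without consuming a statement-terminating ';' might look
/// like:
/// \code
///   SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
/// \endcode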
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2, T3};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
/// The location of the first statement inside an else that might
/// have a misleading indentation. If there is no
/// MisleadingIndentationChecker active on an else, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
virtual void ParseLexedPragmas();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
void ParseLexedPragmas() override;
private:
Parser *Self;
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
CachedTokens Toks;
IdentifierInfo &AttrName;
IdentifierInfo *MacroII = nullptr;
SourceLocation AttrNameLoc;
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
void addDecl(Decl *D) { Decls.push_back(D); }
};
/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
class LateParsedPragma : public LateParsedDeclaration {
Parser *Self = nullptr;
AccessSpecifier AS = AS_none;
CachedTokens Toks;
public:
explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
: Self(P), AS(AS) {}
void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
const CachedTokens &toks() const { return Toks; }
AccessSpecifier getAccessSpecifier() const { return AS; }
void ParseLexedPragmas() override;
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
bool parseSoon() { return ParseSoon; }
private:
bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
Decl *D;
CachedTokens Toks;
explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
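/// For example (illustrative):
///   struct S {
///     void f(int X = DefaultX);       // default-argument tokens cached
///     static const int DefaultX = 4;  // visible once S is complete
///   };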
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser *Self;
/// Method - The method declaration.
Decl *Method;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
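/// For example (illustrative):
///   struct S {
///     int X = initialValue();  // initializer tokens cached, parsed once S
///                              // is complete
///     static int initialValue() { return 42; }
///   };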
/// LateParsedDeclarationsContainer - During parsing of a top-level
/// (non-nested) C++ class, method declarations that contain parts which
/// won't be parsed until after the definition is completed
/// (C++ [class.mem]p2), together with any attached inline definitions,
/// are stored here along with the cached tokens that will later be parsed
/// to create those entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), IsInterface(IsInterface),
TagOrTemplate(TagOrTemplate) {}
/// Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// Whether this class is an __interface.
bool IsInterface : 1;
/// The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
Parser &P;
bool Popped;
Sema::ParsingClassState State;
public:
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
bool IsInterface)
: P(P), Popped(false),
State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// Pop this class off the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
P.PopParsingClass(State);
}
~ParsingClassDefinition() {
if (!Popped)
P.PopParsingClass(State);
}
};
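/// Illustrative usage sketch (TagDecl is a placeholder; the actual call
/// sites live in the class-parsing code):
///   ParsingClassDefinition ParsingDef(*this, TagDecl,
///                                     /*TopLevelClass=*/true,
///                                     /*IsInterface=*/false);
///   // ... parse the member-specification ...
///   ParsingDef.Pop();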
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
ParsedTemplateInfo()
: Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
bool lastParameterListWasEmpty = false)
: Kind(isSpecialization? ExplicitSpecialization : Template),
TemplateParams(TemplateParams),
LastParameterListWasEmpty(lastParameterListWasEmpty) { }
explicit ParsedTemplateInfo(SourceLocation ExternLoc,
SourceLocation TemplateLoc)
: Kind(ExplicitInstantiation), TemplateParams(nullptr),
ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
LastParameterListWasEmpty(false){ }
/// The kind of template we are parsing.
enum {
/// We are not parsing a template at all.
NonTemplate = 0,
/// We are parsing a template declaration.
Template,
/// We are parsing an explicit specialization.
ExplicitSpecialization,
/// We are parsing an explicit instantiation.
ExplicitInstantiation
} Kind;
/// The template parameter lists, for template declarations
/// and explicit specializations.
TemplateParameterLists *TemplateParams;
/// The location of the 'extern' keyword, if any, for an explicit
/// instantiation
SourceLocation ExternLoc;
/// The location of the 'template' keyword, for an explicit
/// instantiation.
SourceLocation TemplateLoc;
/// Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
SourceRange getSourceRange() const LLVM_READONLY;
};
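/// For example (illustrative):
///   template <typename T> void f(T);  // Kind == Template
///   template <> void f<int>(int);     // Kind == ExplicitSpecialization
///   extern template void f<int>(int); // Kind == ExplicitInstantiation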
// In ParseCXXInlineMethods.cpp.
struct ReenterTemplateScopeRAII;
struct ReenterClassScopeRAII;
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
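/// For example (Objective-C, illustrative): in 'NSArray<NSString *> *' the
/// angle brackets hold type arguments, while in 'id<NSCopying>' they hold
/// protocol qualifiers; both begin with '<' and are disambiguated here.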
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
struct ObjCImplParsingDataRAII {
Parser &P;
Decl *Dcl;
bool HasCFunction;
typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
LateParsedObjCMethodContainer LateParsedObjCMethods;
ObjCImplParsingDataRAII(Parser &parser, Decl *D)
: P(parser), Dcl(D), HasCFunction(false) {
P.CurParsedObjCImpl = this;
Finished = false;
}
~ObjCImplParsingDataRAII();
void finish(SourceRange AtEnd);
bool isFinished() const { return Finished; }
private:
bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult
ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
/// Control what ParseCastExpression will parse.
enum CastParseKind {
AnyCastExpr = 0,
UnaryExprOnly,
PrimaryExprOnly
};
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
if (auto *Info = AngleBrackets.getCurrent(*this))
return checkPotentialAngleBracketDelimiter(*Info, OpToken);
return false;
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseSYCLUniqueStableNameExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
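/// For example (illustrative):
///   (a + b)                // SimpleExpr
///   (args + ...)           // FoldExpr (C++17 fold-expression)
///   ({ int i = f(); i; })  // CompoundStmt (GNU statement expression)
///   (int[]){1, 2, 3}       // CompoundLiteral
///   (int)x                 // CastExpr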
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHasErrors,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false,
bool InUsingDeclaration = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
/// This appears to be a lambda-introducer, which has been fully parsed.
Success,
/// This is a lambda-introducer, but has not been fully parsed, and this
/// function needs to be called again to parse it.
Incomplete,
/// This is definitely an Objective-C message send expression, rather than
/// a lambda-introducer, attribute-specifier, or array designator.
MessageSend,
/// This is not a lambda-introducer.
Invalid,
};
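/// For example (Objective-C++, illustrative): '[self] { return 0; }()' begins
/// with a lambda-introducer capturing 'self', whereas '[self foo]' is an
/// Objective-C message send; tentatively parsing the introducer tells these
/// apart.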
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr,
bool EnterForConditionScope = false);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C++ Concepts
ExprResult ParseRequiresExpression();
void ParseTrailingRequiresClause(Declarator &D);
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
/// assignment-expression
/// '{' ...
ExprResult ParseInitializer() {
if (Tok.isNot(tok::l_brace))
return ParseAssignmentExpression();
return ParseBraceInitializer();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
struct DesignatorCompletionInfo {
SmallVectorImpl<Expr *> &InitExprs;
QualType PreferredBaseType;
};
ExprResult ParseInitializerWithPotentialDesignator(DesignatorCompletionInfo);
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc, Sema::ConditionKind CK,
SourceLocation *LParenLoc = nullptr,
SourceLocation *RParenLoc = nullptr);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// The behavior the parser should adopt for this __if_exists or
/// __if_not_exists block.
IfExistsBehavior Behavior;
};
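/// For example (Microsoft extension, illustrative):
///   __if_exists(S::Member) {
///     // parsed only if S::Member exists (IEB_Parse); otherwise skipped
///     // (IEB_Skip) or treated as dependent in templates (IEB_Dependent)
///   }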
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out; there are other significant restrictions on specifiers that
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_condition:
return false;
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return true;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Whether a defining-type-specifier is permitted in a given context.
enum class AllowDefiningTypeSpec {
/// The grammar doesn't allow a defining-type-specifier here, and we must
/// not parse one (eg, because a '{' could mean something else).
No,
/// The grammar doesn't allow a defining-type-specifier here, but we permit
/// one for error recovery purposes. Sema will reject.
NoButErrorRecovery,
/// The grammar allows a defining-type-specifier here, even though it's
/// always invalid. Sema will reject.
YesButInvalid,
/// The grammar allows a defining-type-specifier here, and one can be valid.
Yes
};
/// Is this a context in which we are parsing defining-type-specifiers (and
/// so permit class and enum definitions in addition to non-defining class and
/// enum elaborated-type-specifiers)?
static AllowDefiningTypeSpec
isDefiningTypeSpecifierContext(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_alias_declaration:
case DeclSpecContext::DSC_objc_method_result:
return AllowDefiningTypeSpec::Yes;
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_template_param:
return AllowDefiningTypeSpec::YesButInvalid;
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
return AllowDefiningTypeSpec::NoButErrorRecovery;
case DeclSpecContext::DSC_trailing:
return AllowDefiningTypeSpec::No;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which an opaque-enum-declaration can appear?
static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
return true;
case DeclSpecContext::DSC_alias_declaration:
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_trailing:
return false;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_type_specifier:
return true;
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return false;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
SourceLocation ColonLoc;
ExprResult RangeExpr;
bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
struct ForRangeInfo : ForRangeInit {
StmtResult LoopVar;
};
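/// For example (illustrative): when 'for (auto x : Vec)' is parsed, the
/// declaration 'auto x' turns out to be a for-range-declaration; ColonLoc
/// records the ':' and RangeExpr holds 'Vec'. ForRangeInfo additionally
/// carries the loop-variable statement in LoopVar.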
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs, bool RequireSemi,
ForRangeInit *FRI = nullptr,
SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
RecordDecl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationSpecifier() == TPResult::True;
return isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationStatement();
return isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
if (getLangOpts().OpenMP)
Actions.startOpenMPLoop();
if (getLangOpts().CPlusPlus)
return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
return isDeclarationSpecifier(true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdInParens, isAmbiguous);
isAmbiguous = false;
return isTypeSpecifierQualifier();
}
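/// For example, in '(int*)p' the parenthesized tokens form a type-id, while
/// in '(a*b)' they form an expression; a form such as '(T())' is ambiguous
/// and sets 'isAmbiguous'.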
bool isTypeIdInParens() {
bool isAmbiguous;
return isTypeIdInParens(isAmbiguous);
}
/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not assume that the type-id
/// is enclosed in parentheses.
bool isTypeIdUnambiguously() {
bool IsAmbiguous;
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
return isTypeSpecifierQualifier();
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
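/// For example (illustrative):
///   if (int x = f())     // ConditionDecl
///   if (int x = f(); x)  // InitStmtDecl (C++17 init-statement)
///   if (x == 0)          // Expression
/// and, where a for-range-declaration is permitted, 'auto x : v' is
/// classified as ForRangeDecl.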
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
bool isAmbiguous;
return isCXXTypeId(Context, isAmbiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// Determine whether we could have an enum-base.
///
/// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise
/// only consider this to be an enum-base if the next token is a '{'.
///
/// \return \c false if this cannot possibly be an enum base; \c true
/// otherwise.
bool isEnumBase(bool AllowSemi);
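/// For example, 'enum E : unsigned { A, B };' has the enum-base ': unsigned';
/// with \p AllowSemi, the opaque-enum-declaration 'enum E : unsigned;' is
/// also recognized.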
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
/// '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
/// Determine whether an '(' after an 'explicit' keyword is part of a C++20
/// 'explicit(bool)' declaration, in earlier language modes where that is an
/// extension.
TPResult isExplicitBool();
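/// For example (C++20, illustrative; std::is_convertible_v is from
/// <type_traits>):
///   template <typename T> struct Wrapper {
///     explicit(!std::is_convertible_v<T, int>) Wrapper(T);
///   };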
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
/// Try to skip a possibly empty sequence of 'attribute-specifier's without
/// full validation of the syntactic structure of attributes.
bool TrySkipAttributes();
public:
TypeResult
ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context = DeclaratorContext::TypeName,
AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
assert(Tok.is(tok::l_square));
if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
return false;
return DiagnoseProhibitedCXX11Attribute();
}
bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation) {
if (!standardAttributesAllowed())
return;
if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
Tok.isNot(tok::kw_alignas))
return;
DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clear();
}
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clearListOnly();
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear in certain syntactic
// locations which the standard permits but we don't support yet, for
// example, attributes that appertain to decl-specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID,
bool DiagnoseEmptyAttrs = false);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Emit warnings for C++11 and C2x attributes that are in a position that
/// clang accepts as an extension.
void DiagnoseCXX11AttributeExtension(ParsedAttributesWithRange &Attrs);
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
enum ParseAttrKindMask {
PAKM_GNU = 1 << 0,
PAKM_Declspec = 1 << 1,
PAKM_CXX11 = 1 << 2,
};
/// \brief Parse attributes based on what syntaxes are desired, allowing for
/// the order to vary. e.g. with PAKM_GNU | PAKM_Declspec:
/// __attribute__((...)) __declspec(...) __attribute__((...))
/// Note that Microsoft attributes (spelled with single square brackets) are
/// not supported by this because of parsing ambiguities with other
/// constructs.
///
/// There are some attribute parse orderings that should not be allowed in
/// arbitrary order. e.g.,
///
/// [[]] __attribute__(()) int i; // OK
/// __attribute__(()) [[]] int i; // Not OK
///
/// Such situations should use the specific attribute parsing functionality.
void ParseAttributes(unsigned WhichAttrKinds,
ParsedAttributesWithRange &Attrs,
SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr);
void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
ParsedAttributesWithRange AttrsWithRange(AttrFactory);
ParseAttributes(WhichAttrKinds, AttrsWithRange, End, LateAttrs);
Attrs.takeAllFrom(AttrsWithRange);
}
/// \brief Possibly parse attributes based on what syntaxes are desired,
/// allowing for the order to vary.
bool MaybeParseAttributes(unsigned WhichAttrKinds,
ParsedAttributesWithRange &Attrs,
SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
(standardAttributesAllowed() && isCXX11AttributeSpecifier())) {
ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
return true;
}
return false;
}
bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
(standardAttributesAllowed() && isCXX11AttributeSpecifier())) {
ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
return true;
}
return false;
}
void MaybeParseGNUAttributes(Declarator &D,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
ParsedAttributes attrs(AttrFactory);
SourceLocation endLoc;
ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
D.takeAttributes(attrs, endLoc);
}
}
/// Parses GNU-style attributes and returns them without source range
/// information.
///
/// This API is discouraged. Use the version that takes a
/// ParsedAttributesWithRange instead.
bool MaybeParseGNUAttributes(ParsedAttributes &Attrs,
SourceLocation *EndLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
ParsedAttributesWithRange AttrsWithRange(AttrFactory);
ParseGNUAttributes(AttrsWithRange, EndLoc, LateAttrs);
Attrs.takeAllFrom(AttrsWithRange);
return true;
}
return false;
}
bool MaybeParseGNUAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation *EndLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
ParseGNUAttributes(Attrs, EndLoc, LateAttrs);
return true;
}
return false;
}
/// Parses GNU-style attributes and returns them without source range
/// information.
///
/// This API is discouraged. Use the version that takes a
/// ParsedAttributesWithRange instead.
void ParseGNUAttributes(ParsedAttributes &Attrs,
SourceLocation *EndLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr) {
ParsedAttributesWithRange AttrsWithRange(AttrFactory);
ParseGNUAttributes(AttrsWithRange, EndLoc, LateAttrs, D);
Attrs.takeAllFrom(AttrsWithRange);
}
void ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation *EndLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseCXX11Attributes(Declarator &D) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrs(AttrFactory);
SourceLocation endLoc;
ParseCXX11Attributes(attrs, &endLoc);
D.takeAttributes(attrs, endLoc);
}
}
bool MaybeParseCXX11Attributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrsWithRange(AttrFactory);
ParseCXX11Attributes(attrsWithRange, endLoc);
attrs.takeAllFrom(attrsWithRange);
return true;
}
return false;
}
bool MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc = nullptr,
bool OuterMightBeMessageSend = false) {
if (standardAttributesAllowed() &&
isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) {
ParseCXX11Attributes(attrs, endLoc);
return true;
}
return false;
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
bool MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr) {
const auto &LO = getLangOpts();
if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) {
ParseMicrosoftDeclSpecs(Attrs, End);
return true;
}
return false;
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
ExprResult ParseExtIntegerArgument();
void ParsePtrauthQualifier(ParsedAttributes &Attrs);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
bool isClassCompatibleKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;
bool EnteredScope;
bool CreatedScope;
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
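// Illustrative usage (not from the original header): while parsing a declarator
// such as `void A::B::f()`, ParseDirectDeclarator keeps a DeclaratorScopeObj on
// the stack, calls EnterDeclaratorScope() once the `A::B::` scope specifier is
// known, and relies on the destructor to exit the scope when parsing finishes.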
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
void InitCXXThisScopeForDeclaratorIfRelevant(
const Declarator &D, const DeclSpec &DS,
llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
DeclaratorContext DeclaratorContext,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
struct InnerNamespaceInfo {
SourceLocation NamespaceLoc;
SourceLocation InlineLoc;
SourceLocation IdentLoc;
IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
struct UsingDeclarator {
SourceLocation TypenameLoc;
CXXScopeSpec SS;
UnqualifiedId Name;
SourceLocation EllipsisLoc;
void clear() {
TypenameLoc = EllipsisLoc = SourceLocation();
SS.clear();
Name.clear();
}
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &Attrs,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool
ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHadErrors,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parse a property kind into \p TIProperty for the selector set \p Set and
/// selector \p Selector.
void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty,
llvm::omp::TraitSet Set,
llvm::omp::TraitSelector Selector,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector kind into \p TISelector for the selector set \p Set.
void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector set kind into \p TISet.
void parseOMPTraitSetKind(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context property.
void parseOMPContextProperty(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context selector.
void parseOMPContextSelector(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &SeenSelectors);
/// Parses an OpenMP context selector set.
void parseOMPContextSelectorSet(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &SeenSets);
/// Parses OpenMP context selectors.
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);
/// Parse a `match` clause for an '#pragma omp declare variant'. Return true
/// if there was an error.
bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI,
OMPTraitInfo *ParentTI);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
/// Parse 'omp [begin] assume[s]' directive.
void ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parse 'omp end assumes' directive.
void ParseOpenMPEndAssumesDirective(SourceLocation Loc);
/// Parse clauses for '#pragma omp [begin] declare target'.
void ParseOMPDeclareTargetClauses(Sema::DeclareTargetContextInfo &DTCI);
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind BeginDKind,
OpenMPDirectiveKind EndDKind,
SourceLocation Loc);
/// Skip tokens until an `annot_pragma_openmp_end` is found. Emit a warning if
/// it is not the current token.
void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind);
/// Check \p FoundKind against \p ExpectedKind; if they do not match, issue an
/// error that the "end" matching the "begin" directive of kind \p BeginKind was
/// not found. Finally, if the expected kind was found or if \p SkipUntilOpenMPEnd
/// is set, skip ahead using the helper `skipUntilPragmaOpenMPEnd`.
void parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
OpenMPDirectiveKind ExpectedKind,
OpenMPDirectiveKind FoundKind,
SourceLocation MatchingLoc,
SourceLocation FoundLoc,
bool SkipUntilOpenMPEnd);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Tries to parse cast part of OpenMP array shaping operation:
/// '[' expression ']' { '[' expression ']' } ')'.
bool tryParseOpenMPArrayShapingCastPart();
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param DKind Directive kind.
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses the 'sizes' clause of a '#pragma omp tile' directive.
OMPClause *ParseOpenMPSizesClause();
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
/// Parses and creates OpenMP 5.0 iterators expression:
/// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier =
/// <range-specification> }+ ')'
ExprResult ParseOpenMPIteratorsExpr();
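// Illustrative example (not from the original source): both `iterator(it = 0:n)`
// and `iterator(int it = 0:n:2)` match the grammar above; the iterator-type is
// optional and the range-specification is begin:end with an optional step.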
/// Parses allocators and traits in the context of the uses_allocator clause.
/// Expected format:
/// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')'
OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind);
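// Illustrative example (not from the original source): a clause such as
// `uses_allocators(omp_default_mem_alloc, my_alloc(my_traits))` matches the
// expected format above; `my_alloc` and `my_traits` are hypothetical user names.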
/// Parses clause with an interop variable of kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPInteropClause(OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
Expr *DepModOrTailExpr = nullptr;
SourceLocation ColonLoc;
SourceLocation RLoc;
CXXScopeSpec ReductionOrMapperIdScopeSpec;
DeclarationNameInfo ReductionOrMapperId;
int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or
///< lastprivate clause.
SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
MapTypeModifiers;
SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers>
MapTypeModifiersLoc;
SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers>
MotionModifiers;
SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc;
bool IsMapTypeImplicit = false;
SourceLocation ExtraModifierLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
bool ObjectHadErrors, bool EnteringContext,
bool AllowDestructorName, bool AllowConstructorName,
bool AllowDeductionGuide,
SourceLocation *TemplateKWLoc, UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
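// Illustrative example (not from the original source): in
// `map(always, close, to: a[0:N])` the modifiers `always` and `close` are the
// map-type-modifiers consumed here, ahead of the map-type `to` and the colon.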
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
TPResult isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
bool isTypeConstraintAnnotation();
bool TryAnnotateTypeConstraint();
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true,
bool TypeConstraint = false);
void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
/// Parse the given string as a type.
///
/// This is a dangerous utility function currently employed only by API notes.
/// It is not a general entry-point for safely parsing types from strings.
///
/// \param typeStr The string to be parsed as a type.
/// \param context The name of the context in which this string is being
/// parsed, which will be used in diagnostics.
/// \param includeLoc The location at which this parse was triggered.
TypeResult parseTypeFromString(StringRef typeStr, StringRef context,
SourceLocation includeLoc);
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
ExprResult ParseBuiltinPtrauthTypeDiscriminator();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
class GNUAsmQualifiers {
unsigned Qualifiers = AQ_unspecified;
public:
enum AQ {
AQ_unspecified = 0,
AQ_volatile = 1,
AQ_inline = 2,
AQ_goto = 4,
};
static const char *getQualifierName(AQ Qualifier);
bool setAsmQualifier(AQ Qualifier);
inline bool isVolatile() const { return Qualifiers & AQ_volatile; }
inline bool isInline() const { return Qualifiers & AQ_inline; }
inline bool isGoto() const { return Qualifiers & AQ_goto; }
};
bool isGCCAsmStatement(const Token &TokAfterAsm) const;
bool isGNUAsmQualifier(const Token &TokAfterAsm) const;
GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const;
bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ);
};
} // end namespace clang
#endif
|
pbkdf2-hmac-md5_fmt_plug.c
|
/*
* This software is Copyright (c) 2015 Dhiru and magnum
* and it is hereby released to
* the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pbkdf2_hmac_md5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pbkdf2_hmac_md5);
#else
#include <ctype.h>
#include <string.h>
#include <assert.h>
#include "arch.h"
//#undef SIMD_COEF_32
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "stdint.h"
#include "pbkdf2_hmac_md5.h"
#include "pbkdf2_hmac_common.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 256
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "PBKDF2-HMAC-MD5"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-MD5 " MD5_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-MD5 32/" ARCH_BITS_STR
#endif
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(ARCH_WORD_32)
#if SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_32 * SIMD_PARA_MD5)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_32 * SIMD_PARA_MD5)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define PLAINTEXT_LENGTH 125
static struct custom_salt {
unsigned int length;
unsigned int rounds;
char salt[PBKDF2_32_MAX_SALT_SIZE];
} *cur_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[PBKDF2_MDx_BINARY_SIZE / sizeof(ARCH_WORD_32)];
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
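/*
 * get_salt() below expects input shaped like
 *   <format tag><iterations>$<hex-encoded salt>$<hex-encoded digest>
 * e.g. (assuming the usual "$pbkdf2-hmac-md5$" tag) "$pbkdf2-hmac-md5$1000$73616c74$...",
 * where "73616c74" hex-decodes to the salt "salt". The iteration count is read with
 * atoi() and each salt byte is rebuilt from two hex digits.
 */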
static void *get_salt(char *ciphertext)
{
static struct custom_salt cs;
char *p;
int saltlen;
memset(&cs, 0, sizeof(cs));
if (!strncmp(ciphertext, PBKDF2_MD5_FORMAT_TAG, PBKDF2_MD5_TAG_LEN))
ciphertext += PBKDF2_MD5_TAG_LEN;
cs.rounds = atoi(ciphertext);
ciphertext = strchr(ciphertext, '$') + 1;
p = strchr(ciphertext, '$');
saltlen = 0;
memset(cs.salt, 0, sizeof(cs.salt));
while (ciphertext < p) { /** extract salt **/
cs.salt[saltlen++] =
atoi16[ARCH_INDEX(ciphertext[0])] * 16 +
atoi16[ARCH_INDEX(ciphertext[1])];
ciphertext += 2;
}
cs.length = saltlen;
return (void*)&cs;
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
{
#if SIMD_COEF_32
int lens[SSE_GROUP_SZ_MD5], i;
unsigned char *pin[SSE_GROUP_SZ_MD5];
union {
ARCH_WORD_32 *pout[SSE_GROUP_SZ_MD5];
unsigned char *poutc;
} x;
for (i = 0; i < SSE_GROUP_SZ_MD5; ++i) {
lens[i] = strlen(saved_key[index+i]);
pin[i] = (unsigned char*)saved_key[index+i];
x.pout[i] = crypt_out[index+i];
}
pbkdf2_md5_sse((const unsigned char **)pin, lens,
(unsigned char*)cur_salt->salt, cur_salt->length,
cur_salt->rounds, &(x.poutc),
PBKDF2_MDx_BINARY_SIZE, 0);
#else
pbkdf2_md5((unsigned char*)(saved_key[index]),
strlen(saved_key[index]),
(unsigned char*)cur_salt->salt, cur_salt->length,
cur_salt->rounds, (unsigned char*)crypt_out[index],
PBKDF2_MDx_BINARY_SIZE, 0);
#endif
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
//dump_stuff_msg("\nbinary", crypt_out[count - 1], 16);
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], PBKDF2_MDx_BINARY_SIZE);
}
static void set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
static int cmp_exact(char *source, int index)
{
return pbkdf2_hmac_md5_cmp_exact(get_key(index), source, (unsigned char*)cur_salt->salt, cur_salt->length, cur_salt->rounds);
}
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->rounds;
}
struct fmt_main fmt_pbkdf2_hmac_md5 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
PBKDF2_MDx_BINARY_SIZE,
PBKDF2_32_BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
"iteration count",
},
{ PBKDF2_MD5_FORMAT_TAG },
pbkdf2_hmac_md5_common_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
pbkdf2_hmac_md5_valid,
pbkdf2_hmac_md5_split,
pbkdf2_hmac_md5_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
veccopy-ompt-target-map.c
|
#include <stdio.h>
#include <omp.h>
#include "callbacks.h"
int main()
{
int N = 100000;
int a[N];
int b[N];
int i;
for (i=0; i<N; i++)
a[i]=0;
for (i=0; i<N; i++)
b[i]=i;
#pragma omp target parallel for
  for (int j = 0; j < N; j++)
    a[j] = b[j];
#pragma omp target teams distribute parallel for
  for (int j = 0; j < N; j++)
    a[j] = b[j];
int rc = 0;
for (i=0; i<N; i++)
if (a[i] != b[i] ) {
rc++;
printf ("Wrong value: a[%d]=%d\n", i, a[i]);
}
if (!rc)
printf("Success\n");
return rc;
}
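// The CHECK lines below trace the expected OMPT callbacks for the two target
// regions above: data allocations (optype=1), host-to-device transfers (optype=2),
// kernel submission, device-to-host transfers (optype=3) and deletions (optype=4),
// assuming the standard ompt_target_data_op_t numbering.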
/// CHECK: 0: Could not register callback 'ompt_callback_target_map'
/// CHECK: Callback Init:
/// CHECK: Callback Load:
/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=1
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2
/// CHECK: Callback Submit: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] req_num_teams=1
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4
/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=2
/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=1
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2
/// CHECK: Callback Submit: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] req_num_teams=0
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4
/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=2
/// CHECK: Callback Fini:
|
GB_binop__pow_fc32.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pow_fc32)
// A.*B function (eWiseMult): GB (_AemultB_08__pow_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__pow_fc32)
// A.*B function (eWiseMult): GB (_AemultB_04__pow_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_fc32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pow_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__pow_fc32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_fc32)
// C=scalar+B GB (_bind1st__pow_fc32)
// C=scalar+B' GB (_bind1st_tran__pow_fc32)
// C=A+scalar GB (_bind2nd__pow_fc32)
// C=A'+scalar GB (_bind2nd_tran__pow_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// A pattern? 0
// B type: GxB_FC32_t
// B pattern? 0
// BinaryOp: cij = GB_cpowf (aij, bij)
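// (GB_cpowf computes the single-precision complex power, i.e. aij raised to bij.)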
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
    0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_cpowf (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_FC32 || GxB_NO_POW_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__pow_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__pow_fc32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__pow_fc32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC32_t
GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__pow_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC32_t alpha_scalar ;
GxB_FC32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__pow_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__pow_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__pow_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__pow_fc32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__pow_fc32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_cpowf (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__pow_fc32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_cpowf (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_cpowf (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__pow_fc32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_cpowf (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__pow_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__ainv_uint8_uint8.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__ainv_uint8_uint8
// op(A') function: GB_unop_tran__ainv_uint8_uint8
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = aij
// unaryop: cij = -aij
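// (for uint8_t the negation wraps modulo 256, i.e. cij = (256 - aij) mod 256)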
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = aij ; \
Cx [pC] = -z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__ainv_uint8_uint8
(
uint8_t *Cx, // Cx and Ax may be aliased
const uint8_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
uint8_t z = aij ;
Cx [p] = -z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint8_t aij = Ax [p] ;
uint8_t z = aij ;
Cx [p] = -z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__ainv_uint8_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
softmax-inl.h
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file softmax-inl.h
* \brief
*/
#ifndef MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#define MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include <type_traits>
#include "../mxnet_op.h"
#include "../operator_common.h"
#include "../tensor/broadcast_reduce_op.h"
#include "../../common/cuda_utils.h"
namespace mxnet {
namespace op {
namespace mxnet_op {
struct softmax_fwd {
template<typename AType>
MSHADOW_XINLINE static AType Map(float a, AType b) {
return AType(expf(a)/b);
}
template<typename AType>
MSHADOW_XINLINE static AType Map(double a, AType b) {
return AType(exp(a)/b);
}
};
struct log_softmax_fwd {
template<typename DType>
MSHADOW_XINLINE static float Map(DType a, float b) {
return a - logf(b);
}
template<typename DType>
MSHADOW_XINLINE static double Map(DType a, double b) {
return a - log(b);
}
};
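// The CPU kernel below computes, for each of the N slices along `axis`, a
// numerically stable softmax / log-softmax with temperature T:
//   m = max_k in[k],  s = sum_k exp((in[k] - m) / T),  out[j] = OP::Map((in[j] - m) / T, s)
// where OP is softmax_fwd (exp(a)/b) or log_softmax_fwd (a - log(b)) defined above.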
template<typename OP, bool negate, typename AType, typename DType, typename OType,
typename IType, int ndim>
inline void Softmax(Stream<cpu> *s, DType *in, OType *out, IType *length,
Shape<ndim> shape, int axis, const DType temperature) {
index_t M = shape[axis];
index_t N = shape.Size()/M;
Shape<ndim> stride = calc_stride(shape);
Shape<ndim> sshape = shape;
sshape[axis] = 1;
index_t sa = stride[axis];
if (length == nullptr) {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t base = unravel_dot(i, sshape, stride);
DType mmax = negate ? -in[base] : in[base];
DType val;
for (index_t j = 1; j < M; ++j) {
val = negate ? -in[base + j*sa] : in[base + j*sa];
if (mmax < val) mmax = val;
}
AType sum = AType(0);
DType in_val;
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
if (temperature == 1.0) {
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
sum += std::exp(in_val - mmax);
}
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
out[base + j*sa] = OP::Map(in_val - mmax, sum);
}
} else {
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
sum += std::exp((in_val - mmax)/temperature);
}
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
}
}
}
} else {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t len = static_cast<index_t>(length[i]);
index_t base = unravel_dot(i, sshape, stride);
DType mmax = negate ? -in[base] : in[base];
DType val;
for (index_t j = 1; j < len; ++j) {
val = negate ? -in[base + j*sa] : in[base + j*sa];
if (mmax < val) mmax = val;
}
for (index_t j = len; j < M; ++j) {
out[base + j*sa] = OType(0.0f);
}
AType sum = AType(0);
DType in_val;
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
if (temperature == 1.0) {
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
sum += std::exp(in_val - mmax);
}
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
out[base + j*sa] = OP::Map(in_val - mmax, sum);
}
} else {
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
sum += std::exp((in_val - mmax)/temperature);
}
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
}
}
}
}
}
struct softmax_bwd {
template<typename DType, typename AType>
MSHADOW_XINLINE static AType Map(DType ograd, DType out, AType sum) {
return AType(out * (ograd - sum));
}
};
struct log_softmax_bwd {
template<typename AType>
MSHADOW_XINLINE static AType Map(float ograd, float out, AType sum) {
return AType(ograd - expf(out)*sum);
}
template<typename AType>
MSHADOW_XINLINE static AType Map(double ograd, double out, AType sum) {
return AType(ograd - exp(out)*sum);
}
};
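// SoftmaxGrad below forms sum = sum_j OP1::Map(ograd[j], out[j]) over each slice and
// then writes OP2::Map(ograd[j], out[j], sum), divided by the temperature when it is
// not 1.0; with OP2 = softmax_bwd above this is the usual softmax gradient
// out * (ograd - sum).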
template<typename OP1, typename OP2, int Req, bool negate,
typename AType, typename DType, typename OType, typename IType, int ndim>
inline void SoftmaxGrad(Stream<cpu> *s, OType *out, OType *ograd,
DType *igrad, IType *length, Shape<ndim> shape,
int axis, const DType temperature) {
index_t M = shape[axis];
index_t N = shape.Size()/M;
Shape<ndim> stride = calc_stride(shape);
Shape<ndim> sshape = shape;
sshape[axis] = 1;
index_t sa = stride[axis];
if (length != nullptr) {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t base = unravel_dot(i, sshape, stride);
index_t len = static_cast<index_t>(length[i]);
AType sum = AType(0);
for (index_t j = 0; j < len; ++j) {
sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
}
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
DType final_result;
if (temperature == 1.0) {
for (index_t j = 0; j < M; ++j) {
final_result = negate ?
-OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
final_result = (j < len) ? final_result : DType(0.0f);
KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
}
} else {
for (index_t j = 0; j < M; ++j) {
final_result = negate ?
-OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
final_result = (j < len) ? final_result : DType(0.0f);
KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
}
}
}
} else {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t base = unravel_dot(i, sshape, stride);
AType sum = AType(0);
for (index_t j = 0; j < M; ++j) {
sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
}
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
DType final_result;
if (temperature == 1.0) {
for (index_t j = 0; j < M; ++j) {
final_result = negate ?
-OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
}
} else {
for (index_t j = 0; j < M; ++j) {
final_result = negate ?
-OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
}
}
}
}
}
#ifdef __CUDACC__
template<int x_bits, typename OP, bool negate, typename AType, int ndim,
typename DType, typename OType, typename IType>
__global__ void softmax_compute_kernel(DType *in, OType *out, IType *length,
index_t M, int axis, Shape<ndim> sshape,
Shape<ndim> stride, const double temperature) {
const unsigned x_size = 1 << x_bits;
__shared__ AType smem[x_size];
index_t sa = stride[axis];
index_t base = unravel_dot(blockIdx.x, sshape, stride);
index_t x = threadIdx.x;
const index_t len = length == nullptr ? M : static_cast<index_t>(length[blockIdx.x]);
red::maximum::SetInitValue(smem[x]);
for (index_t i = x; i < len; i += x_size) {
smem[x] = ::max(smem[x], negate ? -in[base + i*sa] : in[base + i*sa]);
}
__syncthreads();
cuda::Reduce1D<red::maximum, x_bits>(smem);
__syncthreads();
DType smax = smem[0];
__syncthreads();
red::sum::SetInitValue(smem[x]);
DType val;
for (index_t i = x; i < len; i += x_size) {
val = negate ? -in[base + i*sa]:in[base + i*sa];
smem[x] += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
}
__syncthreads();
cuda::Reduce1D<red::sum, x_bits>(smem);
__syncthreads();
AType ssum = smem[0];
__syncthreads();
for (index_t i = x; i < M; i += x_size) {
val = negate ? -in[base + i*sa] : in[base + i*sa];
out[base + i*sa] =
(i < len) ? OType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) : OType(0.0f);
}
}
const int softmax_threads_per_block = 512;
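// Optimized path for a contiguous (stride-1) softmax axis: each block processes
// `rows_per_block` rows, `threads_per_row` threads cooperate on one row, and each
// row is staged through the 20 kB `persistent_storage` shared buffer using
// LType-wide loads.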
template<typename OP, bool negate, typename AType, typename LType,
typename DType, typename OType, typename IType>
__global__ void softmax_stride1_compute_kernel(const DType *in, OType *out, IType *length,
const index_t M, const double temperature,
const int rows_per_block, const index_t total_rows) {
__shared__ AType scratch[softmax_threads_per_block];
__shared__ LType persistent_storage[20 * 1024 / sizeof(LType)];
const int warp_size = 32;
const int threads_per_row = softmax_threads_per_block / rows_per_block;
const int my_local_row = threadIdx.x / threads_per_row;
const int my_row = blockIdx.x * rows_per_block + my_local_row;
if (my_row >= total_rows) return;
const int my_id = threadIdx.x % threads_per_row;
const int entries_per_load = sizeof(LType)/sizeof(DType);
const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]);
// Due to usage of MSHADOW_TYPE_SWITCH macro we are generating
// kernels where sizeof(LType) may be less than sizeof(DType),
// resulting in entries_per_load being 0.
// This is not a valid combination and is being checked against
// in the launcher code. This switch here is just to silence
// the division by zero warning generated for such invalid cases.
const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
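  // Illustration (hypothetical sizes, not part of the original source):
  //   DType = float  (4 B), LType = float4 (16 B) -> entries_per_load = 4,
  //     so a row of M = 1024 floats becomes row_length = 256 aligned loads;
  //   DType = double (8 B), LType = float  (4 B)  -> entries_per_load = 0,
  //     a combination the launcher rejects via CHECK_LE(sizeof(DType), sizeof(LType)).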
const LType* in_aligned = reinterpret_cast<const LType*>(in);
size_t base = my_row * row_length;
for (index_t i = my_id; i < row_length; i += threads_per_row) {
persistent_storage[my_local_row * row_length + i] = in_aligned[base + i];
}
DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length);
__syncthreads();
DType my_max_value;
red::maximum::SetInitValue(my_max_value);
for (index_t i = my_id; i < len; i += threads_per_row) {
my_max_value = ::max(my_max_value, negate ? -row[i] : row[i]);
}
scratch[threadIdx.x] = my_max_value;
__syncthreads();
for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
if (my_id < size) {
scratch[threadIdx.x] = ::max(scratch[threadIdx.x], scratch[threadIdx.x + size]);
}
__syncthreads();
}
if (my_id < warp_size) {
AType my_value = warp_reduce(scratch[threadIdx.x],
[](AType x, AType y) { return ::max(x, y); });
scratch[threadIdx.x] = my_value;
}
__syncthreads();
DType smax = scratch[threadIdx.x - threadIdx.x % threads_per_row];
__syncthreads();
AType my_sum;
red::sum::SetInitValue(my_sum);
for (index_t i = my_id; i < len; i += threads_per_row) {
const DType val = negate ? -row[i] : row[i];
my_sum += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
}
scratch[threadIdx.x] = my_sum;
__syncthreads();
for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
if (my_id < size) {
scratch[threadIdx.x] += scratch[threadIdx.x + size];
}
__syncthreads();
}
if (my_id < warp_size) {
AType my_value = warp_reduce(scratch[threadIdx.x],
[](AType x, AType y) { return x + y;});
scratch[threadIdx.x] = my_value;
}
__syncthreads();
AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
__syncthreads();
for (index_t i = my_id; i < M; i += threads_per_row) {
const DType val = negate ? -row[i] : row[i];
row[i] = (i < len) ? DType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) :
DType(0.0f);
}
__syncthreads();
LType* out_aligned = reinterpret_cast<LType*>(out);
for (index_t i = my_id; i < row_length; i += threads_per_row) {
out_aligned[base + i] = persistent_storage[my_local_row * row_length + i];
}
}
template<typename OP, bool negate, typename AType, typename DType, typename OType,
typename IType, int ndim>
inline void Softmax(Stream<gpu> *s, DType *in, OType *out, IType *length,
Shape<ndim> shape, int axis, const double temperature) {
const int x_bits = 7;
const int x_size = 1 << x_bits;
index_t M = shape[axis];
index_t N = shape.Size()/M;
Shape<ndim> stride = calc_stride(shape);
Shape<ndim> sshape = shape;
sshape[axis] = 1;
const size_t DSize = sizeof(DType);
// Using 20 kB of shared memory for persistent storage in the optimized case
const size_t max_opt_M = 20 * 1024 / DSize;
if (stride[axis] == 1 &&
static_cast<size_t>(M) <= max_opt_M &&
std::is_same<DType, OType>::value) {
int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
int rows_per_block = mxnet::common::cuda::get_rows_per_block(M *
sizeof(DType) / sizeof(LType),
softmax_threads_per_block);
int nblocks = (N + rows_per_block - 1) / rows_per_block;
CHECK_LE(sizeof(DType), sizeof(LType));
softmax_stride1_compute_kernel<OP, negate, AType, LType>
<<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
in, out, length, M, temperature, rows_per_block, N);
});
MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_compute_kernel);
} else {
softmax_compute_kernel<x_bits, OP, negate, AType, ndim>
<<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
in, out, length, M, axis, sshape, stride, temperature);
MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_compute_kernel);
}
}
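// Launch-configuration sketch (hypothetical numbers, not from the original source):
// with DType = float and M = 128 contiguous elements per row, get_load_type
// would typically select a 16-byte LType, so each row becomes 128 * 4 / 16 = 32
// aligned loads; get_rows_per_block then packs several such rows into one
// softmax_threads_per_block-thread block and nblocks = ceil(N / rows_per_block)
// blocks are launched. Rows with more than max_opt_M elements, a non-unit
// stride along the softmax axis, or differing input/output types fall back to
// the one-block-per-row softmax_compute_kernel above.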
template<typename OP1, typename OP2, int Req, bool negate, typename AType, typename LType,
typename DType, typename OType, typename IType>
__global__ void softmax_stride1_grad_kernel(const OType *out, const OType *ograd,
DType *igrad, const IType *length,
const index_t M,
const double temperature,
const int rows_per_block,
const index_t total_rows) {
__shared__ AType scratch[softmax_threads_per_block];
__shared__ LType persistent_storage[20 * 1024 / sizeof(LType)];
const int warp_size = 32;
const int threads_per_row = softmax_threads_per_block / rows_per_block;
const int my_local_row = threadIdx.x / threads_per_row;
const int my_row = blockIdx.x * rows_per_block + my_local_row;
if (my_row >= total_rows) return;
const int my_id = threadIdx.x % threads_per_row;
const int entries_per_load = sizeof(LType)/sizeof(DType);
const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]);
// Due to usage of MSHADOW_TYPE_SWITCH macro we are generating
// kernels where sizeof(LType) may be less than sizeof(DType),
// resulting in entries_per_load being 0.
// This is not a valid combination and is being checked against
// in the launcher code. This switch here is just to silence
// the division by zero warning generated for such invalid cases.
const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
const LType* out_aligned = reinterpret_cast<const LType*>(out);
const LType* ograd_aligned = reinterpret_cast<const LType*>(ograd);
size_t base = my_row * row_length;
for (index_t i = my_id; i < row_length; i += threads_per_row) {
persistent_storage[my_local_row * row_length * 2 + i] = out_aligned[base + i];
persistent_storage[my_local_row * row_length * 2 + row_length + i] = ograd_aligned[base + i];
}
DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length * 2);
__syncthreads();
AType my_sum_value;
red::sum::SetInitValue(my_sum_value);
for (index_t i = my_id; i < len; i += threads_per_row) {
my_sum_value += OP1::Map(row[i + M], row[i]);
}
scratch[threadIdx.x] = my_sum_value;
__syncthreads();
for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
if (my_id < size) {
scratch[threadIdx.x] = scratch[threadIdx.x] + scratch[threadIdx.x + size];
}
__syncthreads();
}
if (my_id < warp_size) {
AType my_value = warp_reduce(scratch[threadIdx.x],
[](AType x, AType y) { return x + y; });
scratch[threadIdx.x] = my_value;
}
__syncthreads();
AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
__syncthreads();
for (index_t i = my_id; i < M; i += threads_per_row) {
const DType val =
negate ?
-OP2::Map(row[i + M], row[i], ssum) :
OP2::Map(row[i + M], row[i], ssum);
row[i] = (i < len) ? DType(val / static_cast<DType>(temperature)) :
DType(0.0f);
if (Req == kAddTo) {
row[i] += igrad[my_row * M + i];
}
}
__syncthreads();
LType* igrad_aligned = reinterpret_cast<LType*>(igrad);
for (index_t i = my_id; i < row_length; i += threads_per_row) {
igrad_aligned[base + i] = persistent_storage[my_local_row * row_length * 2 + i];
}
}
template<int x_bits, typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim,
typename DType, typename OType, typename IType>
__global__ void softmax_grad_kernel(OType *out, OType *ograd, DType *igrad,
const IType *length, index_t M, int axis,
Shape<ndim> sshape, Shape<ndim> stride,
const double temperature) {
const unsigned x_size = 1 << x_bits;
__shared__ AType smem[x_size];
index_t sa = stride[axis];
index_t base = unravel_dot(blockIdx.x, sshape, stride);
index_t x = threadIdx.x;
index_t len = length != nullptr ? static_cast<index_t>(length[blockIdx.x]) : M;
red::sum::SetInitValue(smem[x]);
for (index_t i = x; i < len; i += x_size) {
smem[x] += OP1::Map(ograd[base + i*sa], out[base + i*sa]);
}
__syncthreads();
cuda::Reduce1D<red::sum, x_bits>(smem);
__syncthreads();
AType ssum = smem[0];
__syncthreads();
DType final_result;
for (index_t i = x; i < M; i += x_size) {
final_result =
negate ?
-OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum) :
OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum);
final_result = (i < len) ? final_result : DType(0.0f);
KERNEL_ASSIGN(igrad[base + i*sa], Req, final_result / static_cast<DType>(temperature));
}
}
template<typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim,
typename DType, typename OType, typename IType>
inline void SoftmaxGrad(Stream<gpu> *s, OType *out, OType *ograd,
DType *igrad, IType *length, Shape<ndim> shape, int axis,
const double temperature) {
const int x_bits = 7;
const int x_size = 1 << x_bits;
index_t M = shape[axis];
index_t N = shape.Size()/M;
Shape<ndim> stride = calc_stride(shape);
Shape<ndim> sshape = shape;
sshape[axis] = 1;
const size_t DSize = sizeof(DType);
// Using 20 kB of shared memory for persistent storage in the optimized case
  // Need to store both out and ograd, so the maximum supported M is half of
  // what the forward pass allows.
const size_t max_opt_M = 20 * 1024 / DSize / 2;
if (stride[axis] == 1 &&
static_cast<size_t>(M) <= max_opt_M &&
std::is_same<DType, OType>::value) {
int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
int rows_per_block = mxnet::common::cuda::get_rows_per_block(M *
sizeof(DType) / sizeof(LType),
softmax_threads_per_block);
int nblocks = (N + rows_per_block - 1) / rows_per_block;
CHECK_LE(sizeof(DType), sizeof(LType));
softmax_stride1_grad_kernel<OP1, OP2, Req, negate, AType, LType>
<<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
out, ograd, igrad, length, M, temperature, rows_per_block, N);
});
MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_grad_kernel);
} else {
softmax_grad_kernel<x_bits, OP1, OP2, Req, negate, AType, ndim>
<<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
out, ograd, igrad, length, M, axis, sshape, stride, temperature);
MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_grad_kernel);
}
}
#endif
} // namespace mxnet_op
struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> {
int axis;
dmlc::optional<double> temperature;
dmlc::optional<int> dtype;
dmlc::optional<bool> use_length;
DMLC_DECLARE_PARAMETER(SoftmaxParam) {
DMLC_DECLARE_FIELD(axis).set_default(-1)
.describe("The axis along which to compute softmax.");
DMLC_DECLARE_FIELD(temperature).set_default(dmlc::optional<double>())
.describe("Temperature parameter in softmax");
DMLC_DECLARE_FIELD(dtype)
.add_enum("float16", mshadow::kFloat16)
.add_enum("float32", mshadow::kFloat32)
.add_enum("float64", mshadow::kFloat64)
.set_default(dmlc::optional<int>())
.describe("DType of the output in case this can't be inferred. "
"Defaults to the same as input's dtype if not defined (dtype=None).");
DMLC_DECLARE_FIELD(use_length)
.set_default(dmlc::optional<bool>(false))
.describe("Whether to use the length input as a mask over the data input.");
}
};
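// For reference (not part of the original header): the optional temperature T
// rescales the logits before normalization,
//
//   softmax_T(x)_i = exp(x_i / T) / sum_j exp(x_j / T),
//
// so T = 1 recovers the ordinary softmax (the default used by the kernels
// above), larger T flattens the distribution and smaller T sharpens it.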
static inline bool softmax_has_dtype_override(const nnvm::NodeAttrs& attrs) {
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
return param.dtype.has_value() && param.dtype.value() != -1;
}
static inline bool softmax_use_length(const nnvm::NodeAttrs& attrs) {
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
return param.use_length.value();
}
static inline bool SoftmaxOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(out_attrs->size(), 1);
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
if (softmax_has_dtype_override(attrs)) {
TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype.value());
type_assign(&(*in_attrs)[0], (*out_attrs)[0]);
return true;
} else {
std::vector<int> tmp = {in_attrs->at(0)};
return ElemwiseType<1, 1>(attrs, &tmp, out_attrs);
}
}
static inline bool SoftmaxOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
CHECK_EQ(out_attrs->size(), 1U);
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), param.use_length.value() ? 2U : 1U);
if (param.use_length.value()) {
mxnet::TShape& dshape = in_attrs->at(0);
mxnet::TShape tmp_shape((dshape.ndim() == 1) ? 1U : dshape.ndim() - 1, 1);
int j = 0;
int axis = param.axis != -1 ? param.axis : dshape.ndim() - 1;
for (int i = 0; i < dshape.ndim(); ++i) {
if (i != axis) {
tmp_shape[j++] = dshape[i];
}
}
SHAPE_ASSIGN_CHECK(*in_attrs, 1, tmp_shape);
}
mxnet::ShapeVector tmp = {in_attrs->at(0)};
return ElemwiseShape<1, 1>(attrs, &tmp, out_attrs);
}
static inline bool SoftmaxGradOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
if (softmax_use_length(attrs)) {
mxnet::ShapeVector ins = {in_attrs->at(0), in_attrs->at(1), in_attrs->at(3)};
mxnet::ShapeVector dgrad = {out_attrs->at(0)};
bool res = ElemwiseShape<3, 1>(attrs, &ins, &dgrad);
SHAPE_ASSIGN_CHECK(*in_attrs, 0, ins[0]);
SHAPE_ASSIGN_CHECK(*in_attrs, 1, ins[1]);
SHAPE_ASSIGN_CHECK(*in_attrs, 3, ins[2]);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, dgrad[0]);
mxnet::ShapeVector length = {in_attrs->at(2)};
mxnet::ShapeVector lgrad = {out_attrs->at(1)};
res = (res && ElemwiseShape<1, 1>(attrs, &length, &lgrad));
SHAPE_ASSIGN_CHECK(*in_attrs, 2, length[0]);
SHAPE_ASSIGN_CHECK(*out_attrs, 1, lgrad[0]);
return res;
} else {
return ElemwiseShape<3, 1>(attrs, in_attrs, out_attrs);
}
} else {
return ElemwiseShape<2, 1>(attrs, in_attrs, out_attrs);
}
}
static inline bool SoftmaxGradOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(out_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 4U : 3U);
int in_dtype = (*in_attrs)[1];
int out_dtype = (*in_attrs)[softmax_use_length(attrs) ? 3 : 2];
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
TYPE_ASSIGN_CHECK(*out_attrs, 0, in_dtype);
if (softmax_use_length(attrs)) {
TYPE_ASSIGN_CHECK(*out_attrs, 1, in_attrs->at(2));
}
    return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1 &&
           (*in_attrs)[1] != -1 &&
           // out_attrs only has a second entry when use_length is set
           (!softmax_use_length(attrs) || (*out_attrs)[1] != -1);
} else {
CHECK_EQ(in_attrs->size(), 2U);
int out_dtype = (*in_attrs)[1];
TYPE_ASSIGN_CHECK(*out_attrs, 0, out_dtype);
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1;
}
}
static inline std::vector<std::pair<int, int> >
SoftmaxGradOpInplaceOption(const nnvm::NodeAttrs& attrs) {
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
if (softmax_use_length(attrs)) {
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 1}, {3, 0}};
} else {
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 0}};
}
} else {
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}};
}
}
static inline uint32_t SoftmaxGradOpNumInputs(const nnvm::NodeAttrs& attrs) {
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
return softmax_use_length(attrs) ? 4 : 3;
}
return 2;
}
static inline std::vector<std::string> SoftmaxGradOpInputNames(const nnvm::NodeAttrs& attrs) {
if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
if (softmax_use_length(attrs)) {
return std::vector<std::string>{"ograd", "data", "length", "output"};
} else {
return std::vector<std::string>{"ograd", "data", "output"};
}
} else {
return std::vector<std::string>{"ograd", "output"};
}
}
struct SoftmaxFGradient {
const char *op_name;
std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
const std::vector<nnvm::NodeEntry>& ograds) const {
if (softmax_has_dtype_override(n->attrs) || softmax_use_length(n->attrs)) {
return ElemwiseGradUseInOut {op_name}(n, ograds);
} else {
return ElemwiseGradUseOut {op_name}(n, ograds);
}
}
};
template<typename xpu, typename OP, bool negate = false>
void SoftmaxCompute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
CHECK_NE(req[0], kAddTo);
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
int axis = CheckAxis(param.axis, inputs[0].ndim());
const double temperature = param.temperature.has_value() ?
param.temperature.value() : 1.0;
mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) {
common::LogOnce("MXNET_SAFE_ACCUMULATION=1 is recommended for softmax with float16 inputs. "
"See https://mxnet.apache.org/api/faq/env_var "
"for more details.");
}
MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, {
MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
int type = kInt32;
if (param.use_length.value()) {
CHECK(inputs.size() > 1)
<< "Mask needs to be provided when using softmax with use_length=True.";
type = inputs[1].type_flag_;
}
MXNET_INT_TYPE_SWITCH(type, IType, {
IType* mask_ptr = nullptr;
if (param.use_length.value()) {
mask_ptr = inputs[1].dptr<IType>();
}
if (safe_acc) {
if (shape.ndim() == 2) {
Softmax<OP, negate, AType>(
ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
axis, static_cast<DType>(temperature));
} else {
Softmax<OP, negate, AType>(
ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
axis, static_cast<DType>(temperature));
}
} else {
if (shape.ndim() == 2) {
Softmax<OP, negate, DType>(
ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
axis, static_cast<DType>(temperature));
} else {
Softmax<OP, negate, DType>(
ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
axis, static_cast<DType>(temperature));
}
}
});
});
});
}
template<typename xpu, typename OP1, typename OP2, bool negate = false>
void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mxnet_op;
if (softmax_use_length(attrs)) {
MXNET_INT_TYPE_SWITCH(inputs[2].type_flag_, IType, {
if (req[1] != kNullOp) {
mxnet_op::Kernel<mxnet_op::set_zero, xpu>::Launch(
ctx.get_stream<xpu>(), outputs[1].Size(), outputs[1].dptr<IType>());
}
});
}
if (req[0] == kNullOp) return;
const int itype = softmax_use_length(attrs) ? inputs[2].type_flag_ : kInt32;
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
int axis = CheckAxis(param.axis, inputs[0].ndim());
const double temperature = param.temperature.has_value() ?
param.temperature.value() : 1.0;
mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
int out_idx = softmax_has_dtype_override(attrs) ? 2 : 1;
out_idx = softmax_use_length(attrs) ? 3 : out_idx;
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, OType, AType, {
MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MXNET_INT_TYPE_SWITCH(itype, IType, {
IType * length_ptr = nullptr;
if (softmax_use_length(attrs)) {
length_ptr = inputs[2].dptr<IType>();
}
if (safe_acc) {
if (shape.ndim() == 2) {
SoftmaxGrad<OP1, OP2, Req, negate, AType>(
ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
length_ptr, shape.get<2>(), axis,
static_cast<DType>(temperature));
} else {
SoftmaxGrad<OP1, OP2, Req, negate, AType>(
ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
length_ptr, shape.get<3>(), axis,
static_cast<DType>(temperature));
}
} else {
if (shape.ndim() == 2) {
SoftmaxGrad<OP1, OP2, Req, negate, DType>(
ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
length_ptr, shape.get<2>(), axis,
static_cast<DType>(temperature));
} else {
SoftmaxGrad<OP1, OP2, Req, negate, DType>(
ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
length_ptr, shape.get<3>(), axis,
static_cast<DType>(temperature));
}
}
});
});
});
});
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_NN_SOFTMAX_INL_H_
|
kvstore_dist_server.h
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file mxnet_node.h
* \brief implement mxnet nodes
*/
#ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#include <queue>
#include <string>
#include <mutex>
#include <condition_variable>
#include <memory>
#include <functional>
#include <future>
#include <vector>
#include "ps/ps.h"
#include "mxnet/kvstore.h"
#include "../operator/tensor/elemwise_binary_op-inl.h"
#include "../operator/tensor/init_op.h"
#include <utility> //pair
#include <algorithm> // sort
#include <cmath>
namespace mxnet {
namespace kvstore {
enum class CommandType {
kController, kStopServer, kSyncMode, kSetGradientCompression
};
enum class DataHandleType {
kDefaultPushPull, kCompressedPushPull, kRowSparsePushPull
};
/**
 * \brief executor that runs functions on the thread which called \ref Start
*/
class Executor {
public:
/**
* \brief start the executor
*/
void Start() {
std::unique_lock<std::mutex> lk(mu_);
while (true) {
cond_.wait(lk, [this]{return !queue_.empty();});
Block blk = std::move(queue_.front());
queue_.pop();
lk.unlock();
if (blk.f) {
blk.f(); blk.p->set_value();
} else {
blk.p->set_value(); break;
}
lk.lock();
}
}
/**
* \brief function
*/
typedef std::function<void()> Func;
/**
   * \brief let the thread that called \ref Start execute a function; thread-safe
*/
void Exec(const Func& func) {
Block blk(func);
auto fut = blk.p->get_future();
{
std::lock_guard<std::mutex> lk(mu_);
queue_.push(std::move(blk));
cond_.notify_one();
}
fut.wait();
}
/**
* \brief stop the thread, threadsafe
*/
void Stop() {
Exec(Func());
}
private:
struct Block {
explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) { }
Func f;
std::shared_ptr<std::promise<void>> p;
};
std::queue<Block> queue_;
std::mutex mu_;
std::condition_variable cond_;
};
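// Usage sketch (illustrative only, not part of the original header):
//
//   Executor exec;
//   std::thread runner([&exec] { exec.Start(); });  // drains the queue
//   // from any request-handler thread:
//   exec.Exec([] { /* runs on the runner thread; Exec() blocks until done */ });
//   exec.Stop();                                     // makes Start() return
//   runner.join();
//
// KVStoreDistServer below follows this pattern: Run() calls Start(), and the
// ps-lite handlers funnel controller/updater calls through Exec().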
class KVStoreDistServer {
public:
KVStoreDistServer() {
using namespace std::placeholders;
ps_server_ = new ps::KVServer<float>(0);
static_cast<ps::SimpleApp*>(ps_server_)->set_request_handle(
std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2));
ps_server_->set_request_handle(
std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3));
sync_mode_ = false;
gradient_compression_ = std::make_shared<GradientCompression>();
log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false);
}
~KVStoreDistServer() {
delete ps_server_;
}
void set_controller(const KVStore::Controller& controller) {
CHECK(controller);
controller_ = controller;
}
void set_updater(const KVStore::Updater& updater) {
CHECK(updater);
updater_ = updater;
}
/**
   * \brief blocks until the command \a kStopServer is received
*/
void Run() {
exec_.Start();
}
private:
struct MergeBuf {
std::vector<ps::KVMeta> request;
NDArray array;
};
  struct Dist2TMean {
   public:
    real_t value_;
    real_t dist_;
    Dist2TMean() : value_(0.0), dist_(0.0) { }
    Dist2TMean(real_t value, real_t dist) : value_(value), dist_(dist) { }
    ~Dist2TMean() { }
  };
void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) {
CommandType recved_type = static_cast<CommandType>(recved.head);
if (recved_type == CommandType::kStopServer) {
exec_.Stop();
} else if (recved_type == CommandType::kSyncMode) {
sync_mode_ = true;
} else if (recved_type == CommandType::kSetGradientCompression) {
gradient_compression_->DecodeParams(recved.body);
} else {
// this uses value 0 for message id from frontend
      // let the main thread execute ctrl, which is necessary for python
exec_.Exec([this, recved]() {
CHECK(controller_);
controller_(recved.head, recved.body);
});
}
app->Response(recved);
}
void DataHandleEx(const ps::KVMeta& req_meta,
const ps::KVPairs<real_t>& req_data,
ps::KVServer<real_t>* server) {
DataHandleType recved_type = static_cast<DataHandleType>(req_meta.cmd);
if (recved_type == DataHandleType::kRowSparsePushPull) {
DataHandleRowSparse(req_meta, req_data, server);
} else if (recved_type == DataHandleType::kCompressedPushPull) {
DataHandleCompressed(req_meta, req_data, server);
} else {
DataHandleDefault(req_meta, req_data, server);
}
return;
}
inline void ApplyUpdates(const int key, MergeBuf *merged, NDArray *stored,
ps::KVServer<real_t>* server) {
if (merged->request.size() == (size_t) ps::NumWorkers()) {
      // let the main thread execute updater_, which is necessary for python
if (updater_) {
exec_.Exec([this, key, merged, stored](){
CHECK(updater_);
updater_(key, merged->array, stored);
});
} else {
// if no updater, just copy
CopyFromTo(merged->array, stored);
}
if (log_verbose_) {
LOG(INFO) << "sync response to " << merged->request.size() << " workers";
}
for (const auto& req : merged->request) {
server->Response(req);
}
merged->request.clear();
stored->WaitToRead();
} else {
merged->array.WaitToRead();
}
}
void DecodeRowIds(const ps::SArray<ps::Key> &keys, int64_t *indices,
const int64_t master_key, const int64_t num_rows) {
indices[0] = 0;
for (int64_t i = 1; i <= num_rows; i++) {
int key = DecodeKey(keys[i]);
auto row_id = key - master_key;
indices[i - 1] = row_id;
}
}
void DataHandleRowSparse(const ps::KVMeta& req_meta,
const ps::KVPairs<real_t>& req_data,
ps::KVServer<real_t>* server) {
int master_key = DecodeKey(req_data.keys[0]);
auto num_rows = req_data.keys.size() - 1;
auto& stored = store_[master_key];
if (req_meta.push) {
CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty";
CHECK_EQ(req_data.lens[0], 0);
real_t* data = req_data.vals.data();
if (stored.is_none()) {
if (log_verbose_) LOG(INFO) << "initial push: " << master_key;
// initialization
CHECK_GT(num_rows, 0) << "init with empty data is not supported";
auto unit_len = req_data.lens[1];
CHECK_GT(unit_len, 0);
size_t ds[] = {num_rows, (size_t) unit_len};
TShape dshape(ds, ds + 2);
CHECK_EQ(req_data.vals.size(), num_rows * unit_len);
TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*)
NDArray recved = NDArray(recv_blob, 0);
stored = NDArray(kRowSparseStorage, dshape, Context());
Engine::Get()->PushAsync(
[recved, stored](RunContext ctx, Engine::CallbackOnComplete on_complete) {
NDArray rsp = stored;
stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])});
mshadow::Stream<cpu> *s = ctx.get_stream<cpu>();
using namespace mxnet::op;
nnvm::dim_t nnr = rsp.shape()[0];
MSHADOW_IDX_TYPE_SWITCH(rsp.aux_type(rowsparse::kIdx), IType, {
IType* idx = rsp.aux_data(rowsparse::kIdx).dptr<IType>();
mxnet_op::Kernel<PopulateFullIdxRspKernel, cpu>::Launch(s, nnr, idx);
});
mshadow::Copy(rsp.data().FlatTo1D<cpu, float>(),
recved.data().FlatTo1D<cpu, float>(), s);
on_complete();
}, recved.ctx(), {recved.var()}, {stored.var()},
FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
stored.WaitToRead();
server->Response(req_meta);
return;
}
// synced push
if (sync_mode_) {
if (log_verbose_) LOG(INFO) << "sync push: " << master_key << " " << req_data.keys;
auto& merged = merge_buf_[master_key];
/* merged type:
struct MergeBuf {
std::vector<ps::KVMeta> request;
NDArray array;
};
*/
if (merged.array.is_none()) {
merged.array = NDArray(kRowSparseStorage, stored.shape(), Context());
}
if (num_rows == 0) {
// reset to zeros
if (merged.request.size() == 0) {
merged.array = NDArray(kRowSparseStorage, stored.shape(), Context());
} else {
// nothing to aggregate
}
merged.request.push_back(req_meta);
ApplyUpdates(master_key, &merged, &stored, server);
return;
}
auto unit_len = req_data.lens[1];
CHECK_GT(unit_len, 0);
// indices
std::vector<int64_t> indices(num_rows);
DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows);
// data
TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask);
size_t ds[] = {(size_t) num_rows, (size_t) unit_len};
TShape dshape(ds, ds + 2);
TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*)
// row_sparse NDArray
NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0);
if (merged.request.size() == 0) {
CopyFromTo(recved, &merged.array, 0);
} else {
NDArray out(kRowSparseStorage, stored.shape(), Context());
std::vector<Engine::VarHandle> const_vars;
const_vars.push_back(recved.var());
const_vars.push_back(merged.array.var());
// accumulate row_sparse gradients
// TODO(haibin) override + operator for row_sparse NDArray
// instead of calling BinaryComputeRspRsp directly
using namespace mshadow;
Engine::Get()->PushAsync(
[recved, merged, out](RunContext ctx, Engine::CallbackOnComplete on_complete) {
op::ElemwiseBinaryOp::ComputeEx<cpu, op::mshadow_op::plus>(
{}, {}, {recved, merged.array}, {kWriteTo}, {out});
on_complete();
}, recved.ctx(), const_vars, {out.var()},
FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
CopyFromTo(out, &merged.array, 0);
}
merged.request.push_back(req_meta);
ApplyUpdates(master_key, &merged, &stored, server);
} else {
// async push
if (log_verbose_) LOG(INFO) << "async push: " << master_key;
if (num_rows == 0) {
server->Response(req_meta);
return;
}
auto unit_len = req_data.lens[1];
CHECK_GT(unit_len, 0);
// indices
std::vector<int64_t> indices(num_rows);
DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows);
TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask);
size_t ds[] = {(size_t) num_rows, (size_t) unit_len};
TShape dshape(ds, ds + 2);
TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*)
NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0);
exec_.Exec([this, master_key, &recved, &stored](){
CHECK(updater_);
updater_(master_key, recved, &stored);
});
server->Response(req_meta);
stored.WaitToRead();
}
} else {
// pull
if (log_verbose_) LOG(INFO) << "pull: " << master_key;
ps::KVPairs<real_t> response;
if (num_rows == 0) {
std::vector<int> lens(req_data.keys.size(), 0);
response.keys = req_data.keys;
response.lens.CopyFrom(lens.begin(), lens.end());
server->Response(req_meta, response);
return;
}
CHECK(!stored.is_none()) << "init " << master_key << " first";
auto shape = stored.shape();
auto unit_len = shape.ProdShape(1, shape.ndim());
const float* data = stored.data().dptr<float>();
auto len = unit_len * num_rows;
// concat values
response.vals.resize(len);
#pragma omp parallel for
for (size_t i = 1; i <= num_rows; i++) {
int key = DecodeKey(req_data.keys[i]);
int64_t row_id = key - master_key;
const auto src = data + row_id * unit_len;
auto begin = (i - 1) * unit_len;
auto end = i * unit_len;
response.vals.segment(begin, end).CopyFrom(src, unit_len);
}
// setup response
response.keys = req_data.keys;
std::vector<int> lens(req_data.keys.size(), unit_len);
lens[0] = 0;
response.lens.CopyFrom(lens.begin(), lens.end());
server->Response(req_meta, response);
}
}
void DefaultStorageResponse(int key, const NDArray& stored,
const ps::KVMeta& req_meta,
const ps::KVPairs<real_t> &req_data,
ps::KVServer<real_t>* server) {
ps::KVPairs<real_t> response;
CHECK(!stored.is_none()) << "init " << key << " first";
auto len = stored.shape().Size();
response.keys = req_data.keys;
response.lens = {len};
// TODO(mli) try to remove this CopyFrom
response.vals.CopyFrom(static_cast<const float*>(stored.data().dptr_), len);
server->Response(req_meta, response);
}
void DataHandleCompressed(const ps::KVMeta& req_meta,
const ps::KVPairs<real_t> &req_data,
ps::KVServer<real_t>* server) {
if (req_meta.push) {
      // Several WaitToRead calls are used here because \a recved's memory
      // could be deallocated when this function returns, so we need to make
      // sure the operators working on the \a NDArray have actually finished.
      // The first entry is a dummy key representing the original size of the
      // array; its len is 0.
CHECK_EQ(req_data.keys.size(), (size_t)2);
CHECK_EQ(req_data.lens.size(), (size_t)2);
CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[1]);
int original_size = DecodeKey(req_data.keys[0]);
int key = DecodeKey(req_data.keys[1]);
auto& stored = store_[key];
size_t ds[] = {(size_t)req_data.lens[1]};
TShape dshape(ds, ds + 1);
TBlob recv_blob((real_t*) req_data.vals.data(), // NOLINT(*)
dshape, cpu::kDevMask);
NDArray recved = NDArray(recv_blob, 0);
NDArray decomp_buf = decomp_buf_[key];
dshape = TShape{(int64_t) original_size};
if (decomp_buf.is_none()) {
decomp_buf = NDArray(dshape, Context());
}
if (stored.is_none()) {
stored = NDArray(dshape, Context());
gradient_compression_->Dequantize(recved, &stored, 0);
server->Response(req_meta);
stored.WaitToRead();
} else if (sync_mode_) {
// synced push
auto& merged = merge_buf_[key];
if (merged.array.is_none()) {
merged.array = NDArray(dshape, Context());
}
if (merged.request.size() == 0) {
gradient_compression_->Dequantize(recved, &merged.array, 0);
} else {
gradient_compression_->Dequantize(recved, &decomp_buf, 0);
merged.array += decomp_buf;
}
merged.request.push_back(req_meta);
ApplyUpdates(key, &merged, &stored, server);
} else {
// async push
gradient_compression_->Dequantize(recved, &decomp_buf, 0);
exec_.Exec([this, key, &decomp_buf, &stored]() {
CHECK(updater_);
updater_(key, decomp_buf, &stored);
});
server->Response(req_meta);
stored.WaitToRead();
}
} else { // pull
CHECK_EQ(req_data.keys.size(), (size_t)1);
CHECK_EQ(req_data.lens.size(), (size_t)0);
int key = DecodeKey(req_data.keys[0]);
DefaultStorageResponse(key, store_[key], req_meta, req_data, server);
}
}
  /*
   * For reference, the ps-lite request structures handled below:
   *
   *   struct KVPairs {
   *     KVPairs() {}        // empty constructor
   *     SArray<Key> keys;   // the list of keys
   *     SArray<Val> vals;   // the according values
   *     SArray<int> lens;   // the according value lengths (could be empty)
   *   };
   *
   *   // meta information about a kv request
   *   struct KVMeta {
   *     int cmd;            // the int cmd
   *     bool push;          // whether or not this is a push request
   *     int sender;         // sender's node id
   *     int timestamp;      // the associated timestamp
   *     int customer_id;    // the customer id of worker
   *   };
   */
typedef std::pair<int, double> PAIR;
  void getSortedScoreVector(const std::vector<ps::KVPairs<real_t>> &alldata_v,
                            std::vector<PAIR> &idx_score_vec) {
    int nd_size = alldata_v[0].lens[0];
    for (size_t i = 0; i < alldata_v.size(); i++) {
      real_t* a1 = (real_t*)alldata_v[i].vals.data();
      real_t score = 0;
      for (size_t j = 0; j < alldata_v.size(); j++) {
        if (i == j) continue;
        real_t* a2 = (real_t*)alldata_v[j].vals.data();
        // accumulate the squared Euclidean distance between the two gradients
        for (int n = 0; n < nd_size; n++) {
          score += (a1[n] - a2[n]) * (a1[n] - a2[n]);
        }
      }
      // store the <worker index, score> pair
      idx_score_vec.push_back(std::make_pair(static_cast<int>(i), static_cast<double>(score)));
    }
    // sort workers by ascending score
    std::sort(idx_score_vec.begin(), idx_score_vec.end(),
              [](const PAIR &x, const PAIR &y) -> bool {
      return x.second < y.second;
    });
  }
  void Krum(const std::vector<ps::KVPairs<real_t>> &alldata_v, real_t* res_sum, int byzt_num) {
    CHECK_GT(ps::NumWorkers() - byzt_num - 2, 0) << "number of Byzantine nodes is too large!";
    // score every worker and sort workers by ascending score
    std::vector<PAIR> idx_score_vec(0);
    getSortedScoreVector(alldata_v, idx_score_vec);
    int nd_size = alldata_v[0].lens[0];
    // sum the NumWorkers() - 2 - byzt_num gradients with the smallest scores
    for (int i = 0; i < ps::NumWorkers() - 2 - byzt_num; i++) {
      real_t* ad = (real_t*)alldata_v[idx_score_vec[i].first].vals.data();
      for (int j = 0; j < nd_size; j++) {  // nd_size == req_data.vals.size()
        res_sum[j] += ad[j];
      }
    }
    // rescale the partial sum so it has the magnitude of a sum over all workers
    for (int j = 0; j < nd_size; j++) {
      res_sum[j] *= ps::NumWorkers();
      res_sum[j] /= ps::NumWorkers() - 2 - byzt_num;
    }
  }
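  // Worked example (hypothetical numbers, not part of the original code): with
  // ps::NumWorkers() == 6 and byzt_num == 1, each worker's score is its summed
  // squared distance to all other workers; the 6 - 2 - 1 = 3 gradients with the
  // smallest scores are summed into res_sum, which is then rescaled by
  // 6 / 3 = 2 so it has the magnitude of a plain sum over all six workers.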
  void TrimmedMean(const std::vector<ps::KVPairs<real_t>> &alldata_v, real_t* res_sum, int byzt_num) {
    CHECK_GT(ps::NumWorkers() - 2 * byzt_num, 0) << "number of Byzantine nodes is too large!";
    int nd_size = alldata_v[0].lens[0];
    int count = ps::NumWorkers() - 2 * byzt_num;
    for (int dim = 0; dim < nd_size; dim++) {
      // collect this coordinate from every worker and sort the values
      std::vector<double> one_dim_vec(0);
      for (int i = 0; i < ps::NumWorkers(); i++) {
        real_t* data = (real_t*)alldata_v[i].vals.data();
        one_dim_vec.push_back(data[dim]);
      }
      std::sort(one_dim_vec.begin(), one_dim_vec.end());
      // sum the values that remain after trimming byzt_num from each end
      for (int k = byzt_num; k < ps::NumWorkers() - byzt_num; k++) {
        res_sum[dim] += one_dim_vec[k];
      }
      // take the mean of the trimmed values
      res_sum[dim] /= count;
    }
  }
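  // Worked example (hypothetical numbers, not part of the original code): with
  // ps::NumWorkers() == 6 and byzt_num == 1, the six values of each coordinate
  // are sorted, the single smallest and single largest are dropped, and the
  // remaining 6 - 2*1 = 4 values are averaged into res_sum[dim]. Unlike Krum
  // above, the result is a per-coordinate mean rather than a rescaled sum.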
bool CompareByDist (const Dist2TMean &i, const Dist2TMean &j)
{
return (i.dist_ < j.dist_);
}
  void CongAlgo(const std::vector<ps::KVPairs<real_t>> &alldata_v, real_t* res_sum, int byzt_num) {
    CHECK_GT(ps::NumWorkers() - 2 * byzt_num, 0) << "number of Byzantine nodes is too large!";
    int nd_size = alldata_v[0].lens[0];
    int trimmedcount = ps::NumWorkers() - 2 * byzt_num;
    int count = ps::NumWorkers() - byzt_num;
    for (int dim = 0; dim < nd_size; dim++) {
      // collect this coordinate from every worker and sort the values
      std::vector<double> one_dim_vec(0);
      for (int i = 0; i < ps::NumWorkers(); i++) {
        real_t* data = (real_t*)alldata_v[i].vals.data();
        one_dim_vec.push_back(data[dim]);
      }
      std::sort(one_dim_vec.begin(), one_dim_vec.end());
      // calculate the b-trimmed mean
      real_t btmean = 0;
      for (int k = byzt_num; k < ps::NumWorkers() - byzt_num; k++) {
        btmean += one_dim_vec[k];
      }
      btmean /= trimmedcount;
      // rank all values by their distance to the trimmed mean
      std::vector<Dist2TMean> dist_vec(0);
      for (auto one_dim_data : one_dim_vec) {
        // use std::fabs: plain abs would truncate the floating-point distance
        Dist2TMean p(one_dim_data, std::fabs(one_dim_data - btmean));
        dist_vec.push_back(p);
      }
      std::sort(dist_vec.begin(), dist_vec.end(), [](const Dist2TMean &x, const Dist2TMean &y) {
        return x.dist_ < y.dist_;
      });
      // average the NumWorkers() - byzt_num values nearest to the trimmed mean
      for (int i = 0; i < ps::NumWorkers() - byzt_num; i++) {
        res_sum[dim] += dist_vec[i].value_;
      }
      res_sum[dim] /= count;
    }
  }
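  // Worked example (hypothetical numbers, not part of the original code): with
  // ps::NumWorkers() == 6 and byzt_num == 1, each coordinate is first reduced
  // to its 1-trimmed mean over 6 - 2*1 = 4 values, and then the 6 - 1 = 5
  // values closest to that mean are averaged into res_sum[dim].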
void DataHandleDefault(const ps::KVMeta& req_meta,
const ps::KVPairs<real_t> &req_data,
ps::KVServer<real_t>* server) {
CHECK_EQ(req_meta.cmd, static_cast<int>(DataHandleType::kDefaultPushPull));
// do some check
CHECK_EQ(req_data.keys.size(), (size_t)1);
if (req_meta.push) {
CHECK_EQ(req_data.lens.size(), (size_t)1);
CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]);
}
int key = DecodeKey(req_data.keys[0]);
auto& stored = store_[key];
    // Several WaitToRead calls are used here because \a recved's memory
    // could be deallocated when this function returns, so we need to make
    // sure the operators working on the \a NDArray have actually finished.
if (req_meta.push) {
if (stored.is_none()) {
size_t ds[] = {(size_t)req_data.lens[0]};
TShape dshape(ds, ds + 1);
TBlob recv_blob((real_t*)req_data.vals.data(), // NOLINT(*)
dshape, cpu::kDevMask);
        NDArray recved = NDArray(recv_blob, 0);  // received data that needs to be pushed into stored
// initialization
stored = NDArray(dshape, Context());
CopyFromTo(recved, &stored, 0);
        server->Response(req_meta);  // acknowledge the initialization push
stored.WaitToRead();
} else if (sync_mode_) {
/* ------ baseline-------
        // synced push -- use merge_buf_: it represents values from different workers being merged.
size_t ds[] = {(size_t)req_data.lens[0]};
TShape dshape(ds, ds + 1);
auto& merged = merge_buf_[key];
if (merged.array.is_none()) {
merged.array = NDArray(dshape, Context()); // Context()-cpu/gpu
}
uint byzt_num = 3;
if (merged.request.size() < byzt_num) {
real_t* b1 = (real_t*)req_data.vals.data();
for (uint n = 0; n < req_data.vals.size(); n++) {
b1[n] *= -100;
}
}
TBlob recv_blob((real_t*)req_data.vals.data(), // NOLINT(*)
dshape, cpu::kDevMask);
        NDArray recved = NDArray(recv_blob, 0);  // received data that needs to be pushed into stored
if (merged.request.size() == 0) {
CopyFromTo(recved, &merged.array, 0);
} else {
merged.array += recved;
}
merged.request.push_back(req_meta);
ApplyUpdates(key, &merged, &stored, server);
--------- baseline---------- */
auto& merged = merge_buf_[key];
merged.request.push_back(req_meta);
auto& alldata_v = all_push_buf_[key];
alldata_v.push_back(req_data);
// NDArray one_array = NDArray(dshape, Context());
// CopyFromTo(recved, &one_array, 0);
// push_vector.push_back(one_array);
// if (push_vector.size() < (size_t) ps::NumWorkers()){
// one_array.WaitToRead();
// }
// initialize merged.array
if (alldata_v.size() == (size_t) ps::NumWorkers()){
if (merged.array.is_none()) {
size_t ds[] = {(size_t)alldata_v[0].lens[0]};
TShape dshape(ds, ds + 1);
merged.array = NDArray(dshape, Context()); // Context()-cpu/gpu
}
          // buffer holding the aggregated gradient for this key
          real_t* res_sum = (real_t*)calloc(alldata_v[0].vals.size(), sizeof(real_t));
          // simulate a Byzantine worker by scaling the first worker's gradient by -100
          real_t* a1 = (real_t*)alldata_v[0].vals.data();
          // real_t* a3 = (real_t*)alldata_v[2].vals.data();
          // real_t* a5 = (real_t*)alldata_v[4].vals.data();
          for (size_t n = 0; n < alldata_v[0].vals.size(); n++) {
            a1[n] *= -100;
            // a3[n] *= -90;
            // a5[n] *= -110;
          }
          int byzt_num = 1;
// ------ KRUM ---------
// Krum(alldata_v, res_sum, byzt_num);
// ------ TrimmedMean ---------
TrimmedMean(alldata_v, res_sum, byzt_num);
// ------ CongAlgo -----
// CongAlgo(alldata_v, res_sum, byzt_num);
// ------- test failure case with no Krum -------
// int nd_size = alldata_v[0].lens[0];
// for (int i = 0; i < ps::NumWorkers(); i++) { //ps::NumWorkers()-2-byt_num
// real_t* ad = (real_t*)alldata_v[i].vals.data();
// for (int j = 0; j < nd_size; j++) { // sz == req_data.vals.size()
// res_sum[j] += ad[j];
// }
// }
size_t ds[] = {(size_t)alldata_v[0].lens[0]};
TShape dshape(ds, ds + 1);
TBlob recv_blob(res_sum, dshape, cpu::kDevMask);
          NDArray recved = NDArray(recv_blob, 0);  // aggregated data to be pushed into stored
          CopyFromTo(recved, &merged.array, 0);
          ApplyUpdates(key, &merged, &stored, server);
          // ApplyUpdates waits on the store, so the copy out of res_sum has completed
          free(res_sum);
          alldata_v.clear();
}
} else {
// async push
size_t ds[] = {(size_t)req_data.lens[0]};
TShape dshape(ds, ds + 1);
TBlob recv_blob((real_t*)req_data.vals.data(), // NOLINT(*)
dshape, cpu::kDevMask);
        NDArray recved = NDArray(recv_blob, 0);  // received data that needs to be pushed into stored
exec_.Exec([this, key, &recved, &stored](){
CHECK(updater_);
updater_(key, recved, &stored);
});
server->Response(req_meta);
stored.WaitToRead();
}
}
else {
// pull
DefaultStorageResponse(key, stored, req_meta, req_data, server);
}
}
int DecodeKey(ps::Key key) {
auto kr = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MyRank()];
return key - kr.begin();
}
  // the members below are also private
/**
* \brief user defined mode for push
*/
bool sync_mode_;
KVStore::Controller controller_;
KVStore::Updater updater_;
/**
* \brief store_ contains the value at kvstore for each key
*/
std::unordered_map<int, NDArray> store_;
/**
* \brief merge_buf_ is a buffer used if sync_mode is true. It represents
* values from different workers being merged. The store will be updated
* to this value when values from all workers are pushed into this buffer.
*/
std::unordered_map<int, MergeBuf> merge_buf_;
/**
* \brief all_push_buf_ is a buffer used if sync_mode is true. It represents
* all pushed data from all workers in a single iteration. The store will be
* updated to an aggregated value when values from all workers are pushed
* into this buffer.
*/
std::unordered_map<int, std::vector<ps::KVPairs<real_t>>> all_push_buf_;
/**
* \brief decomp_buf_ is a buffer into which compressed values are
* decompressed before merging to the store. used when compress_!='none'
*/
std::unordered_map<int, NDArray> decomp_buf_;
Executor exec_;
ps::KVServer<float>* ps_server_;
// whether to LOG verbose information
bool log_verbose_;
/**
* \brief gradient compression object.
* starts with none, used after SetGradientCompression sets the type
* currently there is no support for unsetting gradient compression
*/
std::shared_ptr<kvstore::GradientCompression> gradient_compression_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
|
GB_unop__identity_fp32_uint32.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp32_uint32)
// op(A') function: GB (_unop_tran__identity_fp32_uint32)
// C type: float
// A type: uint32_t
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
float z = (float) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (float) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_fp32_uint32)
(
float *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
float z = (float) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint32_t aij = Ax [p] ;
float z = (float) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_fp32_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
add_vect.c
|
#include <stdio.h>
#define N 100000
int main(int argc, char **argv)
{
int i;
int a[N], b[N], c[N], d[N];
for (i=0; i < N; i++)
{
b[i] = 2;
c[i] = 5;
}
#pragma omp parallel
{
#pragma omp for
for (i = 0; i < N; i++)
a[i] = b[i] + c[i];
#pragma omp for
for (i = 0; i < N; i++)
d[i] = a[i] + b[i];
}
printf("a[%d] = %d\n", 1, a[1]);
printf("d[%d] = %d\n", 5, d[5]);
return 0;
}
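// Build/run sketch (assuming a GCC-compatible compiler with OpenMP support):
//   gcc -fopenmp add_vect.c -o add_vect && ./add_vect
// Expected output: a[1] = 7 and d[5] = 9, since a[i] = 2 + 5 and d[i] = 7 + 2.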
|
3.race2.c
|
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define N 20
int main() {
int A[N][N][N];
#pragma omp parallel for
for (int i = 1; i < N; i++)
for (int j = 1; j < N; j++)
for (int k = 1; k < N; k++)
A[i][j][k] = A[i - 1][j][k];
}
// CHECK: Data Race detected
// END
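// A race-free variant (a sketch, not part of the original test case): the
// loop-carried dependence is only along i (A[i][..] reads A[i-1][..]), so the
// i loop must stay serial while the independent j loop can be parallelized:
//
//   for (int i = 1; i < N; i++) {
//     #pragma omp parallel for
//     for (int j = 1; j < N; j++)
//       for (int k = 1; k < N; k++)
//         A[i][j][k] = A[i - 1][j][k];
//   }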
|