3d25pt_var.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y, storing the result in RESULT.
 * Return 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }
  /* Compute the time remaining to wait. tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* All four arguments are required; the original left the sizes
   * uninitialized when fewer were given. */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return EXIT_FAILURE;
  }
  Nx = atoi(argv[1]) + 8;
  Ny = atoi(argv[2]) + 8;
  Nz = atoi(argv[3]) + 8;
  Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for (m=0; m<2; m++) {
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for (i=0; i<Nz; i++) {
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for (j=0; j<Ny; j++) {
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for (m=0; m<13; m++) {
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for (i=0; i<Nz; i++) {
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for (j=0; j<Ny; j++) {
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 4;
  tile_size[3] = 512;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test=0; test<TESTS; test++) {
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1; t1<=2*Nt-2; t1++) {
        lbp = ceild(t1+2,2);
        ubp = min(floord(4*Nt+Nz-9,4), floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp; t2<=ubp; t2++) {
          for (t3=max(ceild(t1+2,2),ceild(4*t2-Nz+9,4)); t3<=min(min(floord(4*Nt+Ny-9,4),floord(2*t1+Ny-3,4)),floord(4*t2+Ny-9,4)); t3++) {
            for (t4=max(max(ceild(t1-252,256),ceild(4*t2-Nz-499,512)),ceild(4*t3-Ny-499,512)); t4<=min(min(min(floord(4*Nt+Nx-9,512),floord(2*t1+Nx-3,512)),floord(4*t2+Nx-9,512)),floord(4*t3+Nx-9,512)); t4++) {
              for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)); t5<=floord(t1+1,2); t5++) {
                for (t6=max(4*t2,-4*t1+4*t2+8*t5-3); t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5); t6++) {
                  for (t7=4*t3; t7<=min(4*t3+3,4*t5+Ny-5); t7++) {
                    lbv = max(512*t4, 4*t5+4);
                    ubv = min(512*t4+511, 4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv; t8<=ubv; t8++) {
                      A[(t5+1)%2][-4*t5+t6][-4*t5+t7][-4*t5+t8] =
                          coef[0][-4*t5+t6][-4*t5+t7][-4*t5+t8] *  A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8]
                        + coef[1][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5%2][-4*t5+t6-1][-4*t5+t7][-4*t5+t8] + A[t5%2][-4*t5+t6+1][-4*t5+t7][-4*t5+t8])
                        + coef[2][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5%2][-4*t5+t6][-4*t5+t7-1][-4*t5+t8] + A[t5%2][-4*t5+t6][-4*t5+t7+1][-4*t5+t8])
                        + coef[3][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8-1] + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8+1])
                        + coef[4][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5%2][-4*t5+t6-2][-4*t5+t7][-4*t5+t8] + A[t5%2][-4*t5+t6+2][-4*t5+t7][-4*t5+t8])
                        + coef[5][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5%2][-4*t5+t6][-4*t5+t7-2][-4*t5+t8] + A[t5%2][-4*t5+t6][-4*t5+t7+2][-4*t5+t8])
                        + coef[6][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8-2] + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8+2])
                        + coef[7][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5%2][-4*t5+t6-3][-4*t5+t7][-4*t5+t8] + A[t5%2][-4*t5+t6+3][-4*t5+t7][-4*t5+t8])
                        + coef[8][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5%2][-4*t5+t6][-4*t5+t7-3][-4*t5+t8] + A[t5%2][-4*t5+t6][-4*t5+t7+3][-4*t5+t8])
                        + coef[9][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8-3] + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8+3])
                        + coef[10][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5%2][-4*t5+t6-4][-4*t5+t7][-4*t5+t8] + A[t5%2][-4*t5+t6+4][-4*t5+t7][-4*t5+t8])
                        + coef[11][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5%2][-4*t5+t6][-4*t5+t7-4][-4*t5+t8] + A[t5%2][-4*t5+t6][-4*t5+t7+4][-4*t5+t8])
                        + coef[12][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8-4] + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8+4]);
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for (i=0; i<Nz; i++) {
    for (j=0; j<Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for (m=0; m<13; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
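The tiled loop nest above is machine-generated (PLUTO/CLooG) and hard to read. The following untiled reference form of the same radius-4, axis-symmetric update is a sketch only, not part of the benchmark: it assumes the same double-buffered A[2][Nz][Ny][Nx] and coef[13][Nz][Ny][Nx] arrays allocated in main(), and the coefficient grouping (coef[3r-2], coef[3r-1], coef[3r] for the z-, y-, and x-axis pair at radius r) is read directly off the generated update statement.

/* Untiled reference form of the 3d25pt_var update (sketch, not the kernel). */
static void stencil_25pt_var_ref(double ****A, double ****coef,
                                 int Nt, int Nz, int Ny, int Nx)
{
  for (int t = 0; t < Nt; t++)
    for (int i = 4; i < Nz-4; i++)
      for (int j = 4; j < Ny-4; j++)
        for (int k = 4; k < Nx-4; k++) {
          double v = coef[0][i][j][k] * A[t%2][i][j][k];
          for (int r = 1; r <= 4; r++) {
            /* one coefficient per axis and radius, as in the statement above */
            v += coef[3*r-2][i][j][k] * (A[t%2][i-r][j][k] + A[t%2][i+r][j][k]);
            v += coef[3*r-1][i][j][k] * (A[t%2][i][j-r][k] + A[t%2][i][j+r][k]);
            v += coef[3*r  ][i][j][k] * (A[t%2][i][j][k-r] + A[t%2][i][j][k+r]);
          }
          A[(t+1)%2][i][j][k] = v;
        }
}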
Sieve.c
#include <sys/types.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include "hif.h"

#define NSRCH 100

int a_declaration;

int main(void)
{
  char *prime;
  int i;

  pers_attach();

  /* The original allocated NSRCH*sizeof(prime), i.e. NSRCH pointer-sized
   * slots; only NSRCH chars are needed. */
  prime = (char *) malloc(NSRCH*sizeof(*prime));
  memset(prime, 1, NSRCH*sizeof(*prime));

#pragma omp target
  {
    for (i=2; i<NSRCH; i++) {
      // printf("i is %d and prime[i] is %d\n", i, prime[i]);
      if (prime[i]) {
        int idx = i*i;
        while (idx < NSRCH) {
          // printf("setting prime for idx %d to 0\n", idx);
          prime[idx] = 0;
          idx += i;
        }
      }
    }
  } // end pragma omp target

  int n = 0;
  for (i=1; i<NSRCH; i++) {
    if (prime[i]) {
      n++;
      printf("%d is prime\n", i);
    }
  }
  /* 25 primes below 100, plus 1 (never cleared by the sieve), gives 26. */
  printf("%s\n", (n==26) ? "PASSED" : "FAILED");

  free(prime);
  pers_detach();
  return 0;
}
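Sieve.c depends on "hif.h" and the pers_attach()/pers_detach() runtime, which are not part of this dump, plus an OpenMP target region. A minimal host-only sketch of the same test, under the assumption that pers_attach/pers_detach only set up the offload runtime and do not affect the sieve logic:

/* Host-only sketch of the Sieve.c test; no hif.h / offload runtime.
 * Counts 1 as "prime" exactly like the original, so the pass value is 26. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NSRCH 100

int main(void)
{
  char *prime = malloc(NSRCH);
  memset(prime, 1, NSRCH);

  for (int i = 2; i < NSRCH; i++)
    if (prime[i])
      for (int idx = i*i; idx < NSRCH; idx += i)
        prime[idx] = 0;

  int n = 0;
  for (int i = 1; i < NSRCH; i++)
    if (prime[i]) { n++; printf("%d is prime\n", i); }

  printf("%s\n", (n == 26) ? "PASSED" : "FAILED");
  free(prime);
  return 0;
}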
3d7pt.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y, storing the result in RESULT.
 * Return 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }
  /* Compute the time remaining to wait. tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* All four arguments are required; the original left the sizes
   * uninitialized when fewer were given. */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return EXIT_FAILURE;
  }
  Nx = atoi(argv[1]) + 2;
  Ny = atoi(argv[2]) + 2;
  Nz = atoi(argv[3]) + 2;
  Nt = atoi(argv[4]);

  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for (i=0; i<Nz; i++) {
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for (j=0; j<Ny; j++) {
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 16;
  tile_size[3] = 512;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test=0; test<TESTS; test++) {
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1; t1<=floord(Nt-2,16); t1++) {
        lbp = max(ceild(t1,2), ceild(32*t1-Nt+3,32));
        ubp = min(floord(Nt+Nz-4,32), floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp; t2<=ubp; t2++) {
          for (t3=max(max(0,ceild(32*t2-Nz-12,16)),t1); t3<=min(min(min(floord(Nt+Ny-4,16),floord(16*t1+Ny+29,16)),floord(32*t2+Ny+28,16)),floord(32*t1-32*t2+Nz+Ny+27,16)); t3++) {
            for (t4=max(max(max(0,ceild(t1-31,32)),ceild(32*t2-Nz-508,512)),ceild(16*t3-Ny-508,512)); t4<=min(min(min(min(floord(Nt+Nx-4,512),floord(16*t1+Nx+29,512)),floord(32*t2+Nx+28,512)),floord(16*t3+Nx+12,512)),floord(32*t1-32*t2+Nz+Nx+27,512)); t4++) {
              for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),16*t3-Ny+2),512*t4-Nx+2); t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),16*t3+14),512*t4+510),32*t1-32*t2+Nz+29); t5++) {
                for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31); t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2); t6++) {
                  for (t7=max(16*t3,t5+1); t7<=min(16*t3+15,t5+Ny-2); t7++) {
                    lbv = max(512*t4, t5+1);
                    ubv = min(512*t4+511, t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv; t8<=ubv; t8++) {
                      A[(t5+1)%2][-t5+t6][-t5+t7][-t5+t8] =
                          alpha * A[t5%2][-t5+t6][-t5+t7][-t5+t8]
                        + beta * (A[t5%2][-t5+t6-1][-t5+t7][-t5+t8]
                                + A[t5%2][-t5+t6][-t5+t7-1][-t5+t8]
                                + A[t5%2][-t5+t6][-t5+t7][-t5+t8-1]
                                + A[t5%2][-t5+t6+1][-t5+t7][-t5+t8]
                                + A[t5%2][-t5+t6][-t5+t7+1][-t5+t8]
                                + A[t5%2][-t5+t6][-t5+t7][-t5+t8+1]);
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (commented out: freeing here causes performance
  // degradation in the benchmark harness)
  /*
  for (i=0; i<Nz; i++) {
    for (j=0; j<Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
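As with the 25-point variant, the update computed by this tiled nest is much easier to see in untiled form. The sketch below assumes the same double-buffered A[2][Nz][Ny][Nx] layout and the alpha/beta constants from main(); it is a reference rendering, not the benchmark kernel.

/* Untiled reference form of the 3d7pt update (sketch, not the kernel). */
static void stencil_7pt_ref(double ****A, double alpha, double beta,
                            int Nt, int Nz, int Ny, int Nx)
{
  for (int t = 0; t < Nt-1; t++)
    for (int i = 1; i < Nz-1; i++)
      for (int j = 1; j < Ny-1; j++)
        for (int k = 1; k < Nx-1; k++)
          A[(t+1)%2][i][j][k] =
              alpha * A[t%2][i][j][k]
            + beta * (A[t%2][i-1][j][k] + A[t%2][i+1][j][k]
                    + A[t%2][i][j-1][k] + A[t%2][i][j+1][k]
                    + A[t%2][i][j][k-1] + A[t%2][i][j][k+1]);
}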
c-tree.h
/* Definitions for C parsing and type checking. Copyright (C) 1987-2018 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_C_TREE_H #define GCC_C_TREE_H #include "c-family/c-common.h" #include "diagnostic.h" /* struct lang_identifier is private to c-decl.c, but langhooks.c needs to know how big it is. This is sanity-checked in c-decl.c. */ #define C_SIZEOF_STRUCT_LANG_IDENTIFIER \ (sizeof (struct c_common_identifier) + 3 * sizeof (void *)) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */ #define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */ #define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE) /* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE nonzero if the definition of the type has already started. */ #define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE) /* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable declarations whose type would be completed by completing that type. */ #define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE) /* In an IDENTIFIER_NODE, nonzero if this identifier is actually a keyword. C_RID_CODE (node) is then the RID_* value of the keyword. */ #define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID) /* Record whether a type or decl was written with nonconstant size. Note that TYPE_SIZE may have simplified to a constant. */ #define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE) #define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE) /* Record whether a type is defined inside a struct or union type. This is used for -Wc++-compat. */ #define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE) /* Record whether an "incomplete type" error was given for the type. */ #define C_TYPE_ERROR_REPORTED(TYPE) TYPE_LANG_FLAG_3 (TYPE) /* Record whether a typedef for type `int' was actually `signed int'. */ #define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP) /* For a FUNCTION_DECL, nonzero if it was defined without an explicit return type. */ #define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP) /* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */ #define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP) /* For a PARM_DECL, nonzero if it was declared as an array. */ #define C_ARRAY_PARAMETER(NODE) DECL_LANG_FLAG_0 (NODE) /* For FUNCTION_DECLs, evaluates true if the decl is built-in but has been declared. */ #define C_DECL_DECLARED_BUILTIN(EXP) \ DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP)) /* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a built-in prototype and does not have a non-built-in prototype. */ #define C_DECL_BUILTIN_PROTOTYPE(EXP) \ DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP)) /* Record whether a decl was declared register. This is strictly a front-end flag, whereas DECL_REGISTER is used for code generation; they may differ for structures with volatile fields. 
*/ #define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP) /* Record whether a decl was used in an expression anywhere except an unevaluated operand of sizeof / typeof / alignof. This is only used for functions declared static but not defined, though outside sizeof and typeof it is set for other function decls as well. */ #define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP)) /* Record whether a variable has been declared threadprivate by #pragma omp threadprivate. */ #define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL)) /* Nonzero for a decl which either doesn't exist or isn't a prototype. N.B. Could be simplified if all built-in decls had complete prototypes (but this is presently difficult because some of them need FILE*). */ #define C_DECL_ISNT_PROTOTYPE(EXP) \ (EXP == 0 \ || (!prototype_p (TREE_TYPE (EXP)) \ && !DECL_BUILT_IN (EXP))) /* For FUNCTION_TYPE, a hidden list of types of arguments. The same as TYPE_ARG_TYPES for functions with prototypes, but created for functions without prototypes. */ #define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE) /* For a CONSTRUCTOR, whether some initializer contains a subexpression meaning it is not a constant expression. */ #define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR)) /* For a SAVE_EXPR, nonzero if the operand of the SAVE_EXPR has already been folded. */ #define SAVE_EXPR_FOLDED_P(EXP) TREE_LANG_FLAG_1 (SAVE_EXPR_CHECK (EXP)) /* Record parser information about an expression that is irrelevant for code generation alongside a tree representing its value. */ struct c_expr { /* The value of the expression. */ tree value; /* Record the original unary/binary operator of an expression, which may have been changed by fold, STRING_CST for unparenthesized string constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls (even if parenthesized), for subexpressions, and for non-constant initializers, or ERROR_MARK for other expressions (including parenthesized expressions). */ enum tree_code original_code; /* If not NULL, the original type of an expression. This will differ from the type of the value field for an enum constant. The type of an enum constant is a plain integer type, but this field will be the enum type. */ tree original_type; /* The source range of this expression. This is redundant for node values that have locations, but not all node kinds have locations (e.g. constants, and references to params, locals, etc), so we stash a copy here. */ source_range src_range; /* Access to the first and last locations within the source spelling of this expression. */ location_t get_start () const { return src_range.m_start; } location_t get_finish () const { return src_range.m_finish; } location_t get_location () const { if (EXPR_HAS_LOCATION (value)) return EXPR_LOCATION (value); else return make_location (get_start (), get_start (), get_finish ()); } /* Set the value to error_mark_node whilst ensuring that src_range is initialized. */ void set_error () { value = error_mark_node; src_range.m_start = UNKNOWN_LOCATION; src_range.m_finish = UNKNOWN_LOCATION; } }; /* Type alias for struct c_expr. This allows to use the structure inside the VEC types. */ typedef struct c_expr c_expr_t; /* A kind of type specifier. Note that this information is currently only used to distinguish tag definitions, tag references and typeof uses. */ enum c_typespec_kind { /* No typespec. This appears only in struct c_declspec. */ ctsk_none, /* A reserved keyword type specifier. 
*/ ctsk_resword, /* A reference to a tag, previously declared, such as "struct foo". This includes where the previous declaration was as a different kind of tag, in which case this is only valid if shadowing that tag in an inner scope. */ ctsk_tagref, /* A reference to a tag, not previously declared in a visible scope. */ ctsk_tagfirstref, /* A definition of a tag such as "struct foo { int a; }". */ ctsk_tagdef, /* A typedef name. */ ctsk_typedef, /* An ObjC-specific kind of type specifier. */ ctsk_objc, /* A typeof specifier, or _Atomic ( type-name ). */ ctsk_typeof }; /* A type specifier: this structure is created in the parser and passed to declspecs_add_type only. */ struct c_typespec { /* What kind of type specifier this is. */ enum c_typespec_kind kind; /* Whether the expression has operands suitable for use in constant expressions. */ bool expr_const_operands; /* The specifier itself. */ tree spec; /* An expression to be evaluated before the type specifier, in the case of typeof specifiers, or NULL otherwise or if no such expression is required for a particular typeof specifier. In particular, when typeof is applied to an expression of variably modified type, that expression must be evaluated in order to determine array sizes that form part of the type, but the expression itself (as opposed to the array sizes) forms no part of the type and so needs to be recorded separately. */ tree expr; }; /* A storage class specifier. */ enum c_storage_class { csc_none, csc_auto, csc_extern, csc_register, csc_static, csc_typedef }; /* A type specifier keyword "void", "_Bool", "char", "int", "float", "double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum", or none of these. */ enum c_typespec_keyword { cts_none, cts_void, cts_bool, cts_char, cts_int, cts_float, cts_int_n, cts_double, cts_dfloat32, cts_dfloat64, cts_dfloat128, cts_floatn_nx, cts_fract, cts_accum, cts_auto_type }; /* This enum lists all the possible declarator specifiers, storage class or attribute that a user can write. There is at least one enumerator per possible declarator specifier in the struct c_declspecs below. It is used to index the array of declspec locations in struct c_declspecs. */ enum c_declspec_word { cdw_typespec /* A catch-all for a typespec. */, cdw_storage_class /* A catch-all for a storage class */, cdw_attributes, cdw_typedef, cdw_explicit_signed, cdw_deprecated, cdw_default_int, cdw_long, cdw_long_long, cdw_short, cdw_signed, cdw_unsigned, cdw_complex, cdw_inline, cdw_noreturn, cdw_thread, cdw_const, cdw_volatile, cdw_restrict, cdw_atomic, cdw_saturating, cdw_alignas, cdw_address_space, cdw_gimple, cdw_rtl, cdw_number_of_elements /* This one must always be the last enumerator. */ }; /* A sequence of declaration specifiers in C. When a new declaration specifier is added, please update the enum c_declspec_word above accordingly. */ struct c_declspecs { source_location locations[cdw_number_of_elements]; /* The type specified, if a single type specifier such as a struct, union or enum specifier, typedef name or typeof specifies the whole type, or NULL_TREE if none or a keyword such as "void" or "char" is used. Does not include qualifiers. */ tree type; /* Any expression to be evaluated before the type, from a typeof specifier. */ tree expr; /* The attributes from a typedef decl. */ tree decl_attr; /* When parsing, the attributes. Outside the parser, this will be NULL; attributes (possibly from multiple lists) will be passed separately. 
*/ tree attrs; /* The pass to start compiling a __GIMPLE or __RTL function with. */ char *gimple_or_rtl_pass; /* The base-2 log of the greatest alignment required by an _Alignas specifier, in bytes, or -1 if no such specifiers with nonzero alignment. */ int align_log; /* For the __intN declspec, this stores the index into the int_n_* arrays. */ int int_n_idx; /* For the _FloatN and _FloatNx declspec, this stores the index into the floatn_nx_types array. */ int floatn_nx_idx; /* The storage class specifier, or csc_none if none. */ enum c_storage_class storage_class; /* Any type specifier keyword used such as "int", not reflecting modifiers such as "short", or cts_none if none. */ ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8; /* The kind of type specifier if one has been seen, ctsk_none otherwise. */ ENUM_BITFIELD (c_typespec_kind) typespec_kind : 3; /* Whether any expressions in typeof specifiers may appear in constant expressions. */ BOOL_BITFIELD expr_const_operands : 1; /* Whether any declaration specifiers have been seen at all. */ BOOL_BITFIELD declspecs_seen_p : 1; /* Whether something other than a storage class specifier or attribute has been seen. This is used to warn for the obsolescent usage of storage class specifiers other than at the start of the list. (Doing this properly would require function specifiers to be handled separately from storage class specifiers.) */ BOOL_BITFIELD non_sc_seen_p : 1; /* Whether the type is specified by a typedef or typeof name. */ BOOL_BITFIELD typedef_p : 1; /* Whether the type is explicitly "signed" or specified by a typedef whose type is explicitly "signed". */ BOOL_BITFIELD explicit_signed_p : 1; /* Whether the specifiers include a deprecated typedef. */ BOOL_BITFIELD deprecated_p : 1; /* Whether the type defaulted to "int" because there were no type specifiers. */ BOOL_BITFIELD default_int_p : 1; /* Whether "long" was specified. */ BOOL_BITFIELD long_p : 1; /* Whether "long" was specified more than once. */ BOOL_BITFIELD long_long_p : 1; /* Whether "short" was specified. */ BOOL_BITFIELD short_p : 1; /* Whether "signed" was specified. */ BOOL_BITFIELD signed_p : 1; /* Whether "unsigned" was specified. */ BOOL_BITFIELD unsigned_p : 1; /* Whether "complex" was specified. */ BOOL_BITFIELD complex_p : 1; /* Whether "inline" was specified. */ BOOL_BITFIELD inline_p : 1; /* Whether "_Noreturn" was specified. */ BOOL_BITFIELD noreturn_p : 1; /* Whether "__thread" or "_Thread_local" was specified. */ BOOL_BITFIELD thread_p : 1; /* Whether "__thread" rather than "_Thread_local" was specified. */ BOOL_BITFIELD thread_gnu_p : 1; /* Whether "const" was specified. */ BOOL_BITFIELD const_p : 1; /* Whether "volatile" was specified. */ BOOL_BITFIELD volatile_p : 1; /* Whether "restrict" was specified. */ BOOL_BITFIELD restrict_p : 1; /* Whether "_Atomic" was specified. */ BOOL_BITFIELD atomic_p : 1; /* Whether "_Sat" was specified. */ BOOL_BITFIELD saturating_p : 1; /* Whether any alignment specifier (even with zero alignment) was specified. */ BOOL_BITFIELD alignas_p : 1; /* Whether any __GIMPLE specifier was specified. */ BOOL_BITFIELD gimple_p : 1; /* Whether any __RTL specifier was specified. */ BOOL_BITFIELD rtl_p : 1; /* The address space that the declaration belongs to. */ addr_space_t address_space; }; /* The various kinds of declarators in C. */ enum c_declarator_kind { /* An identifier. */ cdk_id, /* A function. */ cdk_function, /* An array. */ cdk_array, /* A pointer. */ cdk_pointer, /* Parenthesized declarator with nested attributes.
*/ cdk_attrs }; struct c_arg_tag { /* The argument name. */ tree id; /* The type of the argument. */ tree type; }; /* Information about the parameters in a function declarator. */ struct c_arg_info { /* A list of parameter decls. */ tree parms; /* A list of structure, union and enum tags defined. */ vec<c_arg_tag, va_gc> *tags; /* A list of argument types to go in the FUNCTION_TYPE. */ tree types; /* A list of non-parameter decls (notably enumeration constants) defined with the parameters. */ tree others; /* A compound expression of VLA sizes from the parameters, or NULL. In a function definition, these are used to ensure that side-effects in sizes of arrays converted to pointers (such as a parameter int i[n++]) take place; otherwise, they are ignored. */ tree pending_sizes; /* True when these arguments had [*]. */ BOOL_BITFIELD had_vla_unspec : 1; }; /* A declarator. */ struct c_declarator { /* The kind of declarator. */ enum c_declarator_kind kind; location_t id_loc; /* Currently only set for cdk_id, cdk_array. */ /* Except for cdk_id, the contained declarator. For cdk_id, NULL. */ struct c_declarator *declarator; union { /* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract declarator. */ tree id; /* For functions. */ struct c_arg_info *arg_info; /* For arrays. */ struct { /* The array dimension, or NULL for [] and [*]. */ tree dimen; /* The qualifiers inside []. */ int quals; /* The attributes (currently ignored) inside []. */ tree attrs; /* Whether [static] was used. */ BOOL_BITFIELD static_p : 1; /* Whether [*] was used. */ BOOL_BITFIELD vla_unspec_p : 1; } array; /* For pointers, the qualifiers on the pointer type. */ int pointer_quals; /* For attributes. */ tree attrs; } u; }; /* A type name. */ struct c_type_name { /* The declaration specifiers. */ struct c_declspecs *specs; /* The declarator. */ struct c_declarator *declarator; }; /* A parameter. */ struct c_parm { /* The declaration specifiers, minus any prefix attributes. */ struct c_declspecs *specs; /* The attributes. */ tree attrs; /* The declarator. */ struct c_declarator *declarator; /* The location of the parameter. */ location_t loc; }; /* Used when parsing an enum. Initialized by start_enum. */ struct c_enum_contents { /* While defining an enum type, this is 1 plus the last enumerator constant value. */ tree enum_next_value; /* Nonzero means that there was overflow computing enum_next_value. */ int enum_overflow; }; /* A type of reference to a static identifier in an inline function. */ enum c_inline_static_type { /* Identifier with internal linkage used in function that may be an inline definition (i.e., file-scope static). */ csi_internal, /* Modifiable object with static storage duration defined in function that may be an inline definition (i.e., local static). 
*/ csi_modifiable }; /* in c-parser.c */ extern void c_parse_init (void); extern bool c_keyword_starts_typename (enum rid keyword); /* in c-aux-info.c */ extern void gen_aux_info_record (tree, int, int, int); /* in c-decl.c */ struct c_spot_bindings; struct c_struct_parse_info; extern struct obstack parser_obstack; extern tree c_break_label; extern tree c_cont_label; extern bool global_bindings_p (void); extern tree pushdecl (tree); extern void push_scope (void); extern tree pop_scope (void); extern void c_bindings_start_stmt_expr (struct c_spot_bindings *); extern void c_bindings_end_stmt_expr (struct c_spot_bindings *); extern void record_inline_static (location_t, tree, tree, enum c_inline_static_type); extern void c_init_decl_processing (void); extern void c_print_identifier (FILE *, tree, int); extern int quals_from_declspecs (const struct c_declspecs *); extern struct c_declarator *build_array_declarator (location_t, tree, struct c_declspecs *, bool, bool); extern tree build_enumerator (location_t, location_t, struct c_enum_contents *, tree, tree); extern tree check_for_loop_decls (location_t, bool); extern void mark_forward_parm_decls (void); extern void declare_parm_level (void); extern void undeclared_variable (location_t, tree); extern tree lookup_label_for_goto (location_t, tree); extern tree declare_label (tree); extern tree define_label (location_t, tree); extern struct c_spot_bindings *c_get_switch_bindings (void); extern void c_release_switch_bindings (struct c_spot_bindings *); extern bool c_check_switch_jump_warnings (struct c_spot_bindings *, location_t, location_t); extern void finish_decl (tree, location_t, tree, tree, tree); extern tree finish_enum (tree, tree, tree); extern void finish_function (void); extern tree finish_struct (location_t, tree, tree, tree, struct c_struct_parse_info *); extern struct c_arg_info *build_arg_info (void); extern struct c_arg_info *get_parm_info (bool, tree); extern tree grokfield (location_t, struct c_declarator *, struct c_declspecs *, tree, tree *); extern tree groktypename (struct c_type_name *, tree *, bool *); extern tree grokparm (const struct c_parm *, tree *); extern tree implicitly_declare (location_t, tree); extern void keep_next_level (void); extern void pending_xref_error (void); extern void c_push_function_context (void); extern void c_pop_function_context (void); extern void push_parm_decl (const struct c_parm *, tree *); extern struct c_declarator *set_array_declarator_inner (struct c_declarator *, struct c_declarator *); extern tree c_builtin_function (tree); extern tree c_builtin_function_ext_scope (tree); extern void shadow_tag (const struct c_declspecs *); extern void shadow_tag_warned (const struct c_declspecs *, int); extern tree start_enum (location_t, struct c_enum_contents *, tree); extern bool start_function (struct c_declspecs *, struct c_declarator *, tree); extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool, tree); extern tree start_struct (location_t, enum tree_code, tree, struct c_struct_parse_info **); extern void store_parm_decls (void); extern void store_parm_decls_from (struct c_arg_info *); extern void temp_store_parm_decls (tree, tree); extern void temp_pop_parm_decls (void); extern tree xref_tag (enum tree_code, tree); extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree); extern struct c_parm *build_c_parm (struct c_declspecs *, tree, struct c_declarator *, location_t); extern struct c_declarator *build_attrs_declarator (tree, struct c_declarator *); 
extern struct c_declarator *build_function_declarator (struct c_arg_info *, struct c_declarator *); extern struct c_declarator *build_id_declarator (tree); extern struct c_declarator *make_pointer_declarator (struct c_declspecs *, struct c_declarator *); extern struct c_declspecs *build_null_declspecs (void); extern struct c_declspecs *declspecs_add_qual (source_location, struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_type (location_t, struct c_declspecs *, struct c_typespec); extern struct c_declspecs *declspecs_add_scspec (source_location, struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_attrs (source_location, struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_addrspace (source_location, struct c_declspecs *, addr_space_t); extern struct c_declspecs *declspecs_add_alignas (source_location, struct c_declspecs *, tree); extern struct c_declspecs *finish_declspecs (struct c_declspecs *); /* in c-objc-common.c */ extern bool c_objc_common_init (void); extern bool c_missing_noreturn_ok_p (tree); extern bool c_warn_unused_global_decl (const_tree); extern void c_initialize_diagnostics (diagnostic_context *); extern bool c_vla_unspec_p (tree x, tree fn); /* in c-typeck.c */ extern int in_alignof; extern int in_sizeof; extern int in_typeof; extern tree c_last_sizeof_arg; extern location_t c_last_sizeof_loc; extern struct c_switch *c_switch_stack; extern tree c_objc_common_truthvalue_conversion (location_t, tree); extern tree require_complete_type (location_t, tree); extern bool same_translation_unit_p (const_tree, const_tree); extern int comptypes (tree, tree); extern int comptypes_check_different_types (tree, tree, bool *); extern bool c_vla_type_p (const_tree); extern bool c_mark_addressable (tree, bool = false); extern void c_incomplete_type_error (location_t, const_tree, const_tree); extern tree c_type_promotes_to (tree); extern struct c_expr default_function_array_conversion (location_t, struct c_expr); extern struct c_expr default_function_array_read_conversion (location_t, struct c_expr); extern struct c_expr convert_lvalue_to_rvalue (location_t, struct c_expr, bool, bool); extern tree decl_constant_value_1 (tree, bool); extern void mark_exp_read (tree); extern tree composite_type (tree, tree); extern tree build_component_ref (location_t, tree, tree, location_t); extern tree build_array_ref (location_t, tree, tree); extern tree build_external_ref (location_t, tree, bool, tree *); extern void pop_maybe_used (bool); extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr); extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *); extern struct c_expr parser_build_unary_op (location_t, enum tree_code, struct c_expr); extern struct c_expr parser_build_binary_op (location_t, enum tree_code, struct c_expr, struct c_expr); extern tree build_conditional_expr (location_t, tree, bool, tree, tree, location_t, tree, tree, location_t); extern tree build_compound_expr (location_t, tree, tree); extern tree c_cast_expr (location_t, struct c_type_name *, tree); extern tree build_c_cast (location_t, tree, tree); extern void store_init_value (location_t, tree, tree, tree); extern void maybe_warn_string_init (location_t, tree, struct c_expr); extern void start_init (tree, tree, int, rich_location *); extern void finish_init (void); extern void really_start_incremental_init (tree); extern void finish_implicit_inits (location_t, struct obstack *); extern void push_init_level (location_t, int, struct obstack *); extern 
struct c_expr pop_init_level (location_t, int, struct obstack *, location_t); extern void set_init_index (location_t, tree, tree, struct obstack *); extern void set_init_label (location_t, tree, location_t, struct obstack *); extern void process_init_element (location_t, struct c_expr, bool, struct obstack *); extern tree build_compound_literal (location_t, tree, tree, bool, unsigned int); extern void check_compound_literal_type (location_t, struct c_type_name *); extern tree c_start_case (location_t, location_t, tree, bool); extern void c_finish_case (tree, tree); extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool); extern tree build_asm_stmt (tree, tree); extern int c_types_compatible_p (tree, tree); extern tree c_begin_compound_stmt (bool); extern tree c_end_compound_stmt (location_t, tree, bool); extern void c_finish_if_stmt (location_t, tree, tree, tree); extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, bool); extern tree c_begin_stmt_expr (void); extern tree c_finish_stmt_expr (location_t, tree); extern tree c_process_expr_stmt (location_t, tree); extern tree c_finish_expr_stmt (location_t, tree); extern tree c_finish_return (location_t, tree, tree); extern tree c_finish_bc_stmt (location_t, tree *, bool); extern tree c_finish_goto_label (location_t, tree); extern tree c_finish_goto_ptr (location_t, tree); extern tree c_expr_to_decl (tree, bool *, bool *); extern tree c_finish_omp_construct (location_t, enum tree_code, tree, tree); extern tree c_finish_oacc_data (location_t, tree, tree); extern tree c_finish_oacc_host_data (location_t, tree, tree); extern tree c_begin_omp_parallel (void); extern tree c_finish_omp_parallel (location_t, tree, tree); extern tree c_begin_omp_task (void); extern tree c_finish_omp_task (location_t, tree, tree); extern void c_finish_omp_cancel (location_t, tree); extern void c_finish_omp_cancellation_point (location_t, tree); extern tree c_finish_omp_clauses (tree, enum c_omp_region_type); extern tree c_build_va_arg (location_t, tree, location_t, tree); extern tree c_finish_transaction (location_t, tree, int); extern bool c_tree_equal (tree, tree); extern tree c_build_function_call_vec (location_t, vec<location_t>, tree, vec<tree, va_gc> *, vec<tree, va_gc> *); extern tree c_omp_clause_copy_ctor (tree, tree, tree); /* Set to 0 at beginning of a function definition, set to 1 if a return statement that specifies a return value is seen. */ extern int current_function_returns_value; /* Set to 0 at beginning of a function definition, set to 1 if a return statement with no argument is seen. */ extern int current_function_returns_null; /* Set to 0 at beginning of a function definition, set to 1 if a call to a noreturn function is seen. */ extern int current_function_returns_abnormally; /* In c-decl.c */ /* Tell the binding oracle what kind of binding we are looking for. */ enum c_oracle_request { C_ORACLE_SYMBOL, C_ORACLE_TAG, C_ORACLE_LABEL }; /* If this is non-NULL, then it is a "binding oracle" which can lazily create bindings when needed by the C compiler. The oracle is told the name and type of the binding to create. It can call pushdecl or the like to ensure the binding is visible; or do nothing, leaving the binding untouched. c-decl.c takes note of when the oracle has been called and will not call it again if it fails to create a given binding. 
*/ typedef void c_binding_oracle_function (enum c_oracle_request, tree identifier); extern c_binding_oracle_function *c_binding_oracle; extern void c_finish_incomplete_decl (tree); extern tree c_omp_reduction_id (enum tree_code, tree); extern tree c_omp_reduction_decl (tree); extern tree c_omp_reduction_lookup (tree, tree); extern tree c_check_omp_declare_reduction_r (tree *, int *, void *); extern void c_pushtag (location_t, tree, tree); extern void c_bind (location_t, tree, bool); extern bool tag_exists_p (enum tree_code, tree); /* In c-errors.c */ extern bool pedwarn_c90 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); extern bool pedwarn_c99 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); extern void set_c_expr_source_range (c_expr *expr, location_t start, location_t finish); extern void set_c_expr_source_range (c_expr *expr, source_range src_range); /* In c-fold.c */ extern vec<tree> incomplete_record_decls; #if CHECKING_P namespace selftest { extern void run_c_tests (void); } // namespace selftest #endif /* #if CHECKING_P */ #endif /* ! GCC_C_TREE_H */
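One design point worth pulling out of c-tree.h: struct c_expr caches a source_range because not every tree node carries its own location (constants, references to params and locals), and get_location() falls back to make_location over the cached range when EXPR_HAS_LOCATION is false. The stand-alone C analogue below is purely illustrative; the stand-in types are not GCC's, only the fallback pattern is.

/* Self-contained analogue of c_expr's location fallback (sketch only). */
#include <stdio.h>

typedef struct { int start, finish; } source_range_t;  /* stand-in for source_range */

typedef struct {
  int has_location;          /* stand-in for EXPR_HAS_LOCATION (value) */
  int value_location;        /* stand-in for EXPR_LOCATION (value) */
  source_range_t src_range;  /* cached range, as in struct c_expr */
} expr_t;

/* Mirrors c_expr::get_location: prefer the node's own location,
 * else reconstruct one from the cached source range. */
static int expr_get_location(const expr_t *e)
{
  return e->has_location ? e->value_location : e->src_range.start;
}

int main(void)
{
  expr_t constant = { 0, 0, { 10, 14 } };  /* constants carry no location */
  printf("location = %d\n", expr_get_location(&constant)); /* prints 10 */
  return 0;
}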
par_mgr.c
/*BHEADER********************************************************************** * Copyright (c) 2015, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision$ ***********************************************************************EHEADER*/ /****************************************************************************** * * Two-grid system solver * *****************************************************************************/ #include "_hypre_parcsr_ls.h" #include "par_amg.h" #include "par_mgr.h" #include <assert.h> /* Create */ void * hypre_MGRCreate() { hypre_ParMGRData *mgr_data; mgr_data = hypre_CTAlloc(hypre_ParMGRData, 1, HYPRE_MEMORY_HOST); /* block data */ (mgr_data -> block_size) = 1; (mgr_data -> num_coarse_indexes) = 1; (mgr_data -> block_num_coarse_indexes) = NULL; (mgr_data -> block_cf_marker) = NULL; /* general data */ (mgr_data -> max_num_coarse_levels) = 10; (mgr_data -> A_array) = NULL; (mgr_data -> P_array) = NULL; (mgr_data -> RT_array) = NULL; (mgr_data -> RAP) = NULL; (mgr_data -> CF_marker_array) = NULL; (mgr_data -> coarse_indices_lvls) = NULL; (mgr_data -> F_array) = NULL; (mgr_data -> U_array) = NULL; (mgr_data -> residual) = NULL; (mgr_data -> rel_res_norms) = NULL; (mgr_data -> Vtemp) = NULL; (mgr_data -> Ztemp) = NULL; (mgr_data -> Utemp) = NULL; (mgr_data -> Ftemp) = NULL; (mgr_data -> num_iterations) = 0; (mgr_data -> num_interp_sweeps) = 1; (mgr_data -> num_restrict_sweeps) = 1; (mgr_data -> trunc_factor) = 0.0; (mgr_data -> max_row_sum) = 0.9; (mgr_data -> strong_threshold) = 0.25; (mgr_data -> S_commpkg_switch) = 1.0; (mgr_data -> P_max_elmts) = 0; (mgr_data -> coarse_grid_solver) = NULL; (mgr_data -> coarse_grid_solver_setup) = NULL; (mgr_data -> coarse_grid_solver_solve) = NULL; (mgr_data -> global_smoother) = NULL; (mgr_data -> use_default_cgrid_solver) = 1; (mgr_data -> omega) = 1.; (mgr_data -> max_iter) = 20; (mgr_data -> tol) = 1.0e-7; (mgr_data -> relax_type) = 0; (mgr_data -> relax_order) = 1; (mgr_data -> interp_type) = 2; (mgr_data -> restrict_type) = 0; (mgr_data -> num_relax_sweeps) = 1; (mgr_data -> relax_weight) = 1.0; (mgr_data -> logging) = 0; (mgr_data -> print_level) = 0; (mgr_data -> l1_norms) = NULL; (mgr_data -> reserved_coarse_size) = 0; (mgr_data -> reserved_coarse_indexes) = NULL; (mgr_data -> reserved_Cpoint_local_indexes) = NULL; (mgr_data -> diaginv) = NULL; (mgr_data -> global_smooth_iters) = 1; (mgr_data -> global_smooth_type) = 0; (mgr_data -> set_non_Cpoints_to_F) = 0; (mgr_data -> Frelax_method) = 0; (mgr_data -> FrelaxVcycleData) = NULL; (mgr_data -> max_local_lvls) = 10; (mgr_data -> print_coarse_system) = 0; return (void *) mgr_data; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ /* Destroy */ HYPRE_Int hypre_MGRDestroy( void *data ) { hypre_ParMGRData * mgr_data = (hypre_ParMGRData*) data; HYPRE_Int i; HYPRE_Int num_coarse_levels = (mgr_data -> num_coarse_levels); /* block info data */ if ((mgr_data -> block_cf_marker)) { for (i=0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); } } 
hypre_TFree((mgr_data -> block_cf_marker), HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } if(mgr_data -> block_num_coarse_indexes) { hypre_TFree(mgr_data -> block_num_coarse_indexes, HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* final residual vector */ if((mgr_data -> residual)) { hypre_ParVectorDestroy( (mgr_data -> residual) ); (mgr_data -> residual) = NULL; } if((mgr_data -> rel_res_norms)) { hypre_TFree( (mgr_data -> rel_res_norms) , HYPRE_MEMORY_HOST); (mgr_data -> rel_res_norms) = NULL; } /* temp vectors for solve phase */ if((mgr_data -> Vtemp)) { hypre_ParVectorDestroy( (mgr_data -> Vtemp) ); (mgr_data -> Vtemp) = NULL; } if((mgr_data -> Ztemp)) { hypre_ParVectorDestroy( (mgr_data -> Ztemp) ); (mgr_data -> Ztemp) = NULL; } if((mgr_data -> Utemp)) { hypre_ParVectorDestroy( (mgr_data -> Utemp) ); (mgr_data -> Utemp) = NULL; } if((mgr_data -> Ftemp)) { hypre_ParVectorDestroy( (mgr_data -> Ftemp) ); (mgr_data -> Ftemp) = NULL; } /* coarse grid solver */ if((mgr_data -> use_default_cgrid_solver)) { if((mgr_data -> coarse_grid_solver)) hypre_BoomerAMGDestroy( (mgr_data -> coarse_grid_solver) ); (mgr_data -> coarse_grid_solver) = NULL; } /* l1_norms */ if ((mgr_data -> l1_norms)) { for (i=0; i < (num_coarse_levels); i++) if ((mgr_data -> l1_norms)[i]) hypre_TFree((mgr_data -> l1_norms)[i], HYPRE_MEMORY_HOST); hypre_TFree((mgr_data -> l1_norms), HYPRE_MEMORY_HOST); } /* coarse_indices_lvls */ if ((mgr_data -> coarse_indices_lvls)) { for (i=0; i < (num_coarse_levels); i++) if ((mgr_data -> coarse_indices_lvls)[i]) hypre_TFree((mgr_data -> coarse_indices_lvls)[i], HYPRE_MEMORY_HOST); hypre_TFree((mgr_data -> coarse_indices_lvls), HYPRE_MEMORY_HOST); } /* linear system and cf marker array */ if(mgr_data -> A_array || mgr_data -> P_array || mgr_data -> RT_array || mgr_data -> CF_marker_array) { for (i=1; i < num_coarse_levels+1; i++) { hypre_ParVectorDestroy((mgr_data -> F_array)[i]); hypre_ParVectorDestroy((mgr_data -> U_array)[i]); if ((mgr_data -> P_array)[i-1]) hypre_ParCSRMatrixDestroy((mgr_data -> P_array)[i-1]); if ((mgr_data -> RT_array)[i-1]) hypre_ParCSRMatrixDestroy((mgr_data -> RT_array)[i-1]); hypre_TFree((mgr_data -> CF_marker_array)[i-1], HYPRE_MEMORY_HOST); } for (i=1; i < (num_coarse_levels); i++) { if ((mgr_data -> A_array)[i]) hypre_ParCSRMatrixDestroy((mgr_data -> A_array)[i]); } } if((mgr_data -> F_array)) { hypre_TFree((mgr_data -> F_array), HYPRE_MEMORY_HOST); (mgr_data -> F_array) = NULL; } if((mgr_data -> U_array)) { hypre_TFree((mgr_data -> U_array), HYPRE_MEMORY_HOST); (mgr_data -> U_array) = NULL; } if((mgr_data -> A_array)) { hypre_TFree((mgr_data -> A_array), HYPRE_MEMORY_HOST); (mgr_data -> A_array) = NULL; } if((mgr_data -> P_array)) { hypre_TFree((mgr_data -> P_array), HYPRE_MEMORY_HOST); (mgr_data -> P_array) = NULL; } if((mgr_data -> RT_array)) { hypre_TFree((mgr_data -> RT_array), HYPRE_MEMORY_HOST); (mgr_data -> RT_array) = NULL; } if((mgr_data -> CF_marker_array)) { hypre_TFree((mgr_data -> CF_marker_array), HYPRE_MEMORY_HOST); (mgr_data -> CF_marker_array) = NULL; } if((mgr_data -> reserved_Cpoint_local_indexes)) { hypre_TFree((mgr_data -> reserved_Cpoint_local_indexes), HYPRE_MEMORY_HOST); (mgr_data -> reserved_Cpoint_local_indexes) = NULL; } /* data for V-cycle F-relaxation */ if (mgr_data -> FrelaxVcycleData) { for (i = 0; i < num_coarse_levels; i++) { if ((mgr_data -> FrelaxVcycleData)[i]) { hypre_MGRDestroyFrelaxVcycleData((mgr_data -> FrelaxVcycleData)[i]); (mgr_data -> FrelaxVcycleData)[i] = NULL; } } 
hypre_TFree(mgr_data -> FrelaxVcycleData, HYPRE_MEMORY_HOST); mgr_data -> FrelaxVcycleData = NULL; } /* data for reserved coarse nodes */ if(mgr_data -> reserved_coarse_indexes) { hypre_TFree(mgr_data -> reserved_coarse_indexes, HYPRE_MEMORY_HOST); (mgr_data -> reserved_coarse_indexes) = NULL; } /* coarse level matrix - RAP */ if ((mgr_data -> RAP)) hypre_ParCSRMatrixDestroy((mgr_data -> RAP)); if ((mgr_data -> diaginv)) hypre_TFree((mgr_data -> diaginv), HYPRE_MEMORY_HOST); /* mgr data */ hypre_TFree(mgr_data, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* Create data for V-cycle F-relaxation */ void * hypre_MGRCreateFrelaxVcycleData() { hypre_ParAMGData *vdata = hypre_CTAlloc(hypre_ParAMGData, 1, HYPRE_MEMORY_HOST); hypre_ParAMGDataAArray(vdata) = NULL; hypre_ParAMGDataPArray(vdata) = NULL; hypre_ParAMGDataFArray(vdata) = NULL; hypre_ParAMGDataCFMarkerArray(vdata) = NULL; hypre_ParAMGDataVtemp(vdata) = NULL; hypre_ParAMGDataAMat(vdata) = NULL; hypre_ParAMGDataBVec(vdata) = NULL; hypre_ParAMGDataZtemp(vdata) = NULL; hypre_ParAMGDataCommInfo(vdata) = NULL; hypre_ParAMGDataUArray(vdata) = NULL; hypre_ParAMGDataNewComm(vdata) = hypre_MPI_COMM_NULL; hypre_ParAMGDataNumLevels(vdata) = 0; hypre_ParAMGDataMaxLevels(vdata) = 10; return (void *) vdata; } /* Destroy data for V-cycle F-relaxation */ HYPRE_Int hypre_MGRDestroyFrelaxVcycleData( void *data ) { hypre_ParAMGData * vdata = (hypre_ParAMGData*) data; HYPRE_Int i; HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(vdata); MPI_Comm new_comm = hypre_ParAMGDataNewComm(vdata); for (i=1; i < num_levels; i++) { hypre_ParVectorDestroy(hypre_ParAMGDataFArray(vdata)[i]); hypre_ParVectorDestroy(hypre_ParAMGDataUArray(vdata)[i]); if (hypre_ParAMGDataAArray(vdata)[i]) hypre_ParCSRMatrixDestroy(hypre_ParAMGDataAArray(vdata)[i]); if (hypre_ParAMGDataPArray(vdata)[i-1]) hypre_ParCSRMatrixDestroy(hypre_ParAMGDataPArray(vdata)[i-1]); hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[i-1], HYPRE_MEMORY_HOST); } /* see comments in par_coarsen.c regarding special case for CF_marker */ if (num_levels == 1) { hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[0], HYPRE_MEMORY_HOST); } /* Points to vtemp of mgr_data, which is already destroyed */ // hypre_ParVectorDestroy(hypre_ParAMGDataVtemp(vdata)); hypre_TFree(hypre_ParAMGDataFArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataUArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataAArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataPArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata), HYPRE_MEMORY_HOST); /* Points to ztemp of mgr_data, which is already destroyed */ /* if (hypre_ParAMGDataZtemp(vdata)) hypre_ParVectorDestroy(hypre_ParAMGDataZtemp(vdata)); */ if (hypre_ParAMGDataAMat(vdata)) hypre_TFree(hypre_ParAMGDataAMat(vdata), HYPRE_MEMORY_HOST); if (hypre_ParAMGDataBVec(vdata)) hypre_TFree(hypre_ParAMGDataBVec(vdata), HYPRE_MEMORY_HOST); if (hypre_ParAMGDataCommInfo(vdata)) hypre_TFree(hypre_ParAMGDataCommInfo(vdata), HYPRE_MEMORY_HOST); if (new_comm != hypre_MPI_COMM_NULL) { hypre_MPI_Comm_free (&new_comm); } hypre_TFree(vdata, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* Set C-point variables for each reduction level */ /* Currently not implemented */ HYPRE_Int hypre_MGRSetReductionLevelCpoints( void *mgr_vdata, HYPRE_Int nlevels, HYPRE_Int *num_coarse_points, HYPRE_Int **level_coarse_indexes) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_coarse_levels) = nlevels; (mgr_data -> num_coarse_per_level) =
num_coarse_points; (mgr_data -> level_coarse_indexes) = level_coarse_indexes; return hypre_error_flag; } /* Initialize some data */ /* Set whether non-coarse points on each level should be explicitly tagged as F-points */ HYPRE_Int hypre_MGRSetNonCpointsToFpoints( void *mgr_vdata, HYPRE_Int nonCptToFptFlag) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> set_non_Cpoints_to_F) = nonCptToFptFlag; return hypre_error_flag; } /* Initialize/ set block data information */ HYPRE_Int hypre_MGRSetCpointsByBlock( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_Int *block_num_coarse_points, HYPRE_Int **block_coarse_indexes) { HYPRE_Int i,j; HYPRE_Int **block_cf_marker = NULL; HYPRE_Int *block_num_coarse_indexes = NULL; hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; /* free block cf_marker data if not previously destroyed */ if((mgr_data -> block_cf_marker) != NULL) { for (i=0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker)[i] = NULL; } } hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } if((mgr_data -> block_num_coarse_indexes)) { hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* store block cf_marker */ block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_levels; i++) { block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST); memset(block_cf_marker[i], FMRK, block_size*sizeof(HYPRE_Int)); } for (i = 0; i < max_num_levels; i++) { for(j=0; j<block_num_coarse_points[i]; j++) { (block_cf_marker[i])[block_coarse_indexes[i][j]] = CMRK; } } /* store block_num_coarse_points */ if(max_num_levels > 0) { block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST); for(i=0; i<max_num_levels; i++) block_num_coarse_indexes[i] = block_num_coarse_points[i]; } /* set block data */ (mgr_data -> max_num_coarse_levels) = max_num_levels; (mgr_data -> block_size) = block_size; (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes; (mgr_data -> block_cf_marker) = block_cf_marker; return hypre_error_flag; } /*Set number of points that remain part of the coarse grid throughout the hierarchy */ HYPRE_Int hypre_MGRSetReservedCoarseNodes(void *mgr_vdata, HYPRE_Int reserved_coarse_size, HYPRE_Int *reserved_cpt_index) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_BigInt *reserved_coarse_indexes = NULL; HYPRE_Int i; if (!mgr_data) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! 
MGR object empty!\n"); return hypre_error_flag; } if(reserved_coarse_size < 0) { hypre_error_in_arg(2); return hypre_error_flag; } /* free data not previously destroyed */ if((mgr_data -> reserved_coarse_indexes)) { hypre_TFree((mgr_data -> reserved_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> reserved_coarse_indexes) = NULL; } /* set reserved coarse nodes */ if(reserved_coarse_size > 0) { reserved_coarse_indexes = hypre_CTAlloc(HYPRE_BigInt, reserved_coarse_size, HYPRE_MEMORY_HOST); for(i=0; i<reserved_coarse_size; i++) reserved_coarse_indexes[i] = reserved_cpt_index[i]; } (mgr_data -> reserved_coarse_size) = reserved_coarse_size; (mgr_data -> reserved_coarse_indexes) = reserved_coarse_indexes; return hypre_error_flag; } /* Set CF marker array */ HYPRE_Int hypre_MGRCoarsen(hypre_ParCSRMatrix *S, hypre_ParCSRMatrix *A, HYPRE_Int fixed_coarse_size, HYPRE_Int *fixed_coarse_indexes, HYPRE_Int debug_flag, HYPRE_Int **CF_marker, HYPRE_Int cflag) { HYPRE_Int *cf_marker, i, row, nc; HYPRE_Int *cindexes = fixed_coarse_indexes; HYPRE_Int nloc = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); /* If this is the last level, coarsen onto fixed coarse set */ if(cflag) { if(*CF_marker != NULL) { hypre_TFree(*CF_marker, HYPRE_MEMORY_HOST); } cf_marker = hypre_CTAlloc(HYPRE_Int, nloc, HYPRE_MEMORY_HOST); memset(cf_marker, FMRK, nloc*sizeof(HYPRE_Int)); /* first mark fixed coarse set */ nc = fixed_coarse_size; for(i = 0; i < nc; i++) { cf_marker[cindexes[i]] = CMRK; } } else { /* First coarsen to get initial CF splitting. * This is then followed by updating the CF marker to pass * coarse information to the next levels. NOTE: It may be * convenient to implement this way (allows the use of multiple * coarsening strategies without changing too much code), * but not necessarily the best option, compared to initializing * CF_marker first and then coarsening on subgraph which excludes * the initialized coarse nodes. */ hypre_BoomerAMGCoarsen(S, A, 0, debug_flag, &cf_marker); /* Update CF_marker to correct Cpoints marked as Fpoints. */ nc = fixed_coarse_size; for(i = 0; i < nc; i++) { cf_marker[cindexes[i]] = CMRK; } /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate * between type of F-points (example Ruge coarsening). We do not need that distinction here. */ for (row = 0; row <nloc; row++) { if(cf_marker[row] == CMRK) continue; cf_marker[row] = FMRK; } #if 0 /* IMPORTANT: Update coarse_indexes array to define the positions of the fixed coarse points * in the next level. */ nc = 0; index_i = 0; for (row = 0; row <nloc; row++) { /* loop through new c-points */ if(cf_marker[row] == CMRK) nc++; else if(cf_marker[row] == S_CMRK) { /* previously marked c-point is part of fixed coarse set. Track its current local index */ cindexes[index_i++] = nc; /* reset c-point from S_CMRK to CMRK */ cf_marker[row] = CMRK; nc++; } /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate * between type of F-points (example Ruge coarsening). We do not need that distinction here. 
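 * (In this disabled variant, nc counts every C-point while index_i records where
 * each previously fixed coarse point lands in the next level's ordering.)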
*/ else { cf_marker[row] = FMRK; } } /* check if this should be last level */ if( nc == fixed_coarse_size) last_level = 1; //printf(" nc = %d and fixed coarse size = %d \n", nc, fixed_coarse_size); #endif } /* set CF_marker */ *CF_marker = cf_marker; return hypre_error_flag; } /* Interpolation for MGR - Adapted from BoomerAMGBuildInterp */ HYPRE_Int hypre_MGRBuildP( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int method, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real *a_diag; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; // HYPRE_Int jj_begin_row,jj_begin_row_offd; // HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_BigInt *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); #ifdef HYPRE_NO_GLOBAL_PARTITION //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) 
int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ else { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. 
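 * (The per-thread counts gathered above are converted to running sums below, so
 * coarse_counter[num_threads-1] ends up holding the total number of coarse
 * points, and jj_count / jj_count_offd the totals of P_diag and P_offd nonzeros.)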
*-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_SHARED); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_SHARED); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_SHARED); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_SHARED); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. *-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { fine_to_coarse[i] += coarse_shift; } } /* index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) big_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]+ my_first_cpt; } comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } */ if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; /*----------------------------------------------------------------------- * Loop over fine grid points. 
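 * (Second pass: the reciprocal of each diagonal entry of A is cached in a_diag,
 * then each thread fills its contiguous slice of P rows using the offsets
 * computed in the first pass.)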
*-----------------------------------------------------------------------*/ a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if ( i==i1 ) /* diagonal of A only */ { a_diag[i] = 1.0/A_diag_data[jj]; } } } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); else P_marker_offd = NULL; for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; if(method == 0) { P_diag_data[jj_counter] = 0.0; } else if (method == 1) { P_diag_data[jj_counter] = - A_diag_data[jj]; } else if (method == 2) { P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i]; } jj_counter++; } } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. 
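 * (P_offd_j temporarily holds local off-diagonal column indices; they are
 * compressed and remapped through tmp_map_offd once the matrix is assembled.)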
*-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; if(method == 0) { P_offd_data[jj_counter_offd] = 0.0; } else if (method == 1) { P_offd_data[jj_counter_offd] = - A_offd_data[jj]; } else if (method == 2) { P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i]; } jj_counter_offd++; } } } } P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } hypre_TFree(a_diag, HYPRE_MEMORY_HOST); P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < num_cols_A_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while (P_marker[index]==0) index++; tmp_map_offd[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); //hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return(0); } /* Interpolation for MGR - Dynamic Row Sum method */ HYPRE_Int hypre_MGRBuildPDRS( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = 
hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real *a_diag; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; // HYPRE_Int jj_begin_row,jj_begin_row_offd; // HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_Int *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); #ifdef HYPRE_NO_GLOBAL_PARTITION //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
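 * (As in hypre_MGRBuildP, each thread counts the P entries for its contiguous
 * block of fine rows; the per-thread counts are combined into offsets below.)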
*-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ else { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } /*-------------------------------------------------------------------- * Set up the indexes for the DRS method *--------------------------------------------------------------------*/ } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. 
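 * (jj_counter and jj_counter_offd are reset here; in the fill pass each thread
 * re-derives its starting offsets from the prefix-summed jj_count arrays.)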
*-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. *-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) fine_to_coarse[i] += coarse_shift; } /*index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); }*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if ( i==i1 ) /* diagonal of A only */ { a_diag[i] = 1.0/A_diag_data[jj]; } } } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); else P_marker_offd = NULL; for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. 
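 * (The row then consists of the single entry P(i, fine_to_coarse[i]) = 1.)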
*--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i]; jj_counter++; } } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i]; jj_counter_offd++; } } } } P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } hypre_TFree(a_diag, HYPRE_MEMORY_HOST); P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < num_cols_A_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while (P_marker[index]==0) index++; tmp_map_offd[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, 
HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); // hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return(0); } /* Setup interpolation operator */ HYPRE_Int hypre_MGRBuildInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P, HYPRE_Int last_level, HYPRE_Int method, HYPRE_Int numsweeps) { // HYPRE_Int i; hypre_ParCSRMatrix *P_ptr = NULL; // HYPRE_Real jac_trunc_threshold = trunc_factor; // HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold; /* Build interpolation operator using (hypre default) */ if(!last_level) { hypre_MGRBuildP( A,CF_marker,num_cpts_global,2,debug_flag,&P_ptr); } /* Do Jacobi interpolation for last level */ else { if (method <3) { hypre_MGRBuildP( A,CF_marker,num_cpts_global,method,debug_flag,&P_ptr); /* Could do a few sweeps of Jacobi to further improve P */ //for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, 0, jac_trunc_threshold, jac_trunc_threshold_minus ); } else { /* Classical modified interpolation */ hypre_BoomerAMGBuildInterp(A, CF_marker, S, num_cpts_global,1, NULL,debug_flag, trunc_factor, max_elmts, col_offd_S_to_A, &P_ptr); /* Do k steps of Jacobi build W for P = [-W I]. * Note that BoomerAMGJacobiInterp assumes you have some initial P, * hence we need to initialize P as above, before calling this routine. * If numsweeps = 0, the following step is skipped and P is returned as is. 
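 * (Each extra sweep costs roughly one pass over the rows of P, so numsweeps
 * trades setup time for interpolation quality.)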
* Looping here is equivalent to improving P by Jacobi interpolation */ // for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, // 0, jac_trunc_threshold, // jac_trunc_threshold_minus ); } } /* set pointer to P */ *P = P_ptr; return hypre_error_flag; } void hypre_blas_smat_inv_n4 (HYPRE_Real *a) { const HYPRE_Real a11 = a[0], a12 = a[1], a13 = a[2], a14 = a[3]; const HYPRE_Real a21 = a[4], a22 = a[5], a23 = a[6], a24 = a[7]; const HYPRE_Real a31 = a[8], a32 = a[9], a33 = a[10], a34 = a[11]; const HYPRE_Real a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15]; const HYPRE_Real M11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; const HYPRE_Real M12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; const HYPRE_Real M13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; const HYPRE_Real M14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; const HYPRE_Real M21 = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 - a21*a33*a44 - a23*a34*a41 - a24*a31*a43; const HYPRE_Real M22 = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 - a11*a34*a43 - a13*a31*a44 - a14*a33*a41; const HYPRE_Real M23 = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 - a11*a23*a44 - a13*a24*a41 - a14*a21*a43; const HYPRE_Real M24 = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 - a11*a24*a33 - a13*a21*a34 - a14*a23*a31; const HYPRE_Real M31 = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 - a21*a34*a42 - a22*a31*a44 - a24*a32*a41; const HYPRE_Real M32 = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 - a11*a32*a44 - a12*a34*a41 - a14*a31*a42; const HYPRE_Real M33 = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 - a11*a24*a42 - a12*a21*a44 - a14*a22*a41; const HYPRE_Real M34 = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 - a11*a22*a34 - a12*a24*a31 - a14*a21*a32; const HYPRE_Real M41 = a21*a33*a42 + a22*a31*a43 + a23*a32*a41 - a21*a32*a43 - a22*a33*a41 - a23*a31*a42; const HYPRE_Real M42 = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 - a11*a33*a42 - a12*a31*a43 - a13*a32*a41; const HYPRE_Real M43 = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 - a11*a22*a43 - a12*a23*a41 - a13*a21*a42; const HYPRE_Real M44 = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 - a11*a23*a32 - a12*a21*a33 - a13*a22*a31; const HYPRE_Real det = a11*M11 + a12*M21 + a13*M31 + a14*M41; HYPRE_Real det_inv; //if ( fabs(det) < 1e-22 ) { /* there should be no print statements that can't be turned off. Is this an error? */ //hypre_fprintf(stderr, "### WARNING: Matrix is nearly singular! 
det = %e\n", det); /* printf("##----------------------------------------------\n"); printf("## %12.5e %12.5e %12.5e \n", a0, a1, a2); printf("## %12.5e %12.5e %12.5e \n", a3, a4, a5); printf("## %12.5e %12.5e %12.5e \n", a5, a6, a7); printf("##----------------------------------------------\n"); getchar(); */ //} det_inv = 1.0/det; a[0] = M11*det_inv; a[1] = M12*det_inv; a[2] = M13*det_inv; a[3] = M14*det_inv; a[4] = M21*det_inv; a[5] = M22*det_inv; a[6] = M23*det_inv; a[7] = M24*det_inv; a[8] = M31*det_inv; a[9] = M32*det_inv; a[10] = M33*det_inv; a[11] = M34*det_inv; a[12] = M41*det_inv; a[13] = M42*det_inv; a[14] = M43*det_inv; a[15] = M44*det_inv; } void hypre_blas_mat_inv(HYPRE_Real *a, HYPRE_Int n) { HYPRE_Int i,j,k,l,u,kn,in; HYPRE_Real alinv; if (n == 4) { hypre_blas_smat_inv_n4(a); } else { for (k=0; k<n; ++k) { kn = k*n; l = kn+k; //if (fabs(a[l]) < SMALLREAL) { // printf("### WARNING: Diagonal entry is close to zero!"); // printf("### WARNING: diag_%d=%e\n", k, a[l]); // a[l] = SMALLREAL; //} alinv = 1.0/a[l]; a[l] = alinv; for (j=0; j<k; ++j) { u = kn+j; a[u] *= alinv; } for (j=k+1; j<n; ++j) { u = kn+j; a[u] *= alinv; } for (i=0; i<k; ++i) { in = i*n; for (j=0; j<n; ++j) if (j!=k) { u = in+j; a[u] -= a[in+k]*a[kn+j]; } // end if (j!=k) } for (i=k+1; i<n; ++i) { in = i*n; for (j=0; j<n; ++j) if (j!=k) { u = in+j; a[u] -= a[in+k]*a[kn+j]; } // end if (j!=k) } for (i=0; i<k; ++i) { u=i*n+k; a[u] *= -alinv; } for (i=k+1; i<n; ++i) { u=i*n+k; a[u] *= -alinv; } } // end for (k=0; k<n; ++k) }// end if } HYPRE_Int hypre_block_jacobi_scaling(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **B_ptr, void *mgr_vdata, HYPRE_Int debug_flag) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int num_procs, my_id; HYPRE_Int blk_size = (mgr_data -> block_size); HYPRE_Int reserved_coarse_size = (mgr_data -> reserved_coarse_size); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_ParCSRMatrix *B; hypre_CSRMatrix *B_diag; HYPRE_Real *B_diag_data; HYPRE_Int *B_diag_i; HYPRE_Int *B_diag_j; hypre_CSRMatrix *B_offd; HYPRE_Int i,ii; HYPRE_Int j,jj; HYPRE_Int k; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int n_block, left_size,inv_size; // HYPRE_Real wall_time; /* for debugging instrumentation */ HYPRE_Int bidx,bidxm1,bidxp1; HYPRE_Real * diaginv; const HYPRE_Int nb2 = blk_size*blk_size; HYPRE_Int block_scaling_error = 0; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); // HYPRE_Int num_threads = hypre_NumThreads(); //printf("n = %d\n",n); if (my_id == num_procs) { n_block = (n - reserved_coarse_size) / blk_size; left_size = n - blk_size*n_block; } else { n_block = n / blk_size; left_size = n - blk_size*n_block; } inv_size = nb2*n_block + left_size*left_size; //printf("inv_size = %d\n",inv_size); hypre_blockRelax_setup(A,blk_size,reserved_coarse_size,&(mgr_data -> diaginv)); // if (debug_flag==4) wall_time = time_getWallclockSeconds(); /*----------------------------------------------------------------------- * First Pass: Determine size of B and fill in *-----------------------------------------------------------------------*/ B_diag_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST); B_diag_j = hypre_CTAlloc(HYPRE_Int, inv_size, HYPRE_MEMORY_HOST); B_diag_data = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); B_diag_i[n] = inv_size; //B_offd_i = 
hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST); //B_offd_j = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); //B_offd_data = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST); //B_offd_i[n] = 1; /*----------------------------------------------------------------- * Get all the diagonal sub-blocks *-----------------------------------------------------------------*/ diaginv = hypre_CTAlloc(HYPRE_Real, nb2, HYPRE_MEMORY_HOST); //printf("n_block = %d\n",n_block); for (i = 0;i < n_block; i++) { bidxm1 = i*blk_size; bidxp1 = (i+1)*blk_size; for (k = 0;k < blk_size; k++) { for (j = 0;j < blk_size; j++) { bidx = k*blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++) { jj = A_diag_j[ii]; if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL) { bidx = k*blk_size + jj - bidxm1; //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx); diaginv[bidx] = A_diag_data[ii]; } } } /* for (k = 0;k < blk_size; k++) */ /* { */ /* for (j = 0;j < blk_size; j++) */ /* { */ /* bidx = k*blk_size + j; */ /* printf("diaginv[%d] = %e\n",bidx,diaginv[bidx]); */ /* } */ /* } */ hypre_blas_mat_inv(diaginv, blk_size); for (k = 0;k < blk_size; k++) { B_diag_i[i*blk_size+k] = i*nb2 + k*blk_size; //B_offd_i[i*nb2+k] = 0; for (j = 0;j < blk_size; j++) { bidx = i*nb2 + k*blk_size + j; B_diag_j[bidx] = i*blk_size + j; B_diag_data[bidx] = diaginv[k*blk_size + j]; } } } //printf("Before create\n"); B = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixGlobalNumCols(A), hypre_ParCSRMatrixRowStarts(A), hypre_ParCSRMatrixColStarts(A), 0, inv_size, 0); //printf("After create\n"); B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrixData(B_diag) = B_diag_data; hypre_CSRMatrixI(B_diag) = B_diag_i; hypre_CSRMatrixJ(B_diag) = B_diag_j; B_offd = hypre_ParCSRMatrixOffd(B); hypre_CSRMatrixData(B_offd) = NULL; hypre_CSRMatrixI(B_offd) = NULL; hypre_CSRMatrixJ(B_offd) = NULL; /* hypre_ParCSRMatrixOwnsRowStarts(B) = 0; */ *B_ptr = B; return(block_scaling_error); } HYPRE_Int hypre_block_jacobi (hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Real blk_size, HYPRE_Int n_block, HYPRE_Int left_size, HYPRE_Real *diaginv, hypre_ParVector *Vtemp) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Real *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Real *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local); HYPRE_Real *Vext_data = NULL; HYPRE_Real *v_buf_data; HYPRE_Int i, j, k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidx1; HYPRE_Int relax_error = 0; HYPRE_Int num_sends; HYPRE_Int index, start; HYPRE_Int num_procs, my_id; HYPRE_Real *res; const HYPRE_Int nb2 = blk_size*blk_size; hypre_MPI_Comm_size(comm,&num_procs); 
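/* One block-Jacobi sweep follows: Vtemp holds the previous iterate, Vext_data
 * receives off-processor values of u, and each block of u is updated with the
 * precomputed inverse diagonal sub-block applied to its local residual. */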
hypre_MPI_Comm_rank(comm,&my_id); // HYPRE_Int num_threads = hypre_NumThreads(); res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST); if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; //printf("u_old[%d] = %e\n",i,Vtemp_data[i]); } if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax points block by block *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { bidx = i*blk_size +j; res[j] = f_data[bidx]; for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx+1]; jj++) { ii = A_diag_j[jj]; res[j] -= A_diag_data[jj] * Vtemp_data[ii]; //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]); } for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx+1]; jj++) { ii = A_offd_j[jj]; res[j] -= A_offd_data[jj] * Vext_data[ii]; } //printf("%d: res = %e\n",bidx,res[j]); } for (j = 0;j < blk_size; j++) { bidx1 = i*blk_size +j; for (k = 0;k < blk_size; k++) { bidx = i*nb2 +j*blk_size+k; u_data[bidx1] += res[k]*diaginv[bidx]; //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]); } //printf("u[%d] = %e\n",bidx1,u_data[bidx1]); } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } hypre_TFree(res, HYPRE_MEMORY_HOST); return(relax_error); } /*Block smoother*/ HYPRE_Int hypre_blockRelax_setup(hypre_ParCSRMatrix *A, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Real **diaginvptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int i, j,k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidxm1,bidxp1; HYPRE_Int num_procs, my_id; const HYPRE_Int nb2 = blk_size*blk_size; HYPRE_Int n_block; HYPRE_Int left_size,inv_size; HYPRE_Real *diaginv = *diaginvptr; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); // HYPRE_Int num_threads = hypre_NumThreads(); if (my_id == num_procs) { n_block = (n - reserved_coarse_size) / blk_size; left_size = n - blk_size*n_block; } else { n_block = n / blk_size; left_size = n - blk_size*n_block; } inv_size = nb2*n_block + left_size*left_size; if (diaginv !=NULL) { hypre_TFree(diaginv, HYPRE_MEMORY_HOST); diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); } else { 
diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); } /*----------------------------------------------------------------- * Get all the diagonal sub-blocks *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { bidxm1 = i*blk_size; bidxp1 = (i+1)*blk_size; //printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1); for (k = 0;k < blk_size; k++) { for (j = 0;j < blk_size; j++) { bidx = i*nb2 + k*blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++) { jj = A_diag_j[ii]; if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL) { bidx = i*nb2 + k*blk_size + jj - bidxm1; //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx); diaginv[bidx] = A_diag_data[ii]; } } } } /* rows n_block*blk_size .. n-1 form one trailing left_size x left_size block, stored densely with stride left_size right after the n_block full blocks */ for (i = 0;i < left_size; i++) { for (j = 0;j < left_size; j++) { bidx = n_block*nb2 + i*left_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++) { jj = A_diag_j[ii]; if (jj >= n_block*blk_size) /* >= keeps the diagonal entry of the first leftover row */ { bidx = n_block*nb2 + i*left_size + (jj - n_block*blk_size); diaginv[bidx] = A_diag_data[ii]; } } } /*----------------------------------------------------------------- * compute the inverses of all the diagonal sub-blocks *-----------------------------------------------------------------*/ if (blk_size > 1) { for (i = 0;i < n_block; i++) { hypre_blas_mat_inv(diaginv+i*nb2, blk_size); } /* the leftover block starts right after the n_block full blocks */ hypre_blas_mat_inv(diaginv + n_block*nb2, left_size); } else { for (i = 0;i < n; i++) { // FIX-ME: zero-diagonal should be tested previously if (fabs(diaginv[i]) < SMALLREAL) diaginv[i] = 0.0; else diaginv[i] = 1.0 / diaginv[i]; } } *diaginvptr = diaginv; return 1; } HYPRE_Int hypre_blockRelax(hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int i, j,k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidxm1,bidxp1; HYPRE_Int relax_error = 0; HYPRE_Int num_procs, my_id; const HYPRE_Int nb2 = blk_size*blk_size; HYPRE_Int n_block; HYPRE_Int left_size,inv_size; HYPRE_Real *diaginv; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); // HYPRE_Int num_threads = hypre_NumThreads(); /* FIX-ME: ranks are 0-based, so this test is never true; it presumably means my_id == num_procs - 1, the rank holding the reserved coarse rows */ if (my_id == num_procs) { n_block = (n - reserved_coarse_size) / blk_size; left_size = n - blk_size*n_block; } else { n_block = n / blk_size; left_size = n - blk_size*n_block; } inv_size = nb2*n_block + left_size*left_size; diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------- * Get all the diagonal sub-blocks *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { bidxm1 = i*blk_size; bidxp1 = (i+1)*blk_size; //printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1); for (k = 0;k < blk_size; k++) { for (j = 0;j < blk_size; j++) { bidx = i*nb2 + k*blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++) { jj = A_diag_j[ii]; if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL) { bidx = i*nb2 + k*blk_size + jj - bidxm1; //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx); diaginv[bidx] = A_diag_data[ii]; } } } } /* trailing left_size x left_size block, indexed as in hypre_blockRelax_setup above */ for (i = 0;i < left_size; i++) { for (j = 0;j < left_size; j++) { bidx = n_block*nb2 + i*left_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++) { jj = A_diag_j[ii]; if (jj >= n_block*blk_size) { bidx = n_block*nb2 + i*left_size + (jj - n_block*blk_size); diaginv[bidx] = A_diag_data[ii]; } } } /* for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { for (k = 0;k < blk_size; k ++) { bidx = i*nb2 + j*blk_size + k; printf("%e\t",diaginv[bidx]); } printf("\n"); } printf("\n"); } */ /*----------------------------------------------------------------- * compute the inverses of all the diagonal sub-blocks *-----------------------------------------------------------------*/ if (blk_size > 1) { for (i = 0;i < n_block; i++) { hypre_blas_mat_inv(diaginv+i*nb2, blk_size); } /* invert the trailing leftover block */ hypre_blas_mat_inv(diaginv + n_block*nb2, left_size); /* for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { for (k = 0;k < blk_size; k ++) { bidx = i*nb2 + j*blk_size + k; printf("%e\t",diaginv[bidx]); } printf("\n"); } printf("\n"); } */ } else { for (i = 0;i < n; i++) { // FIX-ME: zero-diagonal should be tested previously if (fabs(diaginv[i]) < SMALLREAL) diaginv[i] = 0.0; else diaginv[i] = 1.0 / diaginv[i]; } } hypre_block_jacobi(A,f,u,blk_size,n_block,left_size,diaginv,Vtemp); /*----------------------------------------------------------------- * Free temporary memory *-----------------------------------------------------------------*/ hypre_TFree(diaginv, HYPRE_MEMORY_HOST); return(relax_error); } /* set coarse grid solver */ HYPRE_Int hypre_MGRSetCoarseSolver( void *mgr_vdata, HYPRE_Int (*coarse_grid_solver_solve)(void*,void*,void*,void*), HYPRE_Int (*coarse_grid_solver_setup)(void*,void*,void*,void*), void *coarse_grid_solver ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } (mgr_data -> coarse_grid_solver_solve) = coarse_grid_solver_solve; (mgr_data -> coarse_grid_solver_setup) = coarse_grid_solver_setup; (mgr_data -> coarse_grid_solver) = (HYPRE_Solver) coarse_grid_solver; (mgr_data -> use_default_cgrid_solver) = 0; return hypre_error_flag; } /* Set the maximum number of coarse levels. * maxcoarselevs = 1 yields the default 2-grid scheme. */ HYPRE_Int hypre_MGRSetMaxCoarseLevels( void *mgr_vdata, HYPRE_Int maxcoarselevs ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> max_num_coarse_levels) = maxcoarselevs; return hypre_error_flag; } /* Set the system block size */ HYPRE_Int hypre_MGRSetBlockSize( void *mgr_vdata, HYPRE_Int bsize ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> block_size) = bsize; return hypre_error_flag; } /* Set the relaxation type for the fine levels of the reduction. * Currently supports the following flavors of relaxation types * as described in the documentation: * relax_types 0 - 8, 13, 14, 18, 19, 98. * See par_relax.c and par_relax_more.c for more details. 
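 * (The chosen type is forwarded to the BoomerAMG relaxation routines used for
 * the F-point smoothing steps.)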
* */ HYPRE_Int hypre_MGRSetRelaxType( void *mgr_vdata, HYPRE_Int relax_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> relax_type) = relax_type; return hypre_error_flag; } /* Set the number of relaxation sweeps */ HYPRE_Int hypre_MGRSetNumRelaxSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_relax_sweeps) = nsweeps; return hypre_error_flag; } /* Set the F-relaxation strategy: 0=single level, 1=multi level */ HYPRE_Int hypre_MGRSetFRelaxMethod( void *mgr_vdata, HYPRE_Int relax_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> Frelax_method) = relax_method; return hypre_error_flag; } /* Set the restriction type used to compute the restriction operator */ HYPRE_Int hypre_MGRSetRestrictType( void *mgr_vdata, HYPRE_Int restrict_type) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> restrict_type) = restrict_type; return hypre_error_flag; } /* Set the number of sweeps used to compute the restriction operator */ HYPRE_Int hypre_MGRSetNumRestrictSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_restrict_sweeps) = nsweeps; return hypre_error_flag; } /* Set the interpolation type used to compute the interpolation operator */ HYPRE_Int hypre_MGRSetInterpType( void *mgr_vdata, HYPRE_Int interpType) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> interp_type) = interpType; return hypre_error_flag; } /* Set the number of Jacobi interpolation iterations * for computing interpolation operator */ HYPRE_Int hypre_MGRSetNumInterpSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_interp_sweeps) = nsweeps; return hypre_error_flag; } /* Set print level for mgr solver */ HYPRE_Int hypre_MGRSetPrintLevel( void *mgr_vdata, HYPRE_Int print_level ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> print_level) = print_level; return hypre_error_flag; } /* Set logging level for mgr solver */ HYPRE_Int hypre_MGRSetLogging( void *mgr_vdata, HYPRE_Int logging ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> logging) = logging; return hypre_error_flag; } /* Set max number of iterations for mgr solver */ HYPRE_Int hypre_MGRSetMaxIter( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> max_iter) = max_iter; return hypre_error_flag; } /* Set convergence tolerance for mgr solver */ HYPRE_Int hypre_MGRSetTol( void *mgr_vdata, HYPRE_Real tol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> tol) = tol; return hypre_error_flag; } /* Set max number of global smoothing iterations for mgr solver */ HYPRE_Int hypre_MGRSetMaxGlobalsmoothIters( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_iters) = max_iter; return hypre_error_flag; } /* Set the global smoother type for mgr solver */ HYPRE_Int hypre_MGRSetGlobalsmoothType( void *mgr_vdata, HYPRE_Int iter_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_type) = iter_type; return hypre_error_flag; } /* Get number of iterations for MGR solver */ HYPRE_Int hypre_MGRGetNumIterations( void *mgr_vdata, HYPRE_Int *num_iterations ) { hypre_ParMGRData *mgr_data = 
(hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *num_iterations = mgr_data->num_iterations; return hypre_error_flag; } /* Get residual norms for MGR solver */ HYPRE_Int hypre_MGRGetFinalRelativeResidualNorm( void *mgr_vdata, HYPRE_Real *res_norm ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *res_norm = mgr_data->final_rel_residual_norm; return hypre_error_flag; } HYPRE_Int hypre_MGRBuildAff( MPI_Comm comm, HYPRE_Int local_num_variables, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int *CF_marker, HYPRE_Int **coarse_dof_func_ptr, HYPRE_BigInt **coarse_pnts_global_ptr, hypre_ParCSRMatrix *A, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_f_ptr, hypre_ParCSRMatrix **A_ff_ptr ) { HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_num_variables, HYPRE_MEMORY_HOST); HYPRE_Int i; for (i = 0; i < local_num_variables; i++) { CF_marker_copy[i] = -CF_marker[i]; } hypre_BoomerAMGCoarseParms(comm, local_num_variables, 1, NULL, CF_marker_copy, coarse_dof_func_ptr, coarse_pnts_global_ptr); hypre_MGRBuildP(A, CF_marker_copy, (*coarse_pnts_global_ptr), 0, debug_flag, P_f_ptr); hypre_BoomerAMGBuildCoarseOperator(*P_f_ptr, A, *P_f_ptr, A_ff_ptr); hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST); return 0; } /* Get pointer to coarse grid matrix for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridMatrix( void *mgr_vdata, hypre_ParCSRMatrix **RAP ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> RAP == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Coarse grid matrix is NULL. Please make sure MGRSetup() is called \n"); return hypre_error_flag; } *RAP = mgr_data->RAP; return hypre_error_flag; } /* Get pointer to coarse grid solution for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridSolution( void *mgr_vdata, hypre_ParVector **sol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> U_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR solution array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *sol = mgr_data->U_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Get pointer to coarse grid solution for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridRHS( void *mgr_vdata, hypre_ParVector **rhs ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> F_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR RHS array is NULL. 
Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *rhs = mgr_data->F_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Print coarse grid linear system (for debugging)*/ HYPRE_Int hypre_MGRPrintCoarseSystem( void *mgr_vdata, HYPRE_Int print_flag) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; mgr_data->print_coarse_system = print_flag; return hypre_error_flag; } /* Print solver params */ HYPRE_Int hypre_MGRWriteSolverParams(void *mgr_vdata) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; hypre_printf("MGR Setup parameters: \n"); hypre_printf("Max number of coarse levels: %d\n", (mgr_data -> max_num_coarse_levels)); hypre_printf("Block size: %d\n", (mgr_data -> block_size)); hypre_printf("Number of coarse indexes: %d\n", (mgr_data -> num_coarse_indexes)); hypre_printf("reserved coarse nodes size: %d\n", (mgr_data -> reserved_coarse_size)); hypre_printf("\n MGR Solver Parameters: \n"); hypre_printf("F-relaxation Method: %d\n", (mgr_data -> Frelax_method)); hypre_printf("Relax type: %d\n", (mgr_data -> relax_type)); hypre_printf("Number of relax sweeps: %d\n", (mgr_data -> num_relax_sweeps)); hypre_printf("Interpolation type: %d\n", (mgr_data -> interp_type)); hypre_printf("Number of interpolation sweeps: %d\n", (mgr_data -> num_interp_sweeps)); hypre_printf("Restriction type: %d\n", (mgr_data -> restrict_type)); hypre_printf("Number of restriction sweeps: %d\n", (mgr_data -> num_restrict_sweeps)); hypre_printf("Global smoother type: %d\n", (mgr_data ->global_smooth_type)); hypre_printf("Number of global smoother sweeps: %d\n", (mgr_data ->global_smooth_iters)); hypre_printf("Max number of iterations: %d\n", (mgr_data -> max_iter)); hypre_printf("Stopping tolerance: %e\n", (mgr_data -> tol)); return hypre_error_flag; }
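The setters above all follow the same pattern: cast the opaque handle, assign one field, return hypre_error_flag. A minimal configuration sketch, using only functions defined in this file (creation and setup of the MGR handle itself happen elsewhere in hypre and are not shown):

/* Hedged sketch: chain the MGR setters on an existing solver handle. */
void example_configure_mgr(void *mgr_vdata)
{
   hypre_MGRSetMaxCoarseLevels(mgr_vdata, 1);   /* default 2-grid scheme */
   hypre_MGRSetBlockSize(mgr_vdata, 2);
   hypre_MGRSetRelaxType(mgr_vdata, 0);
   hypre_MGRSetNumRelaxSweeps(mgr_vdata, 1);
   hypre_MGRSetMaxIter(mgr_vdata, 20);
   hypre_MGRSetTol(mgr_vdata, 1e-6);
   hypre_MGRSetPrintLevel(mgr_vdata, 1);
}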
effects.c
#define _POSIX_C_SOURCE 200809 #define _XOPEN_SOURCE 700 #include <omp.h> #include <limits.h> #include <stdlib.h> #include <stdbool.h> #include <dlfcn.h> #include <string.h> #include <errno.h> #include <sys/wait.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #include <spawn.h> #include <time.h> #include <stdio.h> #include "effects.h" #include "log.h" // glib might or might not have already defined MIN, // depending on whether we have pixbuf or not... #ifndef MIN #define MIN(a, b) ((a) < (b) ? (a) : (b)) #endif extern char **environ; static int screen_size_to_pix(struct swaylock_effect_screen_pos size, int screensize, int scale) { if (size.is_percent) { return (size.pos / 100.0) * screensize; } else if (size.pos > 0) { return size.pos * scale; } else { return size.pos; } } static int screen_pos_to_pix(struct swaylock_effect_screen_pos pos, int screensize, int scale) { int actual; if (pos.is_percent) { actual = (pos.pos / 100.0) * screensize; } else { actual = pos.pos * scale; } if (actual < 0) { actual = screensize + actual; } return actual; } static const char *effect_name(struct swaylock_effect *effect) { switch (effect->tag) { case EFFECT_BLUR: return "blur"; case EFFECT_PIXELATE: return "pixelate"; case EFFECT_SCALE: return "scale"; case EFFECT_GREYSCALE: return "greyscale"; case EFFECT_VIGNETTE: return "vignette"; case EFFECT_COMPOSE: return "compose"; case EFFECT_CUSTOM: return effect->e.custom; } abort(); } static void screen_pos_pair_to_pix( struct swaylock_effect_screen_pos posx, struct swaylock_effect_screen_pos posy, int objwidth, int objheight, int screenwidth, int screenheight, int scale, int gravity, int *outx, int *outy) { int x = screen_pos_to_pix(posx, screenwidth, scale); int y = screen_pos_to_pix(posy, screenheight, scale); // Adjust X switch (gravity) { case EFFECT_COMPOSE_GRAV_CENTER: case EFFECT_COMPOSE_GRAV_N: case EFFECT_COMPOSE_GRAV_S: x -= objwidth / 2; break; case EFFECT_COMPOSE_GRAV_NW: case EFFECT_COMPOSE_GRAV_SW: case EFFECT_COMPOSE_GRAV_W: break; case EFFECT_COMPOSE_GRAV_NE: case EFFECT_COMPOSE_GRAV_SE: case EFFECT_COMPOSE_GRAV_E: x -= objwidth; break; } // Adjust Y switch (gravity) { case EFFECT_COMPOSE_GRAV_CENTER: case EFFECT_COMPOSE_GRAV_W: case EFFECT_COMPOSE_GRAV_E: y -= objheight / 2; break; case EFFECT_COMPOSE_GRAV_NW: case EFFECT_COMPOSE_GRAV_NE: case EFFECT_COMPOSE_GRAV_N: break; case EFFECT_COMPOSE_GRAV_SW: case EFFECT_COMPOSE_GRAV_SE: case EFFECT_COMPOSE_GRAV_S: y -= objheight; break; } *outx = x; *outy = y; } static uint32_t blend_pixels(float alpha, uint32_t srcpix, uint32_t destpix) { uint8_t srcr = (srcpix & 0x00ff0000) >> 16; uint8_t destr = (destpix & 0x00ff0000) >> 16; uint8_t srcg = (srcpix & 0x0000ff00) >> 8; uint8_t destg = (destpix & 0x0000ff00) >> 8; uint8_t srcb = (srcpix & 0x000000ff) >> 0; uint8_t destb = (destpix & 0x000000ff) >> 0; return (uint32_t)0 | (uint32_t)255 << 24 | (uint32_t)(srcr + destr * (1 - alpha)) << 16 | (uint32_t)(srcg + destg * (1 - alpha)) << 8 | (uint32_t)(srcb + destb * (1 - alpha)) << 0; } static void blur_h(uint32_t *dest, uint32_t *src, int width, int height, int radius) { const int minradius = radius < width ? radius : width; #pragma omp parallel for for (int y = 0; y < height; ++y) { uint32_t *srow = src + y * width; uint32_t *drow = dest + y * width; // 'range' is float, because floating point division is usually faster // than integer division. 
int r_acc = 0; int g_acc = 0; int b_acc = 0; float range = minradius; // Accumulate the range (0..radius) for (int x = 0; x < minradius; ++x) { r_acc += (srow[x] & 0xff0000) >> 16; g_acc += (srow[x] & 0x00ff00) >> 8; b_acc += (srow[x] & 0x0000ff); } // Deal with the main body for (int x = 0; x < width; ++x) { if (x >= minradius) { r_acc -= (srow[x - radius] & 0xff0000) >> 16; g_acc -= (srow[x - radius] & 0x00ff00) >> 8; b_acc -= (srow[x - radius] & 0x0000ff); range -= 1; } if (x < width - minradius) { r_acc += (srow[x + radius] & 0xff0000) >> 16; g_acc += (srow[x + radius] & 0x00ff00) >> 8; b_acc += (srow[x + radius] & 0x0000ff); range += 1; } drow[x] = 0 | (int)(r_acc / range) << 16 | (int)(g_acc / range) << 8 | (int)(b_acc / range); } } } static void blur_v(uint32_t *dest, uint32_t *src, int width, int height, int radius) { const int minradius = radius < height ? radius : height; #pragma omp parallel for for (int x = 0; x < width; ++x) { uint32_t *scol = src + x; uint32_t *dcol = dest + x; // 'range' is float, because floating point division is usually faster // than integer division. int r_acc = 0; int g_acc = 0; int b_acc = 0; float range = minradius; // Accumulate the range (0..radius) for (int y = 0; y < minradius; ++y) { r_acc += (scol[y * width] & 0xff0000) >> 16; g_acc += (scol[y * width] & 0x00ff00) >> 8; b_acc += (scol[y * width] & 0x0000ff); } // Deal with the main body for (int y = 0; y < height; ++y) { if (y >= minradius) { r_acc -= (scol[(y - radius) * width] & 0xff0000) >> 16; g_acc -= (scol[(y - radius) * width] & 0x00ff00) >> 8; b_acc -= (scol[(y - radius) * width] & 0x0000ff); range -= 1; } if (y < height - minradius) { r_acc += (scol[(y + radius) * width] & 0xff0000) >> 16; g_acc += (scol[(y + radius) * width] & 0x00ff00) >> 8; b_acc += (scol[(y + radius) * width] & 0x0000ff); range += 1; } dcol[y * width] = 0 | (int)(r_acc / range) << 16 | (int)(g_acc / range) << 8 | (int)(b_acc / range); } } } static void blur_once(uint32_t *dest, uint32_t *src, uint32_t *scratch, int width, int height, int radius) { blur_h(scratch, src, width, height, radius); blur_v(dest, scratch, width, height, radius); } // This effect_blur function, and the associated blur_* functions, // are my own adaptations of code in yvbbrjdr's i3lock-fancy-rapid: // https://github.com/yvbbrjdr/i3lock-fancy-rapid static void effect_blur(uint32_t *dest, uint32_t *src, int width, int height, int scale, int radius, int times) { uint32_t *origdest = dest; uint32_t *scratch = malloc(width * height * sizeof(*scratch)); blur_once(dest, src, scratch, width, height, radius * scale); for (int i = 0; i < times - 1; ++i) { uint32_t *tmp = src; src = dest; dest = tmp; blur_once(dest, src, scratch, width, height, radius * scale); } free(scratch); // We're flipping between using dest and src; // if the last buffer we used was src, copy that over to dest. 
if (dest != origdest) memcpy(origdest, dest, width * height * sizeof(*dest)); } static void effect_pixelate(uint32_t *data, int width, int height, int scale, int factor) { factor *= scale; #pragma omp parallel for for (int y = 0; y < height / factor + 1; ++y) { for (int x = 0; x < width / factor + 1; ++x) { int total_r = 0, total_g = 0, total_b = 0; int xstart = x * factor; int ystart = y * factor; int xlim = MIN(xstart + factor, width); int ylim = MIN(ystart + factor, height); // Average for (int ry = ystart; ry < ylim; ++ry) { for (int rx = xstart; rx < xlim; ++rx) { int index = ry * width + rx; total_r += (data[index] & 0xff0000) >> 16; total_g += (data[index] & 0x00ff00) >> 8; total_b += (data[index] & 0x0000ff); } } int r = total_r / (factor * factor); int g = total_g / (factor * factor); int b = total_b / (factor * factor); // Fill pixels for (int ry = ystart; ry < ylim; ++ry) { for (int rx = xstart; rx < xlim; ++rx) { int index = ry * width + rx; data[index] = r << 16 | g << 8 | b; } } } } } static void effect_scale(uint32_t *dest, uint32_t *src, int swidth, int sheight, double scale) { int dwidth = swidth * scale; int dheight = sheight * scale; double fact = 1.0 / scale; #pragma omp parallel for for (int dy = 0; dy < dheight; ++dy) { int sy = dy * fact; if (sy >= sheight) continue; for (int dx = 0; dx < dwidth; ++dx) { int sx = dx * fact; if (sx >= swidth) continue; dest[dy * dwidth + dx] = src[sy * swidth + sx]; } } } static void effect_greyscale(uint32_t *data, int width, int height) { #pragma omp parallel for for (int y = 0; y < height; ++y) { for (int x = 0; x < width; ++x) { int index = y * width + x; int r = (data[index] & 0xff0000) >> 16; int g = (data[index] & 0x00ff00) >> 8; int b = (data[index] & 0x0000ff); int luma = 0.2989 * r + 0.5870 * g + 0.1140 * b; if (luma < 0) luma = 0; if (luma > 255) luma = 255; luma &= 0xFF; data[index] = luma << 16 | luma << 8 | luma; } } } static void effect_vignette(uint32_t *data, int width, int height, double base, double factor) { base = fmin(1, fmax(0, base)); factor = fmin(1 - base, fmax(0, factor)); #pragma omp parallel for for (int y = 0; y < height; ++y) { for (int x = 0; x < width; ++x) { double xf = (x * 1.0) / width; double yf = (y * 1.0) / height; double vignette_factor = base + factor * 16 * xf * yf * (1.0 - xf) * (1.0 - yf); int index = y * width + x; int r = (data[index] & 0xff0000) >> 16; int g = (data[index] & 0x00ff00) >> 8; int b = (data[index] & 0x0000ff); r = (int)(r * vignette_factor) & 0xFF; g = (int)(g * vignette_factor) & 0xFF; b = (int)(b * vignette_factor) & 0xFF; data[index] = r << 16 | g << 8 | b; } } } static void effect_compose(uint32_t *data, int width, int height, int scale, struct swaylock_effect_screen_pos posx, struct swaylock_effect_screen_pos posy, struct swaylock_effect_screen_pos posw, struct swaylock_effect_screen_pos posh, int gravity, char *imgpath) { #if !HAVE_GDK_PIXBUF (void)&blend_pixels; (void)&screen_size_to_pix; (void)&screen_pos_pair_to_pix; swaylock_log(LOG_ERROR, "Compose effect: Compiled without gdk_pixbuf support.\n"); return; #else int imgw = screen_size_to_pix(posw, width, scale); int imgh = screen_size_to_pix(posh, height, scale); bool preserve_aspect = imgw < 0 || imgh < 0; GError *err = NULL; GdkPixbuf *pixbuf = gdk_pixbuf_new_from_file_at_scale( imgpath, imgw, imgh, preserve_aspect, &err); if (!pixbuf) { swaylock_log(LOG_ERROR, "Compose effect: Failed to load image file '%s' (%s).", imgpath, err->message); g_error_free(err); return; } cairo_surface_t *image = 
gdk_cairo_image_surface_create_from_pixbuf(pixbuf); g_object_unref(pixbuf); int bufw = cairo_image_surface_get_width(image); int bufh = cairo_image_surface_get_height(image); uint32_t *bufdata = (uint32_t *)cairo_image_surface_get_data(image); int bufstride = cairo_image_surface_get_stride(image) / 4; bool bufalpha = cairo_image_surface_get_format(image) == CAIRO_FORMAT_ARGB32; int imgx, imgy; screen_pos_pair_to_pix( posx, posy, bufw, bufh, width, height, scale, gravity, &imgx, &imgy); #pragma omp parallel for for (int offy = 0; offy < bufh; ++offy) { if (offy + imgy < 0 || offy + imgy > height) continue; for (int offx = 0; offx < bufw; ++offx) { if (offx + imgx < 0 || offx + imgx > width) continue; size_t idx = (size_t)(offy + imgy) * width + (offx + imgx); size_t bufidx = (size_t)offy * bufstride + (offx); if (!bufalpha) { data[idx] = bufdata[bufidx]; } else { uint8_t alpha = (bufdata[bufidx] & 0xff000000) >> 24; if (alpha == 255) { data[idx] = bufdata[bufidx]; } else if (alpha != 0) { data[idx] = blend_pixels(alpha / 255.0, bufdata[bufidx], data[idx]); } } } } cairo_surface_destroy(image); #endif } static void effect_custom_run(uint32_t *data, int width, int height, int scale, char *path) { void *dl = dlopen(path, RTLD_LAZY); if (dl == NULL) { swaylock_log(LOG_ERROR, "Custom effect: %s", dlerror()); return; } void (*effect_func)(uint32_t *data, int width, int height, int scale) = dlsym(dl, "swaylock_effect"); if (effect_func != NULL) { effect_func(data, width, height, scale); dlclose(dl); return; } uint32_t (*pixel_func)(uint32_t pix, int x, int y, int width, int height) = dlsym(dl, "swaylock_pixel"); if (pixel_func != NULL) { #pragma omp parallel for for (int y = 0; y < height; ++y) { for (int x = 0; x < width; ++x) { data[y * width + x] = pixel_func(data[y * width + x], x, y, width, height); } } dlclose(dl); return; } (void)dlsym(dl, "swaylock_effect"); // Change the result of dlerror() swaylock_log(LOG_ERROR, "Custom effect: %s", dlerror()); } static bool file_is_outdated(const char *input, const char *output) { struct stat instat, outstat; if (stat(input, &instat) < 0) { return true; } if (stat(output, &outstat) < 0) { return true; } if (instat.st_mtim.tv_sec > outstat.st_mtim.tv_sec) { return true; } if ( instat.st_mtim.tv_sec == outstat.st_mtim.tv_sec && instat.st_mtim.tv_nsec >= outstat.st_mtim.tv_nsec) { return true; } return false; } static char *effect_custom_compile(const char *path) { static char *cachepath = NULL; static size_t cachelen; if (!cachepath) { char *xdgdir = getenv("XDG_DATA_HOME"); if (xdgdir) { cachepath = malloc(strlen(xdgdir) + strlen("/swaylock") + 1); cachelen = sprintf(cachepath, "%s/swaylock", xdgdir); } else { char *homedir = getenv("HOME"); if (homedir == NULL) { swaylock_log(LOG_ERROR, "Can't compile custom effect; neither $HOME nor $XDG_DATA_HOME " "is defined."); return NULL; } cachepath = malloc(strlen(homedir) + strlen("/.cache/swaylock") + 1); cachelen = sprintf(cachepath, "%s/.cache/swaylock", homedir); } if (mkdir(cachepath, 0777) < 0 && errno != EEXIST) { swaylock_log(LOG_ERROR, "Can't compile custom effect; mkdir %s failed: %s\n", cachepath, strerror(errno)); free(cachepath); cachepath = NULL; return NULL; } } // Find the true, absolute path of the input file char *abspath = realpath(path, NULL); size_t abspathlen = strlen(abspath); char *outpath = malloc(cachelen + 1 + abspathlen + 3 + 1); size_t outlen = sprintf(outpath, "%s/%s.so", cachepath, abspath); // Sanitize for (char *ch = outpath + cachelen + 1; ch < outpath + cachelen + 1 +
abspathlen; ++ch) { if (!( (*ch >= 'a' && *ch <= 'z') || (*ch >= 'A' && *ch <= 'Z') || (*ch >= '0' && *ch <= '9') || (*ch == '.'))) { *ch = '_'; } } if (!file_is_outdated(path, outpath)) { free(abspath); return outpath; } static const char *fmt = "cc -shared -g -O2 -march=native -fopenmp -o '%s' '%s' -lm"; char *cmd = malloc(strlen(fmt) + outlen - 2 + abspathlen - 2 + 1); sprintf(cmd, fmt, outpath, abspath); free(abspath); fprintf(stderr, "Compiling custom effect: %s\n", cmd); // Finally, compile. int ret = system(cmd); free(cmd); if (ret != 0) { if (ret == -1) { swaylock_log(LOG_ERROR, "Custom effect: system(): %s", strerror(errno)); free(outpath); return NULL; } else { swaylock_log(LOG_ERROR, "Custom effect compilation failed\n"); free(outpath); return NULL; } } return outpath; } static void effect_custom(uint32_t *data, int width, int height, int scale, char *path) { size_t pathlen = strlen(path); if (pathlen > 3 && strcmp(path + pathlen - 3, ".so") == 0) { effect_custom_run(data, width, height, scale, path); } else if (pathlen > 2 && strcmp(path + pathlen - 2, ".c") == 0) { char *compiled = effect_custom_compile(path); if (compiled != NULL) { effect_custom_run(data, width, height, scale, compiled); free(compiled); } } else { swaylock_log( LOG_ERROR, "%s: Unknown file type for custom effect (expected .c or .so)", path); } } static cairo_surface_t *run_effect(cairo_surface_t *surface, int scale, struct swaylock_effect *effect) { switch (effect->tag) { case EFFECT_BLUR: { cairo_surface_t *surf = cairo_image_surface_create( CAIRO_FORMAT_RGB24, cairo_image_surface_get_width(surface), cairo_image_surface_get_height(surface)); if (cairo_surface_status(surf) != CAIRO_STATUS_SUCCESS) { swaylock_log(LOG_ERROR, "Failed to create surface for blur effect"); cairo_surface_destroy(surf); break; } effect_blur( (uint32_t *)cairo_image_surface_get_data(surf), (uint32_t *)cairo_image_surface_get_data(surface), cairo_image_surface_get_width(surface), cairo_image_surface_get_height(surface), scale, effect->e.blur.radius, effect->e.blur.times); cairo_surface_flush(surf); cairo_surface_destroy(surface); surface = surf; break; } case EFFECT_PIXELATE: { effect_pixelate( (uint32_t *)cairo_image_surface_get_data(surface), cairo_image_surface_get_width(surface), cairo_image_surface_get_height(surface), scale, effect->e.pixelate.factor); cairo_surface_flush(surface); break; } case EFFECT_SCALE: { cairo_surface_t *surf = cairo_image_surface_create( CAIRO_FORMAT_RGB24, cairo_image_surface_get_width(surface) * effect->e.scale, cairo_image_surface_get_height(surface) * effect->e.scale); if (cairo_surface_status(surf) != CAIRO_STATUS_SUCCESS) { swaylock_log(LOG_ERROR, "Failed to create surface for scale effect"); cairo_surface_destroy(surf); break; } effect_scale( (uint32_t *)cairo_image_surface_get_data(surf), (uint32_t *)cairo_image_surface_get_data(surface), cairo_image_surface_get_width(surface), cairo_image_surface_get_height(surface), effect->e.scale); cairo_surface_flush(surf); cairo_surface_destroy(surface); surface = surf; break; } case EFFECT_GREYSCALE: { effect_greyscale( (uint32_t *)cairo_image_surface_get_data(surface), cairo_image_surface_get_width(surface), cairo_image_surface_get_height(surface)); cairo_surface_flush(surface); break; } case EFFECT_VIGNETTE: { effect_vignette( (uint32_t *)cairo_image_surface_get_data(surface), cairo_image_surface_get_width(surface), cairo_image_surface_get_height(surface), effect->e.vignette.base, effect->e.vignette.factor); cairo_surface_flush(surface); break; } case 
EFFECT_COMPOSE: { effect_compose( (uint32_t *)cairo_image_surface_get_data(surface), cairo_image_surface_get_width(surface), cairo_image_surface_get_height(surface), scale, effect->e.compose.x, effect->e.compose.y, effect->e.compose.w, effect->e.compose.h, effect->e.compose.gravity, effect->e.compose.imgpath); cairo_surface_flush(surface); break; } case EFFECT_CUSTOM: { effect_custom( (uint32_t *)cairo_image_surface_get_data(surface), cairo_image_surface_get_width(surface), cairo_image_surface_get_height(surface), scale, effect->e.custom); cairo_surface_flush(surface); break; } } return surface; } static cairo_surface_t *ensure_format(cairo_surface_t *surface) { if (cairo_image_surface_get_format(surface) == CAIRO_FORMAT_RGB24) { return surface; } swaylock_log(LOG_DEBUG, "Have to convert surface to CAIRO_FORMAT_RGB24 from %i.", (int)cairo_image_surface_get_format(surface)); cairo_surface_t *surf = cairo_image_surface_create( CAIRO_FORMAT_RGB24, cairo_image_surface_get_width(surface), cairo_image_surface_get_height(surface)); if (cairo_surface_status(surf) != CAIRO_STATUS_SUCCESS) { swaylock_log(LOG_ERROR, "Failed to create surface for scale effect"); cairo_surface_destroy(surf); return NULL; } memcpy( cairo_image_surface_get_data(surf), cairo_image_surface_get_data(surface), cairo_image_surface_get_stride(surface) * cairo_image_surface_get_height(surface)); cairo_surface_destroy(surface); return surf; } cairo_surface_t *swaylock_effects_run(cairo_surface_t *surface, int scale, struct swaylock_effect *effects, int count) { surface = ensure_format(surface); if (surface == NULL) return NULL; for (int i = 0; i < count; ++i) { struct swaylock_effect *effect = &effects[i]; surface = run_effect(surface, scale, effect); } return surface; } #define TIME_MSEC(tv) ((tv).tv_sec * 1000.0 + (tv).tv_nsec / 1000000.0) #define TIME_DELTA(first, last) (TIME_MSEC(last) - TIME_MSEC(first)) cairo_surface_t *swaylock_effects_run_timed(cairo_surface_t *surface, int scale, struct swaylock_effect *effects, int count) { struct timespec start_tv; clock_gettime(CLOCK_MONOTONIC, &start_tv); surface = ensure_format(surface); if (surface == NULL) return NULL; fprintf(stderr, "Running %i effects:\n", count); for (int i = 0; i < count; ++i) { struct timespec effect_start_tv; clock_gettime(CLOCK_MONOTONIC, &effect_start_tv); struct swaylock_effect *effect = &effects[i]; surface = run_effect(surface, scale, effect); struct timespec effect_end_tv; clock_gettime(CLOCK_MONOTONIC, &effect_end_tv); fprintf(stderr, " %s: %fms\n", effect_name(effect), TIME_DELTA(effect_start_tv, effect_end_tv)); } struct timespec end_tv; clock_gettime(CLOCK_MONOTONIC, &end_tv); fprintf(stderr, "Effects took %fms.\n", TIME_DELTA(start_tv, end_tv)); return surface; }
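run_effect() dispatches on effect->tag and swaylock_effects_run() folds a whole effect list over a surface. A minimal caller sketch, assuming the struct swaylock_effect layout implied by the field accesses above (the tag plus the e.blur sub-struct come from effects.h, which is not shown here):

// Hedged sketch: blur a cairo image surface twice with radius 8.
// ensure_format() inside swaylock_effects_run converts other formats
// to CAIRO_FORMAT_RGB24 first.
static cairo_surface_t *example_blur(cairo_surface_t *surface, int scale) {
	struct swaylock_effect effect = { 0 };
	effect.tag = EFFECT_BLUR;
	effect.e.blur.radius = 8;
	effect.e.blur.times = 2;
	return swaylock_effects_run(surface, scale, &effect, 1);
}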
166. QR Eigen Values.c
/** * @file * \brief Compute real eigen values of a symmetric matrix using the * iterative shifted QR decomposition method. * */ #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include "qr_decompose.h" #ifdef _OPENMP #include <omp.h> #endif #define LIMS 9 /**< limit of range of matrix values */ #define EPSILON 1e-10 /**< accuracy tolerance limit */ /** * create a square symmetric matrix of given size with random elements * \param[out] A matrix to create (must be pre-allocated in memory) * \param[in] N matrix size */ void create_matrix(double **A, int N) { int i, j, tmp, lim2 = LIMS >> 1; #ifdef _OPENMP #pragma omp for #endif for (i = 0; i < N; i++) { A[i][i] = (rand() % LIMS) - lim2; for (j = i + 1; j < N; j++) { tmp = (rand() % LIMS) - lim2; A[i][j] = tmp; A[j][i] = tmp; } } } /** * Perform multiplication of two matrices. * * R2 must be equal to C1 * * Resultant matrix size should be R1xC2 * \param[in] A first matrix to multiply * \param[in] B second matrix to multiply * \param[out] OUT output matrix (must be pre-allocated) * \param[in] R1 number of rows of first matrix * \param[in] C1 number of columns of first matrix * \param[in] R2 number of rows of second matrix * \param[in] C2 number of columns of second matrix * \returns pointer to resultant matrix */ double **mat_mul(double **A, double **B, double **OUT, int R1, int C1, int R2, int C2) { if (C1 != R2) { perror("Matrix dimensions mismatch!"); return OUT; } int i; #ifdef _OPENMP #pragma omp for #endif for (i = 0; i < R1; i++) { for (int j = 0; j < C2; j++) { OUT[i][j] = 0.0; for (int k = 0; k < C1; k++) OUT[i][j] += A[i][k] * B[k][j]; } } return OUT; } /** Compute eigen values using iterative shifted QR decomposition algorithm as * follows: * 1. Use last diagonal element of A as eigen value approximation \f$c\f$ * 2. Shift diagonals of matrix \f$A' = A - cI\f$ * 3. Decompose matrix \f$A'=QR\f$ * 4. Compute next approximation \f$A'_1 = RQ \f$ * 5. Shift diagonals back \f$A_1 = A'_1 + cI\f$ * 6. Termination condition check: last element below diagonal is almost 0 * 1. If not 0, go back to step 1 with the new approximation \f$A_1\f$ * 2. If 0, continue to step 7 * 7. Save last known \f$c\f$ as the eigen value. * 8. Are all eigen values found? * 1. If not, remove last row and column of \f$A_1\f$ and go back to step 1. * 2. If yes, stop.
* * \note The matrix \f$A\f$ gets modified * * \param[in,out] A matrix to compute eigen values for * \param[out] eigen_vals resultant vector containing computed eigen values * \param[in] mat_size matrix size * \param[in] debug_print 1 to print intermediate Q & R matrices, 0 for not to * \returns time for computation in seconds */ double eigen_values(double **A, double *eigen_vals, int mat_size, char debug_print) { if (!eigen_vals) { perror("Output eigen value vector cannot be NULL!"); return -1; } double **R = (double **)malloc(sizeof(double *) * mat_size); double **Q = (double **)malloc(sizeof(double *) * mat_size); if (!Q || !R) { perror("Unable to allocate memory for Q & R!"); if (Q) { free(Q); } if (R) { free(R); } return -1; } /* allocate dynamic memory for matrices */ for (int i = 0; i < mat_size; i++) { R[i] = (double *)malloc(sizeof(double) * mat_size); Q[i] = (double *)malloc(sizeof(double) * mat_size); if (!Q[i] || !R[i]) { perror("Unable to allocate memory for Q & R."); for (; i >= 0; i--) { free(R[i]); free(Q[i]); } free(Q); free(R); return -1; } } if (debug_print) { print_matrix(A, mat_size, mat_size); } int rows = mat_size, columns = mat_size; int counter = 0, num_eigs = rows - 1; double last_eig = 0; clock_t t1 = clock(); while (num_eigs > 0) /* continue till all eigen values are found */ { /* iterate with QR decomposition */ while (fabs(A[num_eigs][num_eigs - 1]) > EPSILON) { last_eig = A[num_eigs][num_eigs]; for (int i = 0; i < rows; i++) A[i][i] -= last_eig; /* A - cI */ qr_decompose(A, Q, R, rows, columns); if (debug_print) { print_matrix(A, rows, columns); print_matrix(Q, rows, columns); print_matrix(R, columns, columns); printf("-------------------- %d ---------------------\n", ++counter); } mat_mul(R, Q, A, columns, columns, rows, columns); for (int i = 0; i < rows; i++) A[i][i] += last_eig; /* A + cI */ } /* store the converged eigen value */ eigen_vals[num_eigs] = last_eig; if (debug_print) { printf("========================\n"); printf("Eigen value: % g,\n", last_eig); printf("========================\n"); } num_eigs--; rows--; columns--; } eigen_vals[0] = A[0][0]; double dtime = (double)(clock() - t1) / CLOCKS_PER_SEC; if (debug_print) { print_matrix(R, mat_size, mat_size); print_matrix(Q, mat_size, mat_size); } /* cleanup dynamic memory */ for (int i = 0; i < mat_size; i++) { free(R[i]); free(Q[i]); } free(R); free(Q); return dtime; } /** * test function to compute eigen values of a 2x2 matrix * \f[\begin{bmatrix} * 5 & 7\\ * 7 & 11 * \end{bmatrix}\f] * which are approximately, {15.56158, 0.384227} */ void test1() { int mat_size = 2; double X[][2] = {{5, 7}, {7, 11}}; double y[] = {15.56158, 0.384227}; // expected eigen values double eig_vals[2] = {0, 0}; // The following steps are to convert a "double[][]" to "double **" double **A = (double **)malloc(mat_size * sizeof(double *)); for (int i = 0; i < mat_size; i++) A[i] = X[i]; printf("------- Test 1 -------\n"); double dtime = eigen_values(A, eig_vals, mat_size, 0); for (int i = 0; i < mat_size; i++) { printf("%d/2 Checking for %.3g --> ", i + 1, y[i]); char result = 0; for (int j = 0; j < mat_size && !result; j++) { if (fabs(y[i] - eig_vals[j]) < 0.1) { result = 1; printf("(%.3g) ", eig_vals[j]); } } // ensure that i^th expected eigen value was computed assert(result != 0); printf("found\n"); } printf("Test 1 Passed in %.3g sec\n\n", dtime); free(A); } /** * test function to compute eigen values of a 5x5 matrix * \f[\begin{bmatrix} * -4& 4& 2& 0& -3\\ * 4& -4& 4& -3& -1\\ * 2& 4& 4& 3& -3\\ * 0& -3& 3&
-1&-1\\ * -3& -1& -3& -3& 0 * \end{bmatrix}\f] * which are approximately, {9.27648, -9.26948, 2.0181, -1.03516, -5.98994} */ void test2() { int mat_size = 5; double X[][5] = {{-4, 4, 2, 0, -3}, {4, -4, 4, -3, -1}, {2, 4, 4, 3, -3}, {0, -3, 3, -1, -3}, {-3, -1, -3, -3, 0}}; double y[] = {9.27648, -9.26948, 2.0181, -1.03516, -5.98994}; // expected eigen values double eig_vals[5]; // The following steps are to convert a "double[][]" to "double **" double **A = (double **)malloc(mat_size * sizeof(double *)); for (int i = 0; i < mat_size; i++) A[i] = X[i]; printf("------- Test 2 -------\n"); double dtime = eigen_values(A, eig_vals, mat_size, 0); for (int i = 0; i < mat_size; i++) { printf("%d/5 Checking for %.3g --> ", i + 1, y[i]); char result = 0; for (int j = 0; j < mat_size && !result; j++) { if (fabs(y[i] - eig_vals[j]) < 0.1) { result = 1; printf("(%.3g) ", eig_vals[j]); } } // ensure that i^th expected eigen value was computed assert(result != 0); printf("found\n"); } printf("Test 2 Passed in %.3g sec\n\n", dtime); free(A); } /** * main function */ int main(int argc, char **argv) { srand(time(NULL)); int mat_size = 5; if (argc == 2) { mat_size = atoi(argv[1]); } else { // if no matrix size argument is given, run the built-in tests test1(); test2(); printf("Usage: ./qr_eigen_values [mat_size]\n"); return 0; } if (mat_size < 2) { fprintf(stderr, "Matrix size should be >= 2\n"); return -1; } int i; double **A = (double **)malloc(sizeof(double *) * mat_size); /* number of eigen values = matrix size */ double *eigen_vals = (double *)malloc(sizeof(double) * mat_size); if (!eigen_vals) { perror("Unable to allocate memory for eigen values!"); free(A); return -1; } for (i = 0; i < mat_size; i++) { A[i] = (double *)malloc(sizeof(double) * mat_size); eigen_vals[i] = 0.0; } /* create a random matrix */ create_matrix(A, mat_size); print_matrix(A, mat_size, mat_size); double dtime = eigen_values(A, eigen_vals, mat_size, 0); printf("Eigen vals: "); for (i = 0; i < mat_size; i++) printf("% 9.4g\t", eigen_vals[i]); printf("\nTime taken to compute: % .4g sec\n", dtime); for (int i = 0; i < mat_size; i++) free(A[i]); free(A); free(eigen_vals); return 0; }
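main() and the two tests show the calling convention: an array of row pointers lets a stack matrix be passed as double **, and eigen_values() overwrites A in place. A small worked sketch along the same lines; the expected values follow from the known spectrum {2 - sqrt(2), 2, 2 + sqrt(2)} of this tridiagonal matrix:

/* Hedged sketch: eigen values of a fixed 3x3 symmetric matrix. */
void example_3x3(void)
{
    double X[3][3] = {{2, 1, 0}, {1, 2, 1}, {0, 1, 2}};
    double *A[3] = {X[0], X[1], X[2]}; /* "double[][]" -> "double **" trick */
    double vals[3];
    eigen_values(A, vals, 3, 0); /* X is modified in place */
    for (int i = 0; i < 3; i++)
        printf("% .4g\n", vals[i]); /* ~0.5858, 2, 3.414 in some order */
}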
selu_kernel_arm.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: [email protected] */ #include <math.h> #include <arm_neon.h> #include "neon_mathfun.h" #include "selu_kernel_arm.h" void selu_kernel(int i, int id, void* data, const float* input, float* output, float alpha, float lambda) { float alpha_lambda = alpha * lambda; int step = (( int* )data)[0]; float32x4_t _one = vdupq_n_f32(1.f); float32x4_t _zero = vdupq_n_f32(0.f); float32x4_t _alpha_lambda = vdupq_n_f32(alpha_lambda); float32x4_t _lambda = vdupq_n_f32(lambda); const float* cur_input = input + id * step; float* cur_output = output + id * step; for (int i = 0; i < (step & -4); i += 4) { float32x4_t _p = vld1q_f32(cur_input); uint32x4_t _lemask = vcleq_f32(_p, _zero); float32x4_t _nps = exp_ps(_p); _nps = vsubq_f32(_nps, _one); _nps = vmulq_f32(_nps, _alpha_lambda); _p = vmulq_f32(_p, _lambda); _p = vbslq_f32(_lemask, _nps, _p); vst1q_f32(cur_output, _p); cur_input += 4; cur_output += 4; } for (int i = step & ~3; i < step; i++) { if (cur_input[0] < 0.f) cur_output[0] = (exp(cur_input[0]) - 1.f) * alpha_lambda; else cur_output[0] = cur_input[0] * lambda; cur_input++; cur_output++; } } int selu_run(struct ir_tensor* output_tensor, struct ir_tensor* input_tensor, struct selu_param* selu_param, int num_thread) { float* data = ( float* )input_tensor->data; float* out_data = ( float* )output_tensor->data; float alpha = selu_param->alpha; float lambda = selu_param->lambda; int chan_num = input_tensor->dims[0] * input_tensor->dims[1]; int chan_size = input_tensor->dims[2] * input_tensor->dims[3]; #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < chan_num; i++) { int offset = i * chan_size; selu_kernel(0, 0, &chan_size, data + offset, out_data + offset, alpha, lambda); } return 0; }
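The NEON loop above evaluates selu(x) = lambda*x for positive inputs and lambda*alpha*(exp(x)-1) otherwise, four lanes at a time via the vcleq mask, with a scalar tail loop for the remainder. A scalar reference sketch of the same formula, useful for checking the vector path (at x == 0 both branches give 0, so the <= vs < boundary difference between the two loops is harmless):

/* Hedged sketch: scalar SELU, equivalent to one lane of the kernel above. */
static float selu_ref(float x, float alpha, float lambda)
{
    return x > 0.f ? lambda * x : lambda * alpha * (expf(x) - 1.f);
}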
GB_unaryop__lnot_int8_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_int8_int8 // op(A') function: GB_tran__lnot_int8_int8 // C type: int8_t // A type: int8_t // cast: int8_t cij = (int8_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int8_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ int8_t z = (int8_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_int8_int8 ( int8_t *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_int8_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
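With the macros above, each iteration of the generated apply loop is ordinary scalar code. An expanded sketch of one step (the helper name is chosen here purely for illustration):

// Hedged sketch: what GB_CAST_OP(p,p) expands to for this operator.
static inline void lnot_int8_step (int8_t *Cx, const int8_t *Ax, int64_t p)
{
    int8_t aij = Ax [p] ;       // GB_GETA
    int8_t x = (int8_t) aij ;   // GB_CASTING
    Cx [p] = !(x != 0) ;        // GB_OP: logical not
}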
array_args.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_UTILS_ARRAY_AGRS_H_ #define LIGHTGBM_UTILS_ARRAY_AGRS_H_ #include <LightGBM/utils/openmp_wrapper.h> #include <algorithm> #include <utility> #include <vector> namespace LightGBM { /*! * \brief Contains some operation for an array, e.g. ArgMax, TopK. */ template<typename VAL_T> class ArrayArgs { public: inline static size_t ArgMaxMT(const std::vector<VAL_T>& array) { int num_threads = 1; #pragma omp parallel #pragma omp master { num_threads = omp_get_num_threads(); } int step = std::max(1, (static_cast<int>(array.size()) + num_threads - 1) / num_threads); std::vector<size_t> arg_maxs(num_threads, 0); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < num_threads; ++i) { size_t start = step * i; if (start >= array.size()) { continue; } size_t end = std::min(array.size(), start + step); size_t arg_max = start; for (size_t j = start + 1; j < end; ++j) { if (array[j] > array[arg_max]) { arg_max = j; } } arg_maxs[i] = arg_max; } size_t ret = arg_maxs[0]; for (int i = 1; i < num_threads; ++i) { if (array[arg_maxs[i]] > array[ret]) { ret = arg_maxs[i]; } } return ret; } inline static size_t ArgMax(const std::vector<VAL_T>& array) { if (array.empty()) { return 0; } if (array.size() > 1024) { return ArgMaxMT(array); } else { size_t arg_max = 0; for (size_t i = 1; i < array.size(); ++i) { if (array[i] > array[arg_max]) { arg_max = i; } } return arg_max; } } inline static size_t ArgMin(const std::vector<VAL_T>& array) { if (array.empty()) { return 0; } size_t arg_min = 0; for (size_t i = 1; i < array.size(); ++i) { if (array[i] < array[arg_min]) { arg_min = i; } } return arg_min; } inline static size_t ArgMax(const VAL_T* array, size_t n) { if (n <= 0) { return 0; } size_t arg_max = 0; for (size_t i = 1; i < n; ++i) { if (array[i] > array[arg_max]) { arg_max = i; } } return arg_max; } inline static size_t ArgMin(const VAL_T* array, size_t n) { if (n <= 0) { return 0; } size_t arg_min = 0; for (size_t i = 1; i < n; ++i) { if (array[i] < array[arg_min]) { arg_min = i; } } return arg_min; } inline static void Partition(std::vector<VAL_T>* arr, int start, int end, int* l, int* r) { int i = start - 1; int j = end - 1; int p = i; int q = j; if (start >= end) { return; } std::vector<VAL_T>& ref = *arr; VAL_T v = ref[end - 1]; for (;;) { while (ref[++i] > v) {} while (v > ref[--j]) { if (j == start) { break; } } if (i >= j) { break; } std::swap(ref[i], ref[j]); if (ref[i] == v) { p++; std::swap(ref[p], ref[i]); } if (v == ref[j]) { q--; std::swap(ref[j], ref[q]); } } std::swap(ref[i], ref[end - 1]); j = i - 1; i = i + 1; for (int k = start; k <= p; k++, j--) { std::swap(ref[k], ref[j]); } for (int k = end - 2; k >= q; k--, i++) { std::swap(ref[i], ref[k]); } *l = j; *r = i; } // Note: k refer to index here. e.g. k=0 means get the max number. inline static int ArgMaxAtK(std::vector<VAL_T>* arr, int start, int end, int k) { if (start >= end - 1) { return start; } int l = start; int r = end - 1; Partition(arr, start, end, &l, &r); // if find or all elements are the same. if ((k > l && k < r) || (l == start - 1 && r == end - 1)) { return k; } else if (k <= l) { return ArgMaxAtK(arr, start, l + 1, k); } else { return ArgMaxAtK(arr, r, end, k); } } // Note: k is 1-based here. e.g. k=3 means get the top-3 numbers. 
inline static void MaxK(const std::vector<VAL_T>& array, int k, std::vector<VAL_T>* out) { out->clear(); if (k <= 0) { return; } for (auto val : array) { out->push_back(val); } if (static_cast<size_t>(k) >= array.size()) { return; } ArgMaxAtK(out, 0, static_cast<int>(out->size()), k - 1); out->erase(out->begin() + k, out->end()); } inline static void Assign(std::vector<VAL_T>* array, VAL_T t, size_t n) { array->resize(n); for (size_t i = 0; i < array->size(); ++i) { (*array)[i] = t; } } inline static bool CheckAllZero(const std::vector<VAL_T>& array) { for (size_t i = 0; i < array.size(); ++i) { if (array[i] != VAL_T(0)) { return false; } } return true; } inline static bool CheckAll(const std::vector<VAL_T>& array, VAL_T t) { for (size_t i = 0; i < array.size(); ++i) { if (array[i] != t) { return false; } } return true; } }; } // namespace LightGBM #endif // LIGHTGBM_UTILS_ARRAY_AGRS_H_
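ArgMax() switches to the multi-threaded ArgMaxMT() path once the array exceeds 1024 entries, and MaxK() keeps the k largest values (unsorted) via the three-way partition in ArgMaxAtK(). A usage sketch, assuming only this header and <vector>:

// Hedged sketch: typical ArrayArgs calls.
void example_array_args() {
  std::vector<double> v = {0.5, 2.5, 1.0, 2.0};
  size_t imax = LightGBM::ArrayArgs<double>::ArgMax(v);  // == 1 (value 2.5)
  std::vector<double> top2;
  LightGBM::ArrayArgs<double>::MaxK(v, 2, &top2);  // {2.5, 2.0}, any order
}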
IOLayersRules.h
// Copyright 2016-present, Facebook, Inc. // All rights reserved. // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #ifndef INPUTLAYER_H #define INPUTLAYER_H // Rulebook Format // rules[0][0] == mode // rules[0][1] == maxActive per spatial location (==1 for modes 0,1,2) // rules[0][2] == nInputRows // rules[0][3] == nOutputRows // rules[1] nOutputRows x (1+maxActive) // mode 0==guaranteed unique 1==overwrite, 2=keep, 3=sum, 4=mean template <Int dimension> void inputLayerRules(SparseGrids<dimension> &SGs, RuleBook &rules, long *coords, Int nInputRows, Int nInputColumns, Int batchSize, Int mode, Int &nActive) { assert(nActive == 0); assert(rules.size() == 0); assert(SGs.size() == 0); SGs.resize(batchSize); // Set a minimum batch size if necessary Point<dimension> p; if (mode == 0) { nActive = nInputRows; rules.resize(1); rules[0].push_back(mode); rules[0].push_back(1); rules[0].push_back(nInputRows); rules[0].push_back(nInputRows); if (nInputColumns == dimension) { SGs.resize(1); auto &sg = SGs[0]; for (Int i = 0; i < nInputRows; ++i) { for (Int j = 0; j < dimension; j++) p[j] = coords[j]; coords += dimension; sg.mp[p] = i; } } else { // nInputColumns == dimension + 1 Int idx; for (Int i = 0; i < nInputRows; ++i) { for (Int j = 0; j < dimension; j++) p[j] = coords[j]; idx = coords[dimension]; coords += dimension + 1; if (idx + 1 >= (Int)SGs.size()) SGs.resize(idx + 1); SGs[idx].mp[p] = i; } } return; } // Compile list of how input rows correspond to output rows std::vector<std::vector<Int>> outputRows; if (nInputColumns == dimension) { SGs.resize(1); auto &sg = SGs[0]; for (Int i = 0; i < nInputRows; ++i) { for (Int j = 0; j < dimension; j++) p[j] = coords[j]; coords += dimension; auto iter = sg.mp.find(p); if (iter == sg.mp.end()) { sg.mp[p] = nActive++; outputRows.resize(nActive); } outputRows[sg.mp[p]].push_back(i); } } else { // nInputColumns == dimension + 1 Int idx; for (Int i = 0; i < nInputRows; ++i) { for (Int j = 0; j < dimension; j++) p[j] = coords[j]; idx = coords[dimension]; coords += dimension + 1; if (idx + 1 >= (Int)SGs.size()) SGs.resize(idx + 1); auto &sg = SGs[idx]; auto iter = sg.mp.find(p); if (iter == sg.mp.end()) { sg.mp[p] = nActive++; outputRows.resize(nActive); } outputRows[sg.mp[p]].push_back(i); } } rules.resize(2); rules[0].push_back(mode); rules[0].push_back(1); // replace with maxActive if mode==3 or 4 rules[0].push_back(nInputRows); rules[0].push_back(outputRows.size()); auto &rule = rules[1]; if (mode == 1) { for (Int i = 0; i < nActive; ++i) { rule.push_back(1); rule.push_back(outputRows[i].front()); } } if (mode == 2) { for (Int i = 0; i < nActive; ++i) { rule.push_back(1); rule.push_back(outputRows[i].back()); } } if (mode == 3 or mode == 4) { Int maxActive = 0; for (auto &row : outputRows) maxActive = std::max(maxActive, (Int)row.size()); rules[0][1] = maxActive; for (auto &row : outputRows) { rule.push_back(row.size()); for (auto &r : row) rule.push_back(r); rule.resize((rule.size() + maxActive) / (maxActive + 1) * (maxActive + 1)); } } } // Rulebook Format // rules[0][0] == mode // rules[0][1] == maxActive per spatial location (==1 for modes 0,1,2) // rules[0][2] == batchSize // rules[0][3] == length // rules[0][4] == nOutputRows // rules[1] nOutputRows x (1+maxActive) // bl is a batchSize x length x dimension long array of coordinates // mode 0==guaranteed unique and all present; 1==overwrite, 2=keep, 3=sum, // 4=mean template <Int dimension> void 
blRules(SparseGrids<dimension> &SGs, RuleBook &rules, long *coords, Int batchSize, Int length, Int mode, Int &nActive) { assert(nActive == 0); assert(rules.size() == 0); assert(SGs.size() == 0); SGs.resize(batchSize); Int I; if (mode == 0) { nActive = batchSize * length; rules.resize(1); rules[0].push_back(mode); rules[0].push_back(1); rules[0].push_back(batchSize); rules[0].push_back(length); rules[0].push_back(nActive); #pragma omp parallel for private(I) for (I = 0; I < batchSize; I++) { auto &sg = SGs[I]; sg.ctr = I * length; auto c = coords + I * length * dimension; Point<dimension> p; for (Int l = 0; l < length; ++l) { for (Int j = 0; j < dimension; ++j) p[j] = c[j]; c += dimension; sg.mp[p] = l; } } return; } // Compile list of how input rows correspond to output rows std::vector<std::vector<std::vector<Int>>> outputRows(batchSize); std::vector<Int> nActives(batchSize); #pragma omp parallel for private(I) for (I = 0; I < batchSize; I++) { auto &sg = SGs[I]; auto &ors = outputRows[I]; auto &nAct = nActives[I]; auto c = coords + I * length * dimension; Int i = I * length; Point<dimension> p; for (Int l = 0; l < length; ++l, ++i) { for (Int j = 0; j < dimension; ++j) p[j] = *c++; if (p[0] >= 0) { auto iter = sg.mp.find(p); if (iter == sg.mp.end()) { sg.mp[p] = nAct++; ors.resize(nAct); } ors[sg.mp[p]].push_back(i); } } } for (I = 0; I < batchSize; I++) { SGs[I].ctr = nActive; nActive += nActives[I]; } Int maxActive = 1; if (mode >= 3) for (auto &ors : outputRows) for (auto &row : ors) maxActive = std::max(maxActive, (Int)row.size()); rules.resize(2); rules[0].push_back(mode); rules[0].push_back(maxActive); rules[0].push_back(batchSize); rules[0].push_back(length); rules[0].push_back(nActive); auto &rule = rules[1]; if (mode == 1) { rule.resize(2 * nActive); #pragma omp parallel for private(I) for (I = 0; I < batchSize; I++) { auto &ors = outputRows[I]; auto rr = &rule[SGs[I].ctr * 2]; for (auto &row : ors) { rr[0] = row.size(); rr[1] = row.back(); rr += 2; } } } if (mode == 2) { rule.resize(2 * nActive); #pragma omp parallel for private(I) for (I = 0; I < batchSize; I++) { auto &ors = outputRows[I]; auto rr = &rule[SGs[I].ctr * 2]; for (auto &row : ors) { rr[0] = row.size(); rr[1] = row.front(); rr += 2; } } } if (mode == 3 or mode == 4) { rule.resize((maxActive + 1) * nActive); #pragma omp parallel for private(I) for (I = 0; I < batchSize; I++) { auto &ors = outputRows[I]; auto rr = &rule[SGs[I].ctr * (maxActive + 1)]; for (auto &row : ors) { rr[0] = row.size(); for (Int i = 0; i < (Int)row.size(); ++i) rr[i + 1] = row[i]; rr += 1 + maxActive; } } } } #endif /* INPUTLAYER_H */
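Both rule builders share the rulebook layout documented in the header comments: rules[0] holds the metadata and rules[1] holds one (1+maxActive)-entry record per output row. A sketch of inputLayerRules() on two duplicate 2-D points, assuming the SparseGrids, RuleBook, and Int definitions from the surrounding SparseConvNet headers:

// Hedged sketch: mode 3 (sum) merges the duplicate into one active site.
void example_input_rules() {
  SparseGrids<2> grids;
  RuleBook rules;
  long coords[] = {0, 0,   // point (0,0)
                   0, 0};  // duplicate of the same site
  Int nActive = 0;
  inputLayerRules<2>(grids, rules, coords, /*nInputRows=*/2,
                     /*nInputColumns=*/2, /*batchSize=*/1,
                     /*mode=*/3, nActive);
  // nActive == 1; rules[1] == {2, 0, 1}: two input rows feed output row 0.
}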
Albus_spmv.h
#include<iostream> #include<stdio.h> #include<math.h> #include<time.h> #include<omp.h> #include<immintrin.h> #include<cstring> #include<sys/time.h> #include<stdlib.h> using namespace std; #define INT int #define DOU double #define AVX_DOU __m256d #define SSE_DOU __m128d inline DOU SIMD_fast1(INT start1,INT num,INT * __restrict row_ptr,INT * __restrict col_idx,DOU * __restrict mtx_val,DOU * __restrict vec_val) { DOU answer; switch(num) { case 4 : { register SSE_DOU mtx_3 , vec_3 , ans_3 , mtx_3_1 , vec_3_1; register INT s1,s2,s3; s1 = start1 + 1; s2 = start1 + 2; s3 = start1 + 3; mtx_3 = _mm_load_pd(mtx_val+start1); mtx_3_1 = _mm_load_pd(mtx_val+s2); vec_3 = _mm_set_pd(vec_val[col_idx[s1]],vec_val[col_idx[start1]]); vec_3_1 = _mm_set_pd(vec_val[col_idx[s3]],vec_val[col_idx[s2]]); ans_3 = _mm_fmadd_pd(mtx_3_1,vec_3_1,_mm_mul_pd(mtx_3,vec_3)); answer = ans_3[0]+ans_3[1]; return answer; } default : { register AVX_DOU mtx_ans_1,mtx_3,vec_3; register INT s1,s2,s3; register INT t = num & (~3); register INT start2 = start1 + t; register INT num_1 = num & 3; s1 = start1 + 1; s2 = start1 + 2; s3 = start1 + 3; _mm_prefetch((DOU *)&mtx_val[start1+16],_MM_HINT_T0); _mm_prefetch((DOU *)&col_idx[start1+16],_MM_HINT_T0); mtx_3 = _mm256_load_pd(mtx_val+start1); vec_3 = _mm256_set_pd(vec_val[col_idx[s3]],vec_val[col_idx[s2]],vec_val[col_idx[s1]],vec_val[col_idx[start1]]); mtx_ans_1 = _mm256_mul_pd(mtx_3,vec_3); start1 += 4; #pragma unroll(32) for(;start1<start2;start1+=4) { s1 = start1 + 1; s2 = start1 + 2; s3 = start1 + 3; mtx_3 = _mm256_load_pd(mtx_val+start1); vec_3 = _mm256_setr_pd(vec_val[col_idx[start1]],vec_val[col_idx[s1]],vec_val[col_idx[s2]],vec_val[col_idx[s3]]); mtx_ans_1 = _mm256_fmadd_pd(mtx_3,vec_3,mtx_ans_1); } switch (num_1) { case 0 : { mtx_ans_1 = _mm256_hadd_pd(mtx_ans_1,mtx_ans_1); answer = mtx_ans_1[0] + mtx_ans_1[2]; return answer; } case 1 : { mtx_ans_1 = _mm256_hadd_pd(mtx_ans_1,mtx_ans_1); answer = mtx_ans_1[0] + mtx_ans_1[2]; answer = answer + (mtx_val[start2]*vec_val[col_idx[start2]]); return answer; } case 2 : { mtx_ans_1 = _mm256_hadd_pd(mtx_ans_1,mtx_ans_1); answer = mtx_ans_1[0] + mtx_ans_1[2]; s1 = start2 + 1; answer = answer+(mtx_val[start2]*vec_val[col_idx[start2]]+mtx_val[s1]*vec_val[col_idx[s1]]); return answer; } default : { s1 = start2 + 1; s2 = start2 + 2; mtx_3 = _mm256_load_pd(mtx_val+start2); vec_3 = _mm256_set_pd(0,vec_val[col_idx[s2]],vec_val[col_idx[s1]],vec_val[col_idx[start2]]); mtx_ans_1 = _mm256_fmadd_pd(mtx_3,vec_3,mtx_ans_1); mtx_ans_1 = _mm256_hadd_pd(mtx_ans_1,mtx_ans_1); answer = mtx_ans_1[0] + mtx_ans_1[2]; return answer; } } } } } inline DOU SIMD_fast2(INT start1,INT num,INT * __restrict__ row_ptr,INT * __restrict__ col_idx,DOU * __restrict__ mtx_val,DOU * __restrict__ vec_val) { register DOU answer; switch(num) { case 0 : return 0; case 1 : { answer = mtx_val[start1] * vec_val[col_idx[start1]]; return answer; } case 2 : { register INT s1; s1 = start1 + 1; answer = mtx_val[start1] * vec_val[col_idx[start1]] + mtx_val[s1] * vec_val[col_idx[s1]]; return answer; } case 3 : { register SSE_DOU mtx_3 , vec_3 , ans_3 , mtx_3_1 , vec_3_1; register INT s1,s2; s1 = start1 + 1; s2 = start1 + 2; mtx_3 = _mm_load_pd(mtx_val+start1); mtx_3_1 = _mm_load_pd(mtx_val+s2); vec_3 = _mm_set_pd(vec_val[col_idx[s1]],vec_val[col_idx[start1]]); vec_3_1 = _mm_set_pd(0,vec_val[col_idx[s2]]); ans_3 = _mm_fmadd_pd(mtx_3_1,vec_3_1,_mm_mul_pd(mtx_3,vec_3)); answer = ans_3[0] + ans_3[1]; return answer; } } } inline DOU calculation(INT start1,INT num,INT * __restrict row_ptr,INT * 
__restrict col_idx,DOU * __restrict mtx_val,DOU * __restrict vec_val) { if(num>=4) { return SIMD_fast1(start1,num,row_ptr,col_idx,mtx_val,vec_val); } else { return SIMD_fast2(start1,num,row_ptr,col_idx,mtx_val,vec_val); } } inline void thread_block(INT thread_id,INT start,INT end,INT start2,INT end2,INT * __restrict row_ptr,INT * __restrict col_idx,DOU * __restrict mtx_val,DOU * __restrict mtx_ans,DOU * __restrict mid_ans,DOU * __restrict vec_val) { register INT start1,end1,num,Thread,i; register DOU sum; switch(start < end) { case true: { mtx_ans[start] = 0.0; mtx_ans[end] = 0.0; start1 = row_ptr[start] + start2; start++; end1 = row_ptr[start]; num = end1 - start1; Thread = thread_id<<1; mid_ans[Thread] = calculation(start1,num,row_ptr,col_idx,mtx_val,vec_val); start1 = end1; #pragma simd for(i=start;i<end;++i) { end1 = row_ptr[i+1]; num = end1 - start1; sum = calculation(start1,num,row_ptr,col_idx,mtx_val,vec_val); mtx_ans[i] = sum; start1 = end1; } start1 = row_ptr[end]; end1 = start1 + end2; mid_ans[Thread | 1] = calculation(start1,end2,row_ptr,col_idx,mtx_val,vec_val); return ; } default : { mtx_ans[start] = 0.0; Thread = thread_id<<1; start1 = row_ptr[start] + start2; num = end2 - start2; mid_ans[Thread] = calculation(start1,num,row_ptr,col_idx,mtx_val,vec_val); mid_ans[Thread | 1] = 0.0; return ; } } } inline INT binary_search(INT *&row_ptr,INT num,INT end) { INT l,r,h,t=0; l=0,r=end; while(l<=r) { h = (l+r)>>1; if(row_ptr[h]>=num) { r=h-1; } else { l=h+1; t=h; } } return t; } inline void albus_balance(INT *&row_ptr,INT *&par_set,INT *&start,INT *&end,INT *&start1,INT *&end1,DOU *&mid_ans,INT thread_nums) { register int tmp; start[0] = 0; start1[0] = 0; end[thread_nums-1] = par_set[0]; end1[thread_nums-1] = 0; INT tt=par_set[2]/thread_nums; for(INT i=1;i<thread_nums;i++) { tmp=tt*i; start[i] = binary_search(row_ptr,tmp,par_set[0]); start1[i] = tmp - row_ptr[start[i]]; end[i-1] = start[i]; end1[i-1] = start1[i]; } } inline void SPMV_DOU(INT * __restrict row_ptr,INT * __restrict col_idx,DOU * __restrict mtx_val,INT * __restrict par_set,DOU * __restrict mtx_ans,DOU * __restrict vec_val,INT * __restrict start,INT * __restrict end,INT * __restrict start1,INT * __restrict end1,DOU * __restrict mid_ans, INT thread_nums) { register INT i; #pragma omp parallel private(i) { #pragma omp for schedule(static) nowait for(i=0;i<thread_nums;++i) { thread_block(i,start[i],end[i],start1[i],end1[i],row_ptr,col_idx,mtx_val,mtx_ans,mid_ans,vec_val); } } mtx_ans[0] = mid_ans[0]; INT sub; #pragma unroll(32) for(i=1;i<thread_nums;++i) { sub = i<<1; register INT tmp1 = start[i]; register INT tmp2 = end[i-1]; if(tmp1 == tmp2) { mtx_ans[tmp1] += (mid_ans[sub-1] + mid_ans[sub]); } else { mtx_ans[tmp1] += mid_ans[sub]; mtx_ans[tmp2] += mid_ans[sub-1]; } } }
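albus_balance() splits the nonzeros evenly across threads by binary-searching row_ptr, and SPMV_DOU() stitches the two partial sums stored per thread boundary in mid_ans back into mtx_ans. A usage sketch on a tiny CSR matrix; reading par_set[0] as the row count and par_set[2] as the nonzero count is an inference from albus_balance() and flagged as an assumption, as is sufficient alignment of mtx_val for the _mm_load_pd/_mm256_load_pd paths:

// Hedged sketch: y = A*x for a 3x3 CSR matrix with 5 nonzeros.
void example_albus_spmv() {
    INT row_ptr[4] = {0, 2, 3, 5};
    INT col_idx[5] = {0, 1, 1, 0, 2};
    DOU mtx_val[5] = {1, 2, 3, 4, 5};
    DOU vec_val[3] = {1, 1, 1};
    DOU mtx_ans[3] = {0};
    INT par_set[3] = {3, 0, 5};  // rows, (unused here), nonzeros -- assumed
    INT thread_nums = omp_get_max_threads();
    INT start[64], end[64], start1[64], end1[64];  // assumes <= 64 threads
    DOU mid_ans[128];
    INT *rp = row_ptr, *ps = par_set, *s = start, *e = end,
        *s1 = start1, *e1 = end1;
    DOU *mid = mid_ans;  // albus_balance takes reference-to-pointer args
    albus_balance(rp, ps, s, e, s1, e1, mid, thread_nums);
    SPMV_DOU(row_ptr, col_idx, mtx_val, par_set, mtx_ans, vec_val,
             start, end, start1, end1, mid_ans, thread_nums);
    // mtx_ans == {3, 3, 9}
}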
problem.p4.c
//------------------------------------------------------------------------------------------------------------------------------ // Samuel Williams // [email protected] // Lawrence Berkeley National Lab //------------------------------------------------------------------------------------------------------------------------------ void evaluateBeta(double x, double y, double z, double *B, double *Bx, double *By, double *Bz){ double Bmin = 1.0; double Bmax = 10.0; double c2 = (Bmax-Bmin)/2; // coefficients to affect this transition double c1 = (Bmax+Bmin)/2; double c3 = 10.0; // how sharply (B)eta transitions double xcenter = 0.50; double ycenter = 0.50; double zcenter = 0.50; // calculate distance from center of the domain (0.5,0.5,0.5) double r2 = pow((x-xcenter),2) + pow((y-ycenter),2) + pow((z-zcenter),2); double r2x = 2.0*(x-xcenter); double r2y = 2.0*(y-ycenter); double r2z = 2.0*(z-zcenter); //double r2xx = 2.0; //double r2yy = 2.0; //double r2zz = 2.0; double r = pow(r2,0.5); double rx = 0.5*r2x*pow(r2,-0.5); double ry = 0.5*r2y*pow(r2,-0.5); double rz = 0.5*r2z*pow(r2,-0.5); //double rxx = 0.5*r2xx*pow(r2,-0.5) - 0.25*r2x*r2x*pow(r2,-1.5); //double ryy = 0.5*r2yy*pow(r2,-0.5) - 0.25*r2y*r2y*pow(r2,-1.5); //double rzz = 0.5*r2zz*pow(r2,-0.5) - 0.25*r2z*r2z*pow(r2,-1.5); //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - *B = c1+c2*tanh( c3*(r-0.25) ); *Bx = c2*c3*rx*(1-pow(tanh( c3*(r-0.25) ),2)); *By = c2*c3*ry*(1-pow(tanh( c3*(r-0.25) ),2)); *Bz = c2*c3*rz*(1-pow(tanh( c3*(r-0.25) ),2)); } //------------------------------------------------------------------------------------------------------------------------------ void evaluateU(double x, double y, double z, double *U, double *Ux, double *Uy, double *Uz, double *Uxx, double *Uyy, double *Uzz, int isPeriodic){ // should be continuous in u, u', and u'' // v(w) = w^4 - 2w^3 + w^2 + c // u(x,y,z) = v(x)v(y)v(z) // If Periodic, then the integral of the RHS should sum to zero. // Setting shift=1/30 should ensure that the integrals of X, Y, or Z should sum to zero... // That should(?) 
make the integrals of u,ux,uy,uz,uxx,uyy,uzz sum to zero and thus make the integral of f sum to zero // If dirichlet, then w(0)=w(1) = 0.0 // Setting shift to 0 should ensure that U(x,y,z) = 0 on boundary double shift = 0.0;if(isPeriodic)shift= -1.0/30.0; double X = 1.0*pow(x,4) - 2.0*pow(x,3) + 1.0*pow(x,2) + shift; double Y = 1.0*pow(y,4) - 2.0*pow(y,3) + 1.0*pow(y,2) + shift; double Z = 1.0*pow(z,4) - 2.0*pow(z,3) + 1.0*pow(z,2) + shift; double Xx = 4.0*pow(x,3) - 6.0*pow(x,2) + 2.0*x; double Yy = 4.0*pow(y,3) - 6.0*pow(y,2) + 2.0*y; double Zz = 4.0*pow(z,3) - 6.0*pow(z,2) + 2.0*z; double Xxx = 12.0*pow(x,2) - 12.0*x + 2.0; double Yyy = 12.0*pow(y,2) - 12.0*y + 2.0; double Zzz = 12.0*pow(z,2) - 12.0*z + 2.0; *U = X*Y*Z; *Ux = Xx*Y*Z; *Uy = X*Yy*Z; *Uz = X*Y*Zz; *Uxx = Xxx*Y*Z; *Uyy = X*Yyy*Z; *Uzz = X*Y*Zzz; } //------------------------------------------------------------------------------------------------------------------------------ void initialize_problem(level_type * level, double hLevel, double a, double b){ level->h = hLevel; int box; for(box=0;box<level->num_my_boxes;box++){ box_type *lbox = &level->my_boxes[box]; memset(lbox->vectors[VECTOR_ALPHA ].get(),0,lbox->volume*sizeof(double)); memset(lbox->vectors[VECTOR_BETA_I].get(),0,lbox->volume*sizeof(double)); memset(lbox->vectors[VECTOR_BETA_J].get(),0,lbox->volume*sizeof(double)); memset(lbox->vectors[VECTOR_BETA_K].get(),0,lbox->volume*sizeof(double)); memset(lbox->vectors[VECTOR_UTRUE ].get(),0,lbox->volume*sizeof(double)); memset(lbox->vectors[VECTOR_F ].get(),0,lbox->volume*sizeof(double)); int i,j,k; const int jStride = lbox->jStride; const int kStride = lbox->kStride; const int ghosts = lbox->ghosts; const int dim_i = lbox->dim; const int dim_j = lbox->dim; const int dim_k = lbox->dim; // #pragma omp parallel for private(k,j,i) collapse(3) hclib::finish([&] { hclib::loop_domain_3d loop(dim_k, dim_j, dim_i); hclib::forasync3D_nb(&loop, [=] (int k, int j, int i) { //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - // FIX... move to quadrature version to initialize the problem. // i.e. the value of an array element is the average value of the function over the cell (finite volume) //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - int ijk = (i+ghosts) + (j+ghosts)*jStride + (k+ghosts)*kStride; double x = hLevel*( (double)(i+level->my_boxes[box].get().low.i) + 0.5 ); // +0.5 to get to the center of cell double y = hLevel*( (double)(j+level->my_boxes[box].get().low.j) + 0.5 ); double z = hLevel*( (double)(k+level->my_boxes[box].get().low.k) + 0.5 ); double A,B,Bx,By,Bz,Bi,Bj,Bk; double U,Ux,Uy,Uz,Uxx,Uyy,Uzz; //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - A = 1.0; B = 1.0; Bx = 0.0; By = 0.0; Bz = 0.0; Bi = 1.0; Bj = 1.0; Bk = 1.0; #ifdef STENCIL_VARIABLE_COEFFICIENT // variable coefficient problem...
evaluateBeta(x-hLevel*0.5,y ,z ,&Bi,&Bx,&By,&Bz); // face-centered value of Beta for beta_i evaluateBeta(x ,y-hLevel*0.5,z ,&Bj,&Bx,&By,&Bz); // face-centered value of Beta for beta_j evaluateBeta(x ,y ,z-hLevel*0.5,&Bk,&Bx,&By,&Bz); // face-centered value of Beta for beta_k evaluateBeta(x ,y ,z ,&B ,&Bx,&By,&Bz); // cell-centered value of Beta #endif //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - evaluateU(x,y,z,&U,&Ux,&Uy,&Uz,&Uxx,&Uyy,&Uzz, (level->boundary_condition.type == BC_PERIODIC) ); double F = a*A*U - b*( (Bx*Ux + By*Uy + Bz*Uz) + B*(Uxx + Uyy + Uzz) ); //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - level->my_boxes[box].get().vectors[VECTOR_BETA_I][ijk] = Bi; level->my_boxes[box].get().vectors[VECTOR_BETA_J][ijk] = Bj; level->my_boxes[box].get().vectors[VECTOR_BETA_K][ijk] = Bk; level->my_boxes[box].get().vectors[VECTOR_ALPHA ][ijk] = A; level->my_boxes[box].get().vectors[VECTOR_UTRUE ][ijk] = U; level->my_boxes[box].get().vectors[VECTOR_F ][ijk] = F; //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - }); }); } // quick test for Poisson... if(level->alpha_is_zero==-1)level->alpha_is_zero = (dot(level,VECTOR_ALPHA,VECTOR_ALPHA) == 0.0); } //------------------------------------------------------------------------------------------------------------------------------
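// A minimal standalone sketch (added for illustration; not part of the
// original benchmark, and the tolerances are my own) that cross-checks the
// analytic derivatives hard-coded in evaluateU() against centered finite
// differences, and confirms the Dirichlet requirement v(0) = v(1) = 0 for
// v(w) = w^4 - 2w^3 + w^2.
#include <math.h>
static double v_poly(double w)  { return pow(w,4) - 2.0*pow(w,3) + pow(w,2); }
static double v_poly1(double w) { return 4.0*pow(w,3) - 6.0*pow(w,2) + 2.0*w; }
static double v_poly2(double w) { return 12.0*pow(w,2) - 12.0*w + 2.0; }
static int check_evaluateU_derivatives(void) {
  const double h = 1e-5;
  double w;
  for (w = 0.1; w < 0.95; w += 0.1) {
    double fd1 = (v_poly(w+h) - v_poly(w-h)) / (2.0*h);               // ~ v'(w)
    double fd2 = (v_poly(w+h) - 2.0*v_poly(w) + v_poly(w-h)) / (h*h); // ~ v''(w)
    if (fabs(fd1 - v_poly1(w)) > 1e-6) return 0;
    if (fabs(fd2 - v_poly2(w)) > 1e-4) return 0;
  }
  return (v_poly(0.0) == 0.0) && (v_poly(1.0) == 0.0); // Dirichlet: w(0)=w(1)=0
}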
GB_unaryop__ainv_bool_int8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_bool_int8
// op(A') function:  GB_tran__ainv_bool_int8

// C type:   bool
// A type:   int8_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    bool z = (bool) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_bool_int8
(
    bool *Cx,       // Cx and Ax may be aliased
    int8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_bool_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
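// Hand-expanded sketch (my own illustration, not part of the Generated/ file)
// of what GB_unop__ainv_bool_int8 amounts to once GB_CAST_OP is expanded:
// load each int8_t entry, cast it to bool, apply the op, and store it, with
// one static OpenMP chunk per thread.
#include <stdbool.h>
#include <stdint.h>
void unop_ainv_bool_int8_expanded(bool *Cx, const int8_t *Ax,
                                  int64_t anz, int nthreads)
{
    int64_t p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++)
    {
        int8_t aij = Ax[p];       /* GB_GETA    */
        bool   z   = (bool) aij;  /* GB_CASTING */
        Cx[p] = z;                /* GB_OP      */
    }
}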
nx_affine_warp_processor.c
/** * @file nx_affine_warp_processor.c * * This file is part of the IYTE Visual Intelligence Research Group Software Library * * Copyright (C) 2015 Mustafa Ozuysal. All rights reserved. * * @author Mustafa Ozuysal * * Contact [email protected] for comments and bug reports. * */ #include "virg/nexus/nx_affine_warp_processor.h" #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #ifdef USE_SSE # include <smmintrin.h> #endif #ifdef _OPENMP # include <omp.h> #else # define omp_get_thread_num() 0 # define omp_get_max_threads() 1 #endif #include <virg/nexus/nx_assert.h> #include <virg/nexus/nx_alloc.h> #include <virg/nexus/nx_filter.h> #include <virg/nexus/nx_image.h> #include <virg/nexus/nx_math.h> #include <virg/nexus/nx_uniform_sampler.h> #define MIN(a,b) (((a) < (b)) ? (a) : (b)) #define MAX(a,b) (((a) > (b)) ? (a) : (b)) enum NXAWPBgMode { NX_AWP_BG_FIXED = 0, NX_AWP_BG_REPEAT = 1, NX_AWP_BG_NOISE = 2 }; struct NXAffineWarpProcessor { const struct NXImage *image; struct NXImage *skew_rotation_buffer; struct NXImage *subsample_buffer; struct NXImage *result_buffer; double forward_t[6]; double inverse_t[6]; enum NXAWPBgMode bg_mode; uchar bg_color; float post_blur_sigma; }; static inline void nx_affine_warp_processor_reset_transforms(struct NXAffineWarpProcessor *wp); static inline void nx_affine_warp_processor_update_transforms(struct NXAffineWarpProcessor *wp, const double *center_in, const double *center_out, double scale_x, double scale_y, double angle); static inline void nx_affine_warp_processor_update_inverse_transform(struct NXAffineWarpProcessor *wp, const double *center_in, const double *center_out, double scale_x, double scale_y, double angle); static inline void nx_affine_warp_processor_update_forward_transform(struct NXAffineWarpProcessor *wp, const double *center_in, const double *center_out, double scale_x, double scale_y, double angle); static inline void nx_affine_warp_processor_transformed_buffer_size(struct NXAffineWarpProcessor *wp, int *width, int *height, double scale_x, double scale_y, double angle, const double *current_t); static inline void nx_affine_warp_processor_resize_buffers(struct NXAffineWarpProcessor *wp, double scale, double planar_angle, double tilt, double tilt_angle); static inline void nx_affine_warp_processor_resize_skew_rotation_buffer(struct NXAffineWarpProcessor *wp, double tilt_angle); static inline void nx_affine_warp_processor_resize_subsample_buffer(struct NXAffineWarpProcessor *wp, double tilt); static inline void nx_affine_warp_processor_resize_result_buffer(struct NXAffineWarpProcessor *wp, double scale, double planar_angle); static inline void nx_affine_warp_processor_resize_buffer(struct NXAffineWarpProcessor *wp, const struct NXImage *in_buffer, struct NXImage *out_buffer, double scale_x, double scale_y, double angle); static inline void compute_skew_rotation_buffer(const struct NXImage* img, struct NXImage* buffer, float tilt_angle, float tilt); static inline void compute_subsample_buffer(struct NXImage* in_buffer, struct NXImage* out_buffer, float tilt, float scale); static inline void compute_result_buffer(struct NXImage* in_buffer, struct NXImage* res_buffer, float scale, float planar_angle, float post_blur_sigma); static inline void fill_warp_buffer_bg(const struct NXImage* image, struct NXImage* warp_buffer, float t0, float t1, float t2, float t3, float t4, float t5, enum NXAWPBgMode bg_mode, int bg_color); struct NXAffineWarpProcessor *nx_affine_warp_processor_new() { struct NXAffineWarpProcessor *wp = 
NX_NEW(1, struct NXAffineWarpProcessor); wp->image = NULL; wp->skew_rotation_buffer = nx_image_alloc(); wp->subsample_buffer = nx_image_alloc(); wp->result_buffer = nx_image_alloc(); nx_affine_warp_processor_reset_transforms(wp); wp->bg_mode = NX_AWP_BG_NOISE; wp->bg_color = 0; wp->post_blur_sigma = 0.0f; return wp; } void nx_affine_warp_processor_free(struct NXAffineWarpProcessor *wp) { if (wp) { nx_image_free(wp->skew_rotation_buffer); nx_image_free(wp->subsample_buffer); nx_image_free(wp->result_buffer); nx_free(wp); } } void nx_affine_warp_processor_warp(struct NXAffineWarpProcessor *wp, const struct NXImage *img, struct NXAffineWarpParam param) { NX_ASSERT_PTR(wp); NX_ASSERT_PTR(img); NX_ASSERT_PTR(img->data); NX_IMAGE_ASSERT_GRAYSCALE(img); wp->image = img; nx_affine_warp_processor_resize_buffers(wp, param.scale, param.planar_angle, param.tilt, param.tilt_angle); compute_skew_rotation_buffer(img, wp->skew_rotation_buffer, param.tilt_angle, param.tilt); compute_subsample_buffer(wp->skew_rotation_buffer, wp->subsample_buffer, param.tilt, param.scale); compute_result_buffer(wp->subsample_buffer, wp->result_buffer, param.scale, param.planar_angle, wp->post_blur_sigma); double *t = &wp->inverse_t[0]; fill_warp_buffer_bg(img, wp->result_buffer, t[0], t[1], t[2], t[3], t[4], t[5], wp->bg_mode, wp->bg_color); } const struct NXImage *nx_affine_warp_processor_warp_result(struct NXAffineWarpProcessor *wp) { NX_ASSERT_PTR(wp); return wp->result_buffer; } const double *nx_affine_warp_processor_forward_transform(struct NXAffineWarpProcessor *wp) { NX_ASSERT_PTR(wp); return &wp->forward_t[0]; } const double *nx_affine_warp_processor_inverse_transform(struct NXAffineWarpProcessor *wp) { NX_ASSERT_PTR(wp); return &wp->inverse_t[0]; } void nx_affine_warp_processor_set_bg_fixed(struct NXAffineWarpProcessor *wp, uchar bg_color) { NX_ASSERT_PTR(wp); wp->bg_mode = NX_AWP_BG_FIXED; wp->bg_color = bg_color; } void nx_affine_warp_processor_set_bg_repeat(struct NXAffineWarpProcessor *wp) { NX_ASSERT_PTR(wp); wp->bg_mode = NX_AWP_BG_REPEAT; } void nx_affine_warp_processor_set_bg_noise(struct NXAffineWarpProcessor *wp) { NX_ASSERT_PTR(wp); wp->bg_mode = NX_AWP_BG_NOISE; } void nx_affine_warp_processor_set_post_blur_sigma(struct NXAffineWarpProcessor *wp, float sigma) { NX_ASSERT_PTR(wp); wp->post_blur_sigma = sigma; } static inline void transform_set_identity(double *t) { t[0] = 1.0; t[2] = 0.0; t[4] = 0.0; t[1] = 0.0; t[3] = 1.0; t[5] = 0.0; } static inline void transform_apply(const double *t, double *p) { double xp = p[0]*t[0] + p[1]*t[2] + t[4]; double yp = p[0]*t[1] + p[1]*t[3] + t[5]; p[0] = xp; p[1] = yp; } static inline void transform_combine(double *r, const double *t0, const double *t1) { double r0 = t0[0]*t1[0] + t0[2]*t1[1]; double r1 = t0[1]*t1[0] + t0[3]*t1[1]; double r2 = t0[0]*t1[2] + t0[2]*t1[3]; double r3 = t0[1]*t1[2] + t0[3]*t1[3]; double r4 = t0[0]*t1[4] + t0[2]*t1[5] + t0[4]; double r5 = t0[1]*t1[4] + t0[3]*t1[5] + t0[5]; r[0] = r0; r[1] = r1; r[2] = r2; r[3] = r3; r[4] = r4; r[5] = r5; } void nx_affine_warp_processor_reset_transforms(struct NXAffineWarpProcessor *wp) { transform_set_identity(&wp->forward_t[0]); transform_set_identity(&wp->inverse_t[0]); } void nx_affine_warp_processor_update_transforms(struct NXAffineWarpProcessor *wp, const double *center_in, const double *center_out, double scale_x, double scale_y, double angle) { nx_affine_warp_processor_update_forward_transform(wp, center_in, center_out, scale_x, scale_y, angle); nx_affine_warp_processor_update_inverse_transform(wp, 
center_in, center_out, scale_x, scale_y, angle); } void nx_affine_warp_processor_update_forward_transform(struct NXAffineWarpProcessor *wp, const double *center_in, const double *center_out, double scale_x, double scale_y, double angle) { double c = cos(angle); double s = sin(angle); double cx = scale_x*c; double sx = scale_x*s; double cy = scale_y*c; double sy = scale_y*s; double dx = center_out[0] - center_in[0]*cx + center_in[1]*sx; double dy = center_out[1] - center_in[0]*sy - center_in[1]*cy; double stage_t[6] = { cx, sy, -sx, cy, dx, dy }; transform_combine(&wp->forward_t[0], &stage_t[0], &wp->forward_t[0]); } void nx_affine_warp_processor_update_inverse_transform(struct NXAffineWarpProcessor *wp, const double *center_in, const double *center_out, double scale_x, double scale_y, double angle) { double c = cos(angle); double s = sin(angle); double cx = c/scale_x; double sx = s/scale_x; double cy = c/scale_y; double sy = s/scale_y; double dx = center_in[0] - center_out[0]*cx - center_out[1]*sx; double dy = center_in[1] + center_out[0]*sy - center_out[1]*cy; double stage_t[6] = { cx, -sy, sx, cy, dx, dy }; transform_combine(&wp->inverse_t[0], &wp->inverse_t[0], &stage_t[0]); } static inline double min4(double a, double b, double c, double d) { double ab = MIN(a,b); double cd = MIN(c,d); return MIN(ab,cd); } static inline double max4(double a, double b, double c, double d) { double ab = MAX(a,b); double cd = MAX(c,d); return MAX(ab,cd); } void nx_affine_warp_processor_transformed_buffer_size(struct NXAffineWarpProcessor *wp, int *width, int *height, double scale_x, double scale_y, double angle, const double *current_t) { const double BUFFER_BORDER_SIZE = 1.0; double tl[2] = { 0.0, 0.0 }; double tr[2] = { (double)*width, 0.0 }; double bl[2] = { 0.0, (double)*height }; double br[2] = { (double)*width, (double)*height }; transform_apply(current_t, &tl[0]); transform_apply(current_t, &tr[0]); transform_apply(current_t, &bl[0]); transform_apply(current_t, &br[0]); double c = cos(angle); double s = sin(angle); double required_t[6] = { scale_x*c, scale_y*s, -scale_x*s, scale_y*c, 0.0, 0.0 }; transform_apply(required_t, &tl[0]); transform_apply(required_t, &tr[0]); transform_apply(required_t, &bl[0]); transform_apply(required_t, &br[0]); double x_min = min4(tl[0], tr[0], bl[0], br[0]); double x_max = max4(tl[0], tr[0], bl[0], br[0]); double y_min = min4(tl[1], tr[1], bl[1], br[1]); double y_max = max4(tl[1], tr[1], bl[1], br[1]); double rw = x_max - x_min + 2*BUFFER_BORDER_SIZE; double rh = y_max - y_min + 2*BUFFER_BORDER_SIZE; *width = (int)rw; *height = (int)rh; } void nx_affine_warp_processor_resize_buffers(struct NXAffineWarpProcessor *wp, double scale, double planar_angle, double tilt, double tilt_angle) { nx_affine_warp_processor_reset_transforms(wp); nx_affine_warp_processor_resize_skew_rotation_buffer(wp, tilt_angle); nx_affine_warp_processor_resize_subsample_buffer(wp, tilt); nx_affine_warp_processor_resize_result_buffer(wp, scale, planar_angle); /* fprintf(stderr, "Buffer sizes\n%d %d\n%d %d\n%d %d\n", */ /* wp->skew_rotation_buffer->width, wp->skew_rotation_buffer->height, */ /* wp->subsample_buffer->width, wp->subsample_buffer->height, */ /* wp->result_buffer->width, wp->result_buffer->height); */ } void nx_affine_warp_processor_resize_skew_rotation_buffer(struct NXAffineWarpProcessor *wp, double tilt_angle) { nx_affine_warp_processor_resize_buffer(wp, wp->image, wp->skew_rotation_buffer, 1.0, 1.0, tilt_angle); } void nx_affine_warp_processor_resize_subsample_buffer(struct 
NXAffineWarpProcessor *wp, double tilt) { nx_affine_warp_processor_resize_buffer(wp, wp->skew_rotation_buffer, wp->subsample_buffer, 1.0 / tilt, 1.0, 0.0); } void nx_affine_warp_processor_resize_result_buffer(struct NXAffineWarpProcessor *wp, double scale, double planar_angle) { nx_affine_warp_processor_resize_buffer(wp, wp->subsample_buffer, wp->result_buffer, scale, scale, planar_angle); } void nx_affine_warp_processor_resize_buffer(struct NXAffineWarpProcessor *wp, const struct NXImage *in_buffer, struct NXImage *out_buffer, double scale_x, double scale_y, double angle) { int wi = in_buffer->width; int hi = in_buffer->height; int wo = wp->image->width; int ho = wp->image->height; nx_affine_warp_processor_transformed_buffer_size(wp, &wo, &ho, scale_x, scale_y, angle, wp->forward_t); nx_image_resize(out_buffer, wo, ho, -1, NX_IMAGE_GRAYSCALE); nx_image_set_zero(out_buffer); double center_in[2] = { wi/2.0, hi/2.0 }; double center_out[2] = { wo/2.0, ho/2.0 }; nx_affine_warp_processor_update_transforms(wp, &center_in[0], &center_out[0], scale_x, scale_y, angle); } inline static void fill_inverse_transform(float* t, float cx_in, float cy_in, float cx_out, float cy_out, float scale_x, float scale_y, float angle); static void warp_buffer_affine_bilinear(const struct NXImage* in_buffer, struct NXImage* out_buffer, const float* t); static void warp_processor_blur_s(struct NXImage *image, float sigma_x, float sigma_y); void compute_skew_rotation_buffer(const struct NXImage* img, struct NXImage* buffer, float tilt_angle, float tilt) { int iw = img->width; int ih = img->height; int bw = buffer->width; int bh = buffer->height; float t[9]; fill_inverse_transform(t, iw/2.0f, ih/2.0f, bw/2.0f, bh/2.0f, 1.0f, 1.0f, tilt_angle); warp_buffer_affine_bilinear(img, buffer, t); float sigma_x = 0.8 * sqrt(tilt*tilt-1); warp_processor_blur_s(buffer, sigma_x, 0.0f); } void compute_subsample_buffer(struct NXImage* in_buffer, struct NXImage* out_buffer, float tilt, float scale) { int iw = in_buffer->width; int ih = in_buffer->height; int ow = out_buffer->width; int oh = out_buffer->height; float t[9]; fill_inverse_transform(t, iw/2.0f, ih/2.0f, ow/2.0f, oh/2.0f, 1.0f/tilt, 1.0f, 0.0f); warp_buffer_affine_bilinear(in_buffer, out_buffer, t); if (scale > 1.0) { float sigma = 0.8 * sqrt(scale*scale-1); warp_processor_blur_s(out_buffer, sigma, sigma); } } void compute_result_buffer(struct NXImage* in_buffer, struct NXImage* res_buffer, float scale, float planar_angle, float post_blur_sigma) { int iw = in_buffer->width; int ih = in_buffer->height; int rw = res_buffer->width; int rh = res_buffer->height; float t[9]; fill_inverse_transform(t, iw/2.0f, ih/2.0f, rw/2.0f, rh/2.0f, scale, scale, planar_angle); warp_buffer_affine_bilinear(in_buffer, res_buffer, t); warp_processor_blur_s(res_buffer, post_blur_sigma, post_blur_sigma); } inline static void fill_inverse_transform(float* t, float cx_in, float cy_in, float cx_out, float cy_out, float scale_x, float scale_y, float angle) { float c = cos(angle); float s = sin(angle); float cx = c/scale_x; float sx = s/scale_x; float cy = c/scale_y; float sy = s/scale_y; float dx = cx_in - cx_out*cx - cy_out*sx; float dy = cy_in + cx_out*sy - cy_out*cy; t[0] = cx; t[1] = -sy; t[2] = 0.0f; t[3] = sx; t[4] = cy; t[5] = 0.0f; t[6] = dx; t[7] = dy; t[8] = 1.0f; } static void warp_buffer_affine_bilinear(const struct NXImage* in_buffer, struct NXImage* out_buffer, const float* t) { #ifdef USE_SSE const float LAST_X = in_buffer->width - 2; const float LAST_Y = in_buffer->height - 2; __m128 
BOUNDS = _mm_set_ps(LAST_Y, 0.0f, LAST_X, 0.0f); __m128 ONES = _mm_set1_ps(1.0f); __m128 T01 = _mm_set_ps(0.0f, 0.0f, t[1], t[0]); __m128 T34 = _mm_set_ps(0.0f, 0.0f, t[4], t[3]); __m128 T67 = _mm_set_ps(0.0f, 0.0f, t[7], t[6]); int32_t pixels[4] __attribute__ ((aligned (16))); #ifdef _OPENMP #pragma omp parallel for private(pixels) schedule(dynamic, 4) #endif for (int y = 0; y < out_buffer->height; ++y) { uchar *drow = out_buffer->data + y*out_buffer->row_stride; __m128 XY = _mm_add_ps(T67, _mm_mul_ps(T34, _mm_set1_ps(y))); for (int x = 0; x < out_buffer->width; ++x, XY = _mm_add_ps(XY, T01)) { __m128 XPYP = _mm_unpacklo_ps(XY, XY); __m128 XPIYPI = _mm_floor_ps(XPYP); __m128 C1 = _mm_blend_ps(XPIYPI, BOUNDS, 10); __m128 C2 = _mm_blend_ps(XPIYPI, BOUNDS, 5); int inside = _mm_test_all_ones(_mm_castps_si128(_mm_cmpge_ps(C1, C2))); if (!inside) { continue; } __m128 XMYM = _mm_sub_ps(XPYP, XPIYPI); __m128 ONEMXY = _mm_sub_ps(ONES, XMYM); __m128 WEIGHTS_Y = _mm_movehl_ps(XMYM, ONEMXY); // 1-(y-yi) 1-(y-yi) (y-yi) (y-yi) __m128 WEIGHTS_X = _mm_castpd_ps(_mm_movedup_pd(_mm_castps_pd(_mm_move_ss(ONEMXY, XMYM)))); // 1-(x-xi) (x-xi) 1-(x-xi) (x-xi) int xpi = _mm_cvt_ss2si(XPIYPI); int ypi = _mm_cvt_ss2si(_mm_shuffle_ps(XPIYPI, XPIYPI, 0x0002)); const uchar *p0 = in_buffer->data + ypi*in_buffer->row_stride + xpi; const uchar *p1 = p0 + in_buffer->row_stride; pixels[0] = p0[0]; pixels[1] = p0[1]; pixels[2] = p1[0]; pixels[3] = p1[1]; __m128 PIXELS = _mm_cvtepi32_ps(_mm_load_si128((const __m128i*)pixels)); __m128 R = _mm_dp_ps(PIXELS, _mm_mul_ps(WEIGHTS_X, WEIGHTS_Y), 0xF1); int I = _mm_cvt_ss2si(R); if (I < 0) I = 0; else if (I > 255) I = 255; drow[x] = I; } } #else const int LAST_X = in_buffer->width - 1; const int LAST_Y = in_buffer->height - 1; #ifdef _OPENMP #pragma omp parallel for schedule(dynamic, 4) #endif for (int y = 0; y < out_buffer->height; ++y) { uchar *drow = out_buffer->data + y*out_buffer->row_stride; float xp = y*t[3] + t[6]; float yp = y*t[4] + t[7]; for (int x = 0; x < out_buffer->width; ++x, xp += t[0], yp += t[1]) { int xpi = xp; int ypi = yp; if (xpi < 0 || xpi >= LAST_X || ypi < 0 || ypi >= LAST_Y) { continue; } const uchar *p0 = in_buffer->data + ypi*in_buffer->row_stride + xpi; const uchar *p1 = p0 + in_buffer->row_stride; float u = xp-xpi; float v = yp-ypi; float up = 1.0f - u; float vp = 1.0f - v; int I = vp*(up*p0[0] + u*p0[1]) + v*(up*p1[0] + u*p1[1]); if (I < 0) I = 0; else if (I > 255) I = 255; drow[x] = I; } } #endif } void fill_warp_buffer_bg(const struct NXImage* image, struct NXImage* warp_buffer, float t0, float t1, float t2, float t3, float t4, float t5, enum NXAWPBgMode bg_mode, int bg_color) { const int LAST_X = image->width - 1; const int LAST_Y = image->height - 1; for (int y = 0; y < warp_buffer->height; ++y) { uchar *drow = warp_buffer->data + y*warp_buffer->row_stride; float xp = y*t2 + t4; float yp = y*t3 + t5; for (int x = 0; x < warp_buffer->width; ++x, xp += t0, yp += t1) { NXBool bg = NX_FALSE; int xpi = xp; int ypi = yp; float u = xp-xpi; float v = yp-ypi; float up = 1.0f - u; float vp = 1.0f - v; int idx[2] = { xpi, xpi + 1}; int idy[2] = { ypi, ypi + 1}; switch (bg_mode) { default: case NX_AWP_BG_FIXED: if (idx[0] <= 0 || idx[1] >= LAST_X || idy[0] <= 0 || idy[1] >= LAST_Y) { drow[x] = bg_color; continue; } break; case NX_AWP_BG_NOISE: if (idx[0] <= 0 || idx[1] >= LAST_X || idy[0] <= 0 || idy[1] >= LAST_Y) { drow[x] = 255.0f * NX_UNIFORM_SAMPLE_S; continue; } break; case NX_AWP_BG_REPEAT: if (idx[0] <= 0) { idx[0] = 0; idx[1] = 0; bg = NX_TRUE; } 
else if (idx[1] >= LAST_X) { idx[0] = LAST_X; idx[1] = LAST_X; bg = NX_TRUE; } if (idy[0] <= 0) { idy[0] = 0; idy[1] = 0; bg = NX_TRUE; } else if (idy[1] >= LAST_Y) { idy[0] = LAST_Y; idy[1] = LAST_Y; bg = NX_TRUE; } break; } if (bg) { const uchar *p0 = image->data + idy[0]*image->row_stride; const uchar *p1 = image->data + idy[1]*image->row_stride; int I = vp*(up*p0[idx[0]] + u*p0[idx[1]]) + v*(up*p1[idx[0]] + u*p1[idx[1]]); if (I < 0) I = 0; else if (I > 255) I = 255; drow[x] = I; } } } } static void convolve_sym_s_uc(int n, uchar *data, int n_k, const float *kernel) { #ifdef USE_SSE int i = 0; __m128i Z = _mm_setzero_si128(); for (; i < n-7; i += 8) { __m128 K = _mm_set1_ps(kernel[0]); uchar* dk0 = data + i + n_k - 1; __m128i DK0 = _mm_loadl_epi64((const __m128i*)dk0); __m128i B = _mm_unpacklo_epi8(DK0, Z); // 8x16 bit integers @ dk0 __m128 S0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(B, Z)); S0 = _mm_mul_ps(S0, K); __m128 S1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(B, Z)); S1 = _mm_mul_ps(S1, K); for (int k = 1; k < n_k; ++k) { K = _mm_set1_ps(kernel[k]); __m128i DM = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(dk0-k)), Z); __m128i DP = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(dk0+k)), Z); __m128i DMP = _mm_add_epi16(DM, DP); S0 = _mm_add_ps(S0, _mm_mul_ps(K, _mm_cvtepi32_ps(_mm_unpacklo_epi16(DMP, Z)))); S1 = _mm_add_ps(S1, _mm_mul_ps(K, _mm_cvtepi32_ps(_mm_unpackhi_epi16(DMP, Z)))); } __m128i R0 = _mm_cvtps_epi32(S0); __m128i R1 = _mm_cvtps_epi32(S1); __m128i R = _mm_packus_epi16(_mm_packus_epi32(R0, R1), Z); _mm_storel_epi64((__m128i*)(data+i), R); } for (; i < n; ++i) { uchar* dk0 = data + i + n_k - 1; float sum = kernel[0] * *dk0; for (int k = 1; k < n_k; ++k) { sum += kernel[k] * (dk0[-k] + dk0[+k]); } data[i] = sum; } #else for (int i = 0; i < n; ++i) { uchar* dk0 = data + i + n_k - 1; float sum = kernel[0] * *dk0; for (int k = 1; k < n_k; ++k) { sum += kernel[k] * (dk0[-k] + dk0[+k]); } data[i] = sum; } #endif } static const double BLUR_KERNEL_LOSS = 0.003; static void warp_processor_blur_s(struct NXImage *image, float sigma_x, float sigma_y) { const int N_THREADS = omp_get_max_threads(); uchar *buffers[N_THREADS]; for (int i = 0; i < N_THREADS; ++i) buffers[i] = nx_image_filter_buffer_alloc(image->width, image->height, sigma_x, sigma_y); int nkx = nx_kernel_size_min_gaussian(sigma_x, BLUR_KERNEL_LOSS); int nky = nx_kernel_size_min_gaussian(sigma_y, BLUR_KERNEL_LOSS); int nk_max = nx_max_i(nkx, nky); int nk_sym = nk_max / 2 + 1; float *kernel = NX_NEW_S(nk_sym); // Smooth in x-direction int nk = nkx / 2 + 1; nx_kernel_sym_gaussian_s(nk, kernel, sigma_x); #ifdef _OPENMP #pragma omp parallel for schedule(dynamic, 4) #endif for (int y = 0; y < image->height; ++y) { uchar *buffer = buffers[omp_get_thread_num()]; uchar *image_row = image->data + y * image->row_stride; nx_filter_copy_to_buffer1_uc(image->width, buffer, image_row, nkx / 2, NX_BORDER_MIRROR); convolve_sym_s_uc(image->width, buffer, nk, kernel); memcpy(image_row, buffer, image->width * sizeof(uchar)); } // Smooth in y-direction nk = nky / 2 + 1; nx_kernel_sym_gaussian_s(nk, kernel, sigma_y); #ifdef _OPENMP #pragma omp parallel for schedule(dynamic, 4) #endif for (int x = 0; x < image->width; ++x) { uchar *buffer = buffers[omp_get_thread_num()]; uchar *image_col = image->data + x; nx_filter_copy_to_buffer_uc(image->height, buffer, image_col, image->row_stride, nky / 2, NX_BORDER_MIRROR); convolve_sym_s_uc(image->height, buffer, nk, kernel); for (int y = 0; y < image->height; ++y) image_col[y*image->row_stride] = 
buffer[y]; } nx_free(kernel); for (int i = 0; i < N_THREADS; ++i) nx_free(buffers[i]); }
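// Standalone sketch (mine, not from the library) using the same 6-element
// affine layout as transform_apply()/transform_combine() above, i.e.
// x' = t[0]*x + t[2]*y + t[4] and y' = t[1]*x + t[3]*y + t[5]. It checks
// that a forward scale+rotation composed with its analytic inverse returns
// a test point to where it started.
#include <math.h>
static void demo_apply(const double *t, double *p) {
  double xp = p[0]*t[0] + p[1]*t[2] + t[4];
  double yp = p[0]*t[1] + p[1]*t[3] + t[5];
  p[0] = xp; p[1] = yp;
}
static double demo_roundtrip_error(double s, double a) {
  double c = cos(a), sn = sin(a);
  double fwd[6] = { s*c,  s*sn, -s*sn, s*c, 0.0, 0.0 }; /* s * R(a)  */
  double inv[6] = { c/s, -sn/s,  sn/s, c/s, 0.0, 0.0 }; /* R(-a) / s */
  double p[2] = { 1.5, -0.7 };
  demo_apply(fwd, p);
  demo_apply(inv, p);
  return fabs(p[0] - 1.5) + fabs(p[1] + 0.7); /* ~ 0 up to roundoff */
}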
data_utils.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #ifndef VECTORSEARCH_VSEARCH_DATA_UTILS_H_ #define VECTORSEARCH_VSEARCH_DATA_UTILS_H_ #include <sys/stat.h> #include <atomic> #include "common_helper.h" #include "common_utils.h" namespace vsearch { namespace COMMON { const int bufsize = 1024 * 1024 * 1024; class DataUtils { public: template <typename T> static void ProcessTSVData(int id, int threadbase, std::uint64_t blocksize, std::string filename, std::string outfile, std::string outmetafile, std::string outmetaindexfile, std::atomic_int &numSamples, int &D, DistCalcMethod distCalcMethod) { std::ifstream inputStream(filename); if (!inputStream.is_open()) { std::cerr << "unable to open file " + filename << std::endl; throw MyException("unable to open file " + filename); exit(1); } std::ofstream outputStream, metaStream_out, metaStream_index; outputStream.open(outfile + std::to_string(id + threadbase), std::ofstream::binary); metaStream_out.open(outmetafile + std::to_string(id + threadbase), std::ofstream::binary); metaStream_index.open(outmetaindexfile + std::to_string(id + threadbase), std::ofstream::binary); if (!outputStream.is_open() || !metaStream_out.is_open() || !metaStream_index.is_open()) { std::cerr << "unable to open output file " << outfile << " " << outmetafile << " " << outmetaindexfile << std::endl; throw MyException("unable to open output files"); exit(1); } std::vector<float> arr; std::vector<T> sample; int base = 1; if (distCalcMethod == DistCalcMethod::Cosine) { base = Utils::GetBase<T>(); } std::uint64_t writepos = 0; int sampleSize = 0; std::uint64_t totalread = 0; std::streamoff startpos = id * blocksize; #ifndef _MSC_VER int enter_size = 1; #else int enter_size = 1; #endif std::string currentLine; size_t index; inputStream.seekg(startpos, std::ifstream::beg); if (id != 0) { std::getline(inputStream, currentLine); totalread += currentLine.length() + enter_size; } std::cout << "Begin thread " << id << " begin at:" << (startpos + totalread) << std::endl; while (!inputStream.eof() && totalread <= blocksize) { std::getline(inputStream, currentLine); if (currentLine.length() <= enter_size || (index = Utils::ProcessLine(currentLine, arr, D, base, distCalcMethod)) < 0) { totalread += currentLine.length() + enter_size; continue; } sample.resize(D); for (int j = 0; j < D; j++) sample[j] = (T)arr[j]; outputStream.write((char *)(sample.data()), sizeof(T) * D); metaStream_index.write((char *)&writepos, sizeof(std::uint64_t)); metaStream_out.write(currentLine.c_str(), index); writepos += index; sampleSize += 1; totalread += currentLine.length() + enter_size; } metaStream_index.write((char *)&writepos, sizeof(std::uint64_t)); metaStream_index.write((char *)&sampleSize, sizeof(int)); inputStream.close(); outputStream.close(); metaStream_out.close(); metaStream_index.close(); numSamples.fetch_add(sampleSize); std::cout << "Finish Thread[" << id << ", " << sampleSize << "] at:" << (startpos + totalread) << std::endl; } static void MergeData(int threadbase, std::string outfile, std::string outmetafile, std::string outmetaindexfile, std::atomic_int &numSamples, int D) { std::ifstream inputStream; std::ofstream outputStream; char *buf = new char[bufsize]; std::uint64_t *offsets; int partSamples; int metaSamples = 0; std::uint64_t lastoff = 0; outputStream.open(outfile, std::ofstream::binary); outputStream.write((char *)&numSamples, sizeof(int)); outputStream.write((char *)&D, sizeof(int)); for (int i = 0; i < threadbase; i++) { std::string 
file = outfile + std::to_string(i); inputStream.open(file, std::ifstream::binary); while (!inputStream.eof()) { inputStream.read(buf, bufsize); outputStream.write(buf, inputStream.gcount()); } inputStream.close(); remove(file.c_str()); } outputStream.close(); outputStream.open(outmetafile, std::ofstream::binary); for (int i = 0; i < threadbase; i++) { std::string file = outmetafile + std::to_string(i); inputStream.open(file, std::ifstream::binary); while (!inputStream.eof()) { inputStream.read(buf, bufsize); outputStream.write(buf, inputStream.gcount()); } inputStream.close(); remove(file.c_str()); } outputStream.close(); delete[] buf; outputStream.open(outmetaindexfile, std::ofstream::binary); outputStream.write((char *)&numSamples, sizeof(int)); for (int i = 0; i < threadbase; i++) { std::string file = outmetaindexfile + std::to_string(i); inputStream.open(file, std::ifstream::binary); inputStream.seekg(-((long long)sizeof(int)), inputStream.end); inputStream.read((char *)&partSamples, sizeof(int)); offsets = new std::uint64_t[partSamples + 1]; inputStream.seekg(0, inputStream.beg); inputStream.read((char *)offsets, sizeof(std::uint64_t) * (partSamples + 1)); inputStream.close(); remove(file.c_str()); for (int j = 0; j < partSamples + 1; j++) offsets[j] += lastoff; outputStream.write((char *)offsets, sizeof(std::uint64_t) * partSamples); lastoff = offsets[partSamples]; metaSamples += partSamples; delete[] offsets; } outputStream.write((char *)&lastoff, sizeof(std::uint64_t)); outputStream.close(); std::cout << "numSamples:" << numSamples << " metaSamples:" << metaSamples << " D:" << D << std::endl; } static bool MergeIndex(const std::string &p_vectorfile1, const std::string &p_metafile1, const std::string &p_metaindexfile1, const std::string &p_vectorfile2, const std::string &p_metafile2, const std::string &p_metaindexfile2) { std::ifstream inputStream1, inputStream2; std::ofstream outputStream; char *buf = new char[bufsize]; int R1, R2, C1, C2; #define MergeVector(inputStream, vectorFile, R, C) \ inputStream.open(vectorFile, std::ifstream::binary); \ if (!inputStream.is_open()) { \ std::cout << "Cannot open vector file: " << vectorFile << "!" \ << std::endl; \ return false; \ } \ inputStream.read((char *)&(R), sizeof(int)); \ inputStream.read((char *)&(C), sizeof(int)); MergeVector(inputStream1, p_vectorfile1, R1, C1) MergeVector(inputStream2, p_vectorfile2, R2, C2) #undef MergeVector if (C1 != C2) { inputStream1.close(); inputStream2.close(); std::cout << "Vector dimensions are not the same!" << std::endl; return false; } R1 += R2; outputStream.open(p_vectorfile1 + "_tmp", std::ofstream::binary); outputStream.write((char *)&R1, sizeof(int)); outputStream.write((char *)&C1, sizeof(int)); while (!inputStream1.eof()) { inputStream1.read(buf, bufsize); outputStream.write(buf, inputStream1.gcount()); } while (!inputStream2.eof()) { inputStream2.read(buf, bufsize); outputStream.write(buf, inputStream2.gcount()); } inputStream1.close(); inputStream2.close(); outputStream.close(); if (p_metafile1 != "" && p_metafile2 != "") { outputStream.open(p_metafile1 + "_tmp", std::ofstream::binary); #define MergeMeta(inputStream, metaFile) \ inputStream.open(metaFile, std::ifstream::binary); \ if (!inputStream.is_open()) { \ std::cout << "Cannot open meta file: " << metaFile << "!" 
<< std::endl; \ return false; \ } \ while (!inputStream.eof()) { \ inputStream.read(buf, bufsize); \ outputStream.write(buf, inputStream.gcount()); \ } \ inputStream.close(); MergeMeta(inputStream1, p_metafile1) MergeMeta(inputStream2, p_metafile2) #undef MergeMeta outputStream.close(); delete[] buf; std::uint64_t *offsets; int partSamples; std::uint64_t lastoff = 0; outputStream.open(p_metaindexfile1 + "_tmp", std::ofstream::binary); outputStream.write((char *)&R1, sizeof(int)); #define MergeMetaIndex(inputStream, metaIndexFile) \ inputStream.open(metaIndexFile, std::ifstream::binary); \ if (!inputStream.is_open()) { \ std::cout << "Cannot open meta index file: " << metaIndexFile << "!" \ << std::endl; \ return false; \ } \ inputStream.read((char *)&partSamples, sizeof(int)); \ offsets = new std::uint64_t[partSamples + 1]; \ inputStream.read((char *)offsets, \ sizeof(std::uint64_t) * (partSamples + 1)); \ inputStream.close(); \ for (int j = 0; j < partSamples + 1; j++) offsets[j] += lastoff; \ outputStream.write((char *)offsets, sizeof(std::uint64_t) * partSamples); \ lastoff = offsets[partSamples]; \ delete[] offsets; MergeMetaIndex(inputStream1, p_metaindexfile1) MergeMetaIndex(inputStream2, p_metaindexfile2) #undef MergeMetaIndex outputStream.write((char *)&lastoff, sizeof(std::uint64_t)); outputStream.close(); rename((p_metafile1 + "_tmp").c_str(), p_metafile1.c_str()); rename((p_metaindexfile1 + "_tmp").c_str(), p_metaindexfile1.c_str()); } rename((p_vectorfile1 + "_tmp").c_str(), p_vectorfile1.c_str()); std::cout << "Merged -> numSamples:" << R1 << " D:" << C1 << std::endl; return true; } template <typename T> static void ParseData(std::string filenames, std::string outfile, std::string outmetafile, std::string outmetaindexfile, int threadnum, DistCalcMethod distCalcMethod) { omp_set_num_threads(threadnum); std::atomic_int numSamples = {0}; int D = -1; int threadbase = 0; std::vector<std::string> inputFileNames = Helper::StrUtils::SplitString(filenames, ","); for (std::string inputFileName : inputFileNames) { #ifndef _MSC_VER struct stat stat_buf; stat(inputFileName.c_str(), &stat_buf); #else struct _stat64 stat_buf; int res = _stat64(inputFileName.c_str(), &stat_buf); #endif std::uint64_t blocksize = (stat_buf.st_size + threadnum - 1) / threadnum; #pragma omp parallel for for (int i = 0; i < threadnum; i++) { ProcessTSVData<T>(i, threadbase, blocksize, inputFileName, outfile, outmetafile, outmetaindexfile, numSamples, D, distCalcMethod); } threadbase += threadnum; } MergeData(threadbase, outfile, outmetafile, outmetaindexfile, numSamples, D); } }; } // COMMON } // vsearch #endif //VECTORSEARCH_VSEARCH_DATA_UTILS_H_
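// The block splitter above is the key trick in ProcessTSVData: thread `id`
// starts reading at byte id*blocksize and, unless it is thread 0, discards
// the first (usually partial) line, because thread id-1 keeps reading past
// its own block boundary to finish that line. A plain-C sketch of just the
// alignment rule (function name and buffer size are mine):
#include <stdio.h>
#include <string.h>
static long align_to_line(FILE *fp, int id, long blocksize) {
  char line[4096];
  fseek(fp, (long)id * blocksize, SEEK_SET);
  if (id == 0) return 0;                /* block 0 starts on a line boundary */
  if (fgets(line, sizeof line, fp) == NULL)
    return 0;                           /* block begins at or past EOF       */
  return (long)strlen(line);            /* partial line skipped here         */
}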
build.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <assert.h> #include <stdbool.h> #include <sys/time.h> #include <unistd.h> #include "mkl.h" #include <immintrin.h> #include "omp.h" void build(char* build_file, char* data_folder, char* dest_folder, int rank); void print_float_arr(float *arr, long long int num_elements); void print_int_arr(int *arr, int num_elements); int* get_nonzero_summation_term_idx(char* build_file, char* data_folder, int rank); float print_log(double log_time, double elapsed_time, int num_finished_jobs, int num_total_jobs, double log_frequency, int rank); void scopy_sequential(long long int n, float *src, float *dst); void scopy_par(long long int n, float *src, float *dst); double get_sec(); int main(int argc, char** argv) { char *build_file = argv[1]; char *data_folder = argv[2]; char *dest_folder = argv[3]; int rank = atoi(argv[4]); int recursion_layer = atoi(argv[5]); build(build_file,data_folder,dest_folder,rank); //printf("recursion_layer %d build rank %d DONE\n",recursion_layer,rank); return 0; } int* get_nonzero_summation_term_idx(char* build_file, char* data_folder, int rank) { int total_active_qubit, num_subcircuits, num_summation_terms, num_cuts; FILE* build_fptr = fopen(build_file, "r"); fscanf(build_fptr,"total_active_qubit=%d num_subcircuits=%d num_summation_terms=%d num_cuts=%d\n",\ &total_active_qubit,&num_subcircuits,&num_summation_terms,&num_cuts); int summation_term_ctr; int num_nonzero_summation_terms = 0; int *non_zero_summation_term_idx = calloc(num_summation_terms+1,sizeof(int)); for (summation_term_ctr=0;summation_term_ctr<num_summation_terms;summation_term_ctr++) { bool summation_term_is_zero = false; int subcircuit_ctr; for (subcircuit_ctr=0;subcircuit_ctr<num_subcircuits;subcircuit_ctr++) { int subcircuit_idx, subcircuit_kron_index; fscanf(build_fptr,"%d,%d ",&subcircuit_idx,&subcircuit_kron_index); char *build_data_file = malloc(256*sizeof(char)); sprintf(build_data_file, "%s/kron_%d_%d.txt", data_folder, subcircuit_idx, subcircuit_kron_index); if(access(build_data_file, F_OK) == -1) { // file doesn't exist summation_term_is_zero = true; free(build_data_file); } } if (!summation_term_is_zero) { non_zero_summation_term_idx[num_nonzero_summation_terms+1] = summation_term_ctr; num_nonzero_summation_terms++; } } fclose(build_fptr); non_zero_summation_term_idx[0] = num_nonzero_summation_terms; // printf("num_subcircuits %d non_zero_num_summation_terms %d/%d\n",\ // num_subcircuits,num_nonzero_summation_terms,num_summation_terms); return non_zero_summation_term_idx; } void build(char* build_file, char* data_folder, char* dest_folder, int rank) { int *non_zero_summation_term_idx = get_nonzero_summation_term_idx(build_file,data_folder,rank); int total_active_qubit, num_subcircuits, num_summation_terms, num_cuts; FILE* build_fptr = fopen(build_file, "r"); fscanf(build_fptr,"total_active_qubit=%d num_subcircuits=%d num_summation_terms=%d num_cuts=%d\n",\ &total_active_qubit,&num_subcircuits,&num_summation_terms,&num_cuts); long long int reconstruction_len = (long long int) pow(2,total_active_qubit); float *reconstructed_prob = (float*) calloc(reconstruction_len,sizeof(float)); // cblas_sger parameters MKL_INT incx, incy; CBLAS_LAYOUT layout = CblasRowMajor; float alpha = 1; incx = 1; incy = 1; int summation_term_ctr; int non_zero_summation_term_ctr = 1; int num_non_zero_summation_terms_remaining = non_zero_summation_term_idx[0]; double total_build_time = 0; double log_time = 0; for 
(summation_term_ctr=0;summation_term_ctr<num_summation_terms;summation_term_ctr++) { double build_begin = get_sec(); if (num_non_zero_summation_terms_remaining==0) { //printf("Rank %d : no more remaining non_zero summation terms\n",rank); break; } else if (summation_term_ctr==non_zero_summation_term_idx[non_zero_summation_term_ctr]) { //printf("Rank %d : summation term %d is nonzero\n",rank,summation_term_ctr); float *summation_term = (float*) calloc(reconstruction_len,sizeof(float)); // Read first subcircuit int subcircuit_idx_0, subcircuit_kron_index_0; fscanf(build_fptr,"%d,%d ",&subcircuit_idx_0,&subcircuit_kron_index_0); // printf("Subcircuit %d, kron term %d\n",subcircuit_idx_0,subcircuit_kron_index_0); char *build_data_file_0 = malloc(256*sizeof(char)); sprintf(build_data_file_0, "%s/kron_%d_%d.txt", data_folder, subcircuit_idx_0, subcircuit_kron_index_0); // printf("Reading file %s\n",build_data_file_0); FILE* build_data_fptr_0 = fopen(build_data_file_0, "r"); int num_active_0; fscanf(build_data_fptr_0,"num_active %d\n",&num_active_0); // printf("num_active %d\n",num_active_0); long long int subcircuit_active_len_0 = (long long int) pow(2,num_active_0); long long int state_ctr_0; for (state_ctr_0=0;state_ctr_0<subcircuit_active_len_0;state_ctr_0++) { // printf("Read state %d\n",state_ctr_0); fscanf(build_data_fptr_0,"%f ",&summation_term[state_ctr_0]); } fclose(build_data_fptr_0); free(build_data_file_0); int subcircuit_ctr; long long int summation_term_accumulated_len = subcircuit_active_len_0; // print_float_arr(summation_term,summation_term_accumulated_len); for (subcircuit_ctr=1;subcircuit_ctr<num_subcircuits;subcircuit_ctr++) { int subcircuit_idx, subcircuit_kron_index; fscanf(build_fptr,"%d,%d ",&subcircuit_idx,&subcircuit_kron_index); char *build_data_file = malloc(256*sizeof(char)); sprintf(build_data_file, "%s/kron_%d_%d.txt", data_folder, subcircuit_idx, subcircuit_kron_index); FILE* build_data_fptr = fopen(build_data_file, "r"); int num_active; fscanf(build_data_fptr,"num_active %d\n",&num_active); long long int subcircuit_active_len = (long long int) pow(2,num_active); long long int state_ctr; float *subcircuit_kron_term = (float*) calloc(subcircuit_active_len,sizeof(float)); for (state_ctr=0;state_ctr<subcircuit_active_len;state_ctr++) { fscanf(build_data_fptr,"%f ",&subcircuit_kron_term[state_ctr]); } // print_float_arr(subcircuit_kron_term,subcircuit_active_len); float *dummy_summation_term = (float*) calloc(summation_term_accumulated_len*subcircuit_active_len,sizeof(float)); cblas_sger(layout, summation_term_accumulated_len, subcircuit_active_len, alpha, summation_term, incx, subcircuit_kron_term, incy, dummy_summation_term, subcircuit_active_len); summation_term_accumulated_len *= subcircuit_active_len; cblas_scopy(summation_term_accumulated_len, dummy_summation_term, 1, summation_term, 1); // scopy_par(summation_term_accumulated_len, dummy_summation_term, summation_term); free(dummy_summation_term); // print_float_arr(summation_term,summation_term_accumulated_len); fclose(build_data_fptr); free(build_data_file); free(subcircuit_kron_term); } vsAdd(reconstruction_len, reconstructed_prob, summation_term, reconstructed_prob); free(summation_term); non_zero_summation_term_ctr++; num_non_zero_summation_terms_remaining--; } else { // printf("Rank %d : summation term %d is zero\n",rank,summation_term_ctr); char line[256]; fgets(line, sizeof(line), build_fptr); } log_time += get_sec() - build_begin; total_build_time += get_sec() - build_begin; log_time = 
print_log(log_time,total_build_time,summation_term_ctr+1,num_summation_terms,10,rank); } cblas_sscal(reconstruction_len, pow(0.5,num_cuts), reconstructed_prob, 1); // print_float_arr(reconstructed_prob,reconstruction_len); char *build_result_file = malloc(256*sizeof(char)); sprintf(build_result_file, "%s/reconstructed_prob_%d.txt", dest_folder, rank); FILE* build_data_fptr = fopen(build_result_file, "w"); long long int state_ctr; for (state_ctr=0;state_ctr<reconstruction_len;state_ctr++) { fprintf(build_data_fptr,"%e ",reconstructed_prob[state_ctr]); } fclose(build_data_fptr); free(build_result_file); fclose(build_fptr); free(non_zero_summation_term_idx); free(reconstructed_prob); // printf("Rank %d build DONE\n", rank); char *summary_file = malloc(256*sizeof(char)); sprintf(summary_file, "%s/rank_%d_summary.txt", dest_folder, rank); FILE *summary_fptr = fopen(summary_file, "a"); fprintf(summary_fptr,"\nTotal build time = %e\n",total_build_time); fprintf(summary_fptr,"DONE"); free(summary_file); fclose(summary_fptr); return; } void scopy_sequential(long long int n, float *src, float *dst) { long long int n32 = n & -32; long long int i; float *src_curr_pos = src, *dst_curr_pos = dst; for (i = 0; i < n32; i += 32){ _mm256_storeu_ps(dst_curr_pos, _mm256_loadu_ps(src_curr_pos)); _mm256_storeu_ps(dst_curr_pos+8, _mm256_loadu_ps(src_curr_pos+8)); _mm256_storeu_ps(dst_curr_pos+16, _mm256_loadu_ps(src_curr_pos+16)); _mm256_storeu_ps(dst_curr_pos+24, _mm256_loadu_ps(src_curr_pos+24)); src_curr_pos += 32; dst_curr_pos += 32; } if (n32 == n) return; src_curr_pos = src + n32; dst_curr_pos = dst + n32; for (i = n32; i < n; i++){ *dst_curr_pos = *src_curr_pos; dst_curr_pos++; src_curr_pos++; } } void scopy_par(long long int n, float *src, float *dst) { int TOTAL_THREADS=atoi(getenv("OMP_NUM_THREADS")); if (TOTAL_THREADS<=1){ scopy_sequential(n,src,dst); return; } int tid; int max_cpu_num=(int)sysconf(_SC_NPROCESSORS_ONLN); if (TOTAL_THREADS>max_cpu_num) TOTAL_THREADS=max_cpu_num; #pragma omp parallel for schedule(static) for (tid = 0; tid < TOTAL_THREADS; tid++){ long int NUM_DIV_NUM_THREADS = n / TOTAL_THREADS * TOTAL_THREADS; long int DIM_LEN = n / TOTAL_THREADS; long int EDGE_LEN = (NUM_DIV_NUM_THREADS == n) ? n / TOTAL_THREADS : n - NUM_DIV_NUM_THREADS + DIM_LEN; if (tid == 0) scopy_sequential(EDGE_LEN,src,dst); else scopy_sequential(DIM_LEN,src + EDGE_LEN + (tid - 1) * DIM_LEN, dst + EDGE_LEN + (tid - 1) * DIM_LEN); } return; } void print_int_arr(int *arr, int num_elements) { int ctr; if (num_elements<=10) { for (ctr=0;ctr<num_elements;ctr++) { printf("%d ",arr[ctr]); } } else { for (ctr=0;ctr<5;ctr++) { printf("%d ",arr[ctr]); } printf(" ... "); for (ctr=num_elements-5;ctr<num_elements;ctr++) { printf("%d ",arr[ctr]); } } printf(" = %d elements\n",num_elements); } void print_float_arr(float *arr, long long int num_elements) { long long int ctr; if (num_elements<=10) { for (ctr=0;ctr<num_elements;ctr++) { printf("%e ",arr[ctr]); } } else { for (ctr=0;ctr<5;ctr++) { printf("%e ",arr[ctr]); } printf(" ... 
"); for (ctr=num_elements-5;ctr<num_elements;ctr++) { printf("%e ",arr[ctr]); } } printf(" = %lld elements\n",num_elements); } float print_log(double log_time, double elapsed_time, int num_finished_jobs, int num_total_jobs, double log_frequency, int rank) { if (log_time>log_frequency) { double eta = elapsed_time/num_finished_jobs*num_total_jobs - elapsed_time; printf("Rank %d finished building %d/%d, elapsed = %e, ETA = %e\n",rank,num_finished_jobs,num_total_jobs,elapsed_time,eta); return 0; } else { return log_time; } } double get_sec() { struct timeval time; gettimeofday(&time, NULL); return (time.tv_sec + 1e-6 * time.tv_usec); }
convolution_1x1.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void conv1x1s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); int q = 0; for (; q+3<inch; q+=4) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ for (; nn>0; nn--) { float32x4_t _p = vld1q_f32(r0); float32x4_t _pn = vld1q_f32(r0+4); float32x4_t _outp = vld1q_f32(outptr); float32x4_t _outpn = vld1q_f32(outptr+4); _outp = vfmaq_f32(_outp, _p, _k0); _outpn = vfmaq_f32(_outpn, _pn, _k0); float32x4_t _p1 = vld1q_f32(r1); float32x4_t _p1n = vld1q_f32(r1+4); _outp = vfmaq_f32(_outp, _p1, _k1); _outpn = vfmaq_f32(_outpn, _p1n, _k1); float32x4_t _p2 = vld1q_f32(r2); float32x4_t _p2n = vld1q_f32(r2+4); _outp = vfmaq_f32(_outp, _p2, _k2); _outpn = vfmaq_f32(_outpn, _p2n, _k2); float32x4_t _p3 = vld1q_f32(r3); float32x4_t _p3n = vld1q_f32(r3+4); _outp = vfmaq_f32(_outp, _p3, _k3); _outpn = vfmaq_f32(_outpn, _p3n, _k3); vst1q_f32(outptr, _outp); vst1q_f32(outptr+4, _outpn); r0 += 8; r1 += 8; r2 += 8; r3 += 8; outptr += 8; } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128] \n" "vmla.f32 q0, q2, %q12 \n" "vmla.f32 q1, q3, %q12 \n" "pld [%3, #256] \n" "vld1.f32 {d4-d7}, [%3 :128]! \n" "vmla.f32 q0, q2, %q13 \n" "vmla.f32 q1, q3, %q13 \n" "pld [%4, #256] \n" "vld1.f32 {d4-d7}, [%4 :128]! \n" "vmla.f32 q0, q2, %q14 \n" "vmla.f32 q1, q3, %q14 \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q0, q2, %q15 \n" "vmla.f32 q1, q3, %q15 \n" "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1 :128]! 
\n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "q0", "q1", "q2", "q3" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0++; r1++; r2++; r3++; outptr++; } } for (; q<inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float* r0 = img0; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); #if __aarch64__ for (; nn>0; nn--) { float32x4_t _p = vld1q_f32(r0); float32x4_t _outp = vld1q_f32(outptr); float32x4_t _pn = vld1q_f32(r0+4); float32x4_t _outpn = vld1q_f32(outptr+4); _outp = vfmaq_f32(_outp, _p, _k0); _outpn = vfmaq_f32(_outpn, _pn, _k0); vst1q_f32(outptr, _outp); vst1q_f32(outptr+4, _outpn); r0 += 8; outptr += 8; } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128] \n" "vmla.f32 q0, q2, %q6 \n" "vmla.f32 q1, q3, %q6 \n" "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "q0", "q1", "q2", "q3" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; *outptr += sum; r0++; outptr++; } } } } static void conv1x1s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2*outw + w; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out.fill(bias0); int q = 0; for (; q+3<inch; q+=4) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ for (; nn>0; nn--) { float32x4x2_t _px2 = vld2q_f32(r0); float32x4_t _p = _px2.val[0]; float32x4_t _outp = vld1q_f32(outptr); float32x4x2_t _pnx2 = vld2q_f32(r0+8); float32x4_t _pn = _pnx2.val[0]; float32x4_t _outpn = vld1q_f32(outptr+4); _outp = vmlaq_f32(_outp, _p, _k0); _outpn = vmlaq_f32(_outpn, _pn, _k0); float32x4x2_t _p1x2 = vld2q_f32(r1); float32x4_t _p1 = _p1x2.val[0]; float32x4x2_t _p1nx2 = vld2q_f32(r1+8); float32x4_t _p1n = _p1nx2.val[0]; _outp = vmlaq_f32(_outp, _p1, _k1); _outpn = vmlaq_f32(_outpn, _p1n, _k1); float32x4x2_t _p2x2 = vld2q_f32(r2); float32x4_t _p2 = _p2x2.val[0]; float32x4x2_t _p2nx2 = vld2q_f32(r2+8); float32x4_t _p2n = _p2nx2.val[0]; _outp = vmlaq_f32(_outp, _p2, _k2); _outpn = vmlaq_f32(_outpn, _p2n, _k2); float32x4x2_t _p3x2 = vld2q_f32(r3); float32x4_t _p3 = _p3x2.val[0]; float32x4x2_t _p3nx2 = vld2q_f32(r3+8); float32x4_t _p3n = _p3nx2.val[0]; _outp = vmlaq_f32(_outp, _p3, _k3); _outpn = vmlaq_f32(_outpn, _p3n, _k3); vst1q_f32(outptr, _outp); vst1q_f32(outptr+4, _outpn); r0 += 16; r1 += 16; r2 += 16; r3 += 16; outptr += 8; } #else if (nn > 0) { asm volatile( "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1] \n" "vmla.f32 q0, q2, %q12 \n" "vmla.f32 q1, q8, %q12 \n" "pld [%3, #512] \n" "vld2.f32 {d4-d7}, [%3]! \n" "vld2.f32 {d16-d19}, [%3]! \n" "vmla.f32 q0, q2, %q13 \n" "vmla.f32 q1, q8, %q13 \n" "pld [%4, #512] \n" "vld2.f32 {d4-d7}, [%4]! \n" "vld2.f32 {d16-d19}, [%4]! \n" "vmla.f32 q0, q2, %q14 \n" "vmla.f32 q1, q8, %q14 \n" "pld [%5, #512] \n" "vld2.f32 {d4-d7}, [%5]! \n" "vld2.f32 {d16-d19}, [%5]! \n" "vmla.f32 q0, q2, %q15 \n" "vmla.f32 q1, q8, %q15 \n" "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1]! 
\n" "bne 0b \n" "sub %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0 += 2; r1 += 2; r2 += 2; r3 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; } } for (; q<inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float* r0 = img0; for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); #if __aarch64__ for (; nn>0; nn--) { float32x4x2_t _px2 = vld2q_f32(r0); float32x4_t _p = _px2.val[0]; float32x4_t _outp = vld1q_f32(outptr); float32x4x2_t _pnx2 = vld2q_f32(r0+8); float32x4_t _pn = _pnx2.val[0]; float32x4_t _outpn = vld1q_f32(outptr+4); _outp = vmlaq_f32(_outp, _p, _k0); _outpn = vmlaq_f32(_outpn, _pn, _k0); vst1q_f32(outptr, _outp); vst1q_f32(outptr+4, _outpn); r0 += 16; outptr += 8; } #else if (nn > 0) { asm volatile( "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1] \n" "vmla.f32 q0, q2, %q6 \n" "vmla.f32 q1, q8, %q6 \n" "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1]! \n" "bne 0b \n" "sub %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; *outptr += sum; r0 += 2; outptr++; } r0 += tailstep; } } } }
openmp_common.c
// RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 -o - %s

#pragma omp // expected-error {{expected an OpenMP directive}}
#pragma omp unknown_directive // expected-error {{expected an OpenMP directive}}

void foo() {
#pragma omp // expected-error {{expected an OpenMP directive}}
#pragma omp unknown_directive // expected-error {{expected an OpenMP directive}}
}
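// For contrast with the diagnostics exercised above, a minimal well-formed
// directive (my own example, not part of the clang test suite): `#pragma omp`
// must be followed by a recognized directive name such as `parallel`.
void bar() {
#pragma omp parallel
  {
    // executed by every thread in the team
  }
}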
vector.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Member functions for hypre_Vector class. * *****************************************************************************/ #include "_hypre_onedpl.hpp" #include "seq_mv.h" #include "_hypre_utilities.hpp" //RL: TODO vector_device.c, include cuda there /*-------------------------------------------------------------------------- * hypre_SeqVectorCreate *--------------------------------------------------------------------------*/ hypre_Vector * hypre_SeqVectorCreate( HYPRE_Int size ) { hypre_Vector *vector; vector = hypre_CTAlloc(hypre_Vector, 1, HYPRE_MEMORY_HOST); hypre_VectorData(vector) = NULL; hypre_VectorSize(vector) = size; hypre_VectorNumVectors(vector) = 1; hypre_VectorMultiVecStorageMethod(vector) = 0; /* set defaults */ hypre_VectorOwnsData(vector) = 1; hypre_VectorMemoryLocation(vector) = hypre_HandleMemoryLocation(hypre_handle()); return vector; } /*-------------------------------------------------------------------------- * hypre_SeqMultiVectorCreate *--------------------------------------------------------------------------*/ hypre_Vector * hypre_SeqMultiVectorCreate( HYPRE_Int size, HYPRE_Int num_vectors ) { hypre_Vector *vector = hypre_SeqVectorCreate(size); hypre_VectorNumVectors(vector) = num_vectors; return vector; } /*-------------------------------------------------------------------------- * hypre_SeqVectorDestroy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SeqVectorDestroy( hypre_Vector *vector ) { HYPRE_Int ierr = 0; if (vector) { HYPRE_MemoryLocation memory_location = hypre_VectorMemoryLocation(vector); if ( hypre_VectorOwnsData(vector) ) { hypre_TFree(hypre_VectorData(vector), memory_location); } hypre_TFree(vector, HYPRE_MEMORY_HOST); } return ierr; } /*-------------------------------------------------------------------------- * hypre_SeqVectorInitialize *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SeqVectorInitialize_v2( hypre_Vector *vector, HYPRE_MemoryLocation memory_location ) { HYPRE_Int size = hypre_VectorSize(vector); HYPRE_Int ierr = 0; HYPRE_Int num_vectors = hypre_VectorNumVectors(vector); HYPRE_Int multivec_storage_method = hypre_VectorMultiVecStorageMethod(vector); hypre_VectorMemoryLocation(vector) = memory_location; /* Caveat: for pre-existing data, the memory location must be guaranteed * to be consistent with `memory_location' * Otherwise, mismatches will exist and problems will be encountered * when being used, and freed */ if ( !hypre_VectorData(vector) ) { hypre_VectorData(vector) = hypre_CTAlloc(HYPRE_Complex, num_vectors * size, memory_location); } if ( multivec_storage_method == 0 ) { hypre_VectorVectorStride(vector) = size; hypre_VectorIndexStride(vector) = 1; } else if ( multivec_storage_method == 1 ) { hypre_VectorVectorStride(vector) = 1; hypre_VectorIndexStride(vector) = num_vectors; } else { ++ierr; } return ierr; } HYPRE_Int hypre_SeqVectorInitialize( hypre_Vector *vector ) { HYPRE_Int ierr; ierr = hypre_SeqVectorInitialize_v2( vector, hypre_VectorMemoryLocation(vector) ); return ierr; } 
/*-------------------------------------------------------------------------- * hypre_SeqVectorSetDataOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SeqVectorSetDataOwner( hypre_Vector *vector, HYPRE_Int owns_data ) { HYPRE_Int ierr = 0; hypre_VectorOwnsData(vector) = owns_data; return ierr; } /*-------------------------------------------------------------------------- * hypre_SeqVectorRead *--------------------------------------------------------------------------*/ hypre_Vector * hypre_SeqVectorRead( char *file_name ) { hypre_Vector *vector; FILE *fp; HYPRE_Complex *data; HYPRE_Int size; HYPRE_Int j; /*---------------------------------------------------------- * Read in the data *----------------------------------------------------------*/ fp = fopen(file_name, "r"); hypre_fscanf(fp, "%d", &size); vector = hypre_SeqVectorCreate(size); hypre_VectorMemoryLocation(vector) = HYPRE_MEMORY_HOST; hypre_SeqVectorInitialize(vector); data = hypre_VectorData(vector); for (j = 0; j < size; j++) { hypre_fscanf(fp, "%le", &data[j]); } fclose(fp); /* multivector code not written yet */ hypre_assert( hypre_VectorNumVectors(vector) == 1 ); return vector; } /*-------------------------------------------------------------------------- * hypre_SeqVectorPrint *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SeqVectorPrint( hypre_Vector *vector, char *file_name ) { FILE *fp; HYPRE_Complex *data; HYPRE_Int size, num_vectors, vecstride, idxstride; HYPRE_Int i, j; HYPRE_Complex value; HYPRE_Int ierr = 0; num_vectors = hypre_VectorNumVectors(vector); vecstride = hypre_VectorVectorStride(vector); idxstride = hypre_VectorIndexStride(vector); /*---------------------------------------------------------- * Print the data *----------------------------------------------------------*/ data = hypre_VectorData(vector); size = hypre_VectorSize(vector); fp = fopen(file_name, "w"); if ( hypre_VectorNumVectors(vector) == 1 ) { hypre_fprintf(fp, "%d\n", size); } else { hypre_fprintf(fp, "%d vectors of size %d\n", num_vectors, size ); } if ( num_vectors > 1 ) { for ( j = 0; j < num_vectors; ++j ) { hypre_fprintf(fp, "vector %d\n", j ); for (i = 0; i < size; i++) { value = data[ j * vecstride + i * idxstride ]; #ifdef HYPRE_COMPLEX hypre_fprintf(fp, "%.14e , %.14e\n", hypre_creal(value), hypre_cimag(value)); #else hypre_fprintf(fp, "%.14e\n", value); #endif } } } else { for (i = 0; i < size; i++) { #ifdef HYPRE_COMPLEX hypre_fprintf(fp, "%.14e , %.14e\n", hypre_creal(data[i]), hypre_cimag(data[i])); #else hypre_fprintf(fp, "%.14e\n", data[i]); #endif } } fclose(fp); return ierr; } /*-------------------------------------------------------------------------- * hypre_SeqVectorSetConstantValues *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SeqVectorSetConstantValues( hypre_Vector *v, HYPRE_Complex value ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime(); #endif HYPRE_Complex *vector_data = hypre_VectorData(v); HYPRE_Int size = hypre_VectorSize(v); HYPRE_Int ierr = 0; size *= hypre_VectorNumVectors(v); //hypre_SeqVectorPrefetch(v, HYPRE_MEMORY_DEVICE); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (size > 0) { HYPRE_THRUST_CALL( fill_n, vector_data, size, value ); } #elif defined(HYPRE_USING_SYCL) if (size > 0) { HYPRE_ONEDPL_CALL( std::fill_n, vector_data, size, value ); } #else HYPRE_Int i; #if defined(HYPRE_USING_DEVICE_OPENMP) #pragma omp 
target teams distribute parallel for private(i) is_device_ptr(vector_data) #elif defined(HYPRE_USING_OPENMP) #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < size; i++) { vector_data[i] = value; } #endif /* defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */ #if defined(HYPRE_USING_GPU) hypre_SyncComputeStream(hypre_handle()); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime(); #endif return ierr; } /*-------------------------------------------------------------------------- * hypre_SeqVectorSetRandomValues * * returns vector of values randomly distributed between -1.0 and +1.0 *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SeqVectorSetRandomValues( hypre_Vector *v, HYPRE_Int seed ) { HYPRE_Complex *vector_data = hypre_VectorData(v); HYPRE_Int size = hypre_VectorSize(v); HYPRE_Int i; HYPRE_Int ierr = 0; hypre_SeedRand(seed); size *= hypre_VectorNumVectors(v); if (hypre_GetActualMemLocation(hypre_VectorMemoryLocation(v)) == hypre_MEMORY_HOST) { /* RDF: threading this loop may cause problems because of hypre_Rand() */ for (i = 0; i < size; i++) { vector_data[i] = 2.0 * hypre_Rand() - 1.0; } } else { HYPRE_Complex *h_data = hypre_TAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST); for (i = 0; i < size; i++) { h_data[i] = 2.0 * hypre_Rand() - 1.0; } hypre_TMemcpy(vector_data, h_data, HYPRE_Complex, size, hypre_VectorMemoryLocation(v), HYPRE_MEMORY_HOST); hypre_TFree(h_data, HYPRE_MEMORY_HOST); } return ierr; } /*-------------------------------------------------------------------------- * hypre_SeqVectorCopy * copies data from x to y * if size of x is larger than y only the first size_y elements of x are * copied to y *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SeqVectorCopy( hypre_Vector *x, hypre_Vector *y ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime(); #endif HYPRE_Int ierr = 0; size_t size = hypre_min( hypre_VectorSize(x), hypre_VectorSize(y) ) * hypre_VectorNumVectors(x); hypre_TMemcpy( hypre_VectorData(y), hypre_VectorData(x), HYPRE_Complex, size, hypre_VectorMemoryLocation(y), hypre_VectorMemoryLocation(x) ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime(); #endif return ierr; } /*-------------------------------------------------------------------------- * hypre_SeqVectorCloneDeep * Returns a complete copy of x - a deep copy, with its own copy of the data. 
*--------------------------------------------------------------------------*/ hypre_Vector* hypre_SeqVectorCloneDeep_v2( hypre_Vector *x, HYPRE_MemoryLocation memory_location ) { HYPRE_Int size = hypre_VectorSize(x); HYPRE_Int num_vectors = hypre_VectorNumVectors(x); hypre_Vector *y = hypre_SeqMultiVectorCreate( size, num_vectors ); hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x); hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x); hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x); hypre_SeqVectorInitialize_v2(y, memory_location); hypre_SeqVectorCopy( x, y ); return y; } hypre_Vector* hypre_SeqVectorCloneDeep( hypre_Vector *x ) { return hypre_SeqVectorCloneDeep_v2(x, hypre_VectorMemoryLocation(x)); } /*-------------------------------------------------------------------------- * hypre_SeqVectorCloneShallow * Returns a complete copy of x - a shallow copy, pointing to the data of x *--------------------------------------------------------------------------*/ hypre_Vector * hypre_SeqVectorCloneShallow( hypre_Vector *x ) { HYPRE_Int size = hypre_VectorSize(x); HYPRE_Int num_vectors = hypre_VectorNumVectors(x); hypre_Vector * y = hypre_SeqMultiVectorCreate( size, num_vectors ); hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x); hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x); hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x); hypre_VectorMemoryLocation(y) = hypre_VectorMemoryLocation(x); hypre_VectorData(y) = hypre_VectorData(x); hypre_SeqVectorSetDataOwner( y, 0 ); hypre_SeqVectorInitialize(y); return y; } /*-------------------------------------------------------------------------- * hypre_SeqVectorScale *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SeqVectorScale( HYPRE_Complex alpha, hypre_Vector *y ) { /* special cases */ if (alpha == 1.0) { return 0; } if (alpha == 0.0) { return hypre_SeqVectorSetConstantValues(y, 0.0); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime(); #endif HYPRE_Complex *y_data = hypre_VectorData(y); HYPRE_Int size = hypre_VectorSize(y); HYPRE_Int ierr = 0; size *= hypre_VectorNumVectors(y); //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL) #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) #if defined(HYPRE_USING_CUBLAS) HYPRE_CUBLAS_CALL( cublasDscal(hypre_HandleCublasHandle(hypre_handle()), size, &alpha, y_data, 1) ); #else HYPRE_THRUST_CALL( transform, y_data, y_data + size, y_data, alpha * _1 ); #endif // #if defined(HYPRE_USING_CUBLAS) #elif defined(HYPRE_USING_SYCL) // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) #if defined(HYPRE_USING_ONEMKLBLAS) HYPRE_SYCL_CALL( oneapi::mkl::blas::scal(*hypre_HandleComputeStream(hypre_handle()), size, alpha, y_data, 1).wait() ); #else HYPRE_ONEDPL_CALL( std::transform, y_data, y_data + size, y_data, [alpha](HYPRE_Complex y) -> HYPRE_Complex { return alpha * y; } ); #endif // #if defined(HYPRE_USING_ONEMKL) #endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) #else // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL) HYPRE_Int i; #if defined(HYPRE_USING_DEVICE_OPENMP) #pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data) #elif defined(HYPRE_USING_OPENMP) #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < size; i++) { y_data[i] *= alpha; } #endif // #if 
defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL) #if defined(HYPRE_USING_GPU) hypre_SyncComputeStream(hypre_handle()); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime(); #endif return ierr; } /*-------------------------------------------------------------------------- * hypre_SeqVectorAxpy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SeqVectorAxpy( HYPRE_Complex alpha, hypre_Vector *x, hypre_Vector *y ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime(); #endif HYPRE_Complex *x_data = hypre_VectorData(x); HYPRE_Complex *y_data = hypre_VectorData(y); HYPRE_Int size = hypre_VectorSize(x); HYPRE_Int ierr = 0; size *= hypre_VectorNumVectors(x); //hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE); //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL) #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) #if defined(HYPRE_USING_CUBLAS) HYPRE_CUBLAS_CALL( cublasDaxpy(hypre_HandleCublasHandle(hypre_handle()), size, &alpha, x_data, 1, y_data, 1) ); #else HYPRE_THRUST_CALL( transform, x_data, x_data + size, y_data, y_data, alpha * _1 + _2 ); #endif // #if defined(HYPRE_USING_CUBLAS) #elif defined(HYPRE_USING_SYCL) // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) #if defined(HYPRE_USING_ONEMKLBLAS) HYPRE_SYCL_CALL( oneapi::mkl::blas::axpy(*hypre_HandleComputeStream(hypre_handle()), size, alpha, x_data, 1, y_data, 1).wait() ); #else HYPRE_ONEDPL_CALL( std::transform, x_data, x_data + size, y_data, y_data, [alpha](HYPRE_Complex x, HYPRE_Complex y) -> HYPRE_Complex { return alpha * x + y; } ); #endif // #if defined(HYPRE_USING_ONEMKL) #endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) #else // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL) HYPRE_Int i; #if defined(HYPRE_USING_DEVICE_OPENMP) #pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, x_data) #elif defined(HYPRE_USING_OPENMP) #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < size; i++) { y_data[i] += alpha * x_data[i]; } #endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL) #if defined(HYPRE_USING_GPU) hypre_SyncComputeStream(hypre_handle()); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime(); #endif return ierr; } /* y = y + x ./ b */ HYPRE_Int hypre_SeqVectorElmdivpy( hypre_Vector *x, hypre_Vector *b, hypre_Vector *y ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime(); #endif HYPRE_Complex *x_data = hypre_VectorData(x); HYPRE_Complex *b_data = hypre_VectorData(b); HYPRE_Complex *y_data = hypre_VectorData(y); HYPRE_Int size = hypre_VectorSize(b); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x), hypre_VectorMemoryLocation(b) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; if (exec == HYPRE_EXEC_DEVICE) { //TODO //hypre_SeqVectorElmdivpyDevice(x, b, y); /* #if defined(HYPRE_USING_DEVICE_OPENMP) #pragma omp target teams distribute parallel for private(i) is_device_ptr(u_data,v_data,l1_norms) #endif */ hypreDevice_IVAXPY(size, b_data, x_data, y_data); } else #endif { HYPRE_Int i; #ifdef 
HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < size; i++) { y_data[i] += x_data[i] / b_data[i]; } } #if defined(HYPRE_USING_GPU) hypre_SyncComputeStream(hypre_handle()); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime(); #endif return hypre_error_flag; } /* y[i] += x[i] / b[i] where marker[i] == marker_val */ HYPRE_Int hypre_SeqVectorElmdivpyMarked( hypre_Vector *x, hypre_Vector *b, hypre_Vector *y, HYPRE_Int *marker, HYPRE_Int marker_val) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime(); #endif HYPRE_Complex *x_data = hypre_VectorData(x); HYPRE_Complex *b_data = hypre_VectorData(b); HYPRE_Complex *y_data = hypre_VectorData(y); HYPRE_Int size = hypre_VectorSize(b); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x), hypre_VectorMemoryLocation(b) ); if (exec == HYPRE_EXEC_DEVICE) { hypreDevice_IVAXPYMarked(size, b_data, x_data, y_data, marker, marker_val); } else #endif { HYPRE_Int i; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < size; i++) { if (marker[i] == marker_val) { y_data[i] += x_data[i] / b_data[i]; } } } #if defined(HYPRE_USING_GPU) hypre_SyncComputeStream(hypre_handle()); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime(); #endif return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_SeqVectorInnerProd *--------------------------------------------------------------------------*/ HYPRE_Real hypre_SeqVectorInnerProd( hypre_Vector *x, hypre_Vector *y ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime(); #endif HYPRE_Complex *x_data = hypre_VectorData(x); HYPRE_Complex *y_data = hypre_VectorData(y); HYPRE_Int size = hypre_VectorSize(x); HYPRE_Real result = 0.0; size *= hypre_VectorNumVectors(x); //hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE); //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL) #ifndef HYPRE_COMPLEX #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) #if defined(HYPRE_USING_CUBLAS) HYPRE_CUBLAS_CALL( cublasDdot(hypre_HandleCublasHandle(hypre_handle()), size, x_data, 1, y_data, 1, &result) ); #else result = HYPRE_THRUST_CALL( inner_product, x_data, x_data + size, y_data, 0.0 ); #endif // #if defined(HYPRE_USING_CUBLAS) #elif defined(HYPRE_USING_SYCL) // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) #if defined(HYPRE_USING_ONEMKLBLAS) HYPRE_Real *result_dev = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_DEVICE); HYPRE_SYCL_CALL( oneapi::mkl::blas::dot(*hypre_HandleComputeStream(hypre_handle()), size, x_data, 1, y_data, 1, result_dev).wait() ); hypre_TMemcpy(&result, result_dev, HYPRE_Real, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_TFree(result_dev, HYPRE_MEMORY_DEVICE); #else result = HYPRE_ONEDPL_CALL( std::transform_reduce, x_data, x_data + size, y_data, 0.0 ); #endif // #if defined(HYPRE_USING_ONEMKLBLAS) #endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) #else // #ifndef HYPRE_COMPLEX /* TODO */ #error "Complex inner product" #endif // #ifndef HYPRE_COMPLEX #else // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL) HYPRE_Int i; #if defined(HYPRE_USING_DEVICE_OPENMP) #pragma omp target teams distribute 
parallel for private(i) reduction(+:result) is_device_ptr(y_data,x_data) map(result) #elif defined(HYPRE_USING_OPENMP) #pragma omp parallel for private(i) reduction(+:result) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < size; i++) { result += hypre_conj(y_data[i]) * x_data[i]; } #endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL) #if defined(HYPRE_USING_GPU) hypre_SyncComputeStream(hypre_handle()); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime(); #endif return result; } //TODO /*-------------------------------------------------------------------------- * hypre_SeqVectorSumElts: * Returns the sum of all vector elements. *--------------------------------------------------------------------------*/ HYPRE_Complex hypre_SeqVectorSumElts( hypre_Vector *vector ) { HYPRE_Complex sum = 0; HYPRE_Complex *data = hypre_VectorData( vector ); HYPRE_Int size = hypre_VectorSize( vector ); HYPRE_Int i; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE #endif for ( i = 0; i < size; ++i ) { sum += data[i]; } return sum; } HYPRE_Int hypre_SeqVectorPrefetch( hypre_Vector *x, HYPRE_MemoryLocation memory_location) { HYPRE_Int ierr = 0; #ifdef HYPRE_USING_UNIFIED_MEMORY if (hypre_VectorMemoryLocation(x) != HYPRE_MEMORY_DEVICE) { /* hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! CUDA Prefetch with non-unified memory\n");*/ return 1; } HYPRE_Complex *x_data = hypre_VectorData(x); HYPRE_Int size = hypre_VectorSize(x) * hypre_VectorNumVectors(x); if (size == 0) { return ierr; } hypre_MemPrefetch(x_data, sizeof(HYPRE_Complex)*size, memory_location); #endif return ierr; } #if 0 /* y[i] = max(alpha*x[i], beta*y[i]) */ HYPRE_Int hypre_SeqVectorMax( HYPRE_Complex alpha, hypre_Vector *x, HYPRE_Complex beta, hypre_Vector *y ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime(); #endif HYPRE_Complex *x_data = hypre_VectorData(x); HYPRE_Complex *y_data = hypre_VectorData(y); HYPRE_Int size = hypre_VectorSize(x); HYPRE_Int ierr = 0; size *= hypre_VectorNumVectors(x); //hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE); //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE); thrust::maximum<HYPRE_Complex> mx; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_THRUST_CALL( transform, thrust::make_transform_iterator(x_data, alpha * _1), thrust::make_transform_iterator(x_data + size, alpha * _1), thrust::make_transform_iterator(y_data, beta * _1), y_data, mx ); #else HYPRE_Int i; #if defined(HYPRE_USING_DEVICE_OPENMP) #pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, x_data) #elif defined(HYPRE_USING_OPENMP) #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < size; i++) { y_data[i] = hypre_max(alpha * x_data[i], beta * y_data[i]); } #endif /* defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */ hypre_SyncComputeStream(hypre_handle()); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime(); #endif return ierr; } #endif
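/* Added composition sketch, not part of the original file: a CG-style
 * residual update followed by its 2-norm, built only from the BLAS-1
 * kernels defined in vector.c above (sqrt comes from <math.h>). */
#include <math.h>
static HYPRE_Real example_residual_norm( hypre_Vector  *r,
                                         hypre_Vector  *p,
                                         HYPRE_Complex  alpha )
{
   hypre_SeqVectorAxpy(-alpha, p, r);             /* r = r - alpha * p */
   return sqrt(hypre_SeqVectorInnerProd(r, r));   /* sqrt(<r, r>) = ||r||_2 */
}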
omp_single.c
<ompts:test> <ompts:testdescription>Test which checks the omp single directive by controlling how often a section within an omp single region is executed.</ompts:testdescription> <ompts:ompversion>2.0</ompts:ompversion> <ompts:directive>omp single</ompts:directive> <ompts:dependences>omp parallel private,omp flush</ompts:dependences> <ompts:testcode> #include <stdio.h> #include "omp_testsuite.h" int <ompts:testcode:functionname>omp_single</ompts:testcode:functionname>(FILE * logFile) { <ompts:orphan:vars> int nr_threads_in_single; int result; int nr_iterations; int i; </ompts:orphan:vars> nr_threads_in_single = 0; result = 0; nr_iterations = 0; #pragma omp parallel private(i) { for (i = 0; i < LOOPCOUNT; i++) { <ompts:orphan> <ompts:check>#pragma omp single </ompts:check> { #pragma omp flush nr_threads_in_single++; #pragma omp flush nr_iterations++; nr_threads_in_single--; result = result + nr_threads_in_single; } /* end of single */ </ompts:orphan> } /* end of for */ } /* end of parallel */ return ((result == 0) && (nr_iterations == LOOPCOUNT)); } /* end of omp_single */ </ompts:testcode> </ompts:test>
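/* Added stand-alone illustration, independent of the omp_testsuite harness
 * above: however many threads enter the parallel region, the single block
 * executes exactly once, and its implicit barrier makes the update visible
 * to all threads before they proceed. Compile with -fopenmp. */
#include <stdio.h>
int main(void)
{
   int counter = 0;
#pragma omp parallel
   {
#pragma omp single
      counter++;   /* run by exactly one thread */
   }
   printf("counter = %d\n", counter);   /* always prints 1 */
   return 0;
}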
fx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/fx-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. 
*/ #define LeftShiftOperator 0xf5U #define RightShiftOperator 0xf6U #define LessThanEqualOperator 0xf7U #define GreaterThanEqualOperator 0xf8U #define EqualOperator 0xf9U #define NotEqualOperator 0xfaU #define LogicalAndOperator 0xfbU #define LogicalOrOperator 0xfcU #define ExponentialNotation 0xfdU struct _FxInfo { const Image *images; char *expression; FILE *file; SplayTreeInfo *colors, *symbols; CacheView **view; RandomInfo *random_info; ExceptionInfo *exception; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireFxInfo() allocates the FxInfo structure. % % The format of the AcquireFxInfo method is: % % FxInfo *AcquireFxInfo(Image *images,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o expression: the expression. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression, ExceptionInfo *exception) { char fx_op[2]; const Image *next; FxInfo *fx_info; register ssize_t i; fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info)); (void) ResetMagickMemory(fx_info,0,sizeof(*fx_info)); fx_info->exception=AcquireExceptionInfo(); fx_info->images=images; fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, RelinquishMagickMemory); fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, RelinquishMagickMemory); fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength( fx_info->images),sizeof(*fx_info->view)); if (fx_info->view == (CacheView **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); i=0; next=GetFirstImageInList(fx_info->images); for ( ; next != (Image *) NULL; next=next->next) { fx_info->view[i]=AcquireVirtualCacheView(next,exception); i++; } fx_info->random_info=AcquireRandomInfo(); fx_info->expression=ConstantString(expression); fx_info->file=stderr; (void) SubstituteString(&fx_info->expression," ",""); /* compact string */ /* Force right-to-left associativity for unary negation. */ (void) SubstituteString(&fx_info->expression,"-","-1.0*"); (void) SubstituteString(&fx_info->expression,"^-1.0*","^-"); (void) SubstituteString(&fx_info->expression,"E-1.0*","E-"); (void) SubstituteString(&fx_info->expression,"e-1.0*","e-"); /* Convert compound to simple operators. 
*/ fx_op[1]='\0'; *fx_op=(char) LeftShiftOperator; (void) SubstituteString(&fx_info->expression,"<<",fx_op); *fx_op=(char) RightShiftOperator; (void) SubstituteString(&fx_info->expression,">>",fx_op); *fx_op=(char) LessThanEqualOperator; (void) SubstituteString(&fx_info->expression,"<=",fx_op); *fx_op=(char) GreaterThanEqualOperator; (void) SubstituteString(&fx_info->expression,">=",fx_op); *fx_op=(char) EqualOperator; (void) SubstituteString(&fx_info->expression,"==",fx_op); *fx_op=(char) NotEqualOperator; (void) SubstituteString(&fx_info->expression,"!=",fx_op); *fx_op=(char) LogicalAndOperator; (void) SubstituteString(&fx_info->expression,"&&",fx_op); *fx_op=(char) LogicalOrOperator; (void) SubstituteString(&fx_info->expression,"||",fx_op); *fx_op=(char) ExponentialNotation; (void) SubstituteString(&fx_info->expression,"**",fx_op); return(fx_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d d N o i s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AddNoiseImage() adds random noise to the image. % % The format of the AddNoiseImage method is: % % Image *AddNoiseImage(const Image *image,const NoiseType noise_type, % const double attenuate,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o noise_type: The type of noise: Uniform, Gaussian, Multiplicative, % Impulse, Laplacian, or Poisson. % % o attenuate: attenuate the random distribution. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type, const double attenuate,ExceptionInfo *exception) { #define AddNoiseImageTag "AddNoise/Image" CacheView *image_view, *noise_view; Image *noise_image; MagickBooleanType status; MagickOffsetType progress; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif /* Initialize noise image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) noise_image=AccelerateAddNoiseImage(image,noise_type,exception); if (noise_image != (Image *) NULL) return(noise_image); #endif noise_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (noise_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse) { noise_image=DestroyImage(noise_image); return((Image *) NULL); } /* Add noise in each row. 
*/ status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireVirtualCacheView(image,exception); noise_view=AcquireAuthenticCacheView(noise_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,noise_image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel); if ((traits == UndefinedPixelTrait) || (noise_traits == UndefinedPixelTrait)) continue; if (((noise_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) <= (QuantumRange/2))) { SetPixelChannel(noise_image,channel,p[i],q); continue; } SetPixelChannel(noise_image,channel,ClampToQuantum( GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)), q); } p+=GetPixelChannels(image); q+=GetPixelChannels(noise_image); } sync=SyncCacheViewAuthenticPixels(noise_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_AddNoiseImage) #endif proceed=SetImageProgress(image,AddNoiseImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } noise_view=DestroyCacheView(noise_view); image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) noise_image=DestroyImage(noise_image); return(noise_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l u e S h i f t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlueShiftImage() mutes the colors of the image to simulate a scene at % nighttime in the moonlight. % % The format of the BlueShiftImage method is: % % Image *BlueShiftImage(const Image *image,const double factor, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o factor: the shift factor. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *BlueShiftImage(const Image *image,const double factor, ExceptionInfo *exception) { #define BlueShiftImageTag "BlueShift/Image" CacheView *image_view, *shift_view; Image *shift_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Allocate blue shift image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); shift_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (shift_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse) { shift_image=DestroyImage(shift_image); return((Image *) NULL); } /* Blue-shift DirectClass image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); shift_view=AcquireAuthenticCacheView(shift_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,shift_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; Quantum quantum; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { quantum=GetPixelRed(image,p); if (GetPixelGreen(image,p) < quantum) quantum=GetPixelGreen(image,p); if (GetPixelBlue(image,p) < quantum) quantum=GetPixelBlue(image,p); pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum); pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum); pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum); quantum=GetPixelRed(image,p); if (GetPixelGreen(image,p) > quantum) quantum=GetPixelGreen(image,p); if (GetPixelBlue(image,p) > quantum) quantum=GetPixelBlue(image,p); pixel.red=0.5*(pixel.red+factor*quantum); pixel.green=0.5*(pixel.green+factor*quantum); pixel.blue=0.5*(pixel.blue+factor*quantum); SetPixelRed(shift_image,ClampToQuantum(pixel.red),q); SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q); SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q); p+=GetPixelChannels(image); q+=GetPixelChannels(shift_image); } sync=SyncCacheViewAuthenticPixels(shift_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_BlueShiftImage) #endif proceed=SetImageProgress(image,BlueShiftImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); shift_view=DestroyCacheView(shift_view); if (status == MagickFalse) shift_image=DestroyImage(shift_image); return(shift_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h a r c o a l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CharcoalImage() creates a new image that is a copy of an existing one with % the edge highlighted. It allocates the memory necessary for the new Image % structure and returns a pointer to the new image. 
% % The format of the CharcoalImage method is: % % Image *CharcoalImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CharcoalImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *charcoal_image, *clone_image, *edge_image; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); clone_image=CloneImage(image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return((Image *) NULL); edge_image=EdgeImage(clone_image,radius,exception); clone_image=DestroyImage(clone_image); if (edge_image == (Image *) NULL) return((Image *) NULL); charcoal_image=BlurImage(edge_image,radius,sigma,exception); edge_image=DestroyImage(edge_image); if (charcoal_image == (Image *) NULL) return((Image *) NULL); (void) NormalizeImage(charcoal_image,exception); (void) NegateImage(charcoal_image,MagickFalse,exception); (void) GrayscaleImage(charcoal_image,image->intensity,exception); return(charcoal_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorizeImage() blends the fill color with each pixel in the image. % A percentage blend is specified with opacity. Control the application % of different color components by specifying a different percentage for % each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue). % % The format of the ColorizeImage method is: % % Image *ColorizeImage(const Image *image,const char *blend, % const PixelInfo *colorize,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o blend: A character string indicating the level of blending as a % percentage. % % o colorize: A color value. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ColorizeImage(const Image *image,const char *blend, const PixelInfo *colorize,ExceptionInfo *exception) { #define ColorizeImageTag "Colorize/Image" #define Colorize(pixel,blend_percentage,colorize) \ (((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0) CacheView *image_view; GeometryInfo geometry_info; Image *colorize_image; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; PixelInfo blend_percentage; ssize_t y; /* Allocate colorized image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); colorize_image=CloneImage(image,0,0,MagickTrue,exception); if (colorize_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse) { colorize_image=DestroyImage(colorize_image); return((Image *) NULL); } if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) || (IsPixelInfoGray(colorize) != MagickFalse)) (void) SetImageColorspace(colorize_image,sRGBColorspace,exception); if ((colorize_image->alpha_trait == UndefinedPixelTrait) && (colorize->alpha_trait != UndefinedPixelTrait)) (void) SetImageAlpha(colorize_image,OpaqueAlpha,exception); if (blend == (const char *) NULL) return(colorize_image); GetPixelInfo(colorize_image,&blend_percentage); flags=ParseGeometry(blend,&geometry_info); blend_percentage.red=geometry_info.rho; blend_percentage.green=geometry_info.rho; blend_percentage.blue=geometry_info.rho; blend_percentage.black=geometry_info.rho; blend_percentage.alpha=(MagickRealType) TransparentAlpha; if ((flags & SigmaValue) != 0) blend_percentage.green=geometry_info.sigma; if ((flags & XiValue) != 0) blend_percentage.blue=geometry_info.xi; if ((flags & PsiValue) != 0) blend_percentage.alpha=geometry_info.psi; if (blend_percentage.colorspace == CMYKColorspace) { if ((flags & PsiValue) != 0) blend_percentage.black=geometry_info.psi; if ((flags & ChiValue) != 0) blend_percentage.alpha=geometry_info.chi; } /* Colorize DirectClass image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(colorize_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(colorize_image,colorize_image,colorize_image->rows,1) #endif for (y=0; y < (ssize_t) colorize_image->rows; y++) { MagickBooleanType sync; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) colorize_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++) { PixelTrait traits = GetPixelChannelTraits(colorize_image, (PixelChannel) i); if (traits == UndefinedPixelTrait) continue; if (((traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(colorize_image,q) <= (QuantumRange/2))) continue; SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum( Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i), GetPixelInfoChannel(colorize,(PixelChannel) i))),q); } q+=GetPixelChannels(colorize_image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ColorizeImage) #endif proceed=SetImageProgress(image,ColorizeImageTag,progress++, colorize_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) colorize_image=DestroyImage(colorize_image); return(colorize_image); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r M a t r i x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorMatrixImage() applies color transformation to an image. This method % permits saturation changes, hue rotation, luminance to alpha, and various % other effects. Although variable-sized transformation matrices can be used, % typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA % (or RGBA with offsets). The matrix is similar to those used by Adobe Flash % except offsets are in column 6 rather than 5 (in support of CMYKA images) % and offsets are normalized (divide Flash offset by 255). % % The format of the ColorMatrixImage method is: % % Image *ColorMatrixImage(const Image *image, % const KernelInfo *color_matrix,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o color_matrix: the color matrix. % % o exception: return any errors or warnings in this structure. % */ /* FUTURE: modify to make use of a MagickMatrix Multiply function that should be provided in "matrix.c" (ASIDE: actually distorts should do this too but currently doesn't) */ MagickExport Image *ColorMatrixImage(const Image *image, const KernelInfo *color_matrix,ExceptionInfo *exception) { #define ColorMatrixImageTag "ColorMatrix/Image" CacheView *color_view, *image_view; double ColorMatrix[6][6] = { { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 }, { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 }, { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 }, { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 } }; Image *color_image; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t u, v, y; /* Map given color_matrix into a 6x6 matrix RGBKA and a constant */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); i=0; for (v=0; v < (ssize_t) color_matrix->height; v++) for (u=0; u < (ssize_t) color_matrix->width; u++) { if ((v < 6) && (u < 6)) ColorMatrix[v][u]=color_matrix->values[i]; i++; } /* Initialize color image. */ color_image=CloneImage(image,0,0,MagickTrue,exception); if (color_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse) { color_image=DestroyImage(color_image); return((Image *) NULL); } if (image->debug != MagickFalse) { char format[MagickPathExtent], *message; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " ColorMatrix image with color matrix:"); message=AcquireString(""); for (v=0; v < 6; v++) { *message='\0'; (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < 6; u++) { (void) FormatLocaleString(format,MagickPathExtent,"%+f ", ColorMatrix[v][u]); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } /* Apply the ColorMatrix to image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); color_view=AcquireAuthenticCacheView(color_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,color_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t v; size_t height; GetPixelInfoPixel(image,p,&pixel); height=color_matrix->height > 6 ? 6UL : color_matrix->height; for (v=0; v < (ssize_t) height; v++) { double sum; sum=ColorMatrix[v][0]*GetPixelRed(image,p)+ColorMatrix[v][1]* GetPixelGreen(image,p)+ColorMatrix[v][2]*GetPixelBlue(image,p); if (image->colorspace == CMYKColorspace) sum+=ColorMatrix[v][3]*GetPixelBlack(image,p); if (image->alpha_trait != UndefinedPixelTrait) sum+=ColorMatrix[v][4]*GetPixelAlpha(image,p); sum+=QuantumRange*ColorMatrix[v][5]; switch (v) { case 0: pixel.red=sum; break; case 1: pixel.green=sum; break; case 2: pixel.blue=sum; break; case 3: pixel.black=sum; break; case 4: pixel.alpha=sum; break; default: break; } } SetPixelViaPixelInfo(color_image,&pixel,q); p+=GetPixelChannels(image); q+=GetPixelChannels(color_image); } if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ColorMatrixImage) #endif proceed=SetImageProgress(image,ColorMatrixImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } color_view=DestroyCacheView(color_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) color_image=DestroyImage(color_image); return(color_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyFxInfo() deallocates memory associated with an FxInfo structure. % % The format of the DestroyFxInfo method is: % % ImageInfo *DestroyFxInfo(ImageInfo *fx_info) % % A description of each parameter follows: % % o fx_info: the fx info. 
% */ MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info) { register ssize_t i; fx_info->exception=DestroyExceptionInfo(fx_info->exception); fx_info->expression=DestroyString(fx_info->expression); fx_info->symbols=DestroySplayTree(fx_info->symbols); fx_info->colors=DestroySplayTree(fx_info->colors); for (i=(ssize_t) GetImageListLength(fx_info->images)-1; i >= 0; i--) fx_info->view[i]=DestroyCacheView(fx_info->view[i]); fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view); fx_info->random_info=DestroyRandomInfo(fx_info->random_info); fx_info=(FxInfo *) RelinquishMagickMemory(fx_info); return(fx_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + F x E v a l u a t e C h a n n e l E x p r e s s i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FxEvaluateChannelExpression() evaluates an expression and returns the % results. % % The format of the FxEvaluateExpression method is: % % double FxEvaluateChannelExpression(FxInfo *fx_info, % const PixelChannel channel,const ssize_t x,const ssize_t y, % double *alpha,Exceptioninfo *exception) % double FxEvaluateExpression(FxInfo *fx_info, % double *alpha,Exceptioninfo *exception) % % A description of each parameter follows: % % o fx_info: the fx info. % % o channel: the channel. % % o x,y: the pixel position. % % o alpha: the result. % % o exception: return any errors or warnings in this structure. % */ static double FxChannelStatistics(FxInfo *fx_info,Image *image, PixelChannel channel,const char *symbol,ExceptionInfo *exception) { ChannelType channel_mask; char key[MagickPathExtent], statistic[MagickPathExtent]; const char *value; register const char *p; channel_mask=UndefinedChannel; for (p=symbol; (*p != '.') && (*p != '\0'); p++) ; if (*p == '.') { ssize_t option; option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1); if (option >= 0) { channel=(PixelChannel) option; channel_mask=SetPixelChannelMask(image,(ChannelType) (1 << channel)); } } (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image, (double) channel,symbol); value=(const char *) GetValueFromSplayTree(fx_info->symbols,key); if (value != (const char *) NULL) { if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); return(QuantumScale*StringToDouble(value,(char **) NULL)); } (void) DeleteNodeFromSplayTree(fx_info->symbols,key); if (LocaleNCompare(symbol,"depth",5) == 0) { size_t depth; depth=GetImageDepth(image,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",(double) depth); } if (LocaleNCompare(symbol,"kurtosis",8) == 0) { double kurtosis, skewness; (void) GetImageKurtosis(image,&kurtosis,&skewness,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%g",kurtosis); } if (LocaleNCompare(symbol,"maxima",6) == 0) { double maxima, minima; (void) GetImageRange(image,&minima,&maxima,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%g",maxima); } if (LocaleNCompare(symbol,"mean",4) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%g",mean); } if (LocaleNCompare(symbol,"minima",6) == 0) { double maxima, minima; (void) GetImageRange(image,&minima,&maxima,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%g",minima); } if (LocaleNCompare(symbol,"skewness",8) == 0) { double kurtosis, skewness; (void) 
GetImageKurtosis(image,&kurtosis,&skewness,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%g",skewness); } if (LocaleNCompare(symbol,"standard_deviation",18) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%g", standard_deviation); } if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key), ConstantString(statistic)); return(QuantumScale*StringToDouble(statistic,(char **) NULL)); } static double FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t, const ssize_t,const char *,size_t *,double *,ExceptionInfo *); static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta) { if (beta != 0) return(FxGCD(beta,alpha % beta)); return(alpha); } static inline const char *FxSubexpression(const char *expression, ExceptionInfo *exception) { const char *subexpression; register ssize_t level; level=0; subexpression=expression; while ((*subexpression != '\0') && ((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL))) { if (strchr("(",(int) *subexpression) != (char *) NULL) level++; else if (strchr(")",(int) *subexpression) != (char *) NULL) level--; subexpression++; } if (*subexpression == '\0') (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnbalancedParenthesis","`%s'",expression); return(subexpression); } static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel, const ssize_t x,const ssize_t y,const char *expression, ExceptionInfo *exception) { char *q, subexpression[MagickPathExtent], symbol[MagickPathExtent]; const char *p, *value; Image *image; PixelInfo pixel; double alpha, beta; PointInfo point; register ssize_t i; size_t depth, length, level; p=expression; i=GetImageIndexInList(fx_info->images); depth=0; level=0; point.x=(double) x; point.y=(double) y; if (isalpha((int) ((unsigned char) *(p+1))) == 0) { if (strchr("suv",(int) *p) != (char *) NULL) { switch (*p) { case 's': default: { i=GetImageIndexInList(fx_info->images); break; } case 'u': i=0; break; case 'v': i=1; break; } p++; if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, &depth,&beta,exception); i=(ssize_t) alpha; p++; } if (*p == '.') p++; } if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0)) { p++; if (*p == '{') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '{') level++; else if (*p == '}') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, &depth,&beta,exception); point.x=alpha; point.y=beta; p++; } else if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, &depth,&beta,exception); point.x+=alpha; point.y+=beta; p++; } if (*p == '.') p++; } } length=GetImageListLength(fx_info->images); while (i < 0) i+=(ssize_t) length; if (length != 0) i%=length; image=GetImageFromList(fx_info->images,i); if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "NoSuchImage","`%s'",expression); return(0.0); } GetPixelInfo(image,&pixel); 
(void) InterpolatePixelInfo(image,fx_info->view[i],image->interpolate, point.x,point.y,&pixel,exception); if ((strlen(p) > 2) && (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) && (LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) && (LocaleCompare(p,"saturation") != 0) && (LocaleCompare(p,"lightness") != 0)) { char name[MagickPathExtent]; (void) CopyMagickString(name,p,MagickPathExtent); for (q=name+(strlen(name)-1); q > name; q--) { if (*q == ')') break; if (*q == '.') { *q='\0'; break; } } if ((strlen(name) > 2) && (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL)) { PixelInfo *color; color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name); if (color != (PixelInfo *) NULL) { pixel=(*color); p+=strlen(name); } else { MagickBooleanType status; status=QueryColorCompliance(name,AllCompliance,&pixel, fx_info->exception); if (status != MagickFalse) { (void) AddValueToSplayTree(fx_info->colors,ConstantString( name),ClonePixelInfo(&pixel)); p+=strlen(name); } } } } (void) CopyMagickString(symbol,p,MagickPathExtent); StripString(symbol); if (*symbol == '\0') { switch (channel) { case RedPixelChannel: return(QuantumScale*pixel.red); case GreenPixelChannel: return(QuantumScale*pixel.green); case BluePixelChannel: return(QuantumScale*pixel.blue); case BlackPixelChannel: { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), ImageError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.black); } case AlphaPixelChannel: { if (pixel.alpha_trait == UndefinedPixelTrait) return(1.0); alpha=(double) (QuantumScale*pixel.alpha); return(alpha); } case IndexPixelChannel: return(0.0); case IntensityPixelChannel: { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image,&pixel,quantum_pixel); return(QuantumScale*GetPixelIntensity(image,quantum_pixel)); } default: break; } (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",p); return(0.0); } switch (*symbol) { case 'A': case 'a': { if (LocaleCompare(symbol,"a") == 0) return((QuantumScale*pixel.alpha)); break; } case 'B': case 'b': { if (LocaleCompare(symbol,"b") == 0) return(QuantumScale*pixel.blue); break; } case 'C': case 'c': { if (LocaleNCompare(symbol,"channel",7) == 0) { GeometryInfo channel_info; MagickStatusType flags; flags=ParseGeometry(symbol+7,&channel_info); if (image->colorspace == CMYKColorspace) switch (channel) { case CyanPixelChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case MagentaPixelChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case YellowPixelChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackPixelChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } case AlphaPixelChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } default: return(0.0); } switch (channel) { case RedPixelChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case GreenPixelChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case BluePixelChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackPixelChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } case AlphaPixelChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } default: return(0.0); } } 
if (LocaleCompare(symbol,"c") == 0) return(QuantumScale*pixel.red); break; } case 'D': case 'd': { if (LocaleNCompare(symbol,"depth",5) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'G': case 'g': { if (LocaleCompare(symbol,"g") == 0) return(QuantumScale*pixel.green); break; } case 'K': case 'k': { if (LocaleNCompare(symbol,"kurtosis",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"k") == 0) { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.black); } break; } case 'H': case 'h': { if (LocaleCompare(symbol,"h") == 0) return((double) image->rows); if (LocaleCompare(symbol,"hue") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(hue); } break; } case 'I': case 'i': { if ((LocaleCompare(symbol,"image.depth") == 0) || (LocaleCompare(symbol,"image.minima") == 0) || (LocaleCompare(symbol,"image.maxima") == 0) || (LocaleCompare(symbol,"image.mean") == 0) || (LocaleCompare(symbol,"image.kurtosis") == 0) || (LocaleCompare(symbol,"image.skewness") == 0) || (LocaleCompare(symbol,"image.standard_deviation") == 0)) return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception)); if (LocaleCompare(symbol,"image.resolution.x") == 0) return(image->resolution.x); if (LocaleCompare(symbol,"image.resolution.y") == 0) return(image->resolution.y); if (LocaleCompare(symbol,"intensity") == 0) { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image,&pixel,quantum_pixel); return(QuantumScale*GetPixelIntensity(image,quantum_pixel)); } if (LocaleCompare(symbol,"i") == 0) return((double) x); break; } case 'J': case 'j': { if (LocaleCompare(symbol,"j") == 0) return((double) y); break; } case 'L': case 'l': { if (LocaleCompare(symbol,"lightness") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(lightness); } if (LocaleCompare(symbol,"luma") == 0) { double luma; luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luma); } if (LocaleCompare(symbol,"luminance") == 0) { double luminence; luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luminence); } break; } case 'M': case 'm': { if (LocaleNCompare(symbol,"maxima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"mean",4) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"minima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"m") == 0) return(QuantumScale*pixel.green); break; } case 'N': case 'n': { if (LocaleCompare(symbol,"n") == 0) return((double) GetImageListLength(fx_info->images)); break; } case 'O': case 'o': { if (LocaleCompare(symbol,"o") == 0) return(QuantumScale*pixel.alpha); break; } case 'P': case 'p': { if (LocaleCompare(symbol,"page.height") == 0) return((double) image->page.height); if (LocaleCompare(symbol,"page.width") == 0) return((double) image->page.width); if (LocaleCompare(symbol,"page.x") == 0) return((double) image->page.x); if (LocaleCompare(symbol,"page.y") == 0) return((double) image->page.y); break; } case 'Q': case 'q': { if (LocaleCompare(symbol,"quality") == 0) 
return((double) image->quality); break; } case 'R': case 'r': { if (LocaleCompare(symbol,"resolution.x") == 0) return(image->resolution.x); if (LocaleCompare(symbol,"resolution.y") == 0) return(image->resolution.y); if (LocaleCompare(symbol,"r") == 0) return(QuantumScale*pixel.red); break; } case 'S': case 's': { if (LocaleCompare(symbol,"saturation") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(saturation); } if (LocaleNCompare(symbol,"skewness",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"standard_deviation",18) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'T': case 't': { if (LocaleCompare(symbol,"t") == 0) return((double) GetImageIndexInList(fx_info->images)); break; } case 'W': case 'w': { if (LocaleCompare(symbol,"w") == 0) return((double) image->columns); break; } case 'Y': case 'y': { if (LocaleCompare(symbol,"y") == 0) return(QuantumScale*pixel.blue); break; } case 'Z': case 'z': { if (LocaleCompare(symbol,"z") == 0) return((double)GetImageDepth(image, fx_info->exception)); break; } default: break; } value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol); if (value != (const char *) NULL) return(StringToDouble(value,(char **) NULL)); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",symbol); return(0.0); } static const char *FxOperatorPrecedence(const char *expression, ExceptionInfo *exception) { typedef enum { UndefinedPrecedence, NullPrecedence, BitwiseComplementPrecedence, ExponentPrecedence, ExponentialNotationPrecedence, MultiplyPrecedence, AdditionPrecedence, ShiftPrecedence, RelationalPrecedence, EquivalencyPrecedence, BitwiseAndPrecedence, BitwiseOrPrecedence, LogicalAndPrecedence, LogicalOrPrecedence, TernaryPrecedence, AssignmentPrecedence, CommaPrecedence, SeparatorPrecedence } FxPrecedence; FxPrecedence precedence, target; register const char *subexpression; register int c; size_t level; c=0; level=0; subexpression=(const char *) NULL; target=NullPrecedence; while (*expression != '\0') { precedence=UndefinedPrecedence; if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@')) { expression++; continue; } switch (*expression) { case 'A': case 'a': { #if defined(MAGICKCORE_HAVE_ACOSH) if (LocaleNCompare(expression,"acosh",5) == 0) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (LocaleNCompare(expression,"asinh",5) == 0) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ATANH) if (LocaleNCompare(expression,"atanh",5) == 0) { expression+=5; break; } #endif if (LocaleNCompare(expression,"atan2",5) == 0) { expression+=5; break; } break; } case 'E': case 'e': { if ((isdigit((int) ((unsigned char) c)) != 0) && ((LocaleNCompare(expression,"E+",2) == 0) || (LocaleNCompare(expression,"E-",2) == 0))) { expression+=2; /* scientific notation */ break; } } case 'J': case 'j': { if ((LocaleNCompare(expression,"j0",2) == 0) || (LocaleNCompare(expression,"j1",2) == 0)) { expression+=2; break; } break; } case '#': { while (isxdigit((int) ((unsigned char) *(expression+1))) != 0) expression++; break; } default: break; } if ((c == (int) '{') || (c == (int) '[')) level++; else if ((c == (int) '}') || (c == (int) ']')) level--; if (level == 0) switch ((unsigned char) *expression) { case '~': case '!': { precedence=BitwiseComplementPrecedence; break; } case '^': case '@': { 
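      /*
        Each case in this switch maps a top-level token to a precedence
        tier; the scan keeps the loosest-binding operator found (with
        left-to-right associativity for most tiers and right-to-left for
        complement, ternary and assignment), and FxEvaluateSubexpression()
        splits the expression at that operator.  For example, in "1+2*3"
        the '+' outranks the '*', so the recursion evaluates "1" and "2*3"
        separately.
      */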
precedence=ExponentPrecedence; break; } default: { if (((c != 0) && ((isdigit((int) ((unsigned char) c)) != 0) || (strchr(")",(int) ((unsigned char) c)) != (char *) NULL))) && (((islower((int) ((unsigned char) *expression)) != 0) || (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) || ((isdigit((int) ((unsigned char) c)) == 0) && (isdigit((int) ((unsigned char) *expression)) != 0))) && (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL)) precedence=MultiplyPrecedence; break; } case '*': case '/': case '%': { precedence=MultiplyPrecedence; break; } case '+': case '-': { if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) || (isalpha(c) != 0)) precedence=AdditionPrecedence; break; } case LeftShiftOperator: case RightShiftOperator: { precedence=ShiftPrecedence; break; } case '<': case LessThanEqualOperator: case GreaterThanEqualOperator: case '>': { precedence=RelationalPrecedence; break; } case EqualOperator: case NotEqualOperator: { precedence=EquivalencyPrecedence; break; } case '&': { precedence=BitwiseAndPrecedence; break; } case '|': { precedence=BitwiseOrPrecedence; break; } case LogicalAndOperator: { precedence=LogicalAndPrecedence; break; } case LogicalOrOperator: { precedence=LogicalOrPrecedence; break; } case ExponentialNotation: { precedence=ExponentialNotationPrecedence; break; } case ':': case '?': { precedence=TernaryPrecedence; break; } case '=': { precedence=AssignmentPrecedence; break; } case ',': { precedence=CommaPrecedence; break; } case ';': { precedence=SeparatorPrecedence; break; } } if ((precedence == BitwiseComplementPrecedence) || (precedence == TernaryPrecedence) || (precedence == AssignmentPrecedence)) { if (precedence > target) { /* Right-to-left associativity. */ target=precedence; subexpression=expression; } } else if (precedence >= target) { /* Left-to-right associativity. */ target=precedence; subexpression=expression; } if (strchr("(",(int) *expression) != (char *) NULL) expression=FxSubexpression(expression,exception); c=(int) (*expression++); } return(subexpression); } static double FxEvaluateSubexpression(FxInfo *fx_info, const PixelChannel channel,const ssize_t x,const ssize_t y, const char *expression,size_t *depth,double *beta,ExceptionInfo *exception) { #define FxMaxParenthesisDepth 58 char *q, subexpression[MagickPathExtent]; double alpha, gamma; register const char *p; *beta=0.0; if (exception->severity >= ErrorException) return(0.0); while (isspace((int) ((unsigned char) *expression)) != 0) expression++; if (*expression == '\0') return(0.0); *subexpression='\0'; p=FxOperatorPrecedence(expression,exception); if (p != (const char *) NULL) { (void) CopyMagickString(subexpression,expression,(size_t) (p-expression+1)); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth, beta,exception); switch ((unsigned char) *p) { case '~': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); *beta=(double) (~(size_t) *beta); return(*beta); } case '!': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(*beta == 0.0 ? 
1.0 : 0.0); } case '^': { *beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth, beta,exception)); return(*beta); } case '*': case ExponentialNotation: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha*(*beta)); } case '/': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); if (*beta == 0.0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"DivideByZero","`%s'",expression); return(0.0); } return(alpha/(*beta)); } case '%': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); *beta=fabs(floor((*beta)+0.5)); if (*beta == 0.0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"DivideByZero","`%s'",expression); return(0.0); } return(fmod(alpha,*beta)); } case '+': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha+(*beta)); } case '-': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha-(*beta)); } case LeftShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); *beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5)); return(*beta); } case RightShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); *beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5)); return(*beta); } case '<': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha < *beta ? 1.0 : 0.0); } case LessThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha <= *beta ? 1.0 : 0.0); } case '>': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha > *beta ? 1.0 : 0.0); } case GreaterThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha >= *beta ? 1.0 : 0.0); } case EqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0); } case NotEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0); } case '&': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); *beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5)); return(*beta); } case '|': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); *beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5)); return(*beta); } case LogicalAndOperator: { p++; if (alpha <= 0.0) { *beta=0.0; return(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta, exception); *beta=(gamma > 0.0) ? 1.0 : 0.0; return(*beta); } case LogicalOrOperator: { p++; if (alpha > 0.0) { *beta=1.0; return(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta, exception); *beta=(gamma > 0.0) ? 
1.0 : 0.0; return(*beta); } case '?': { (void) CopyMagickString(subexpression,++p,MagickPathExtent); q=subexpression; p=StringToken(":",&q); if (q == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); return(0.0); } if (fabs(alpha) >= MagickEpsilon) gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta, exception); else gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,depth,beta, exception); return(gamma); } case '=': { char numeric[MagickPathExtent]; q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); return(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); (void) FormatLocaleString(numeric,MagickPathExtent,"%g",*beta); (void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression); (void) AddValueToSplayTree(fx_info->symbols,ConstantString( subexpression),ConstantString(numeric)); return(*beta); } case ',': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha); } case ';': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(*beta); } default: { gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta, exception); return(gamma); } } } if (strchr("(",(int) *expression) != (char *) NULL) { (*depth)++; if (*depth >= FxMaxParenthesisDepth) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "ParenthesisNestedTooDeeply","`%s'",expression); (void) CopyMagickString(subexpression,expression+1,MagickPathExtent); subexpression[strlen(subexpression)-1]='\0'; gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth, beta,exception); (*depth)--; return(gamma); } switch (*expression) { case '+': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta, exception); return(1.0*gamma); } case '-': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta, exception); return(-1.0*gamma); } case '~': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta, exception); return((double) (~(size_t) (gamma+0.5))); } case 'A': case 'a': { if (LocaleNCompare(expression,"abs",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(fabs(alpha)); } #if defined(MAGICKCORE_HAVE_ACOSH) if (LocaleNCompare(expression,"acosh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(acosh(alpha)); } #endif if (LocaleNCompare(expression,"acos",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(acos(alpha)); } #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"airy",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); if (alpha == 0.0) return(1.0); gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha); return(gamma*gamma); } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (LocaleNCompare(expression,"asinh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(asinh(alpha)); } #endif if (LocaleNCompare(expression,"asin",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(asin(alpha)); } if (LocaleNCompare(expression,"alt",3) == 0) { 
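      /*
        alt(n): evaluate the argument and return -1.0 for odd integers and
        +1.0 for even ones; e.g. "alt(i)" flips sign on every other column.
      */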
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0); } if (LocaleNCompare(expression,"atan2",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(atan2(alpha,*beta)); } #if defined(MAGICKCORE_HAVE_ATANH) if (LocaleNCompare(expression,"atanh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(atanh(alpha)); } #endif if (LocaleNCompare(expression,"atan",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(atan(alpha)); } if (LocaleCompare(expression,"a") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'B': case 'b': { if (LocaleCompare(expression,"b") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'C': case 'c': { if (LocaleNCompare(expression,"ceil",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(ceil(alpha)); } if (LocaleNCompare(expression,"clamp",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); if (alpha < 0.0) return(0.0); if (alpha > 1.0) return(1.0); return(alpha); } if (LocaleNCompare(expression,"cosh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(cosh(alpha)); } if (LocaleNCompare(expression,"cos",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(cos(alpha)); } if (LocaleCompare(expression,"c") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'D': case 'd': { if (LocaleNCompare(expression,"debug",5) == 0) { const char *type; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); if (fx_info->images->colorspace == CMYKColorspace) switch (channel) { case CyanPixelChannel: type="cyan"; break; case MagentaPixelChannel: type="magenta"; break; case YellowPixelChannel: type="yellow"; break; case AlphaPixelChannel: type="opacity"; break; case BlackPixelChannel: type="black"; break; default: type="unknown"; break; } else switch (channel) { case RedPixelChannel: type="red"; break; case GreenPixelChannel: type="green"; break; case BluePixelChannel: type="blue"; break; case AlphaPixelChannel: type="opacity"; break; default: type="unknown"; break; } (void) CopyMagickString(subexpression,expression+6,MagickPathExtent); if (strlen(subexpression) > 1) subexpression[strlen(subexpression)-1]='\0'; if (fx_info->file != (FILE *) NULL) (void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: " "%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type, subexpression,GetMagickPrecision(),alpha); return(0.0); } if (LocaleNCompare(expression,"drc",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return((alpha/(*beta*(alpha-1.0)+1.0))); } break; } case 'E': case 'e': { if (LocaleCompare(expression,"epsilon") == 0) return(MagickEpsilon); #if defined(MAGICKCORE_HAVE_ERF) if (LocaleNCompare(expression,"erf",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(erf(alpha)); } #endif if (LocaleNCompare(expression,"exp",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(exp(alpha)); } if (LocaleCompare(expression,"e") == 0) return(2.7182818284590452354); break; } case 
'F': case 'f': { if (LocaleNCompare(expression,"floor",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(floor(alpha)); } break; } case 'G': case 'g': { if (LocaleNCompare(expression,"gauss",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); gamma=exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI); return(gamma); } if (LocaleNCompare(expression,"gcd",3) == 0) { MagickOffsetType gcd; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+ 0.5)); return((double) gcd); } if (LocaleCompare(expression,"g") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'H': case 'h': { if (LocaleCompare(expression,"h") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); if (LocaleCompare(expression,"hue") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); if (LocaleNCompare(expression,"hypot",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(hypot(alpha,*beta)); } break; } case 'K': case 'k': { if (LocaleCompare(expression,"k") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'I': case 'i': { if (LocaleCompare(expression,"intensity") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); if (LocaleNCompare(expression,"int",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(floor(alpha)); } if (LocaleNCompare(expression,"isnan",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return((double) !!IsNaN(alpha)); } if (LocaleCompare(expression,"i") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'J': case 'j': { if (LocaleCompare(expression,"j") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); #if defined(MAGICKCORE_HAVE_J0) if (LocaleNCompare(expression,"j0",2) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth, beta,exception); return(j0(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"j1",2) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth, beta,exception); return(j1(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"jinc",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); if (alpha == 0.0) return(1.0); gamma=(2.0*j1((MagickPI*alpha))/(MagickPI*alpha)); return(gamma); } #endif break; } case 'L': case 'l': { if (LocaleNCompare(expression,"ln",2) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth, beta,exception); return(log(alpha)); } if (LocaleNCompare(expression,"logtwo",6) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,depth, beta,exception); return(log10(alpha))/log10(2.0); } if (LocaleNCompare(expression,"log",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(log10(alpha)); } if (LocaleCompare(expression,"lightness") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'M': case 'm': { if (LocaleCompare(expression,"MaxRGB") == 0) return(QuantumRange); if (LocaleNCompare(expression,"maxima",6) == 0) break; if (LocaleNCompare(expression,"max",3) == 0) { 
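      /*
        Two-argument calls such as max(a,b) rely on the comma handler: the
        left argument was already parsed into alpha and the right one into
        *beta, the same convention used by min(), mod(), pow(), atan2(),
        hypot(), gcd() and drc() elsewhere in this switch.
      */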
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(alpha > *beta ? alpha : *beta); } if (LocaleNCompare(expression,"minima",6) == 0) break; if (LocaleNCompare(expression,"min",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(alpha < *beta ? alpha : *beta); } if (LocaleNCompare(expression,"mod",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); gamma=alpha-floor((alpha/(*beta)))*(*beta); return(gamma); } if (LocaleCompare(expression,"m") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'N': case 'n': { if (LocaleNCompare(expression,"not",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return((double) (alpha < MagickEpsilon)); } if (LocaleCompare(expression,"n") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'O': case 'o': { if (LocaleCompare(expression,"Opaque") == 0) return(1.0); if (LocaleCompare(expression,"o") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'P': case 'p': { if (LocaleCompare(expression,"phi") == 0) return(MagickPHI); if (LocaleCompare(expression,"pi") == 0) return(MagickPI); if (LocaleNCompare(expression,"pow",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(pow(alpha,*beta)); } if (LocaleCompare(expression,"p") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'Q': case 'q': { if (LocaleCompare(expression,"QuantumRange") == 0) return(QuantumRange); if (LocaleCompare(expression,"QuantumScale") == 0) return(QuantumScale); break; } case 'R': case 'r': { if (LocaleNCompare(expression,"rand",4) == 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxEvaluateSubexpression) #endif alpha=GetPseudoRandomValue(fx_info->random_info); return(alpha); } if (LocaleNCompare(expression,"round",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(floor(alpha+0.5)); } if (LocaleCompare(expression,"r") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'S': case 's': { if (LocaleCompare(expression,"saturation") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); if (LocaleNCompare(expression,"sign",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(alpha < 0.0 ? 
-1.0 : 1.0); } if (LocaleNCompare(expression,"sinc",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); if (alpha == 0) return(1.0); gamma=sin((MagickPI*alpha))/(MagickPI*alpha); return(gamma); } if (LocaleNCompare(expression,"sinh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(sinh(alpha)); } if (LocaleNCompare(expression,"sin",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(sin(alpha)); } if (LocaleNCompare(expression,"sqrt",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(sqrt(alpha)); } if (LocaleNCompare(expression,"squish",6) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,depth, beta,exception); return((1.0/(1.0+exp(-alpha)))); } if (LocaleCompare(expression,"s") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'T': case 't': { if (LocaleNCompare(expression,"tanh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(tanh(alpha)); } if (LocaleNCompare(expression,"tan",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(tan(alpha)); } if (LocaleCompare(expression,"Transparent") == 0) return(0.0); if (LocaleNCompare(expression,"trunc",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); if (alpha >= 0.0) return(floor(alpha)); return(ceil(alpha)); } if (LocaleCompare(expression,"t") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'U': case 'u': { if (LocaleCompare(expression,"u") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'V': case 'v': { if (LocaleCompare(expression,"v") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'W': case 'w': { if (LocaleNCompare(expression,"while",5) == 0) { do { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth,beta,exception); } while (fabs(alpha) >= MagickEpsilon); return(*beta); } if (LocaleCompare(expression,"w") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'Y': case 'y': { if (LocaleCompare(expression,"y") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'Z': case 'z': { if (LocaleCompare(expression,"z") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } default: break; } q=(char *) expression; alpha=InterpretSiPrefixValue(expression,&q); if (q == expression) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); return(alpha); } MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info, double *alpha,ExceptionInfo *exception) { MagickBooleanType status; status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha, exception); return(status); } MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info, double *alpha,ExceptionInfo *exception) { FILE *file; MagickBooleanType status; file=fx_info->file; fx_info->file=(FILE *) NULL; status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha, exception); fx_info->file=file; return(status); } MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info, const PixelChannel channel,const ssize_t x,const ssize_t y, double *alpha,ExceptionInfo *exception) { double beta; size_t depth; depth=0; beta=0.0; 
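  /*
    depth guards the recursion against parenthesis nesting deeper than
    FxMaxParenthesisDepth, and beta receives the right-hand operand of the
    most recently parsed binary operator.  A minimal sketch of driving the
    evaluator directly (private API; the expression and coordinates are
    illustrative):

      double value = 0.0;
      FxInfo *info = AcquireFxInfo(image, "u*0.5", exception);
      (void) FxEvaluateChannelExpression(info, RedPixelChannel, 0, 0,
        &value, exception);
      info = DestroyFxInfo(info);
  */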
*alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,&depth, &beta,exception); return(exception->severity == OptionError ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FxImage() applies a mathematical expression to the specified image. % % The format of the FxImage method is: % % Image *FxImage(const Image *image,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o expression: A mathematical expression. % % o exception: return any errors or warnings in this structure. % */ static FxInfo **DestroyFxThreadSet(FxInfo **fx_info) { register ssize_t i; assert(fx_info != (FxInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (fx_info[i] != (FxInfo *) NULL) fx_info[i]=DestroyFxInfo(fx_info[i]); fx_info=(FxInfo **) RelinquishMagickMemory(fx_info); return(fx_info); } static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression, ExceptionInfo *exception) { char *fx_expression; FxInfo **fx_info; double alpha; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info)); if (fx_info == (FxInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return((FxInfo **) NULL); } (void) ResetMagickMemory(fx_info,0,number_threads*sizeof(*fx_info)); if (*expression != '@') fx_expression=ConstantString(expression); else fx_expression=FileToString(expression+1,~0UL,exception); for (i=0; i < (ssize_t) number_threads; i++) { MagickBooleanType status; fx_info[i]=AcquireFxInfo(image,fx_expression,exception); if (fx_info[i] == (FxInfo *) NULL) break; status=FxPreprocessExpression(fx_info[i],&alpha,exception); if (status == MagickFalse) break; } fx_expression=DestroyString(fx_expression); if (i < (ssize_t) number_threads) fx_info=DestroyFxThreadSet(fx_info); return(fx_info); } MagickExport Image *FxImage(const Image *image,const char *expression, ExceptionInfo *exception) { #define FxImageTag "Fx/Image" CacheView *fx_view, *image_view; FxInfo **magick_restrict fx_info; Image *fx_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); fx_info=AcquireFxThreadSet(image,expression,exception); if (fx_info == (FxInfo **) NULL) return((Image *) NULL); fx_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (fx_image == (Image *) NULL) { fx_info=DestroyFxThreadSet(fx_info); return((Image *) NULL); } if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse) { fx_info=DestroyFxThreadSet(fx_info); fx_image=DestroyImage(fx_image); return((Image *) NULL); } /* Fx image. 
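    Each row is processed in parallel; every OpenMP thread evaluates with
    its own FxInfo instance (fx_info[id]) so that the per-instance symbol
    and color tables are never shared between threads.  Channels carrying
    CopyPixelTrait, or pixels excluded by the write mask, are copied
    through unchanged; all other channels receive the clamped value
    QuantumRange*alpha.  A typical call (the expression is illustrative):

      Image *fx_image = FxImage(image, "(r+g+b)/3", exception);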
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); fx_view=AcquireAuthenticCacheView(fx_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,fx_image,fx_image->rows,1) #endif for (y=0; y < (ssize_t) fx_image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) fx_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel); if ((traits == UndefinedPixelTrait) || (fx_traits == UndefinedPixelTrait)) continue; if (((fx_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) <= (QuantumRange/2))) { SetPixelChannel(fx_image,channel,p[i],q); continue; } alpha=0.0; (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha, exception); q[i]=ClampToQuantum(QuantumRange*alpha); } p+=GetPixelChannels(image); q+=GetPixelChannels(fx_image); } if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxImage) #endif proceed=SetImageProgress(image,FxImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } fx_view=DestroyCacheView(fx_view); image_view=DestroyCacheView(image_view); fx_info=DestroyFxThreadSet(fx_info); if (status == MagickFalse) fx_image=DestroyImage(fx_image); return(fx_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I m p l o d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ImplodeImage() creates a new image that is a copy of an existing % one with the image pixels "implode" by the specified percentage. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ImplodeImage method is: % % Image *ImplodeImage(const Image *image,const double amount, % const PixelInterpolateMethod method,ExceptionInfo *exception) % % A description of each parameter follows: % % o implode_image: Method ImplodeImage returns a pointer to the image % after it is implode. A null image is returned if there is a memory % shortage. % % o image: the image. % % o amount: Define the extent of the implosion. % % o method: the pixel interpolation method. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ImplodeImage(const Image *image,const double amount, const PixelInterpolateMethod method,ExceptionInfo *exception) { #define ImplodeImageTag "Implode/Image" CacheView *canvas_view, *implode_view, *interpolate_view; Image *canvas, *implode_image; MagickBooleanType status; MagickOffsetType progress; double radius; PointInfo center, scale; ssize_t y; /* Initialize implode image attributes. 
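    The source is cloned to a working canvas (adding an opaque alpha
    channel when the clone lacks one but its background color is not fully
    opaque), and the result is written to a fresh DirectClass image so
    every pixel can be synthesized.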
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); canvas=CloneImage(image,0,0,MagickTrue,exception); if (canvas == (Image *) NULL) return((Image *) NULL); if ((canvas->alpha_trait == UndefinedPixelTrait) && (canvas->background_color.alpha != OpaqueAlpha)) (void) SetImageAlphaChannel(canvas,OpaqueAlphaChannel,exception); implode_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue, exception); if (implode_image == (Image *) NULL) { canvas=DestroyImage(canvas); return((Image *) NULL); } if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse) { canvas=DestroyImage(canvas); implode_image=DestroyImage(implode_image); return((Image *) NULL); } /* Compute scaling factor. */ scale.x=1.0; scale.y=1.0; center.x=0.5*canvas->columns; center.y=0.5*canvas->rows; radius=center.x; if (canvas->columns > canvas->rows) scale.y=(double) canvas->columns/(double) canvas->rows; else if (canvas->columns < canvas->rows) { scale.x=(double) canvas->rows/(double) canvas->columns; radius=center.y; } /* Implode image. */ status=MagickTrue; progress=0; canvas_view=AcquireVirtualCacheView(canvas,exception); interpolate_view=AcquireVirtualCacheView(canvas,exception); implode_view=AcquireAuthenticCacheView(implode_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(canvas,implode_image,canvas->rows,1) #endif for (y=0; y < (ssize_t) canvas->rows; y++) { double distance; PointInfo delta; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas->columns,1,exception); q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } delta.y=scale.y*(double) (y-center.y); for (x=0; x < (ssize_t) canvas->columns; x++) { register ssize_t i; /* Determine if the pixel is within an ellipse. */ if (GetPixelWriteMask(canvas,p) <= (QuantumRange/2)) { SetPixelBackgoundColor(implode_image,q); p+=GetPixelChannels(canvas); q+=GetPixelChannels(implode_image); continue; } delta.x=scale.x*(double) (x-center.x); distance=delta.x*delta.x+delta.y*delta.y; if (distance >= (radius*radius)) for (i=0; i < (ssize_t) GetPixelChannels(canvas); i++) { PixelChannel channel = GetPixelChannelChannel(canvas,i); PixelTrait traits = GetPixelChannelTraits(canvas,channel); PixelTrait implode_traits = GetPixelChannelTraits(implode_image, channel); if ((traits == UndefinedPixelTrait) || (implode_traits == UndefinedPixelTrait)) continue; SetPixelChannel(implode_image,channel,p[i],q); } else { double factor; /* Implode the pixel. 
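    Pixels inside the inscribed ellipse are resampled at a point pulled
    toward the center: the sample lies at center+factor*delta (after
    undoing the aspect scaling), with
    factor=sin(MagickPI*sqrt(distance)/(2*radius))^(-amount) and distance
    the squared scaled offset.  A positive amount drags the neighborhood
    inward (implode); a negative amount pushes it outward (explode).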
*/ factor=1.0; if (distance > 0.0) factor=pow(sin(MagickPI*sqrt((double) distance)/radius/2),-amount); status=InterpolatePixelChannels(canvas,interpolate_view,implode_image, method,(double) (factor*delta.x/scale.x+center.x),(double) (factor* delta.y/scale.y+center.y),q,exception); } p+=GetPixelChannels(canvas); q+=GetPixelChannels(implode_image); } if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse) status=MagickFalse; if (canvas->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ImplodeImage) #endif proceed=SetImageProgress(canvas,ImplodeImageTag,progress++, canvas->rows); if (proceed == MagickFalse) status=MagickFalse; } } implode_view=DestroyCacheView(implode_view); interpolate_view=DestroyCacheView(interpolate_view); canvas_view=DestroyCacheView(canvas_view); canvas=DestroyImage(canvas); if (status == MagickFalse) implode_image=DestroyImage(implode_image); return(implode_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o r p h I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The MorphImages() method requires a minimum of two images. The first % image is transformed into the second by a number of intervening images % as specified by frames. % % The format of the MorphImage method is: % % Image *MorphImages(const Image *image,const size_t number_frames, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_frames: Define the number of in-between image to generate. % The more in-between frames, the smoother the morph. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MorphImages(const Image *image,const size_t number_frames, ExceptionInfo *exception) { #define MorphImageTag "Morph/Image" double alpha, beta; Image *morph_image, *morph_images; MagickBooleanType status; MagickOffsetType scene; register const Image *next; register ssize_t n; ssize_t y; /* Clone first frame in sequence. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); morph_images=CloneImage(image,0,0,MagickTrue,exception); if (morph_images == (Image *) NULL) return((Image *) NULL); if (GetNextImageInList(image) == (Image *) NULL) { /* Morph single image. */ for (n=1; n < (ssize_t) number_frames; n++) { morph_image=CloneImage(image,0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n, number_frames); if (proceed == MagickFalse) status=MagickFalse; } } return(GetFirstImageInList(morph_images)); } /* Morph image sequence. 
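    For each adjacent pair of frames, number_frames in-between images are
    generated.  Frame n is sized by linear interpolation between the two
    endpoints, and each channel blends as alpha*current+beta*next with
    beta=(n+1)/(number_frames+1) and alpha=1-beta, easing the sequence
    from one endpoint to the other.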
*/ status=MagickTrue; scene=0; next=image; for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next)) { for (n=0; n < (ssize_t) number_frames; n++) { CacheView *image_view, *morph_view; beta=(double) (n+1.0)/(double) (number_frames+1.0); alpha=1.0-beta; morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta* GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta* GetNextImageInList(next)->rows+0.5),next->filter,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } status=SetImageStorageClass(morph_image,DirectClass,exception); if (status == MagickFalse) { morph_image=DestroyImage(morph_image); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns, morph_images->rows,GetNextImageInList(next)->filter,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } image_view=AcquireVirtualCacheView(morph_image,exception); morph_view=AcquireAuthenticCacheView(morph_images,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(morph_image,morph_image,morph_image->rows,1) #endif for (y=0; y < (ssize_t) morph_images->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1, exception); q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) morph_images->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++) { PixelChannel channel = GetPixelChannelChannel(morph_image,i); PixelTrait traits = GetPixelChannelTraits(morph_image,channel); PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel); if ((traits == UndefinedPixelTrait) || (morph_traits == UndefinedPixelTrait)) continue; if (((morph_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(morph_images,p) <= (QuantumRange/2))) { SetPixelChannel(morph_image,channel,p[i],q); continue; } SetPixelChannel(morph_image,channel,ClampToQuantum(alpha* GetPixelChannel(morph_images,channel,q)+beta*p[i]),q); } p+=GetPixelChannels(morph_image); q+=GetPixelChannels(morph_images); } sync=SyncCacheViewAuthenticPixels(morph_view,exception); if (sync == MagickFalse) status=MagickFalse; } morph_view=DestroyCacheView(morph_view); image_view=DestroyCacheView(image_view); morph_image=DestroyImage(morph_image); } if (n < (ssize_t) number_frames) break; /* Clone last frame in sequence. 
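    The untouched successor frame is appended after its in-betweens so the
    morphed list always ends on a real input frame.  Typical usage (the
    frame count is illustrative):

      Image *morph = MorphImages(images, 10, exception);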
*/ morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MorphImages) #endif proceed=SetImageProgress(image,MorphImageTag,scene, GetImageListLength(image)); if (proceed == MagickFalse) status=MagickFalse; } scene++; } if (GetNextImageInList(next) != (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } return(GetFirstImageInList(morph_images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P l a s m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PlasmaImage() initializes an image with plasma fractal values. The image % must be initialized with a base color and the random number generator % seeded before this method is called. % % The format of the PlasmaImage method is: % % MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment, % size_t attenuate,size_t depth,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o segment: Define the region to apply plasma fractals values. % % o attenuate: Define the plasma attenuation factor. % % o depth: Limit the plasma recursion depth. % % o exception: return any errors or warnings in this structure. % */ static inline Quantum PlasmaPixel(RandomInfo *random_info, const double pixel,const double noise) { Quantum plasma; plasma=ClampToQuantum(pixel+noise*GetPseudoRandomValue(random_info)- noise/2.0); if (plasma <= 0) return((Quantum) 0); if (plasma >= QuantumRange) return(QuantumRange); return(plasma); } static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view, CacheView *u_view,CacheView *v_view,RandomInfo *random_info, const SegmentInfo *segment,size_t attenuate,size_t depth, ExceptionInfo *exception) { double plasma; register const Quantum *magick_restrict u, *magick_restrict v; register Quantum *magick_restrict q; register ssize_t i; ssize_t x, x_mid, y, y_mid; if ((fabs(segment->x2-segment->x1) <= MagickEpsilon) && (fabs(segment->y2-segment->y1) <= MagickEpsilon)) return(MagickTrue); if (depth != 0) { MagickBooleanType status; SegmentInfo local_info; /* Divide the area into quadrants and recurse. 
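    This is classic midpoint displacement: the segment splits at its
    midpoints and the four quadrants recurse with depth-1 and attenuate+1,
    so the noise amplitude QuantumRange/(2*attenuate) decays at each
    level.  When depth reaches zero, the base case below seeds the edge
    midpoints and the center from averaged endpoint pixels plus bounded
    noise.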
*/ depth--; attenuate++; x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5); y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5); local_info=(*segment); local_info.x2=(double) x_mid; local_info.y2=(double) y_mid; (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth,exception); local_info=(*segment); local_info.y1=(double) y_mid; local_info.x2=(double) x_mid; (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth,exception); local_info=(*segment); local_info.x1=(double) x_mid; local_info.y2=(double) y_mid; (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth,exception); local_info=(*segment); local_info.x1=(double) x_mid; local_info.y1=(double) y_mid; status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth,exception); return(status); } x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5); y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5); if ((fabs(segment->x1-x_mid) < MagickEpsilon) && (fabs(segment->x2-x_mid) < MagickEpsilon) && (fabs(segment->y1-y_mid) < MagickEpsilon) && (fabs(segment->y2-y_mid) < MagickEpsilon)) return(MagickFalse); /* Average pixels and apply plasma. */ plasma=(double) QuantumRange/(2.0*attenuate); if ((fabs(segment->x1-x_mid) > MagickEpsilon) || (fabs(segment->x2-x_mid) > MagickEpsilon)) { /* Left pixel. */ x=(ssize_t) ceil(segment->x1-0.5); u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),1,1, exception); v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),1,1, exception); q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception); if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickTrue); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma); } (void) SyncCacheViewAuthenticPixels(image_view,exception); if (fabs(segment->x1-segment->x2) > MagickEpsilon) { /* Right pixel. */ x=(ssize_t) ceil(segment->x2-0.5); u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5), 1,1,exception); v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5), 1,1,exception); q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception); if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickTrue); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma); } (void) SyncCacheViewAuthenticPixels(image_view,exception); } } if ((fabs(segment->y1-y_mid) > MagickEpsilon) || (fabs(segment->y2-y_mid) > MagickEpsilon)) { if ((fabs(segment->x1-x_mid) > MagickEpsilon) || (fabs(segment->y2-y_mid) > MagickEpsilon)) { /* Bottom pixel. 
*/ y=(ssize_t) ceil(segment->y2-0.5); u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y, 1,1,exception); v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y, 1,1,exception); q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception); if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickTrue); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma); } (void) SyncCacheViewAuthenticPixels(image_view,exception); } if (fabs(segment->y1-segment->y2) > MagickEpsilon) { /* Top pixel. */ y=(ssize_t) ceil(segment->y1-0.5); u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y, 1,1,exception); v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y, 1,1,exception); q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception); if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickTrue); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma); } (void) SyncCacheViewAuthenticPixels(image_view,exception); } } if ((fabs(segment->x1-segment->x2) > MagickEpsilon) || (fabs(segment->y1-segment->y2) > MagickEpsilon)) { /* Middle pixel. */ x=(ssize_t) ceil(segment->x1-0.5); y=(ssize_t) ceil(segment->y1-0.5); u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception); x=(ssize_t) ceil(segment->x2-0.5); y=(ssize_t) ceil(segment->y2-0.5); v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception); q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception); if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickTrue); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma); } (void) SyncCacheViewAuthenticPixels(image_view,exception); } if ((fabs(segment->x2-segment->x1) < 3.0) && (fabs(segment->y2-segment->y1) < 3.0)) return(MagickTrue); return(MagickFalse); } MagickExport MagickBooleanType PlasmaImage(Image *image, const SegmentInfo *segment,size_t attenuate,size_t depth, ExceptionInfo *exception) { CacheView *image_view, *u_view, *v_view; MagickBooleanType status; RandomInfo *random_info; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); u_view=AcquireVirtualCacheView(image,exception); v_view=AcquireVirtualCacheView(image,exception); random_info=AcquireRandomInfo(); status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment, attenuate,depth,exception); random_info=DestroyRandomInfo(random_info); v_view=DestroyCacheView(v_view); u_view=DestroyCacheView(u_view); 
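  /*
    A minimal usage sketch (segment bounds, attenuation and depth are
    illustrative; the image should be seeded with a base color and the
    random generator initialized first, as the description above notes):

      SegmentInfo segment;
      segment.x1=0.0;
      segment.y1=0.0;
      segment.x2=(double) image->columns-1.0;
      segment.y2=(double) image->rows-1.0;
      (void) PlasmaImage(image,&segment,1,16,exception);
  */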
image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o l a r o i d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PolaroidImage() simulates a Polaroid picture. % % The format of the PolaroidImage method is: % % Image *PolaroidImage(const Image *image,const DrawInfo *draw_info, % const char *caption,const double angle, % const PixelInterpolateMethod method,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o caption: the Polaroid caption. % % o angle: Apply the effect along this angle. % % o method: the pixel interpolation method. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info, const char *caption,const double angle,const PixelInterpolateMethod method, ExceptionInfo *exception) { Image *bend_image, *caption_image, *flop_image, *picture_image, *polaroid_image, *rotate_image, *trim_image; size_t height; ssize_t quantum; /* Simulate a Polaroid picture. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double) image->rows)/25.0,10.0); height=image->rows+2*quantum; caption_image=(Image *) NULL; if (caption != (const char *) NULL) { char geometry[MagickPathExtent], *text; DrawInfo *annotate_info; ImageInfo *image_info; MagickBooleanType status; ssize_t count; TypeMetric metrics; /* Generate caption image. 
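    The caption is word-wrapped by FormatMagickCaption(), rendered with
    AnnotateImage() into a strip as wide as the source image, and the
    strip height is added to the Polaroid border (quantum is 1/25th of
    the larger image dimension, never less than 10 pixels).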
*/ caption_image=CloneImage(image,image->columns,1,MagickTrue,exception); if (caption_image == (Image *) NULL) return((Image *) NULL); image_info=AcquireImageInfo(); annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info); text=InterpretImageProperties(image_info,(Image *) image,caption, exception); image_info=DestroyImageInfo(image_info); (void) CloneString(&annotate_info->text,text); count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,&metrics, &text,exception); status=SetImageExtent(caption_image,image->columns,(size_t) ((count+1)* (metrics.ascent-metrics.descent)+0.5),exception); if (status == MagickFalse) caption_image=DestroyImage(caption_image); else { caption_image->background_color=image->border_color; (void) SetImageBackgroundColor(caption_image,exception); (void) CloneString(&annotate_info->text,text); (void) FormatLocaleString(geometry,MagickPathExtent,"+0+%g", metrics.ascent); if (annotate_info->gravity == UndefinedGravity) (void) CloneString(&annotate_info->geometry,AcquireString( geometry)); (void) AnnotateImage(caption_image,annotate_info,exception); height+=caption_image->rows; } annotate_info=DestroyDrawInfo(annotate_info); text=DestroyString(text); } picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue, exception); if (picture_image == (Image *) NULL) { if (caption_image != (Image *) NULL) caption_image=DestroyImage(caption_image); return((Image *) NULL); } picture_image->background_color=image->border_color; (void) SetImageBackgroundColor(picture_image,exception); (void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum, quantum,exception); if (caption_image != (Image *) NULL) { (void) CompositeImage(picture_image,caption_image,OverCompositeOp, MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception); caption_image=DestroyImage(caption_image); } (void) QueryColorCompliance("none",AllCompliance, &picture_image->background_color,exception); (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception); rotate_image=RotateImage(picture_image,90.0,exception); picture_image=DestroyImage(picture_image); if (rotate_image == (Image *) NULL) return((Image *) NULL); picture_image=rotate_image; bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0* picture_image->columns,method,exception); picture_image=DestroyImage(picture_image); if (bend_image == (Image *) NULL) return((Image *) NULL); picture_image=bend_image; rotate_image=RotateImage(picture_image,-90.0,exception); picture_image=DestroyImage(picture_image); if (rotate_image == (Image *) NULL) return((Image *) NULL); picture_image=rotate_image; picture_image->background_color=image->background_color; polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3, exception); if (polaroid_image == (Image *) NULL) { picture_image=DestroyImage(picture_image); return(picture_image); } flop_image=FlopImage(polaroid_image,exception); polaroid_image=DestroyImage(polaroid_image); if (flop_image == (Image *) NULL) { picture_image=DestroyImage(picture_image); return(picture_image); } polaroid_image=flop_image; (void) CompositeImage(polaroid_image,picture_image,OverCompositeOp, MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception); picture_image=DestroyImage(picture_image); (void) QueryColorCompliance("none",AllCompliance, &polaroid_image->background_color,exception); rotate_image=RotateImage(polaroid_image,angle,exception); polaroid_image=DestroyImage(polaroid_image); if (rotate_image == (Image *) NULL) return((Image *) 
NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e p i a T o n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SepiaToneImage() applies a special effect to the image, similar to the
%  effect achieved in a photo darkroom by sepia toning.  Threshold ranges from
%  0 to QuantumRange and is a measure of the extent of the sepia toning.  A
%  threshold of 80% is a good starting point for a reasonable tone.
%
%  The format of the SepiaToneImage method is:
%
%      Image *SepiaToneImage(const Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: the tone threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag  "SepiaTone/Image"

  CacheView
    *image_view,
    *sepia_view;

  Image
    *sepia_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sepia_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
    {
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,sepia_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity,
        tone;

      intensity=GetPixelIntensity(image,p);
      tone=intensity > threshold ? (double) QuantumRange : intensity+
        (double) QuantumRange-threshold;
      SetPixelRed(sepia_image,ClampToQuantum(tone),q);
      tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
        intensity+(double) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      tone=intensity < (threshold/6.0) ?
0 : intensity-threshold/6.0; SetPixelBlue(sepia_image,ClampToQuantum(tone),q); tone=threshold/7.0; if ((double) GetPixelGreen(image,q) < tone) SetPixelGreen(sepia_image,ClampToQuantum(tone),q); if ((double) GetPixelBlue(image,q) < tone) SetPixelBlue(sepia_image,ClampToQuantum(tone),q); SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(sepia_image); } if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SepiaToneImage) #endif proceed=SetImageProgress(image,SepiaToneImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } sepia_view=DestroyCacheView(sepia_view); image_view=DestroyCacheView(image_view); (void) NormalizeImage(sepia_image,exception); (void) ContrastImage(sepia_image,MagickTrue,exception); if (status == MagickFalse) sepia_image=DestroyImage(sepia_image); return(sepia_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a d o w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShadowImage() simulates a shadow from the specified image and returns it. % % The format of the ShadowImage method is: % % Image *ShadowImage(const Image *image,const double alpha, % const double sigma,const ssize_t x_offset,const ssize_t y_offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o alpha: percentage transparency. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o x_offset: the shadow x-offset. % % o y_offset: the shadow y-offset. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShadowImage(const Image *image,const double alpha, const double sigma,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { #define ShadowImageTag "Shadow/Image" CacheView *image_view; ChannelType channel_mask; Image *border_image, *clone_image, *shadow_image; MagickBooleanType status; PixelInfo background_color; RectangleInfo border_info; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); clone_image=CloneImage(image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return((Image *) NULL); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(clone_image,sRGBColorspace,exception); (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod, exception); border_info.width=(size_t) floor(2.0*sigma+0.5); border_info.height=(size_t) floor(2.0*sigma+0.5); border_info.x=0; border_info.y=0; (void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color, exception); clone_image->alpha_trait=BlendPixelTrait; border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception); clone_image=DestroyImage(clone_image); if (border_image == (Image *) NULL) return((Image *) NULL); if (border_image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception); /* Shadow image. 
*/ status=MagickTrue; background_color=border_image->background_color; background_color.alpha_trait=BlendPixelTrait; image_view=AcquireAuthenticCacheView(border_image,exception); for (y=0; y < (ssize_t) border_image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) border_image->columns; x++) { if (border_image->alpha_trait != UndefinedPixelTrait) background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0; SetPixelViaPixelInfo(border_image,&background_color,q); q+=GetPixelChannels(border_image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { border_image=DestroyImage(border_image); return((Image *) NULL); } channel_mask=SetImageChannelMask(border_image,AlphaChannel); shadow_image=BlurImage(border_image,0.0,sigma,exception); border_image=DestroyImage(border_image); if (shadow_image == (Image *) NULL) return((Image *) NULL); (void) SetPixelChannelMask(shadow_image,channel_mask); if (shadow_image->page.width == 0) shadow_image->page.width=shadow_image->columns; if (shadow_image->page.height == 0) shadow_image->page.height=shadow_image->rows; shadow_image->page.width+=x_offset-(ssize_t) border_info.width; shadow_image->page.height+=y_offset-(ssize_t) border_info.height; shadow_image->page.x+=x_offset-(ssize_t) border_info.width; shadow_image->page.y+=y_offset-(ssize_t) border_info.height; return(shadow_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S k e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SketchImage() simulates a pencil sketch. We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). For % reasonable results, radius should be larger than sigma. Use a radius of 0 % and SketchImage() selects a suitable radius for you. Angle gives the angle % of the sketch. % % The format of the SketchImage method is: % % Image *SketchImage(const Image *image,const double radius, % const double sigma,const double angle,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the % center pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o angle: apply the effect along this angle. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SketchImage(const Image *image,const double radius, const double sigma,const double angle,ExceptionInfo *exception) { CacheView *random_view; Image *blend_image, *blur_image, *dodge_image, *random_image, *sketch_image; MagickBooleanType status; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif /* Sketch image. 
*/ random_image=CloneImage(image,image->columns << 1,image->rows << 1, MagickTrue,exception); if (random_image == (Image *) NULL) return((Image *) NULL); status=MagickTrue; random_info=AcquireRandomInfoThreadSet(); random_view=AcquireAuthenticCacheView(random_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) random_image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) random_image->columns; x++) { double value; register ssize_t i; if (GetPixelWriteMask(random_image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(random_image); continue; } value=GetPseudoRandomValue(random_info[id]); for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=ClampToQuantum(QuantumRange*value); } q+=GetPixelChannels(random_image); } if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse) status=MagickFalse; } random_view=DestroyCacheView(random_view); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) { random_image=DestroyImage(random_image); return(random_image); } blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception); random_image=DestroyImage(random_image); if (blur_image == (Image *) NULL) return((Image *) NULL); dodge_image=EdgeImage(blur_image,radius,exception); blur_image=DestroyImage(blur_image); if (dodge_image == (Image *) NULL) return((Image *) NULL); (void) NormalizeImage(dodge_image,exception); (void) NegateImage(dodge_image,MagickFalse,exception); (void) TransformImage(&dodge_image,(char *) NULL,"50%",exception); sketch_image=CloneImage(image,0,0,MagickTrue,exception); if (sketch_image == (Image *) NULL) { dodge_image=DestroyImage(dodge_image); return((Image *) NULL); } (void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp, MagickTrue,0,0,exception); dodge_image=DestroyImage(dodge_image); blend_image=CloneImage(image,0,0,MagickTrue,exception); if (blend_image == (Image *) NULL) { sketch_image=DestroyImage(sketch_image); return((Image *) NULL); } if (blend_image->alpha_trait != BlendPixelTrait) (void) SetImageAlpha(blend_image,TransparentAlpha,exception); (void) SetImageArtifact(blend_image,"compose:args","20x80"); (void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue, 0,0,exception); blend_image=DestroyImage(blend_image); return(sketch_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S o l a r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SolarizeImage() applies a special effect to the image, similar to the effect % achieved in a photo darkroom by selectively exposing areas of photo % sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a % measure of the extent of the solarization. 
%
%  The format of the SolarizeImage method is:
%
%      MagickBooleanType SolarizeImage(Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold:  Define the extent of the solarization.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag  "Solarize/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Solarize colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((double) image->colormap[i].red > threshold)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((double) image->colormap[i].green > threshold)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((double) image->colormap[i].blue > threshold)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
    }
  /*
    Solarize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if ((double) q[i] > threshold)
          q[i]=QuantumRange-q[i];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SolarizeImage)
#endif
        proceed=SetImageProgress(image,SolarizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S t e g a n o I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SteganoImage() hides a digital watermark within the image.  Recover
%  the hidden watermark later to prove the authenticity of an image.
%  Offset defines the start position within the image to hide the watermark.
%
%  The format of the SteganoImage method is:
%
%      Image *SteganoImage(const Image *image,Image *watermark,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o watermark: the watermark image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
  ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \
  | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag  "Stegano/Image"

  CacheView
    *stegano_view,
    *watermark_view;

  Image
    *stegano_image;

  int
    c;

  MagickBooleanType
    status;

  PixelInfo
    pixel;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    depth,
    one;

  ssize_t
    i,
    j,
    k,
    y;

  /*
    Initialize steganographic image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(watermark != (const Image *) NULL);
  assert(watermark->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  one=1UL;
  stegano_image=CloneImage(image,0,0,MagickTrue,exception);
  if (stegano_image == (Image *) NULL)
    return((Image *) NULL);
  stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
  if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse)
    {
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  /*
    Hide watermark in low-order bits of image.
  */
  c=0;
  i=0;
  j=0;
  depth=stegano_image->depth;
  k=stegano_image->offset;
  status=MagickTrue;
  watermark_view=AcquireVirtualCacheView(watermark,exception);
  stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
  for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
  {
    for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
    {
      for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
      {
        ssize_t
          offset;

        (void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel,
          exception);
        offset=k/(ssize_t) stegano_image->columns;
        if (offset >= (ssize_t) stegano_image->rows)
          break;
        q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
          stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
          exception);
        if (q == (Quantum *) NULL)
          break;
        switch (c)
        {
          case 0:
          {
            SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 1:
          {
            SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 2:
          {
            SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
        }
        if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
          break;
        c++;
        if (c == 3)
          c=0;
        k++;
        /*
          Wrap k over all pixels of the image (columns*rows, not
          columns*columns).
        */
        if (k == (ssize_t) (stegano_image->columns*stegano_image->rows))
          k=0;
        if (k == stegano_image->offset)
          j++;
      }
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
          (depth-i),depth);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  stegano_view=DestroyCacheView(stegano_view);
  watermark_view=DestroyCacheView(watermark_view);
  if (status == MagickFalse)
    stegano_image=DestroyImage(stegano_image);
  return(stegano_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S t e r e o A n a g l y p h I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
StereoAnaglyphImage() combines two images and produces a single image that % is the composite of a left and right image of a stereo pair. Special % red-green stereo glasses are required to view this effect. % % The format of the StereoAnaglyphImage method is: % % Image *StereoImage(const Image *left_image,const Image *right_image, % ExceptionInfo *exception) % Image *StereoAnaglyphImage(const Image *left_image, % const Image *right_image,const ssize_t x_offset,const ssize_t y_offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o left_image: the left image. % % o right_image: the right image. % % o exception: return any errors or warnings in this structure. % % o x_offset: amount, in pixels, by which the left image is offset to the % right of the right image. % % o y_offset: amount, in pixels, by which the left image is offset to the % bottom of the right image. % % */ MagickExport Image *StereoImage(const Image *left_image, const Image *right_image,ExceptionInfo *exception) { return(StereoAnaglyphImage(left_image,right_image,0,0,exception)); } MagickExport Image *StereoAnaglyphImage(const Image *left_image, const Image *right_image,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { #define StereoImageTag "Stereo/Image" const Image *image; Image *stereo_image; MagickBooleanType status; ssize_t y; assert(left_image != (const Image *) NULL); assert(left_image->signature == MagickCoreSignature); if (left_image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", left_image->filename); assert(right_image != (const Image *) NULL); assert(right_image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(right_image != (const Image *) NULL); image=left_image; if ((left_image->columns != right_image->columns) || (left_image->rows != right_image->rows)) ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer"); /* Initialize stereo image attributes. */ stereo_image=CloneImage(left_image,left_image->columns,left_image->rows, MagickTrue,exception); if (stereo_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse) { stereo_image=DestroyImage(stereo_image); return((Image *) NULL); } (void) SetImageColorspace(stereo_image,sRGBColorspace,exception); /* Copy left image to red channel and right image to blue channel. 
*/ status=MagickTrue; for (y=0; y < (ssize_t) stereo_image->rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; register Quantum *magick_restrict r; p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1, exception); q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception); r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) || (r == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) stereo_image->columns; x++) { SetPixelRed(image,GetPixelRed(left_image,p),r); SetPixelGreen(image,GetPixelGreen(right_image,q),r); SetPixelBlue(image,GetPixelBlue(right_image,q),r); if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0) SetPixelAlpha(image,(GetPixelAlpha(left_image,p)+ GetPixelAlpha(right_image,q))/2,r); p+=GetPixelChannels(left_image); q+=GetPixelChannels(right_image); r+=GetPixelChannels(stereo_image); } if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse) break; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y, stereo_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } if (status == MagickFalse) stereo_image=DestroyImage(stereo_image); return(stereo_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S w i r l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SwirlImage() swirls the pixels about the center of the image, where % degrees indicates the sweep of the arc through which each pixel is moved. % You get a more dramatic effect as the degrees move from 1 to 360. % % The format of the SwirlImage method is: % % Image *SwirlImage(const Image *image,double degrees, % const PixelInterpolateMethod method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o degrees: Define the tightness of the swirling effect. % % o method: the pixel interpolation method. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SwirlImage(const Image *image,double degrees, const PixelInterpolateMethod method,ExceptionInfo *exception) { #define SwirlImageTag "Swirl/Image" CacheView *canvas_view, *interpolate_view, *swirl_view; Image *canvas, *swirl_image; MagickBooleanType status; MagickOffsetType progress; double radius; PointInfo center, scale; ssize_t y; /* Initialize swirl image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); canvas=CloneImage(image,0,0,MagickTrue,exception); if (canvas == (Image *) NULL) return((Image *) NULL); if ((canvas->alpha_trait == UndefinedPixelTrait) && (canvas->background_color.alpha != OpaqueAlpha)) (void) SetImageAlphaChannel(canvas,OpaqueAlphaChannel,exception); swirl_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue, exception); if (swirl_image == (Image *) NULL) { canvas=DestroyImage(canvas); return((Image *) NULL); } if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse) { canvas=DestroyImage(canvas); swirl_image=DestroyImage(swirl_image); return((Image *) NULL); } /* Compute scaling factor. 
*/ center.x=(double) canvas->columns/2.0; center.y=(double) canvas->rows/2.0; radius=MagickMax(center.x,center.y); scale.x=1.0; scale.y=1.0; if (canvas->columns > canvas->rows) scale.y=(double) canvas->columns/(double) canvas->rows; else if (canvas->columns < canvas->rows) scale.x=(double) canvas->rows/(double) canvas->columns; degrees=(double) DegreesToRadians(degrees); /* Swirl image. */ status=MagickTrue; progress=0; canvas_view=AcquireVirtualCacheView(canvas,exception); interpolate_view=AcquireVirtualCacheView(image,exception); swirl_view=AcquireAuthenticCacheView(swirl_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(canvas,swirl_image,canvas->rows,1) #endif for (y=0; y < (ssize_t) canvas->rows; y++) { double distance; PointInfo delta; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas->columns,1,exception); q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } delta.y=scale.y*(double) (y-center.y); for (x=0; x < (ssize_t) canvas->columns; x++) { /* Determine if the pixel is within an ellipse. */ if (GetPixelWriteMask(canvas,p) <= (QuantumRange/2)) { SetPixelBackgoundColor(swirl_image,q); p+=GetPixelChannels(canvas); q+=GetPixelChannels(swirl_image); continue; } delta.x=scale.x*(double) (x-center.x); distance=delta.x*delta.x+delta.y*delta.y; if (distance >= (radius*radius)) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(canvas,i); PixelTrait traits = GetPixelChannelTraits(canvas,channel); PixelTrait swirl_traits = GetPixelChannelTraits(swirl_image, channel); if ((traits == UndefinedPixelTrait) || (swirl_traits == UndefinedPixelTrait)) continue; SetPixelChannel(swirl_image,channel,p[i],q); } } else { double cosine, factor, sine; /* Swirl the pixel. */ factor=1.0-sqrt((double) distance)/radius; sine=sin((double) (degrees*factor*factor)); cosine=cos((double) (degrees*factor*factor)); status=InterpolatePixelChannels(canvas,interpolate_view,swirl_image, method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),(double) ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,exception); } p+=GetPixelChannels(canvas); q+=GetPixelChannels(swirl_image); } if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse) status=MagickFalse; if (canvas->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SwirlImage) #endif proceed=SetImageProgress(canvas,SwirlImageTag,progress++,canvas->rows); if (proceed == MagickFalse) status=MagickFalse; } } swirl_view=DestroyCacheView(swirl_view); interpolate_view=DestroyCacheView(interpolate_view); canvas_view=DestroyCacheView(canvas_view); canvas=DestroyImage(canvas); if (status == MagickFalse) swirl_image=DestroyImage(swirl_image); return(swirl_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TintImage() applies a color vector to each pixel in the image. The length % of the vector is 0 for black and white and at its maximum for the midtones. 
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5)))) % % The format of the TintImage method is: % % Image *TintImage(const Image *image,const char *blend, % const PixelInfo *tint,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o blend: A color value used for tinting. % % o tint: A color value used for tinting. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *TintImage(const Image *image,const char *blend, const PixelInfo *tint,ExceptionInfo *exception) { #define TintImageTag "Tint/Image" CacheView *image_view, *tint_view; double intensity; GeometryInfo geometry_info; Image *tint_image; MagickBooleanType status; MagickOffsetType progress; PixelInfo color_vector; MagickStatusType flags; ssize_t y; /* Allocate tint image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); tint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (tint_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse) { tint_image=DestroyImage(tint_image); return((Image *) NULL); } if ((IsGrayColorspace(image->colorspace) != MagickFalse) && (IsPixelInfoGray(tint) == MagickFalse)) (void) SetImageColorspace(tint_image,sRGBColorspace,exception); if (blend == (const char *) NULL) return(tint_image); /* Determine RGB values of the color. */ GetPixelInfo(image,&color_vector); flags=ParseGeometry(blend,&geometry_info); color_vector.red=geometry_info.rho; color_vector.green=geometry_info.rho; color_vector.blue=geometry_info.rho; color_vector.alpha=(MagickRealType) OpaqueAlpha; if ((flags & SigmaValue) != 0) color_vector.green=geometry_info.sigma; if ((flags & XiValue) != 0) color_vector.blue=geometry_info.xi; if ((flags & PsiValue) != 0) color_vector.alpha=geometry_info.psi; if (image->colorspace == CMYKColorspace) { color_vector.black=geometry_info.rho; if ((flags & PsiValue) != 0) color_vector.black=geometry_info.psi; if ((flags & ChiValue) != 0) color_vector.alpha=geometry_info.chi; } intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint); color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity); color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity); color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity); color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity); color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity); /* Tint image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); tint_view=AcquireAuthenticCacheView(tint_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,tint_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { PixelInfo pixel; double weight; GetPixelInfo(image,&pixel); if (GetPixelWriteMask(image,p) <= (QuantumRange/2)) { SetPixelViaPixelInfo(tint_image,&pixel,q); p+=GetPixelChannels(image); q+=GetPixelChannels(tint_image); continue; } weight=QuantumScale*GetPixelRed(image,p)-0.5; pixel.red=(double) GetPixelRed(image,p)+color_vector.red*(1.0-(4.0* (weight*weight))); weight=QuantumScale*GetPixelGreen(image,p)-0.5; pixel.green=(double) GetPixelGreen(image,p)+color_vector.green*(1.0-(4.0* (weight*weight))); weight=QuantumScale*GetPixelBlue(image,p)-0.5; pixel.blue=(double) GetPixelBlue(image,p)+color_vector.blue*(1.0-(4.0* (weight*weight))); weight=QuantumScale*GetPixelBlack(image,p)-0.5; pixel.black=(double) GetPixelBlack(image,p)+color_vector.black*(1.0-(4.0* (weight*weight))); pixel.alpha=GetPixelAlpha(image,p); SetPixelViaPixelInfo(tint_image,&pixel,q); p+=GetPixelChannels(image); q+=GetPixelChannels(tint_image); } if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TintImage) #endif proceed=SetImageProgress(image,TintImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } tint_view=DestroyCacheView(tint_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) tint_image=DestroyImage(tint_image); return(tint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % V i g n e t t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % VignetteImage() softens the edges of the image in vignette style. % % The format of the VignetteImage method is: % % Image *VignetteImage(const Image *image,const double radius, % const double sigma,const ssize_t x,const ssize_t y, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o x, y: Define the x and y ellipse offset. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *VignetteImage(const Image *image,const double radius, const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception) { char ellipse[MagickPathExtent]; DrawInfo *draw_info; Image *canvas, *blur_image, *oval_image, *vignette_image; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); canvas=CloneImage(image,0,0,MagickTrue,exception); if (canvas == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(canvas,DirectClass,exception) == MagickFalse) { canvas=DestroyImage(canvas); return((Image *) NULL); } canvas->alpha_trait=BlendPixelTrait; oval_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue, exception); if (oval_image == (Image *) NULL) { canvas=DestroyImage(canvas); return((Image *) NULL); } (void) QueryColorCompliance("#000000",AllCompliance, &oval_image->background_color,exception); (void) SetImageBackgroundColor(oval_image,exception); draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL); (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill, exception); (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke, exception); (void) FormatLocaleString(ellipse,MagickPathExtent,"ellipse %g,%g,%g,%g," "0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x, image->rows/2.0-y); draw_info->primitive=AcquireString(ellipse); (void) DrawImage(oval_image,draw_info,exception); draw_info=DestroyDrawInfo(draw_info); blur_image=BlurImage(oval_image,radius,sigma,exception); oval_image=DestroyImage(oval_image); if (blur_image == (Image *) NULL) { canvas=DestroyImage(canvas); return((Image *) NULL); } blur_image->alpha_trait=UndefinedPixelTrait; (void) CompositeImage(canvas,blur_image,IntensityCompositeOp,MagickTrue, 0,0,exception); blur_image=DestroyImage(blur_image); vignette_image=MergeImageLayers(canvas,FlattenLayer,exception); canvas=DestroyImage(canvas); if (vignette_image != (Image *) NULL) (void) TransformImageColorspace(vignette_image,image->colorspace,exception); return(vignette_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W a v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WaveImage() creates a "ripple" effect in the image by shifting the pixels % vertically along a sine wave whose amplitude and wavelength is specified % by the given parameters. % % The format of the WaveImage method is: % % Image *WaveImage(const Image *image,const double amplitude, % const double wave_length,const PixelInterpolateMethod method, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o amplitude, wave_length: Define the amplitude and wave length of the % sine wave. % % o interpolate: the pixel interpolation method. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *WaveImage(const Image *image,const double amplitude, const double wave_length,const PixelInterpolateMethod method, ExceptionInfo *exception) { #define WaveImageTag "Wave/Image" CacheView *canvas_view, *wave_view; Image *canvas, *wave_image; MagickBooleanType status; MagickOffsetType progress; double *sine_map; register ssize_t i; ssize_t y; /* Initialize wave image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); canvas=CloneImage(image,0,0,MagickTrue,exception); if (canvas == (Image *) NULL) return((Image *) NULL); if ((canvas->alpha_trait == UndefinedPixelTrait) && (canvas->background_color.alpha != OpaqueAlpha)) (void) SetImageAlpha(canvas,OpaqueAlpha,exception); wave_image=CloneImage(canvas,canvas->columns,(size_t) (canvas->rows+2.0* fabs(amplitude)),MagickTrue,exception); if (wave_image == (Image *) NULL) { canvas=DestroyImage(canvas); return((Image *) NULL); } if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse) { canvas=DestroyImage(canvas); wave_image=DestroyImage(wave_image); return((Image *) NULL); } /* Allocate sine map. */ sine_map=(double *) AcquireQuantumMemory((size_t) wave_image->columns, sizeof(*sine_map)); if (sine_map == (double *) NULL) { canvas=DestroyImage(canvas); wave_image=DestroyImage(wave_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (i=0; i < (ssize_t) wave_image->columns; i++) sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/ wave_length)); /* Wave image. */ status=MagickTrue; progress=0; canvas_view=AcquireVirtualCacheView(canvas,exception); wave_view=AcquireAuthenticCacheView(wave_image,exception); (void) SetCacheViewVirtualPixelMethod(canvas_view, BackgroundVirtualPixelMethod); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(canvas,wave_image,wave_image->rows,1) #endif for (y=0; y < (ssize_t) wave_image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) wave_image->columns; x++) { status=InterpolatePixelChannels(canvas,canvas_view,wave_image,method, (double) x,(double) (y-sine_map[x]),q,exception); q+=GetPixelChannels(wave_image); } if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_WaveImage) #endif proceed=SetImageProgress(canvas,WaveImageTag,progress++,canvas->rows); if (proceed == MagickFalse) status=MagickFalse; } } wave_view=DestroyCacheView(wave_view); canvas_view=DestroyCacheView(canvas_view); canvas=DestroyImage(canvas); sine_map=(double *) RelinquishMagickMemory(sine_map); if (status == MagickFalse) wave_image=DestroyImage(wave_image); return(wave_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W a v e l e t D e n o i s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WaveletDenoiseImage() removes noise from the image using a wavelet % transform. The wavelet transform is a fast hierarchical scheme for % processing an image using a set of consecutive lowpass and high_pass filters, % followed by a decimation. This results in a decomposition into different % scales which can be regarded as different “frequency bands”, determined by % the mother wavelet. 
Adapted from dcraw.c by David Coffin. % % The format of the WaveletDenoiseImage method is: % % Image *WaveletDenoiseImage(const Image *image,const double threshold, % const double softness,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: set the threshold for smoothing. % % o softness: attenuate the smoothing threshold. % % o exception: return any errors or warnings in this structure. % */ static inline void HatTransform(const float *magick_restrict pixels, const size_t stride,const size_t extent,const size_t scale,float *kernel) { const float *magick_restrict p, *magick_restrict q, *magick_restrict r; register ssize_t i; p=pixels; q=pixels+scale*stride; r=pixels+scale*stride; for (i=0; i < (ssize_t) scale; i++) { kernel[i]=0.25f*(*p+(*p)+(*q)+(*r)); p+=stride; q-=stride; r+=stride; } for ( ; i < (ssize_t) (extent-scale); i++) { kernel[i]=0.25f*(2.0f*(*p)+*(p-scale*stride)+*(p+scale*stride)); p+=stride; } q=p-scale*stride; r=pixels+stride*(extent-2); for ( ; i < (ssize_t) extent; i++) { kernel[i]=0.25f*(*p+(*p)+(*q)+(*r)); p+=stride; q+=stride; r-=stride; } } MagickExport Image *WaveletDenoiseImage(const Image *image, const double threshold,const double softness,ExceptionInfo *exception) { CacheView *image_view, *noise_view; float *kernel, *pixels; Image *noise_image; MagickBooleanType status; MagickSizeType number_pixels; MemoryInfo *pixels_info; ssize_t channel; static const float noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f, 0.0080f, 0.0044f }; /* Initialize noise image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception); if (noise_image != (Image *) NULL) return(noise_image); #endif noise_image=CloneImage(image,0,0,MagickTrue,exception); if (noise_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse) { noise_image=DestroyImage(noise_image); return((Image *) NULL); } if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); pixels_info=AcquireVirtualMemory(3*image->columns,image->rows* sizeof(*pixels)); kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns), GetOpenMPMaximumThreads()*sizeof(*kernel)); if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL)) { if (kernel != (float *) NULL) kernel=(float *) RelinquishMagickMemory(kernel); if (pixels_info != (MemoryInfo *) NULL) pixels_info=RelinquishVirtualMemory(pixels_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } pixels=(float *) GetVirtualMemoryBlob(pixels_info); status=MagickTrue; number_pixels=(MagickSizeType) image->columns*image->rows; image_view=AcquireAuthenticCacheView(image,exception); noise_view=AcquireAuthenticCacheView(noise_image,exception); for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++) { register ssize_t i; size_t high_pass, low_pass; ssize_t level, y; PixelChannel pixel_channel; PixelTrait traits; if (status == MagickFalse) continue; traits=GetPixelChannelTraits(image,(PixelChannel) channel); if (traits == UndefinedPixelTrait) continue; 
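    /*
      Denoise the red, green, and blue channels only; all other channels
      (e.g. alpha) keep the values cloned from the input image.
    */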
pixel_channel=GetPixelChannelChannel(image,channel); if ((pixel_channel != RedPixelChannel) && (pixel_channel != GreenPixelChannel) && (pixel_channel != BluePixelChannel)) continue; /* Copy channel from image to wavelet pixel array. */ i=0; for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { pixels[i++]=(float) p[channel]; p+=GetPixelChannels(image); } } /* Low pass filter outputs are called approximation kernel & high pass filters are referred to as detail kernel. The detail kernel have high values in the noisy parts of the signal. */ high_pass=0; for (level=0; level < 5; level++) { double magnitude; ssize_t x, y; low_pass=(size_t) (number_pixels*((level & 0x01)+1)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register float *magick_restrict p, *magick_restrict q; register ssize_t x; p=kernel+id*image->columns; q=pixels+y*image->columns; HatTransform(q+high_pass,1,image->columns,(size_t) (1 << level),p); q+=low_pass; for (x=0; x < (ssize_t) image->columns; x++) *q++=(*p++); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) \ magick_number_threads(image,image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); register float *magick_restrict p, *magick_restrict q; register ssize_t y; p=kernel+id*image->rows; q=pixels+x+low_pass; HatTransform(q,image->columns,image->rows,(size_t) (1 << level),p); for (y=0; y < (ssize_t) image->rows; y++) { *q=(*p++); q+=image->columns; } } /* To threshold, each coefficient is compared to a threshold value and attenuated / shrunk by some factor. */ magnitude=threshold*noise_levels[level]; for (i=0; i < (ssize_t) number_pixels; ++i) { pixels[high_pass+i]-=pixels[low_pass+i]; if (pixels[high_pass+i] < -magnitude) pixels[high_pass+i]+=magnitude-softness*magnitude; else if (pixels[high_pass+i] > magnitude) pixels[high_pass+i]-=magnitude-softness*magnitude; else pixels[high_pass+i]*=softness; if (high_pass != 0) pixels[i]+=pixels[high_pass+i]; } high_pass=low_pass; } /* Reconstruct image from the thresholded wavelet kernel. 
*/ i=0; for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register Quantum *magick_restrict q; register ssize_t x; ssize_t offset; q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; break; } offset=GetPixelChannelOffset(noise_image,pixel_channel); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType pixel; pixel=(MagickRealType) pixels[i]+pixels[low_pass+i]; q[offset]=ClampToQuantum(pixel); i++; q+=GetPixelChannels(noise_image); } sync=SyncCacheViewAuthenticPixels(noise_view,exception); if (sync == MagickFalse) status=MagickFalse; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType) channel,GetPixelChannels(image)); if (proceed == MagickFalse) status=MagickFalse; } } noise_view=DestroyCacheView(noise_view); image_view=DestroyCacheView(image_view); kernel=(float *) RelinquishMagickMemory(kernel); pixels_info=RelinquishVirtualMemory(pixels_info); if (status == MagickFalse) noise_image=DestroyImage(noise_image); return(noise_image); }
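/*
  Usage sketch (not part of MagickCore itself): a minimal driver showing how
  the effect methods above are typically called from client code.  It assumes
  an ImageMagick 7 development install; the file names "input.png" and
  "output.png" are placeholders.  Guarded with #if 0 so this translation unit
  still compiles as a library source.
*/
#if 0
#include <string.h>
#include <MagickCore/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *image,
    *sepia;

  ImageInfo
    *image_info;

  (void) argc;
  MagickCoreGenesis(*argv,MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) strcpy(image_info->filename,"input.png");
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      /*
        80% of QuantumRange is the starting threshold suggested in the
        SepiaToneImage() documentation above.
      */
      sepia=SepiaToneImage(image,0.80*QuantumRange,exception);
      image=DestroyImage(image);
      if (sepia != (Image *) NULL)
        {
          (void) strcpy(sepia->filename,"output.png");
          (void) WriteImage(image_info,sepia,exception);
          sepia=DestroyImage(sepia);
        }
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}
#endif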
ScalarWave_RHSs.h
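/*
  NRPy+-generated kernel: evaluates the right-hand sides of the scalar wave
  equation rewritten in first-order-in-time form,
      uu_rhs = vv,
      vv_rhs = wavespeed^2 * (uu_dDD00 + uu_dDD11 + uu_dDD22),
  using 4th-order centered finite differences (coefficients -5/2, 4/3, -1/12)
  over the interior points i = NGHOSTS .. NGHOSTS+Nxx-1 of each dimension.
*/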
const REAL invdx0 = 1.0/dxx[0]; const REAL invdx1 = 1.0/dxx[1]; const REAL invdx2 = 1.0/dxx[2]; #pragma omp parallel for for(int i2=NGHOSTS; i2<NGHOSTS+Nxx[2]; i2++) { for(int i1=NGHOSTS; i1<NGHOSTS+Nxx[1]; i1++) { for(int i0=NGHOSTS; i0<NGHOSTS+Nxx[0]; i0++) { { /* * NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils: */ /* * Original SymPy expressions: * "[const double uu_dDD00 = invdx0**2*(-5*uu/2 + 4*uu_i0m1_i1_i2/3 - uu_i0m2_i1_i2/12 + 4*uu_i0p1_i1_i2/3 - uu_i0p2_i1_i2/12), * const double uu_dDD11 = invdx1**2*(-5*uu/2 + 4*uu_i0_i1m1_i2/3 - uu_i0_i1m2_i2/12 + 4*uu_i0_i1p1_i2/3 - uu_i0_i1p2_i2/12), * const double uu_dDD22 = invdx2**2*(-5*uu/2 + 4*uu_i0_i1_i2m1/3 - uu_i0_i1_i2m2/12 + 4*uu_i0_i1_i2p1/3 - uu_i0_i1_i2p2/12)]" */ const double uu_i0_i1_i2m2 = in_gfs[IDX4(UUGF, i0,i1,i2-2)]; const double uu_i0_i1_i2m1 = in_gfs[IDX4(UUGF, i0,i1,i2-1)]; const double uu_i0_i1m2_i2 = in_gfs[IDX4(UUGF, i0,i1-2,i2)]; const double uu_i0_i1m1_i2 = in_gfs[IDX4(UUGF, i0,i1-1,i2)]; const double uu_i0m2_i1_i2 = in_gfs[IDX4(UUGF, i0-2,i1,i2)]; const double uu_i0m1_i1_i2 = in_gfs[IDX4(UUGF, i0-1,i1,i2)]; const double uu = in_gfs[IDX4(UUGF, i0,i1,i2)]; const double uu_i0p1_i1_i2 = in_gfs[IDX4(UUGF, i0+1,i1,i2)]; const double uu_i0p2_i1_i2 = in_gfs[IDX4(UUGF, i0+2,i1,i2)]; const double uu_i0_i1p1_i2 = in_gfs[IDX4(UUGF, i0,i1+1,i2)]; const double uu_i0_i1p2_i2 = in_gfs[IDX4(UUGF, i0,i1+2,i2)]; const double uu_i0_i1_i2p1 = in_gfs[IDX4(UUGF, i0,i1,i2+1)]; const double uu_i0_i1_i2p2 = in_gfs[IDX4(UUGF, i0,i1,i2+2)]; const double vv = in_gfs[IDX4(VVGF, i0,i1,i2)]; const double tmpFD0 = -(5.0 / 2.0)*uu; const double uu_dDD00 = pow(invdx0, 2)*(tmpFD0 + ((4.0 / 3.0))*uu_i0m1_i1_i2 - (1.0 / 12.0)*uu_i0m2_i1_i2 + ((4.0 / 3.0))*uu_i0p1_i1_i2 - (1.0 / 12.0)*uu_i0p2_i1_i2); const double uu_dDD11 = pow(invdx1, 2)*(tmpFD0 + ((4.0 / 3.0))*uu_i0_i1m1_i2 - (1.0 / 12.0)*uu_i0_i1m2_i2 + ((4.0 / 3.0))*uu_i0_i1p1_i2 - (1.0 / 12.0)*uu_i0_i1p2_i2); const double uu_dDD22 = pow(invdx2, 2)*(tmpFD0 + ((4.0 / 3.0))*uu_i0_i1_i2m1 - (1.0 / 12.0)*uu_i0_i1_i2m2 + ((4.0 / 3.0))*uu_i0_i1_i2p1 - (1.0 / 12.0)*uu_i0_i1_i2p2); /* * NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory: */ /* * Original SymPy expressions: * "[rhs_gfs[IDX4(UUGF, i0, i1, i2)] = vv, * rhs_gfs[IDX4(VVGF, i0, i1, i2)] = uu_dDD00*wavespeed**2 + uu_dDD11*wavespeed**2 + uu_dDD22*wavespeed**2]" */ const double tmp0 = pow(wavespeed, 2); rhs_gfs[IDX4(UUGF, i0, i1, i2)] = vv; rhs_gfs[IDX4(VVGF, i0, i1, i2)] = tmp0*uu_dDD00 + tmp0*uu_dDD11 + tmp0*uu_dDD22; } } // END LOOP: for(int i0=NGHOSTS; i0<NGHOSTS+Nxx[0]; i0++) } // END LOOP: for(int i1=NGHOSTS; i1<NGHOSTS+Nxx[1]; i1++) } // END LOOP: for(int i2=NGHOSTS; i2<NGHOSTS+Nxx[2]; i2++)
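/*
  Usage sketch (an assumption about the surrounding NRPy+ driver, not part of
  the generated file): this header is meant to be #include'd into the body of
  an RHS-evaluation function, with REAL, IDX4, UUGF, VVGF, NGHOSTS, Nxx[],
  dxx[], wavespeed, in_gfs and rhs_gfs supplied by the including code, e.g.

      void rhs_eval(const REAL *restrict dxx,const int *restrict Nxx,
                    const REAL wavespeed,const REAL *restrict in_gfs,
                    REAL *restrict rhs_gfs)
      {
      #include "ScalarWave_RHSs.h"
      }

  A method-of-lines time integrator (e.g. RK4) then calls rhs_eval() once per
  stage and applies boundary conditions on the NGHOSTS-wide ghost zones.
*/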
parallel_levelset_distance_calculator.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//    _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Riccardo Rossi
//
//

#if !defined(KRATOS_PARALLEL_DISTANCE_CALCULATOR_H_INCLUDED )
#define  KRATOS_PARALLEL_DISTANCE_CALCULATOR_H_INCLUDED

// System includes
#include <string>
#include <iostream>

// External includes

// Project includes
#include "includes/define.h"
#include "utilities/geometry_utilities.h"
#include "includes/deprecated_variables.h"

namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name  Enum's
///@{
///@}
///@name  Functions
///@{
///@}
///@name Kratos Classes
///@{

/// Short class definition.
/** Detail class definition.
*/
template< unsigned int TDim>
class ParallelDistanceCalculator
{
public:
    ///@name Type Definitions
    ///@{
    KRATOS_DEFINE_LOCAL_FLAG(CALCULATE_EXACT_DISTANCES_TO_PLANE);

    /// Pointer definition of ParallelDistanceCalculator
    KRATOS_CLASS_POINTER_DEFINITION(ParallelDistanceCalculator);

    ///@}
    ///@name Life Cycle
    ///@{

    /// Default constructor.
    ParallelDistanceCalculator() {};

    /// Destructor.
    virtual ~ParallelDistanceCalculator() {};

    ///Function to calculate a signed distance function suitable for calculations using the Level Set Method
    ///the function assumes that a "signed distance" distribution is given and recomputes the distances,
    ///respecting as accurately as possible the position of the zero of the original distribution
    ///@param rModelPart is the ModelPart on which we will operate
    ///@param rDistanceVar is the Variable that we will use in calculating the distance
    ///@param rAreaVar is the Variable that we will use for L2 projections
    ///@param max_levels is the number of maximum "layers" of element that will be used in the calculation of the distances
    ///@param max_distance distances will not be computed after reaching this limit
    void CalculateDistances(ModelPart& rModelPart,
                            const Variable<double>& rDistanceVar,
                            const Variable<double>& rAreaVar,
                            const unsigned int max_levels,
                            const double max_distance,
                            Flags Options = NOT_CALCULATE_EXACT_DISTANCES_TO_PLANE)
    {
        KRATOS_TRY

        Check(rModelPart, rDistanceVar, rAreaVar);
        ResetVariables(rModelPart,rDistanceVar, max_distance);
        CalculateExactDistancesOnDividedElements(rModelPart, rDistanceVar, rAreaVar, max_distance, Options);
        ExtendDistancesByLayer(rModelPart, rDistanceVar, rAreaVar, max_levels, max_distance);
        AssignDistanceSign(rModelPart, rDistanceVar, rAreaVar, max_distance);

        KRATOS_CATCH("")
    }

    ///Function to calculate a signed distance function suitable for calculations using the Level Set Method
    ///The difference between this function and the previous one is that it won't recalculate the exact distance
    ///in divided elements, in order to preserve the current distance.
    ///the function assumes that a "signed distance" distribution is given and recomputes the distances,
    ///respecting as accurately as possible the position of the zero of the original distribution
    ///@param rModelPart is the ModelPart on which we will operate
    ///@param rDistanceVar is the Variable that we will use in calculating the distance
    ///@param rAreaVar is the Variable that we will use for L2 projections
    ///@param max_levels is the number of maximum "layers" of element that will be used in the calculation of the distances
    ///@param max_distance distances will not be computed after reaching this limit
    void CalculateInterfacePreservingDistances(ModelPart& rModelPart,
            const Variable<double>& rDistanceVar,
            const Variable<double>& rAreaVar,
            const unsigned int max_levels,
            const double max_distance)
    {
        KRATOS_TRY

        Check(rModelPart, rDistanceVar, rAreaVar);
        ResetVariables(rModelPart,rDistanceVar, max_distance);
        AbsDistancesOnDividedElements(rModelPart, rDistanceVar, rAreaVar, max_distance);
        ExtendDistancesByLayer(rModelPart, rDistanceVar, rAreaVar, max_levels, max_distance);
        AssignDistanceSign(rModelPart, rDistanceVar, rAreaVar, max_distance);

        KRATOS_CATCH("")
    }

    /// A simplified version of CalculateDistances to be used when the rDistanceVar == 0 surface is described by a set of nodes
    /**
     * @param rModelPart is the ModelPart on which we will operate
     * @param rDistanceVar is the Variable that we will use in calculating the distance
     * @param rAreaVar is the Variable that we will use for L2 projections
     * @param max_levels is the number of maximum "layers" of element that will be used in the calculation of the distances
     * @param max_distance distances will not be computed after reaching this limit
     * @see ParallelDistanceCalculator::CalculateDistances
     */
    void CalculateDistancesLagrangianSurface(ModelPart& rModelPart,
            const Variable<double>& rDistanceVar,
            const Variable<double>& rAreaVar,
            const unsigned int max_levels,
            const double max_distance)
    {
        KRATOS_TRY

        bool is_distributed = false;
        if(rModelPart.GetCommunicator().TotalProcesses() > 1)
            is_distributed = true;

        //check that variables needed are in the model part
        if(!(rModelPart.NodesBegin()->SolutionStepsDataHas(rDistanceVar)) )
            KRATOS_THROW_ERROR(std::logic_error,"distance Variable is not in the model part","");

        if(!(rModelPart.NodesBegin()->SolutionStepsDataHas(rAreaVar)) )
            KRATOS_THROW_ERROR(std::logic_error,"Area Variable is not in the model part","");

        if(is_distributed == true)
            if(!(rModelPart.NodesBegin()->SolutionStepsDataHas(PARTITION_INDEX)) )
                KRATOS_THROW_ERROR(std::logic_error,"PARTITION_INDEX Variable is not in the model part","");

        array_1d<double,TDim+1> visited;
        const int elem_size = rModelPart.Elements().size();
        const int node_size = rModelPart.Nodes().size();

        // set to zero the distance
        #pragma omp parallel for
        for(int i = 0; i<node_size; i++)
        {
            ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i;
            double& area = it->FastGetSolutionStepValue(rAreaVar);
            area = 0.0;
            double& is_visited = it->GetValue(IS_VISITED);
            double& distance = it->FastGetSolutionStepValue(rDistanceVar);
            it->GetValue(rDistanceVar) = it->FastGetSolutionStepValue(rDistanceVar);
            if(is_visited != 1.0)
            {
                distance = 0.0;
            }
            else
                area = 1.0;
//            else if(dist < 0.0)
//                KRATOS_THROW_ERROR(std::logic_error,"ATTENTION: prescribed distance function set to a number smaller than 0!!","");
        }

        array_1d<double,TDim+1> N;
        BoundedMatrix <double, TDim+1,TDim> DN_DX;

        // Extend the distances layer by layer up to a maximum level of layers
        for(unsigned int level=0; level<max_levels;
level++) { //loop on active elements and advance the distance computation #pragma omp parallel for private(DN_DX,visited) for(int i = 0; i<elem_size; i++) { PointerVector< Element>::iterator it=rModelPart.ElementsBegin()+i; Geometry<Node<3> >&geom = it->GetGeometry(); for(unsigned int j=0; j<TDim+1; j++) visited[j] = (static_cast<const Node<3> & >(geom[j])).GetValue(IS_VISITED); if(IsActive(visited)) { double Volume; GeometryUtils::CalculateGeometryData(geom,DN_DX,N,Volume); AddDistanceToNodes(rDistanceVar,rAreaVar,geom,DN_DX,Volume); } } //mpi sync variables if(is_distributed == true) { #pragma omp parallel for private(DN_DX) for(int i = 0; i<node_size; i++) { ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i; if(it->GetValue(IS_VISITED) == 1.0) { double& distance = it->FastGetSolutionStepValue(rDistanceVar); it->GetValue(rDistanceVar) = distance; distance = 0.0; } else it->GetValue(rDistanceVar) = 0.0; } rModelPart.GetCommunicator().AssembleCurrentData(rAreaVar); rModelPart.GetCommunicator().AssembleCurrentData(rDistanceVar); #pragma omp parallel for private(DN_DX) for(int i = 0; i<node_size; i++) { ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i; it->FastGetSolutionStepValue(rDistanceVar) += it->GetValue(rDistanceVar); } rModelPart.GetCommunicator().Barrier(); } //finalize the computation of the distance #pragma omp parallel for private(DN_DX) for(int i = 0; i<node_size; i++) { ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i; double& area = it->FastGetSolutionStepValue(rAreaVar); double& is_visited = it->GetValue(IS_VISITED); if(area > 1e-20 && is_visited != 1.0) //this implies that node was computed at the current level and not before { double& distance = it->FastGetSolutionStepValue(rDistanceVar); distance /= area; is_visited = 1.0; } } } //*****************************************************************+ //*****************************************************************+ //*****************************************************************+ //assign the sign to the distance function according to the original distribution. 
Set to max for nodes that were not calculated #pragma omp parallel for for(int i = 0; i<node_size; i++) { ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i; const double area = it->FastGetSolutionStepValue(rAreaVar); double& dist = it->FastGetSolutionStepValue(rDistanceVar); if(dist > max_distance || area <1e-20) dist = max_distance; // if(it->GetValue(IS_FLUID) == 1.0) // dist = -fabs(dist); // else // dist = fabs(dist); } KRATOS_CATCH("") } //********************************************************************************** //********************************************************************************** double FindMaximumEdgeSize(ModelPart& r_model_part) { KRATOS_TRY double h_max = 0.0; for(ModelPart::ElementsContainerType::iterator it=r_model_part.ElementsBegin(); it!=r_model_part.ElementsEnd(); it++) { Geometry<Node<3> >&geom = it->GetGeometry(); double h = 0.0; for(unsigned int i=0; i<TDim+1; i++) { double xc = geom[i].X(); double yc = geom[i].Y(); double zc = geom[i].Z(); for(unsigned int j=i+1; j<TDim+1; j++) { double x = geom[j].X(); double y = geom[j].Y(); double z = geom[j].Z(); double l = (x - xc)*(x - xc); l += (y - yc)*(y - yc); l += (z - zc)*(z - zc); if (l > h) h = l; } } h = sqrt(h); if(h > h_max) h_max = h; } r_model_part.GetCommunicator().MaxAll(h_max); return h_max; KRATOS_CATCH(""); } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. virtual std::string Info() const { std::stringstream buffer; buffer << "ParallelDistanceCalculator" << TDim << "D"; return buffer.str(); }; /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const { rOStream << "ParallelDistanceCalculator" << TDim << "D"; }; /// Print object's data. 
virtual void PrintData(std::ostream& rOStream) const {}; ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ //******************************************************************* bool IsDivided(array_1d<double,TDim+1>& dist) { unsigned int positive = 0; unsigned int negative = 0; for(unsigned int i=0; i<TDim+1; i++) { if(dist[i] >= 0) positive++; else negative++; } bool is_divided = false; if(positive > 0 && negative>0) is_divided = true; return is_divided; } //******************************************************************* bool IsActive(array_1d<double,TDim+1>& visited) { unsigned int positive = 0; for(unsigned int i=0; i<TDim+1; i++) if(visited[i] > 0.9999999999) //node was considered positive++; bool is_active = false; if(positive == TDim) is_active = true; return is_active; } //******************************************************************* void ComputeExactDistances(const BoundedMatrix <double, TDim+1,TDim>& DN_DX, const double& Area, Geometry<Node<3> >& geom, const array_1d<double,TDim+1>& distances, array_1d<double,TDim+1>& exact_dist ) { array_1d<double,TDim> grad_d; array_1d<double,3> coord_on_0 = ZeroVector(3); array_1d<double,3> temp; //compute the gradient of the distance and normalize it noalias(grad_d) = prod(trans(DN_DX),distances); double norm = norm_2(grad_d); grad_d /= norm; //find one division point on one edge for(unsigned int i = 1; i<TDim+1; i++) { if(distances[0]*distances[i]<=0.0) //if the edge is divided { double delta_d = fabs(distances[i]) + fabs(distances[0]); if(delta_d>1e-20) { double Ni = fabs(distances[0]) / delta_d; double N0 = fabs(distances[i]) / delta_d; noalias(coord_on_0) = N0 * geom[0].Coordinates(); noalias(coord_on_0) += Ni * geom[i].Coordinates(); } else noalias(coord_on_0) = geom[0].Coordinates(); break; } } //now calculate the distance of all the nodes from the elemental free surface for(unsigned int i = 0; i<TDim+1; i++) { noalias(temp) = geom[i].Coordinates(); noalias(temp) -= coord_on_0 ; double real_distance = 0.0; for(unsigned int k=0; k<TDim; k++) real_distance += temp[k]*grad_d[k]; real_distance = fabs(real_distance); exact_dist[i] = real_distance; } } //******************************************************************* void AddDistanceToNodesNew(const Variable<double>& rDistanceVar, const Variable<double>& rAreaVar, Geometry<Node<3> >& geom, const BoundedMatrix <double, TDim+1,TDim>& DN_DX, const double& Volume ) { unsigned int unknown_node_index = 0; array_1d<double,TDim> d; double nodal_vol = Volume/static_cast<double>(TDim+1); double avg_dist = 0.0; Matrix coord_a(3,3); int row = 0; int reference_node_index; //compute discriminant and find the index of the unknown node noalias(d) = ZeroVector(TDim); for (unsigned int iii = 0; iii < TDim + 1; iii++) { double node_is_known = geom[iii].GetValue(IS_VISITED); if (node_is_known == 1) //identyfing the known node { reference_node_index = iii; for(int i_coord = 0 ; i_coord < 3 ; i_coord++) coord_a(row,i_coord) = geom[iii].Coordinates()[i_coord]; d[row] = geom[iii].FastGetSolutionStepValue(rDistanceVar); avg_dist += d[row]; row++; } else unknown_node_index = iii; } avg_dist /= static_cast<double>(TDim); Matrix inverse_a(3,3); double det_a; MathUtils<double>::InvertMatrix3(coord_a,inverse_a,det_a); array_1d<double,TDim> x; // normal to the surface noalias(x) = prod(inverse_a,d); double norm_x = norm_2(x); x /= norm_x; array_1d<double,TDim> v = 
geom[unknown_node_index].Coordinates() - geom[reference_node_index].Coordinates();
        double distance = inner_prod(x,v);
        distance += geom[reference_node_index].FastGetSolutionStepValue(rDistanceVar);

        //KRATOS_WATCH(coord_a)
        //KRATOS_WATCH(distance)

        geom[unknown_node_index].SetLock();
        geom[unknown_node_index].FastGetSolutionStepValue(rDistanceVar) += distance*nodal_vol;
        geom[unknown_node_index].FastGetSolutionStepValue(rAreaVar) += nodal_vol;
        geom[unknown_node_index].UnSetLock();

        //GeometryUtils::CalculateTetrahedraDistances(element_geometry, dist);
    }

    //*******************************************************************
    void AddDistanceToNodes(const Variable<double>& rDistanceVar,
                            const Variable<double>& rAreaVar,
                            Geometry<Node<3> >& geom,
                            const BoundedMatrix <double, TDim+1,TDim>& DN_DX,
                            const double& Volume
                           )
    {
        unsigned int unknown_node_index = 0;
        array_1d<double,TDim> d;
        double nodal_vol = Volume/static_cast<double>(TDim+1);
        double avg_dist = 0.0;

        //compute discriminant and find the index of the unknown node
        noalias(d) = ZeroVector(TDim);
        for (unsigned int iii = 0; iii < TDim + 1; iii++)
        {
            double node_is_known = geom[iii].GetValue(IS_VISITED);
            if (node_is_known == 1) //identifying the known nodes
            {
                const double distance = geom[iii].FastGetSolutionStepValue(rDistanceVar);
                avg_dist += distance;
                for (unsigned int jjj = 0; jjj < TDim; jjj++)
                    d[jjj] += DN_DX(iii, jjj) * distance;
            }
            else
                unknown_node_index = iii;
        }
        avg_dist /= static_cast<double>(TDim);

        //finalizing computation of discriminant
        double c = -1.0;
        double a = 0.0;
        double b = 0.0;
        for (unsigned int jjj = 0; jjj < TDim; jjj++)
        {
            a += DN_DX(unknown_node_index, jjj) * DN_DX(unknown_node_index, jjj);
            b += d[jjj] * DN_DX(unknown_node_index, jjj);
            c += d[jjj] * d[jjj];
        }
        b *= 2.0;

        //here we require (a*x^2 + b*x + c)^2 to be minimum (x represents the unknown distance)
        //this implies setting to zero
        //(a*x^2 + b*x + c)*(2ax+b) = 0
        double distance;

        double discriminant = b * b - 4.0 * a*c;

        if (discriminant < 0.0) //here we solve (2ax+b) = 0
        {
//             double numerator = 0.0;
//             double denominator = 0.0;
//             for(unsigned int i=0; i<TDim+1; i++)
//             {
//                 for (unsigned int jjj = 0; jjj < TDim; jjj++)
//                 {
//                     if(i != unknown_node_index)
//                         numerator += DN_DX(unknown_node_index, jjj) * DN_DX(i, jjj);
//                     else
//                         denominator += DN_DX(unknown_node_index, jjj)*DN_DX(unknown_node_index, jjj);
//                 }
//             }
//             distance = - numerator/denominator;
//
//             KRATOS_WATCH(geom[unknown_node_index].Id());
//             KRATOS_WATCH(discriminant);
            distance = -b / (2.0*a); //avg_dist ; //
        }
        else //in this case we solve (a*x^2 + b*x + c)=0
        {
            //(accurate) computation of the distance
            //requires the solution of a*x^2+b*x+c=0
            double q, root1, root2;
            double sqrt_det = sqrt(discriminant);
            if (a != 0.0)
            {
                if (b > 0) q = -0.5 * (b + sqrt_det);
                else q = -0.5 * (b - sqrt_det);
                root1 = q / a;
                root2 = c / q;
                if (root1 > root2) distance = root1;
                else distance = root2;
            }
            else //in this case we have a linear equation
            {
                distance = -c / b;
            }
        }

        if(distance < 0.0)
            distance = 1e-15;

        geom[unknown_node_index].SetLock();
        geom[unknown_node_index].FastGetSolutionStepValue(rDistanceVar) += distance*nodal_vol;
        geom[unknown_node_index].FastGetSolutionStepValue(rAreaVar) += nodal_vol;
        geom[unknown_node_index].UnSetLock();
    }
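    // Editor's note (a sketch of the math used above, added for clarity): on each
    // element the known nodal distances d_j contribute grad(d) = sum_j N_j d_j,
    // with N_j = DN_DX(j,:), while the unknown nodal value x adds N_u * x.
    // Requiring the distance-function property ||grad(d)||^2 = 1 yields the quadratic
    //     a*x^2 + b*x + c = 0,
    // with a = ||N_u||^2, b = 2 * N_u . (sum_j N_j d_j), and
    // c = ||sum_j N_j d_j||^2 - 1 (hence c is initialized to -1.0 above).
    // When the discriminant is negative no real x attains a unit gradient, and the
    // code falls back to the vertex of the parabola, x = -b/(2a), which minimizes
    // (a*x^2 + b*x + c)^2 as stated in the comments above.
    // 1D sanity check: two nodes at spacing h give DN_DX = {-1/h, 1/h}; with d0
    // known, the quadratic reduces to ((x - d0)/h)^2 = 1, whose larger root is
    // x = d0 + h, i.e. exactly one grid spacing further from the interface.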
    ///@}
    ///@name Protected Operations
    ///@{
    ///@}
    ///@name Protected Access
    ///@{
    ///@}
    ///@name Protected Inquiry
    ///@{
    ///@}
    ///@name Protected LifeCycle
    ///@{
    ///@}

private:
    ///@name Static Member Variables
    ///@{
    ///@}
    ///@name Member Variables
    ///@{
    ///@}
    ///@name Private Operators
    ///@{
    ///@}
    ///@name Private Operations
    ///@{

    void Check(ModelPart& rModelPart, const Variable<double>& rDistanceVar, const Variable<double>& rAreaVar)
    {
        KRATOS_TRY

        bool is_distributed = false;
        if(rModelPart.GetCommunicator().TotalProcesses() > 1)
            is_distributed = true;

        //check that variables needed are in the model part
        if(!(rModelPart.NodesBegin()->SolutionStepsDataHas(rDistanceVar)) )
            KRATOS_THROW_ERROR(std::logic_error,"distance Variable is not in the model part","");
        if(!(rModelPart.NodesBegin()->SolutionStepsDataHas(rAreaVar)) )
            KRATOS_THROW_ERROR(std::logic_error,"Area Variable is not in the model part","");

        if(is_distributed == true)
            if(!(rModelPart.NodesBegin()->SolutionStepsDataHas(PARTITION_INDEX)) )
                KRATOS_THROW_ERROR(std::logic_error,"PARTITION_INDEX Variable is not in the model part","");

        KRATOS_CATCH("")
    }

    void ResetVariables(ModelPart& rModelPart, const Variable<double>& rDistanceVar, const double MaxDistance)
    {
        KRATOS_TRY

        //reset the variables needed
        const int node_size = rModelPart.Nodes().size();

        #pragma omp parallel for
        for(int i = 0; i<node_size; i++)
        {
            ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i;
            //it->FastGetSolutionStepValue(rAreaVar) = 0.0;
            double& dist = it->FastGetSolutionStepValue(rDistanceVar);
            it->SetValue(rDistanceVar,dist); //here we copy the distance function to the fixed database
            if(dist < 0.0)
                it->SetValue(IS_FLUID,1.0);
            else
                it->SetValue(IS_FLUID,0.0);
            dist = MaxDistance;
            it->SetValue(IS_VISITED,0);
        }

        KRATOS_CATCH("")
    }
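    // Editor's note: the private operations in this section implement the pipeline
    // driven by CalculateDistances / CalculateInterfacePreservingDistances:
    // Check verifies that the required variables exist and ResetVariables backs up
    // the distances and resets the nodal state (both above), while the functions
    // below seed distances on the elements cut by the zero isosurface, extend them
    // outwards layer by layer, and finally restore the sign and cap the values at
    // max_distance.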
    void CalculateExactDistancesOnDividedElements(ModelPart& rModelPart,
            const Variable<double>& rDistanceVar,
            const Variable<double>& rAreaVar,
            const double MaxDistance,
            Flags Options)
    {
        KRATOS_TRY

        //identify the list of elements divided by the original distance distribution and recompute an "exact" distance
        //attempting to maintain the original position of the free surface
        //note that the backup value is used in calculating the position of the free surface and the divided elements
        array_1d<double,TDim+1> dist, exact_dist;
        array_1d<double,TDim+1> visited;
        // double lumping_factor = 1.0/double(TDim+1);
        int elem_size = rModelPart.Elements().size();

        #pragma omp parallel for private(dist,exact_dist) firstprivate(elem_size)
        for (int i = 0; i < elem_size; i++)
        {
            PointerVector< Element>::iterator it = rModelPart.ElementsBegin() + i;
            Geometry<Node < 3 > >& element_geometry = it->GetGeometry();

            for (unsigned int j = 0; j < TDim + 1; j++)
                dist[j] = element_geometry[j].GetValue(rDistanceVar);

            bool is_divided = IsDivided(dist);

            if (is_divided == true)
            {
                if (Options.Is(CALCULATE_EXACT_DISTANCES_TO_PLANE))
                    GeometryUtils::CalculateExactDistancesToPlane(element_geometry, dist);
                else
                    GeometryUtils::CalculateTetrahedraDistances(element_geometry, dist);

                // loop over nodes and apply the new distances.
                for (unsigned int i_node = 0; i_node < element_geometry.size(); i_node++)
                {
                    double& distance = element_geometry[i_node].GetSolutionStepValue(rDistanceVar);
                    double new_distance = dist[i_node];

                    element_geometry[i_node].SetLock();
                    if (fabs(distance) > fabs(new_distance))
                        distance = new_distance;
                    element_geometry[i_node].GetValue(IS_VISITED) = 1;
                    element_geometry[i_node].UnSetLock();
                }
            }
        }

        //mpi sync variables
        rModelPart.GetCommunicator().AssembleNonHistoricalData(IS_VISITED);
        rModelPart.GetCommunicator().AssembleCurrentData(rAreaVar);
        rModelPart.GetCommunicator().SynchronizeCurrentDataToMin(rDistanceVar);

        const int node_size = rModelPart.Nodes().size();

        #pragma omp parallel for
        for(int i = 0; i<node_size; i++)
        {
            ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i;
            double& nodal_dist = it->FastGetSolutionStepValue(rDistanceVar);
            double& is_visited = it->GetValue(IS_VISITED);
            if(is_visited == 0.00)
            {
                nodal_dist = 0.00;
                it->GetSolutionStepValue(rAreaVar) = 0.00;
            }
            else if(is_visited >= 1.00) // This is due to the fact that I'm using the assemble instead of sync
            {
                is_visited = 1.00;
                it->GetSolutionStepValue(rAreaVar) = 1.00; // This is not correct
            }
        }

        KRATOS_CATCH("")
    }

    void AbsDistancesOnDividedElements(ModelPart& rModelPart,
                                       const Variable<double>& rDistanceVar,
                                       const Variable<double>& rAreaVar,
                                       const double MaxDistance)
    {
        KRATOS_TRY

        //identify the list of elements divided by the original distance distribution and recompute an "exact" distance
        //attempting to maintain the original position of the free surface
        //note that the backup value is used in calculating the position of the free surface and the divided elements
        array_1d<double,TDim+1> dist, exact_dist;
        array_1d<double,TDim+1> visited;
        int elem_size = rModelPart.Elements().size();

        #pragma omp parallel for private(dist,exact_dist) firstprivate(elem_size)
        for (int i = 0; i < elem_size; i++)
        {
            PointerVector< Element>::iterator it = rModelPart.ElementsBegin() + i;
            Geometry<Node < 3 > >& element_geometry = it->GetGeometry();

            for (unsigned int j = 0; j < TDim + 1; j++)
                dist[j] = element_geometry[j].GetValue(rDistanceVar);

            bool is_divided = IsDivided(dist);

            if (is_divided == true)
            {
                // loop over nodes and apply the new distances.
for (unsigned int i_node = 0; i_node < element_geometry.size(); i_node++) { double& distance = element_geometry[i_node].GetSolutionStepValue(rDistanceVar); double new_distance = dist[i_node]; element_geometry[i_node].SetLock(); distance = fabs(new_distance); element_geometry[i_node].GetValue(IS_VISITED) = 1; element_geometry[i_node].UnSetLock(); } } } //mpi sync variables rModelPart.GetCommunicator().AssembleNonHistoricalData(IS_VISITED); rModelPart.GetCommunicator().AssembleCurrentData(rAreaVar); rModelPart.GetCommunicator().SynchronizeCurrentDataToMin(rDistanceVar); const int node_size = rModelPart.Nodes().size(); #pragma omp parallel for for(int i = 0; i<node_size; i++) { ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i; double& nodal_dist = it->FastGetSolutionStepValue(rDistanceVar); double& is_visited = it->GetValue(IS_VISITED); if(is_visited == 0.00) { nodal_dist = 0.00; it->GetSolutionStepValue(rAreaVar) = 0.00; } else if(is_visited >= 1.00) // This is due to the fact that I'm using the assemble instead of sync { is_visited = 1.00; it->GetSolutionStepValue(rAreaVar) = 1.00; // This is not correct } } KRATOS_CATCH("") } void ExtendDistancesByLayer(ModelPart& rModelPart, const Variable<double>& rDistanceVar, const Variable<double>& rAreaVar, const unsigned int max_levels, const double MaxDistance) { KRATOS_TRY array_1d<double,TDim+1> visited; array_1d<double,TDim+1> N; BoundedMatrix <double, TDim+1,TDim> DN_DX; const int elem_size = rModelPart.Elements().size(); const int node_size = rModelPart.Nodes().size(); //*****************************************************************+ //*****************************************************************+ //*****************************************************************+ //now extend the distances layer by layer up to a maximum level of layers for(unsigned int level=0; level<max_levels; level++) { //loop on active elements and advance the distance computation #pragma omp parallel for private(DN_DX,visited) for(int i = 0; i<elem_size; i++) { PointerVector< Element>::iterator it=rModelPart.ElementsBegin()+i; Geometry<Node<3> >&geom = it->GetGeometry(); for(unsigned int j=0; j<TDim+1; j++) visited[j] = geom[j].GetValue(IS_VISITED); if(IsActive(visited)) { double Volume; GeometryUtils::CalculateGeometryData(geom,DN_DX,N,Volume); AddDistanceToNodes(rDistanceVar,rAreaVar,geom,DN_DX,Volume); } } bool is_distributed = false; if(rModelPart.GetCommunicator().TotalProcesses() > 1) is_distributed = true; //mpi sync variables if(is_distributed == true) { #pragma omp parallel for private(DN_DX) for(int i = 0; i<node_size; i++) { ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i; if(it->GetValue(IS_VISITED) == 1.0) { double& distance = it->FastGetSolutionStepValue(rDistanceVar); it->GetValue(rDistanceVar) = distance; distance = 0.0; } else it->GetValue(rDistanceVar) = 0.0; } rModelPart.GetCommunicator().AssembleCurrentData(rAreaVar); rModelPart.GetCommunicator().AssembleCurrentData(rDistanceVar); #pragma omp parallel for private(DN_DX) for(int i = 0; i<node_size; i++) { ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i; it->FastGetSolutionStepValue(rDistanceVar) += it->GetValue(rDistanceVar); } rModelPart.GetCommunicator().Barrier(); } //finalize the computation of the distance #pragma omp parallel for private(DN_DX) for(int i = 0; i<node_size; i++) { ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i; double& area = it->FastGetSolutionStepValue(rAreaVar); double& is_visited = 
it->GetValue(IS_VISITED); if(area > 1e-20 && is_visited != 1.0) //this implies that node was computed at the current level and not before { double& distance = it->FastGetSolutionStepValue(rDistanceVar); distance /= area; is_visited = 1.0; } } } KRATOS_CATCH("") } void AssignDistanceSign(ModelPart& rModelPart, const Variable<double>& rDistanceVar, const Variable<double>& rAreaVar, const double MaxDistance) { KRATOS_TRY //*****************************************************************+ //*****************************************************************+ //*****************************************************************+ //assign the sign to the distance function according to the original distribution. Set to max for nodes that were not calculated const int node_size = rModelPart.Nodes().size(); #pragma omp parallel for for(int i = 0; i<node_size; i++) { ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i; const double area = it->FastGetSolutionStepValue(rAreaVar); double& dist = it->FastGetSolutionStepValue(rDistanceVar); if(dist < 0.0) KRATOS_THROW_ERROR(std::logic_error,"IMPOSSIBLE negative distance found !!",""); if(dist > MaxDistance || area <1e-20) //if(dist > max_distance) dist = MaxDistance; if(it->GetValue(IS_FLUID) == 1.0) dist = -fabs(dist); else dist = fabs(dist); } KRATOS_CATCH("") } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. ParallelDistanceCalculator<TDim>& operator=(ParallelDistanceCalculator<TDim> const& rOther) {}; /// Copy constructor. ParallelDistanceCalculator(ParallelDistanceCalculator<TDim> const& rOther) {}; ///@} }; // Class ParallelDistanceCalculator ///@} ///@name Type Definitions ///@{ template< unsigned int TDim> const Kratos::Flags ParallelDistanceCalculator<TDim>::CALCULATE_EXACT_DISTANCES_TO_PLANE(Kratos::Flags::Create(0)); template< unsigned int TDim> const Kratos::Flags ParallelDistanceCalculator<TDim>::NOT_CALCULATE_EXACT_DISTANCES_TO_PLANE(Kratos::Flags::Create(0, false)); ///@} ///@name Input and output ///@{ /// input stream function template<unsigned int TDim> inline std::istream& operator >> (std::istream& rIStream, ParallelDistanceCalculator<TDim>& rThis) { return rIStream; } /// output stream function template<unsigned int TDim> inline std::ostream& operator << (std::ostream& rOStream, const ParallelDistanceCalculator<TDim>& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} } // namespace Kratos. #endif // KRATOS_PARALLEL_DISTANCE_CALCULATOR_H_INCLUDED defined
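// ---------------------------------------------------------------------------
// Editor's addition: a minimal, self-contained sketch (independent of Kratos,
// all names invented for the example) of the layer-by-layer distance marching
// performed by ExtendDistancesByLayer above, reduced to a 1D grid. Each sweep
// computes distances only for unvisited nodes that already have a visited
// neighbour, mirroring the IsActive/AddDistanceToNodes logic; in 1D the
// eikonal update degenerates to d_unknown = d_known + h.
// ---------------------------------------------------------------------------
#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    const double h = 0.5;                        // grid spacing
    const double max_distance = 100.0;           // cap, as in the calculator
    std::vector<double> dist(10, max_distance);
    std::vector<bool> visited(10, false);

    dist[4] = 0.0;                               // seed: the zero of the level set
    visited[4] = true;

    for (int level = 0; level < 10; level++)     // "max_levels" sweeps
    {
        std::vector<bool> updated = visited;     // freeze the current layer
        for (std::size_t i = 0; i < dist.size(); i++)
        {
            if (visited[i]) continue;
            const double left  = (i > 0 && visited[i - 1]) ? dist[i - 1] : max_distance;
            const double right = (i + 1 < dist.size() && visited[i + 1]) ? dist[i + 1] : max_distance;
            const double best  = (left < right) ? left : right;
            if (best < max_distance)             // a known neighbour exists: node belongs to this layer
            {
                dist[i] = best + h;
                updated[i] = true;
            }
        }
        visited = updated;                       // activate the new layer
    }

    for (double d : dist) std::printf("%g ", d); // prints 2 1.5 1 0.5 0 0.5 1 1.5 2 2.5
    std::printf("\n");
    return 0;
}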
pubkeylp.h
/**
 * @file pubkeylp.h -- Public key type for lattice crypto operations.
 * @author  TPOC: [email protected]
 *
 * @copyright Copyright (c) 2017, New Jersey Institute of Technology (NJIT)
 * All rights reserved.
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice, this
 * list of conditions and the following disclaimer in the documentation and/or other
 * materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef LBCRYPTO_CRYPTO_PUBKEYLP_H
#define LBCRYPTO_CRYPTO_PUBKEYLP_H

//Includes Section
#include <vector>
#include <iomanip>
#include "lattice/elemparams.h"
#include "lattice/ilparams.h"
#include "lattice/ildcrtparams.h"
#include "lattice/ilelement.h"
#include "utils/inttypes.h"
#include "utils/hashutil.h"
#include "math/distrgen.h"
#include "utils/serializablehelper.h"
#include "encoding/encodingparams.h"

/**
 * @namespace lbcrypto
 * The namespace of lbcrypto
 */
namespace lbcrypto {

//forward declarations, used to resolve circular header dependencies
template<typename Element>
class CiphertextImpl;

template<typename Element>
using Ciphertext = shared_ptr<CiphertextImpl<Element>>;

template<typename Element>
class RationalCiphertext;

template<typename Element>
class LPCryptoParameters;

template<typename Element>
class LPCryptoParametersLTV;

template<typename Element>
class LPCryptoParametersBGV;

template<typename Element>
class LPCryptoParametersBFV;

template<typename Element>
class LPCryptoParametersStehleSteinfeld;

template<typename Element>
class CryptoObject;

struct EncryptResult {
    explicit EncryptResult() : isValid(false), numBytesEncrypted(0) {}
    explicit EncryptResult(size_t len) : isValid(true), numBytesEncrypted(len) {}

    bool isValid;              /**< whether the encryption was successful */
    usint numBytesEncrypted;   /**< count of the number of plaintext bytes that were encrypted */
};

/**
 * @brief Decryption result. This represents whether the decryption of a ciphertext was performed correctly.
 *
 * This is intended to eventually incorporate information about the amount of padding in a decoded ciphertext,
 * to ensure that the correct amount of padding is stripped away.
 * It is intended to provide a very simple kind of checksum eventually.
 * This notion of a decoding output is inherited from the crypto++ library.
 * It is also intended to be used in a recover-and-restart robust functionality if not all ciphertext is received over a lossy channel, so that if all information is eventually received, decoding/decryption can be performed eventually.
* This is intended to be returned with the output of a decryption operation. */ struct DecryptResult { /** * Constructor that initializes all message lengths to 0. */ explicit DecryptResult() : isValid(false), messageLength(0) {} /** * Constructor that initializes all message lengths. * @param len the new length. */ explicit DecryptResult(size_t len) : isValid(true), messageLength(len) {} bool isValid; /**< whether the decryption was successful */ usint messageLength; /**< the length of the decrypted plaintext message */ }; /** * @brief Abstract interface class for LP Keys * * @tparam Element a ring element. */ template <class Element> class LPKey : public CryptoObject<Element>, public Serializable { public: LPKey(CryptoContext<Element> cc, const string& id = "") : CryptoObject<Element>(cc, id) {} LPKey(shared_ptr<CryptoObject<Element>> co) : CryptoObject<Element>(co) {} virtual ~LPKey() {} }; template<typename Element> class LPPublicKeyImpl; template<typename Element> using LPPublicKey = shared_ptr<LPPublicKeyImpl<Element>>; /** * @brief Concrete class for LP public keys * @tparam Element a ring element. */ template <typename Element> class LPPublicKeyImpl : public LPKey<Element> { public: /** * Basic constructor for setting crypto params * * @param &cryptoParams is the reference to cryptoParams */ LPPublicKeyImpl(CryptoContext<Element> cc, const string& id = "") : LPKey<Element>(cc, id) {} /** * Copy constructor * *@param &rhs LPPublicKeyImpl to copy from */ explicit LPPublicKeyImpl(const LPPublicKeyImpl<Element> &rhs) : LPKey<Element>(rhs.GetCryptoContext(), rhs.GetKeyTag()) { m_h = rhs.m_h; } /** * Move constructor * *@param &rhs LPPublicKeyImpl to move from */ explicit LPPublicKeyImpl(LPPublicKeyImpl<Element> &&rhs) : LPKey<Element>(rhs.GetCryptoContext(), rhs.GetKeyTag()) { m_h = std::move(rhs.m_h); } /** * Assignment Operator. * * @param &rhs LPPublicKeyImpl to copy from */ const LPPublicKeyImpl<Element>& operator=(const LPPublicKeyImpl<Element> &rhs) { this->context = rhs.context; this->m_h = rhs.m_h; return *this; } /** * Move Assignment Operator. * * @param &rhs LPPublicKeyImpl to copy from */ const LPPublicKeyImpl<Element>& operator=(LPPublicKeyImpl<Element> &&rhs) { this->context = rhs.context; rhs.context = 0; m_h = std::move(rhs.m_h); return *this; } //@Get Properties /** * Gets the computed public key * @return the public key element. */ const std::vector<Element> &GetPublicElements() const { return this->m_h; } //@Set Properties /** * Sets the public key vector of Element. * @param &element is the public key Element vector to be copied. */ void SetPublicElements(const std::vector<Element> &element) { m_h = element; } /** * Sets the public key vector of Element. * @param &&element is the public key Element vector to be moved. */ void SetPublicElements(std::vector<Element> &&element) { m_h = std::move(element); } /** * Sets the public key Element at index idx. * @param &element is the public key Element to be copied. */ void SetPublicElementAtIndex(usint idx, const Element &element) { m_h.insert(m_h.begin() + idx, element); } /** * Sets the public key Element at index idx. * @param &&element is the public key Element to be moved. */ void SetPublicElementAtIndex(usint idx, Element &&element) { m_h.insert(m_h.begin() + idx, std::move(element)); } /** * Serialize the object into a Serialized * @param *serObj is used to store the serialized result. 
It MUST be a rapidjson Object (SetObject()); * @param fileFlag is an object-specific parameter for the serialization * @return true if successfully serialized */ bool Serialize(Serialized *serObj) const; /** * Populate the object from the deserialization of the Serialized * @param &serObj contains the serialized object * @return true on success */ bool Deserialize(const Serialized &serObj); bool operator==(const LPPublicKeyImpl& other) const { if( !CryptoObject<Element>::operator ==(other) ) return false; if( m_h.size() != other.m_h.size() ) return false; for( size_t i = 0; i < m_h.size(); i++ ) if( m_h[i] != other.m_h[i] ) return false; return true; } bool operator!=(const LPPublicKeyImpl& other) const { return ! (*this == other); } private: std::vector<Element> m_h; }; template<typename Element> class LPEvalKeyImpl; template<typename Element> using LPEvalKey = shared_ptr<LPEvalKeyImpl<Element>>; /** * @brief Abstract interface for LP evaluation/proxy keys * @tparam Element a ring element. */ template <class Element> class LPEvalKeyImpl : public LPKey<Element> { public: /** * Basic constructor for setting crypto params * * @param &cryptoParams is the reference to cryptoParams */ LPEvalKeyImpl(CryptoContext<Element> cc) : LPKey<Element>(cc) {} virtual ~LPEvalKeyImpl() {} /** * Setter function to store Relinearization Element Vector A. * Throws exception, to be overridden by derived class. * * @param &a is the Element vector to be copied. */ virtual void SetAVector(const std::vector<Element> &a) { throw std::runtime_error("SetAVector copy operation not supported"); } /** * Setter function to store Relinearization Element Vector A. * Throws exception, to be overridden by derived class. * * @param &&a is the Element vector to be moved. */ virtual void SetAVector(std::vector<Element> &&a) { throw std::runtime_error("SetAVector move operation not supported"); } /** * Getter function to access Relinearization Element Vector A. * Throws exception, to be overridden by derived class. * * @return Element vector A. */ virtual const std::vector<Element> &GetAVector() const { throw std::runtime_error("GetAVector operation not supported"); } /** * Setter function to store Relinearization Element Vector B. * Throws exception, to be overridden by derived class. * * @param &b is the Element vector to be copied. */ virtual void SetBVector(const std::vector<Element> &b) { throw std::runtime_error("SetBVector copy operation not supported"); } /** * Setter function to store Relinearization Element Vector B. * Throws exception, to be overridden by derived class. * * @param &&b is the Element vector to be moved. */ virtual void SetBVector(std::vector<Element> &&b) { throw std::runtime_error("SetBVector move operation not supported"); } /** * Getter function to access Relinearization Element Vector B. * Throws exception, to be overridden by derived class. * * @return Element vector B. */ virtual const std::vector<Element> &GetBVector() const { throw std::runtime_error("GetBVector operation not supported"); } /** * Setter function to store key switch Element. * Throws exception, to be overridden by derived class. * * @param &a is the Element to be copied. */ virtual void SetA(const Element &a) { throw std::runtime_error("SetA copy operation not supported"); } /** * Setter function to store key switch Element. * Throws exception, to be overridden by derived class. * * @param &&a is the Element to be moved. 
*/ virtual void SetA(Element &&a) { throw std::runtime_error("SetA move operation not supported"); } /** * Getter function to access key switch Element. * Throws exception, to be overridden by derived class. * * @return Element. */ virtual const Element &GetA() const { throw std::runtime_error("GetA operation not supported"); } friend bool operator==(const LPEvalKeyImpl& a, const LPEvalKeyImpl& b) { return a.key_compare(b); } friend bool operator!=(const LPEvalKeyImpl& a, LPEvalKeyImpl& b) { return ! (a == b); } virtual bool key_compare(const LPEvalKeyImpl& other) const = 0; }; template<typename Element> class LPEvalKeyRelinImpl; template<typename Element> using LPEvalKeyRelin = shared_ptr<LPEvalKeyRelinImpl<Element>>; /** * @brief Concrete class for Relinearization keys of RLWE scheme * @tparam Element a ring element. */ template <class Element> class LPEvalKeyRelinImpl : public LPEvalKeyImpl<Element> { public: /** * Basic constructor for setting crypto params * * @param &cryptoParams is the reference to cryptoParams */ LPEvalKeyRelinImpl(CryptoContext<Element> cc) : LPEvalKeyImpl<Element>(cc) {} virtual ~LPEvalKeyRelinImpl() {} /** * Copy constructor * *@param &rhs key to copy from */ explicit LPEvalKeyRelinImpl(const LPEvalKeyRelinImpl<Element> &rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) { m_rKey = rhs.m_rKey; } /** * Move constructor * *@param &rhs key to move from */ explicit LPEvalKeyRelinImpl(LPEvalKeyRelinImpl<Element> &&rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) { m_rKey = std::move(rhs.m_rKey); } /** * Assignment Operator. * * @param &rhs key to copy from */ const LPEvalKeyRelinImpl<Element>& operator=(const LPEvalKeyRelinImpl<Element> &rhs) { this->context = rhs.context; this->m_rKey = rhs.m_rKey; return *this; } /** * Move Assignment Operator. * * @param &rhs key to move from */ const LPEvalKeyRelinImpl<Element>& operator=(LPEvalKeyRelinImpl<Element> &&rhs) { this->context = rhs.context; rhs.context = 0; m_rKey = std::move(rhs.m_rKey); return *this; } /** * Setter function to store Relinearization Element Vector A. * Overrides base class implementation. * * @param &a is the Element vector to be copied. */ virtual void SetAVector(const std::vector<Element> &a) { m_rKey.insert(m_rKey.begin() + 0, a); } /** * Setter function to store Relinearization Element Vector A. * Overrides base class implementation. * * @param &&a is the Element vector to be moved. */ virtual void SetAVector(std::vector<Element> &&a) { m_rKey.insert(m_rKey.begin() + 0, std::move(a)); } /** * Getter function to access Relinearization Element Vector A. * Overrides base class implementation. * * @return Element vector A. */ virtual const std::vector<Element> &GetAVector() const { return m_rKey.at(0); } /** * Setter function to store Relinearization Element Vector B. * Overrides base class implementation. * * @param &b is the Element vector to be copied. */ virtual void SetBVector(const std::vector<Element> &b) { m_rKey.insert(m_rKey.begin() + 1, b); } /** * Setter function to store Relinearization Element Vector B. * Overrides base class implementation. * * @param &&b is the Element vector to be moved. */ virtual void SetBVector(std::vector<Element> &&b) { m_rKey.insert(m_rKey.begin() + 1, std::move(b)); } /** * Getter function to access Relinearization Element Vector B. * Overrides base class implementation. * * @return Element vector B. 
*/ virtual const std::vector<Element> &GetBVector() const { return m_rKey.at(1); } /** * Serialize the object into a Serialized * @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject()); * @return true if successfully serialized */ bool Serialize(Serialized *serObj) const; /** * SerializeWithoutContext - serializes the object into a Serialized, withut the cryptocontext * @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject()); * @return true if successfully serialized */ bool SerializeWithoutContext(Serialized *serObj) const; /** * Deserialize from the serialization * @param serObj - contains the serialization * @return true on success */ bool Deserialize(const Serialized &serObj); bool key_compare(const LPEvalKeyImpl<Element>& other) const { const LPEvalKeyRelinImpl<Element> &oth = dynamic_cast<const LPEvalKeyRelinImpl<Element> &>(other); if( !CryptoObject<Element>::operator==(other) ) return false; if( this->m_rKey.size() != oth.m_rKey.size() ) return false; for( size_t i=0; i<this->m_rKey.size(); i++ ) { if( this->m_rKey[i].size() != oth.m_rKey[i].size() ) return false; for( size_t j=0; j<this->m_rKey[i].size(); j++ ) { if( this->m_rKey[i][j] != oth.m_rKey[i][j] ) return false; } } return true; } private: //private member to store vector of vector of Element. std::vector< std::vector<Element> > m_rKey; }; template<typename Element> class LPEvalKeyNTRURelinImpl; template<typename Element> using LPEvalKeyNTRURelin = shared_ptr<LPEvalKeyNTRURelinImpl<Element>>; /** * @brief Evaluation Relinearization keys for NTRU scheme. * @tparam Element a ring element. */ template <class Element> class LPEvalKeyNTRURelinImpl : public LPEvalKeyImpl<Element> { public: /** * Basic constructor for setting crypto params * * @param &cryptoParams is the reference to cryptoParams */ LPEvalKeyNTRURelinImpl(CryptoContext<Element> cc) : LPEvalKeyImpl<Element>(cc) {} virtual ~LPEvalKeyNTRURelinImpl() {} /** * Copy constructor * *@param &rhs key to copy from */ explicit LPEvalKeyNTRURelinImpl(const LPEvalKeyNTRURelinImpl<Element> &rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) { m_rKey = rhs.m_rKey; } /** * Move constructor * *@param &rhs key to move from */ explicit LPEvalKeyNTRURelinImpl(LPEvalKeyNTRURelinImpl<Element> &&rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) { m_rKey = std::move(rhs.m_rKey); } /** * Assignment Operator. * * @param &rhs key to copy from */ const LPEvalKeyNTRURelinImpl<Element>& operator=(const LPEvalKeyNTRURelinImpl<Element> &rhs) { this->context = rhs.context; this->m_rKey = rhs.m_rKey; return *this; } /** * Move Assignment Operator. * * @param &rhs key to move from */ const LPEvalKeyNTRURelinImpl<Element>& operator=(LPEvalKeyNTRURelinImpl<Element> &&rhs) { this->context = rhs.context; rhs.context = 0; m_rKey = std::move(rhs.m_rKey); return *this; } /** * Setter function to store Relinearization Element Vector A. * Overrides base class implementation. * * @param &a is the Element vector to be copied. */ virtual void SetAVector(const std::vector<Element> &a) { for (usint i = 0; i < a.size(); i++) { m_rKey.insert(m_rKey.begin() + i, a.at(i)); } } /** * Setter function to store Relinearization Element Vector A. * Overrides base class implementation. * * @param &&a is the Element vector to be moved. */ virtual void SetAVector(std::vector<Element> &&a) { m_rKey = std::move(a); } /** * Getter function to access Relinearization Element Vector A. * Overrides base class implementation. 
* * @return Element vector A. */ virtual const std::vector<Element> &GetAVector() const { return m_rKey; } /** * Serialize the object into a Serialized * @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject()); * @return true if successfully serialized */ bool Serialize(Serialized *serObj) const; /** * SerializeWithoutContext - serializes the object into a Serialized, withut the cryptocontext * @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject()); * @return true if successfully serialized */ bool SerializeWithoutContext(Serialized *serObj) const; /** * Deserialize from the serialization * @param serObj - contains the serialization * @return true on success */ bool Deserialize(const Serialized &serObj); bool key_compare(const LPEvalKeyImpl<Element>& other) const { const LPEvalKeyNTRURelinImpl<Element> &oth = dynamic_cast<const LPEvalKeyNTRURelinImpl<Element> &>(other); if( !CryptoObject<Element>::operator ==(other) ) return false; if( this->m_rKey.size() != oth.m_rKey.size() ) return false; for( size_t i=0; i<this->m_rKey.size(); i++ ) { if( this->m_rKey[i] != oth.m_rKey[i] ) return false; } return true; } private: //private member to store vector of Element. std::vector<Element> m_rKey; }; template<typename Element> class LPEvalKeyNTRUImpl; template<typename Element> using LPEvalKeyNTRU = shared_ptr<LPEvalKeyNTRUImpl<Element>>; /** * @brief Concrete class for facilitating NTRU key switch. * @tparam Element a ring element. */ template <class Element> class LPEvalKeyNTRUImpl : public LPEvalKeyImpl<Element> { public: /** * Basic constructor for setting crypto params * * @param &cryptoParams is the reference to cryptoParams */ LPEvalKeyNTRUImpl(CryptoContext<Element> cc) : LPEvalKeyImpl<Element>(cc) {} virtual ~LPEvalKeyNTRUImpl() {} /** * Copy constructor * *@param &rhs key to copy from */ explicit LPEvalKeyNTRUImpl(const LPEvalKeyNTRUImpl<Element> &rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) { m_Key = rhs.m_Key; } /** * Move constructor * *@param &rhs key to move from */ explicit LPEvalKeyNTRUImpl(LPEvalKeyNTRUImpl<Element> &&rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) { m_Key = std::move(rhs.m_Key); } /** * Assignment Operator. * * @param &rhs key to copy from */ const LPEvalKeyNTRUImpl<Element>& operator=(const LPEvalKeyNTRUImpl<Element> &rhs) { this->context = rhs.context; this->m_Key = rhs.m_Key; return *this; } /** * Move Assignment Operator. * * @param &rhs key to move from */ const LPEvalKeyNTRUImpl<Element>& operator=(LPEvalKeyNTRUImpl<Element> &&rhs) { this->context = rhs.context; rhs.context = 0; m_Key = std::move(rhs.m_Key); return *this; } /** * Setter function to store NTRU key switch element. * Function copies the key. * Overrides the virtual function from base class LPEvalKeyImpl. * * @param &a is the key switch element to be copied. */ virtual void SetA(const Element &a) { m_Key = a; } /** * Setter function to store NTRU key switch Element. * Function moves the key. * Overrides the virtual function from base class LPEvalKeyImpl. * * @param &&a is the key switch Element to be moved. */ virtual void SetA(Element &&a) { m_Key = std::move(a); } /** * Getter function to access NTRU key switch Element. * Overrides the virtual function from base class LPEvalKeyImpl. * * @return NTRU key switch Element. */ virtual const Element& GetA() const { return m_Key; } /** * Serialize the object into a Serialized * @param *serObj is used to store the serialized result. 
It MUST be a rapidjson Object (SetObject());
    * @return true if successfully serialized
    */
    bool Serialize(Serialized *serObj) const;

    /**
    * SerializeWithoutContext - serializes the object into a Serialized, without the cryptocontext
    * @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
    * @return true if successfully serialized
    */
    bool SerializeWithoutContext(Serialized *serObj) const;

    /**
    * Deserialize from the serialization
    * @param serObj - contains the serialization
    * @return true on success
    */
    bool Deserialize(const Serialized &serObj);

    bool key_compare(const LPEvalKeyImpl<Element>& other) const {
        const LPEvalKeyNTRUImpl<Element> &oth = dynamic_cast<const LPEvalKeyNTRUImpl<Element> &>(other);

        if( !CryptoObject<Element>::operator ==(other) )
            return false;

        if( this->m_Key != oth.m_Key )
            return false;

        return true;
    }

private:
    /**
    * private member Element to store key.
    */
    Element m_Key;
};

template<typename Element>
class LPPrivateKeyImpl;

template<typename Element>
using LPPrivateKey = shared_ptr<LPPrivateKeyImpl<Element>>;

/**
* @brief Private key implementation template for Ring-LWE, NTRU-based schemes.
* @tparam Element a ring element.
*/
template <class Element>
class LPPrivateKeyImpl : public LPKey<Element> {
public:

    /**
    * Construct in context
    */
    LPPrivateKeyImpl(CryptoContext<Element> cc) : LPKey<Element>(cc, GenerateUniqueKeyID()) {}

    /**
    * Copy constructor
    * @param &rhs the LPPrivateKeyImpl to copy from
    */
    explicit LPPrivateKeyImpl(const LPPrivateKeyImpl<Element> &rhs)
        : LPKey<Element>(rhs.GetCryptoContext(), rhs.GetKeyTag()) {
        this->m_sk = rhs.m_sk;
    }

    /**
    * Move constructor
    * @param &rhs the LPPrivateKeyImpl to move from
    */
    explicit LPPrivateKeyImpl(LPPrivateKeyImpl<Element> &&rhs)
        : LPKey<Element>(rhs.GetCryptoContext(), rhs.GetKeyTag()) {
        this->m_sk = std::move(rhs.m_sk);
    }

    /**
    * Assignment Operator.
    *
    * @param &rhs LPPrivateKey to assign from.
    * @return the resulting LPPrivateKeyImpl
    */
    const LPPrivateKeyImpl<Element>& operator=(const LPPrivateKeyImpl<Element> &rhs) {
        CryptoObject<Element>::operator=(rhs);
        this->m_sk = rhs.m_sk;
        return *this;
    }

    /**
    * Move Assignment Operator.
    *
    * @param &rhs LPPrivateKeyImpl to assign from.
    * @return the resulting LPPrivateKeyImpl
    */
    const LPPrivateKeyImpl<Element>& operator=(LPPrivateKeyImpl<Element> &&rhs) {
        CryptoObject<Element>::operator=(rhs);
        this->m_sk = std::move(rhs.m_sk);
        return *this;
    }

    /**
    * Implementation of the Get accessor for private element.
    * @return the private element.
    */
    const Element & GetPrivateElement() const {
        return m_sk;
    }

    /**
    * Set accessor for private element.
    * @param &x private element to set to.
    */
    void SetPrivateElement(const Element &x) {
        m_sk = x;
    }

    /**
    * Set accessor for private element.
    * @param &&x private element to set to.
    */
    void SetPrivateElement(Element &&x) {
        m_sk = std::move(x);
    }

    /**
    * Serialize the object into a Serialized
    * @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
    * @return true if successfully serialized
    */
    bool Serialize(Serialized *serObj) const;

    /**
    * Populate the object from the deserialization of the Serialized
    * @param &serObj contains the serialized object
    * @return true on success
    */
    bool Deserialize(const Serialized &serObj);

    bool operator==(const LPPrivateKeyImpl& other) const {
        return CryptoObject<Element>::operator ==(other) &&
               m_sk == other.m_sk;
    }

    bool operator!=(const LPPrivateKeyImpl& other) const {
        return ! (*this == other);
    }

private:
    static const size_t intsInID = 128 / (sizeof(uint32_t) * 8);

    static string GenerateUniqueKeyID() {
        std::uniform_int_distribution<uint32_t> distribution(0, std::numeric_limits<uint32_t>::max());
        std::stringstream s;
        s.fill('0');
        s << std::hex;
        for( size_t i = 0; i < intsInID; i++ )
            s << std::setw(8) << distribution(PseudoRandomNumberGenerator::GetPRNG());
        return s.str();
    }

    Element m_sk;
};

template <class Element>
class LPKeyPair {
public:
    LPPublicKey<Element> publicKey;
    LPPrivateKey<Element> secretKey;

    LPKeyPair(LPPublicKeyImpl<Element>* a=0, LPPrivateKeyImpl<Element>* b=0)
        : publicKey(a), secretKey(b) {}

    bool good() {
        return publicKey && secretKey;
    }
};
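// Editor's note: an illustrative sketch (not part of the original header) of how
// the key types above are typically consumed through a CryptoContext. The exact
// context construction is scheme-specific and lives outside this header; `cc`
// below is a placeholder for an already configured CryptoContext<Element>.
//
//     LPKeyPair<Element> kp = cc->KeyGen();              // fills publicKey / secretKey
//     if( !kp.good() ) { /* handle key generation failure */ }
//     Plaintext ptxt = cc->MakePackedPlaintext(std::vector<uint64_t>{1, 2, 3});
//     auto ct = cc->Encrypt(kp.publicKey, ptxt);         // uses the LPPublicKey
//     Plaintext decoded;
//     cc->Decrypt(kp.secretKey, ct, &decoded);           // returns a DecryptResult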
/**
* @brief Abstract interface for parameter generation algorithm
* @tparam Element a ring element.
*/
template <class Element>
class LPParameterGenerationAlgorithm {
public:
    virtual ~LPParameterGenerationAlgorithm() {}

    /**
    * Method for computing all derived parameters based on chosen primitive parameters
    *
    * @param *cryptoParams the crypto parameters object to be populated with parameters.
    * @param evalAddCount number of EvalAdds assuming no EvalMult and KeySwitch operations are performed.
    * @param evalMultCount number of EvalMults assuming no EvalAdd and KeySwitch operations are performed.
    * @param keySwitchCount number of KeySwitch operations assuming no EvalAdd and EvalMult operations are performed.
    */
    virtual bool ParamsGen(shared_ptr<LPCryptoParameters<Element>> cryptoParams,
                           int32_t evalAddCount = 0, int32_t evalMultCount = 0, int32_t keySwitchCount = 0) const = 0;
};

/**
* @brief Abstract interface for encryption algorithm
* @tparam Element a ring element.
*/
template <class Element>
class LPEncryptionAlgorithm {
public:
    virtual ~LPEncryptionAlgorithm() {}

    /**
    * Method for encrypting plaintext using LBC
    *
    * @param publicKey public key used for encryption.
    * @param plaintext copy of the plaintext element. NOTE a copy is passed! That is NOT an error!
    * @return ciphertext which results from encryption.
    */
    virtual Ciphertext<Element> Encrypt(const LPPublicKey<Element> publicKey, Element plaintext) const = 0;

    /**
    * Method for encrypting plaintext using LBC
    *
    * @param privateKey private key used for encryption.
    * @param plaintext copy of the plaintext input. NOTE a copy is passed! That is NOT an error!
    * @return ciphertext which results from encryption.
    */
    virtual Ciphertext<Element> Encrypt(const LPPrivateKey<Element> privateKey, Element plaintext) const = 0;

    /**
    * Method for decrypting plaintext using LBC
    *
    * @param privateKey private key used for decryption.
    * @param ciphertext the ciphertext to be decrypted.
    * @param *plaintext the plaintext output.
    * @return the decoding result.
    */
    virtual DecryptResult Decrypt(const LPPrivateKey<Element> privateKey,
                                  const Ciphertext<Element> ciphertext,
                                  NativePoly *plaintext) const = 0;

    /**
    * Function to generate public and private keys
    *
    * @param cc the CryptoContext in which the keys are generated.
    * @param makeSparse set to true if ring reduce by a factor of 2 is to be used.
    * @return the generated key pair.
    */
    virtual LPKeyPair<Element> KeyGen(CryptoContext<Element> cc, bool makeSparse=false) = 0;
};

/**
* @brief Abstract interface for Leveled SHE operations
* @tparam Element a ring element.
*/ template <class Element> class LPLeveledSHEAlgorithm { public: virtual ~LPLeveledSHEAlgorithm() {} /** * Method for Modulus Reduction. * * @param &cipherText Ciphertext to perform mod reduce on. */ virtual Ciphertext<Element> ModReduce(Ciphertext<Element> cipherText) const = 0; /** * Method for Ring Reduction. * * @param &cipherText Ciphertext to perform ring reduce on. * @param &privateKey Private key used to encrypt the first argument. */ virtual Ciphertext<Element> RingReduce(Ciphertext<Element> cipherText, const LPEvalKey<Element> keySwitchHint) const = 0; /** * Method for Composed EvalMult * * @param &cipherText1 ciphertext1, first input ciphertext to perform multiplication on. * @param &cipherText2 cipherText2, second input ciphertext to perform multiplication on. * @param &quadKeySwitchHint is for resultant quadratic secret key after multiplication to the secret key of the particular level. * @param &cipherTextResult is the resulting ciphertext that can be decrypted with the secret key of the particular level. */ virtual Ciphertext<Element> ComposedEvalMult( const Ciphertext<Element> cipherText1, const Ciphertext<Element> cipherText2, const LPEvalKey<Element> quadKeySwitchHint) const = 0; /** * Method for Level Reduction from sk -> sk1. This method peforms a keyswitch on the ciphertext and then performs a modulus reduction. * * @param &cipherText1 is the original ciphertext to be key switched and mod reduced. * @param &linearKeySwitchHint is the linear key switch hint to perform the key switch operation. * @param &cipherTextResult is the resulting ciphertext. */ virtual Ciphertext<Element> LevelReduce(const Ciphertext<Element> cipherText1, const LPEvalKey<Element> linearKeySwitchHint) const = 0; /** * Function that determines if security requirements are met if ring dimension is reduced by half. * * @param ringDimension is the original ringDimension * @param &moduli is the vector of moduli that is used * @param rootHermiteFactor is the security threshold */ virtual bool CanRingReduce(usint ringDimension, const std::vector<BigInteger> &moduli, const double rootHermiteFactor) const = 0; }; /** * @brief Abstract interface class for LBC PRE algorithms * @tparam Element a ring element. */ template <class Element> class LPPREAlgorithm { public: virtual ~LPPREAlgorithm() {} /** * Virtual function to generate 1..log(q) encryptions for each bit of the original private key. * Variant that uses the new secret key directly. * * @param &newKey new private key for the new ciphertext. * @param &origPrivateKey original private key used for decryption. * @param *evalKey the evaluation key. * @return the re-encryption key. */ virtual LPEvalKey<Element> ReKeyGen(const LPPrivateKey<Element> newKey, const LPPrivateKey<Element> origPrivateKey) const = 0; /** * Virtual function to generate 1..log(q) encryptions for each bit of the original private key * Variant that uses the public key for the new secret key. * * @param &newKey public key for the new secret key. * @param &origPrivateKey original private key used for decryption. * @param *evalKey the evaluation key. * @return the re-encryption key. */ virtual LPEvalKey<Element> ReKeyGen(const LPPublicKey<Element> newKey, const LPPrivateKey<Element> origPrivateKey) const = 0; /** * Virtual function to define the interface for re-encypting ciphertext using the array generated by ProxyGen * * @param &evalKey proxy re-encryption key. * @param &ciphertext the input ciphertext. * @param *newCiphertext the new ciphertext. 
*/ virtual Ciphertext<Element> ReEncrypt(const LPEvalKey<Element> evalKey, const Ciphertext<Element> ciphertext) const = 0; }; /** * @brief Abstract interface class for LBC Multiparty algorithms. A version of this multiparty scheme built on the BGV scheme is seen here: * - Asharov G., Jain A., López-Alt A., Tromer E., Vaikuntanathan V., Wichs D. (2012) Multiparty Computation with Low Communication, Computation and Interaction via Threshold FHE. In: Pointcheval D., Johansson T. (eds) Advances in Cryptology – EUROCRYPT 2012. EUROCRYPT 2012. Lecture Notes in Computer Science, vol 7237. Springer, Berlin, Heidelberg * * During offline key generation, this multiparty scheme relies on the clients coordinating their public key generation. To do this, a single client generates a public-secret key pair. * This public key is shared with other keys which use an element in the public key to generate their own public keys. * The clients generate a shared key pair using a scheme-specific approach, then generate re-encryption keys. Re-encryption keys are uploaded to the server. * Clients encrypt data with their public keys and send the encrypted data server. * The data is re-encrypted. Computations are then run on the data. * The result is sent to each of the clients. * One client runs a "Leader" multiparty decryption operation with its own secret key. All other clients run a regular "Main" multiparty decryption with their own secret key. * The resulting partially decrypted ciphertext are then fully decrypted with the decryption fusion algorithms. * * @tparam Element a ring element. */ template <class Element> class LPMultipartyAlgorithm { public: virtual ~LPMultipartyAlgorithm() {} /** * Function to generate public and private keys for multiparty homomrophic encryption in coordination with a leading client that generated a first public key. * * @param cc cryptocontext for the keys to be generated. * @param pk1 private key used for decryption to be fused. * @param makeSparse set to true if ring reduce by a factor of 2 is to be used. * @param pre set to true if proxy re-encryption is used in multi-party protocol * @return key pair including the private and public key */ virtual LPKeyPair<Element> MultipartyKeyGen(CryptoContext<Element> cc, const LPPublicKey<Element> pk1, bool makeSparse=false, bool pre=false) = 0; /** * Function to generate public and private keys for multiparty homomrophic encryption server key pair in coordination with secret keys of clients. * * @param cc cryptocontext for the keys to be generated. * @param secretkeys private keys used for decryption to be fused. * @param makeSparse set to true if ring reduce by a factor of 2 is to be used. * @return key pair including the private and public key */ virtual LPKeyPair<Element> MultipartyKeyGen(CryptoContext<Element> cc, const vector<LPPrivateKey<Element>>& secretKeys, bool makeSparse=false) = 0; /** * Method for main decryption operation run by most decryption clients for multiparty homomorphic encryption * * @param privateKey private key used for decryption. * @param ciphertext ciphertext id decrypted. */ virtual Ciphertext<Element> MultipartyDecryptMain(const LPPrivateKey<Element> privateKey, const Ciphertext<Element> ciphertext) const = 0; /** * Method for decryption operation run by the lead decryption client for multiparty homomorphic encryption * * @param privateKey private key used for decryption. * @param ciphertext ciphertext id decrypted. 
*/ virtual Ciphertext<Element> MultipartyDecryptLead(const LPPrivateKey<Element> privateKey, const Ciphertext<Element> ciphertext) const = 0; /** * Method for fusing the partially decrypted ciphertext. * * @param &ciphertextVec ciphertext id decrypted. * @param *plaintext the plaintext output. * @return the decoding result. */ virtual DecryptResult MultipartyDecryptFusion(const vector<Ciphertext<Element>>& ciphertextVec, NativePoly *plaintext) const = 0; }; /** * @brief Abstract interface class for LBC SHE algorithms * @tparam Element a ring element. */ template <class Element> class LPSHEAlgorithm { public: virtual ~LPSHEAlgorithm() {} /** * Virtual function to define the interface for homomorphic addition of ciphertexts. * * @param ciphertext1 the input ciphertext. * @param ciphertext2 the input ciphertext. * @return the new ciphertext. */ virtual Ciphertext<Element> EvalAdd(const Ciphertext<Element> ciphertext1, const Ciphertext<Element> ciphertext2) const = 0; /** * Virtual function to define the interface for homomorphic addition of ciphertexts. * * @param ciphertext the input ciphertext. * @param plaintext the input plaintext. * @return the new ciphertext. */ virtual Ciphertext<Element> EvalAdd(const Ciphertext<Element> ciphertext, const Plaintext plaintext) const = 0; /** * Virtual function to define the interface for homomorphic subtraction of ciphertexts. * * @param ciphertext1 the input ciphertext. * @param ciphertext2 the input ciphertext. * @return the new ciphertext. */ virtual Ciphertext<Element> EvalSub(const Ciphertext<Element> ciphertext1, const Ciphertext<Element> ciphertext2) const = 0; /** * Virtual function to define the interface for homomorphic subtraction of ciphertexts. * * @param ciphertext the input ciphertext. * @param plaintext the input plaintext. * @return the new ciphertext. */ virtual Ciphertext<Element> EvalSub(const Ciphertext<Element> ciphertext, const Plaintext plaintext) const = 0; /** * Virtual function to define the interface for multiplicative homomorphic evaluation of ciphertext. * * @param ciphertext1 the input ciphertext. * @param ciphertext2 the input ciphertext. * @return the new ciphertext. */ virtual Ciphertext<Element> EvalMult(const Ciphertext<Element> ciphertext1, const Ciphertext<Element> ciphertext2) const = 0; /** * Virtual function to define the interface for multiplication of ciphertext by plaintext. * * @param ciphertext the input ciphertext. * @param plaintext the input plaintext. * @return the new ciphertext. */ virtual Ciphertext<Element> EvalMult(const Ciphertext<Element> ciphertext, const Plaintext plaintext) const = 0; /** * Virtual function to define the interface for multiplicative homomorphic evaluation of ciphertext using the evaluation key. * * @param &ciphertext1 first input ciphertext. * @param &ciphertext2 second input ciphertext. * @param &ek is the evaluation key to make the newCiphertext decryptable by the same secret key as that of ciphertext1 and ciphertext2. * @return the new ciphertext. */ virtual Ciphertext<Element> EvalMult(const Ciphertext<Element> ciphertext1, const Ciphertext<Element> ciphertext2, const LPEvalKey<Element> ek) const = 0; /** * Virtual function for evaluating multiplication of a ciphertext list which each multiplication is followed by relinearization operation. * * @param cipherTextList is the ciphertext list. * @param evalKeys is the evaluation key to make the newCiphertext * decryptable by the same secret key as that of ciphertext list. * @param *newCiphertext the new resulting ciphertext. 
*/ virtual Ciphertext<Element> EvalMultMany(const vector<Ciphertext<Element>>& cipherTextList, const vector<LPEvalKey<Element>> &evalKeys) const = 0; /** * Virtual function to define the interface for multiplicative homomorphic evaluation of ciphertext using the evaluation key. * * @param ct1 first input ciphertext. * @param ct2 second input ciphertext. * @param ek is the evaluation key to make the newCiphertext * decryptable by the same secret key as that of ciphertext1 and ciphertext2. * @param *newCiphertext the new resulting ciphertext. */ virtual Ciphertext<Element> EvalMultAndRelinearize(const Ciphertext<Element> ct1, const Ciphertext<Element> ct2, const vector<LPEvalKey<Element>> &ek) const = 0; /** * EvalLinRegression - Computes the parameter vector for linear regression using the least squares method * @param x - matrix of regressors * @param y - vector of dependent variables * @return the parameter vector using (x^T x)^{-1} x^T y (using least squares method) */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegression(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y) const { // multiplication is done in reverse order to minimize the number of inner products Matrix<RationalCiphertext<Element>> xTransposed = x->Transpose(); shared_ptr<Matrix<RationalCiphertext<Element>>> result(new Matrix<RationalCiphertext<Element>>(xTransposed * (*y))); Matrix<RationalCiphertext<Element>> xCovariance = xTransposed * (*x); Matrix<RationalCiphertext<Element>> cofactorMatrix = xCovariance.CofactorMatrix(); Matrix<RationalCiphertext<Element>> adjugateMatrix = cofactorMatrix.Transpose(); *result = adjugateMatrix * (*result); RationalCiphertext<Element> determinant; xCovariance.Determinant(&determinant); for (size_t row = 0; row < result->GetRows(); row++) for (size_t col = 0; col < result->GetCols(); col++) (*result)(row, col).SetDenominator(determinant.GetNumerator()); return result; } /** * Virtual function to define the interface for homomorphic negation of ciphertext. * * @param &ciphertext the input ciphertext. * @param *newCiphertext the new ciphertext. */ virtual Ciphertext<Element> EvalNegate(const Ciphertext<Element> ciphertext) const = 0; /** * Function to add random noise to all plaintext slots except for the first one; used in EvalInnerProduct * * @param &ciphertext the input ciphertext. * @return modified ciphertext */ Ciphertext<Element> AddRandomNoise(const Ciphertext<Element> ciphertext) const { string kID = ciphertext->GetKeyTag(); const auto cryptoParams = ciphertext->GetCryptoParameters(); const auto encodingParams = cryptoParams->GetEncodingParams(); const auto elementParams = cryptoParams->GetElementParams(); usint n = elementParams->GetRingDimension(); auto cc = ciphertext->GetCryptoContext(); DiscreteUniformGenerator dug; dug.SetModulus(encodingParams->GetPlaintextModulus()); BigVector randomVector = dug.GenerateVector(n - 1); std::vector<uint64_t> randomIntVector(n); //first plaintext slot does not need to change randomIntVector[0] = 0; for (usint i = 0; i < n - 1; i++) { randomIntVector[i + 1] = randomVector[i].ConvertToInt(); } Plaintext plaintext = cc->MakePackedPlaintext(randomIntVector); plaintext->Encode(); plaintext->GetElement<Element>().SetFormat(EVALUATION); auto ans = EvalAdd(ciphertext, plaintext); return ans; }; /** * Method for KeySwitchGen * * @param &originalPrivateKey Original private key used for encryption. * @param &newPrivateKey New private key to generate the keyswitch hint. 
 * @param *KeySwitchHint is where the resulting keySwitchHint will be placed.
	 */
	virtual LPEvalKey<Element> KeySwitchGen(
		const LPPrivateKey<Element> originalPrivateKey,
		const LPPrivateKey<Element> newPrivateKey) const = 0;

	/**
	 * Method for KeySwitch
	 *
	 * @param &keySwitchHint Hint required to perform the ciphertext switching.
	 * @param &cipherText Original ciphertext to perform switching on.
	 */
	virtual Ciphertext<Element> KeySwitch(
		const LPEvalKey<Element> keySwitchHint,
		const Ciphertext<Element> cipherText) const = 0;

	/**
	 * Method for KeySwitching based on RLWE relinearization (used only for the LTV scheme).
	 * Function to generate 1..log(q) encryptions for each bit of the original private key
	 *
	 * @param &newPublicKey encryption key for the new ciphertext.
	 * @param origPrivateKey original private key used for decryption.
	 */
	virtual LPEvalKey<Element> KeySwitchRelinGen(const LPPublicKey<Element> newPublicKey,
		const LPPrivateKey<Element> origPrivateKey) const = 0;

	/**
	 * Method for KeySwitching based on RLWE relinearization (used only for the LTV scheme).
	 *
	 * @param evalKey the evaluation key.
	 * @param ciphertext the input ciphertext.
	 * @return the resulting Ciphertext
	 */
	virtual Ciphertext<Element> KeySwitchRelin(const LPEvalKey<Element> evalKey,
		const Ciphertext<Element> ciphertext) const = 0;

	/**
	 * Virtual function to define the interface for generating an evaluation key which is used after each multiplication.
	 *
	 * @param originalPrivateKey private key used for encryption; the generated evaluation key makes the ciphertext produced by each multiplication decryptable by this same key.
	 */
	virtual LPEvalKey<Element> EvalMultKeyGen(
		const LPPrivateKey<Element> originalPrivateKey) const = 0;

	/**
	 * Virtual function to define the interface for generating an evaluation key which is used after each multiplication for depth more than 2.
	 *
	 * @param &originalPrivateKey Original private key used for encryption.
	 * @param *evalMultKeys the resulting evaluation key vector list.
	 */
	virtual vector<LPEvalKey<Element>> EvalMultKeysGen(
		const LPPrivateKey<Element> originalPrivateKey) const = 0;

	/**
	 * Virtual function to generate all automorphism keys for a given private key
	 *
	 * @param publicKey encryption key for the new ciphertext.
	 * @param origPrivateKey original private key used for decryption.
	 * @param indexList list of automorphism indices to be computed
	 * @return returns the evaluation keys
	 */
	virtual shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPublicKey<Element> publicKey,
		const LPPrivateKey<Element> origPrivateKey, const std::vector<usint> &indexList) const = 0;

	/**
	 * Virtual function for evaluating the automorphism of a ciphertext at index i
	 *
	 * @param ciphertext the input ciphertext.
	 * @param i automorphism index
	 * @param &evalKeys - reference to the map of evaluation keys generated by EvalAutomorphismKeyGen.
	 * @return resulting ciphertext
	 */
	virtual Ciphertext<Element> EvalAutomorphism(const Ciphertext<Element> ciphertext, usint i,
		const std::map<usint, LPEvalKey<Element>> &evalKeys) const = 0;

	/**
	 * Virtual function to generate automorphism keys for a given private key; uses the private key for encryption
	 *
	 * @param privateKey private key.
* @param indexList list of automorphism indices to be computed * @return returns the evaluation keys */ virtual shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPrivateKey<Element> privateKey, const std::vector<usint> &indexList) const = 0; /** * Virtual function to generate the automorphism keys for EvalSum; works only for packed encoding * * @param privateKey private key. * @return returns the evaluation keys */ shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumKeyGen(const LPPrivateKey<Element> privateKey, const LPPublicKey<Element> publicKey) const { const auto cryptoParams = privateKey->GetCryptoParameters(); const auto encodingParams = cryptoParams->GetEncodingParams(); const auto elementParams = cryptoParams->GetElementParams(); usint batchSize = encodingParams->GetBatchSize(); usint m = elementParams->GetCyclotomicOrder(); // stores automorphism indices needed for EvalSum std::vector<usint> indices; if (!(m & (m-1))){ // Check if m is a power of 2 indices = GenerateIndices_2n(batchSize, m); } else { // Arbitray cyclotomics usint g = encodingParams->GetPlaintextGenerator(); for (int i = 0; i < floor(log2(batchSize)); i++) { indices.push_back(g); g = (g * g) % m; } } if (publicKey) // NTRU-based scheme return EvalAutomorphismKeyGen(publicKey, privateKey, indices); else // Regular RLWE scheme return EvalAutomorphismKeyGen(privateKey, indices); } /** * Sums all elements in log (batch size) time - works only with packed encoding * * @param ciphertext the input ciphertext. * @param batchSize size of the batch to be summed up * @param &evalKeys - reference to the map of evaluation keys generated by EvalAutomorphismKeyGen. * @return resulting ciphertext */ Ciphertext<Element> EvalSum(const Ciphertext<Element> ciphertext, usint batchSize, const std::map<usint, LPEvalKey<Element>> &evalKeys) const { const shared_ptr<LPCryptoParameters<Element>> cryptoParams = ciphertext->GetCryptoParameters(); Ciphertext<Element> newCiphertext(new CiphertextImpl<Element>(*ciphertext)); const auto encodingParams = cryptoParams->GetEncodingParams(); const auto elementParams = cryptoParams->GetElementParams(); usint m = elementParams->GetCyclotomicOrder(); if ((encodingParams->GetBatchSize() == 0)) throw std::runtime_error("EvalSum: Packed encoding parameters 'batch size' is not set; Please check the EncodingParams passed to the crypto context."); else { if (!(m & (m-1))){ // Check if m is a power of 2 newCiphertext = EvalSum_2n(batchSize, m, evalKeys,newCiphertext); } else { // Arbitray cyclotomics if (encodingParams->GetPlaintextGenerator() == 0) throw std::runtime_error("EvalSum: Packed encoding parameters 'plaintext generator' is not set; Please check the EncodingParams passed to the crypto context."); else { usint g = encodingParams->GetPlaintextGenerator(); for (int i = 0; i < floor(log2(batchSize)); i++) { auto ea = EvalAutomorphism(newCiphertext, g, evalKeys); newCiphertext = EvalAdd(newCiphertext, ea); g = (g * g) % m; } } } } return newCiphertext; } /** * Evaluates inner product in batched encoding * * @param ciphertext1 first vector. * @param ciphertext2 second vector. * @param batchSize size of the batch to be summed up * @param &evalSumKeys - reference to the map of evaluation keys generated by EvalAutomorphismKeyGen. * @param &evalMultKey - reference to the evaluation key generated by EvalMultKeyGen. 
* @return resulting ciphertext */ Ciphertext<Element> EvalInnerProduct(const Ciphertext<Element> ciphertext1, const Ciphertext<Element> ciphertext2, usint batchSize, const std::map<usint, LPEvalKey<Element>> &evalSumKeys, const LPEvalKey<Element> evalMultKey) const { Ciphertext<Element> result = EvalMult(ciphertext1, ciphertext2, evalMultKey); result = EvalSum(result, batchSize, evalSumKeys); // add a random number to all slots except for the first one so that no information is leaked result = AddRandomNoise(result); return result; } /** * Evaluates inner product in batched encoding * * @param ciphertext1 first vector. * @param ciphertext2 plaintext. * @param batchSize size of the batch to be summed up * @param &evalSumKeys - reference to the map of evaluation keys generated by EvalAutomorphismKeyGen. * @param &evalMultKey - reference to the evaluation key generated by EvalMultKeyGen. * @return resulting ciphertext */ Ciphertext<Element> EvalInnerProduct(const Ciphertext<Element> ciphertext1, const Plaintext ciphertext2, usint batchSize, const std::map<usint, LPEvalKey<Element>> &evalSumKeys) const { Ciphertext<Element> result = EvalMult(ciphertext1, ciphertext2); result = EvalSum(result, batchSize, evalSumKeys); // add a random number to all slots except for the first one so that no information is leaked return AddRandomNoise(result); } /** * EvalLinRegressBatched - Computes the parameter vector for linear regression using the least squares method * Currently supports only two regressors * @param x - matrix of regressors * @param y - vector of dependent variables * @return the parameter vector using (x^T x)^{-1} x^T y (using least squares method) */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegressBatched(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize, const std::map<usint, LPEvalKey<Element>> &evalSumKeys, const LPEvalKey<Element> evalMultKey) const { Matrix<RationalCiphertext<Element>> covarianceMatrix(x->GetAllocator(), 2, 2); Ciphertext<Element> x0 = (*x)(0, 0).GetNumerator(); Ciphertext<Element> x1 = (*x)(0, 1).GetNumerator(); Ciphertext<Element> y0 = (*y)(0, 0).GetNumerator(); //Compute the covariance matrix for X covarianceMatrix(0, 0).SetNumerator(EvalInnerProduct(x0, x0, batchSize, evalSumKeys, evalMultKey)); covarianceMatrix(0, 1).SetNumerator(EvalInnerProduct(x0, x1, batchSize, evalSumKeys, evalMultKey)); covarianceMatrix(1, 0) = covarianceMatrix(0, 1); covarianceMatrix(1, 1).SetNumerator(EvalInnerProduct(x1, x1, batchSize, evalSumKeys, evalMultKey)); Matrix<RationalCiphertext<Element>> cofactorMatrix = covarianceMatrix.CofactorMatrix(); Matrix<RationalCiphertext<Element>> adjugateMatrix = cofactorMatrix.Transpose(); shared_ptr<Matrix<RationalCiphertext<Element>>> result(new Matrix<RationalCiphertext<Element>>(x->GetAllocator(), 2, 1)); (*result)(0, 0).SetNumerator(EvalInnerProduct(x0, y0, batchSize, evalSumKeys, evalMultKey)); (*result)(1, 0).SetNumerator(EvalInnerProduct(x1, y0, batchSize, evalSumKeys, evalMultKey)); *result = adjugateMatrix * (*result); RationalCiphertext<Element> determinant; covarianceMatrix.Determinant(&determinant); for (size_t row = 0; row < result->GetRows(); row++) for (size_t col = 0; col < result->GetCols(); col++) (*result)(row, col).SetDenominator(determinant.GetNumerator()); return result; } /** * EvalCrossCorrelation - Computes the sliding sum of inner products (known as * as cross-correlation, sliding inner product, or sliding dot product in * image 
processing)
	 * @param x - first vector of row vectors
	 * @param y - second vector of row vectors
	 * @param batchSize - batch size for packed encoding
	 * @param indexStart - starting index in the vectors of row vectors
	 * @param length - length of the slice in the vectors of row vectors
	 * @param evalSumKeys - evaluation keys used for the automorphism operation
	 * @param evalMultKey - the evaluation key used for multiplication
	 * @return sum(x_i*y_i), i.e., a sum of inner products
	 */
	Ciphertext<Element> EvalCrossCorrelation(const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
		const shared_ptr<Matrix<RationalCiphertext<Element>>> y,
		usint batchSize, usint indexStart, usint length,
		const std::map<usint, LPEvalKey<Element>> &evalSumKeys,
		const LPEvalKey<Element> evalMultKey) const {

		if (length == 0)
			length = x->GetRows();

		// bounds check on the slice [indexStart, indexStart + length)
		if (indexStart + length > x->GetRows())
			throw std::runtime_error("The number of rows exceeds the dimension of the vector");

		//additional error checking can be added here

		Ciphertext<Element> result;

		Ciphertext<Element> x0 = (*x)(indexStart, 0).GetNumerator();
		Ciphertext<Element> y0 = (*y)(indexStart, 0).GetNumerator();

		result = EvalInnerProduct(x0, y0, batchSize, evalSumKeys, evalMultKey);

#pragma omp parallel for ordered schedule(dynamic)
		for (usint i = indexStart + 1; i < indexStart + length; i++) {
			Ciphertext<Element> xi = (*x)(i, 0).GetNumerator();
			Ciphertext<Element> yi = (*y)(i, 0).GetNumerator();

			auto product = EvalInnerProduct(xi, yi, batchSize, evalSumKeys, evalMultKey);
#pragma omp ordered
			{
				result = EvalAdd(result, product);
			}
		}

		return result;
	}

private:
	std::vector<usint> GenerateIndices_2n(usint batchSize, usint m) const {

		// stores automorphism indices needed for EvalSum
		std::vector<usint> indices;

		usint g = 5;
		for (int i = 0; i < floor(log2(batchSize)) - 1; i++) {
			indices.push_back(g);
			g = (g * g) % m;
		}
		if (2 * batchSize < m)
			indices.push_back(g);
		indices.push_back(3);

		return indices;
	}

	Ciphertext<Element> EvalSum_2n(usint batchSize, usint m,
		const std::map<usint, LPEvalKey<Element>> &evalKeys,
		const Ciphertext<Element> ciphertext) const {

		Ciphertext<Element> newCiphertext(new CiphertextImpl<Element>(*ciphertext));

		usint g = 5;
		for (int i = 0; i < floor(log2(batchSize)) - 1; i++) {
			newCiphertext = EvalAdd(newCiphertext, EvalAutomorphism(newCiphertext, g, evalKeys));
			g = (g * g) % m;
		}
		if (2 * batchSize < m)
			newCiphertext = EvalAdd(newCiphertext, EvalAutomorphism(newCiphertext, g, evalKeys));
		newCiphertext = EvalAdd(newCiphertext, EvalAutomorphism(newCiphertext, 3, evalKeys));

		return newCiphertext;
	}
};

/**
 * @brief Abstract interface class for LBC FHE algorithms
 * @tparam Element a ring element.
 */
template <class Element>
class LPFHEAlgorithm {
public:
	virtual ~LPFHEAlgorithm() {}

	/**
	 * Virtual function to define the interface for bootstrapping evaluation of ciphertext
	 *
	 * @param &ciphertext the input ciphertext.
	 * @param *newCiphertext the new ciphertext.
	 */
	virtual void Bootstrap(const Ciphertext<Element> &ciphertext,
		Ciphertext<Element> *newCiphertext) const = 0;
};

/**
 * @brief main implementation class to capture essential cryptoparameters of any LBC system
 * @tparam Element a ring element.
 */
template <typename Element>
class LPCryptoParameters : public Serializable {
public:
	virtual ~LPCryptoParameters() {}

	/**
	 * Returns the value of plaintext modulus p
	 *
	 * @return the plaintext modulus.
*/ const PlaintextModulus &GetPlaintextModulus() const { return m_encodingParams->GetPlaintextModulus(); } /** * Returns the reference to IL params * * @return the ring element parameters. */ const shared_ptr<typename Element::Params> GetElementParams() const { return m_params; } /** * Returns the reference to encoding params * * @return the encoding parameters. */ const EncodingParams GetEncodingParams() const { return m_encodingParams; } /** * Sets the value of plaintext modulus p */ void SetPlaintextModulus(const PlaintextModulus &plaintextModulus) { m_encodingParams->SetPlaintextModulus(plaintextModulus); } virtual bool operator==(const LPCryptoParameters<Element>& cmp) const = 0; bool operator!=(const LPCryptoParameters<Element>& cmp) const { return !(*this == cmp); } /** * Overload to allow printing of parameters to an iostream * NOTE that the implementation relies on calling the virtual PrintParameters method * @param out - the stream to print to * @param item - reference to the item to print * @return the stream */ friend std::ostream& operator<<(std::ostream& out, const LPCryptoParameters& item) { item.PrintParameters(out); return out; } virtual usint GetRelinWindow() const { return 0; } virtual const typename Element::DggType &GetDiscreteGaussianGenerator() const { throw std::logic_error("No DGG Available for this parameter set"); } /** * Sets the reference to element params */ void SetElementParams(shared_ptr<typename Element::Params> params) { m_params = params; } /** * Sets the reference to encoding params */ void SetEncodingParams(EncodingParams encodingParams) { m_encodingParams = encodingParams; } protected: LPCryptoParameters(const PlaintextModulus &plaintextModulus = 2) { m_encodingParams.reset( new EncodingParamsImpl(plaintextModulus) ); } LPCryptoParameters(shared_ptr<typename Element::Params> params, const PlaintextModulus &plaintextModulus) { m_params = params; m_encodingParams.reset( new EncodingParamsImpl(plaintextModulus) ); } LPCryptoParameters(shared_ptr<typename Element::Params> params, EncodingParams encodingParams) { m_params = params; m_encodingParams = encodingParams; } LPCryptoParameters(LPCryptoParameters<Element> *from, shared_ptr<typename Element::Params> newElemParms) { *this = *from; m_params = newElemParms; } virtual void PrintParameters(std::ostream& out) const { out << "Element Parameters: " << *m_params << std::endl; out << "Encoding Parameters: " << *m_encodingParams << std::endl; } private: //element-specific parameters shared_ptr<typename Element::Params> m_params; //encoding-specific parameters EncodingParams m_encodingParams; }; /** * @brief Abstract interface for public key encryption schemes * @tparam Element a ring element. 
*/ template <class Element> class LPPublicKeyEncryptionScheme { public: LPPublicKeyEncryptionScheme() : m_algorithmParamsGen(0), m_algorithmEncryption(0), m_algorithmPRE(0), m_algorithmMultiparty(0), m_algorithmSHE(0), m_algorithmFHE(0), m_algorithmLeveledSHE(0) {} virtual ~LPPublicKeyEncryptionScheme() { if (this->m_algorithmParamsGen != NULL) delete this->m_algorithmParamsGen; if (this->m_algorithmEncryption != NULL) delete this->m_algorithmEncryption; if (this->m_algorithmPRE != NULL) delete this->m_algorithmPRE; if (this->m_algorithmMultiparty != NULL) delete this->m_algorithmMultiparty; if (this->m_algorithmSHE != NULL) delete this->m_algorithmSHE; if (this->m_algorithmFHE != NULL) delete this->m_algorithmFHE; if (this->m_algorithmLeveledSHE != NULL) delete this->m_algorithmLeveledSHE; } virtual bool operator==(const LPPublicKeyEncryptionScheme& sch) const = 0; bool operator!=(const LPPublicKeyEncryptionScheme& sch) const { return !(*this == sch); } /** * Enable features with a bit mast of PKESchemeFeature codes * @param mask */ void Enable(usint mask) { if (mask&ENCRYPTION) Enable(ENCRYPTION); if (mask&PRE) Enable(PRE); if (mask&SHE) Enable(SHE); if (mask&LEVELEDSHE) Enable(LEVELEDSHE); if (mask&MULTIPARTY) Enable(MULTIPARTY); if (mask&FHE) Enable(FHE); } usint GetEnabled() const { usint flag = 0; if (m_algorithmEncryption != NULL) flag |= ENCRYPTION; if (m_algorithmPRE != NULL) flag |= PRE; if (m_algorithmSHE != NULL) flag |= SHE; if (m_algorithmFHE != NULL) flag |= FHE; if (m_algorithmLeveledSHE != NULL) flag |= LEVELEDSHE; if (m_algorithmMultiparty != NULL) flag |= MULTIPARTY; return flag; } //instantiated in the scheme implementation class virtual void Enable(PKESchemeFeature feature) = 0; ///////////////////////////////////////// // wrapper for LPParameterSelectionAlgorithm // bool ParamsGen(shared_ptr<LPCryptoParameters<Element>> cryptoParams, int32_t evalAddCount = 0, int32_t evalMultCount = 0, int32_t keySwitchCount = 0) const { if (this->m_algorithmParamsGen) { return this->m_algorithmParamsGen->ParamsGen(cryptoParams, evalAddCount, evalMultCount, keySwitchCount); } else { throw std::logic_error("Parameter generation operation has not been implemented"); } } ///////////////////////////////////////// // the three functions below are wrappers for things in LPEncryptionAlgorithm (ENCRYPT) // Ciphertext<Element> Encrypt(const LPPublicKey<Element> publicKey, const Element &plaintext) const { if(this->m_algorithmEncryption) { return this->m_algorithmEncryption->Encrypt(publicKey,plaintext); } else { throw std::logic_error("Encrypt operation has not been enabled"); } } Ciphertext<Element> Encrypt(const LPPrivateKey<Element> privateKey, const Element &plaintext) const { if(this->m_algorithmEncryption) { return this->m_algorithmEncryption->Encrypt(privateKey,plaintext); } else { throw std::logic_error("Encrypt operation has not been enabled"); } } DecryptResult Decrypt(const LPPrivateKey<Element> privateKey, const Ciphertext<Element> ciphertext, NativePoly *plaintext) const { if(this->m_algorithmEncryption) return this->m_algorithmEncryption->Decrypt(privateKey,ciphertext,plaintext); else { throw std::logic_error("Decrypt operation has not been enabled"); } } LPKeyPair<Element> KeyGen(CryptoContext<Element> cc, bool makeSparse) { if(this->m_algorithmEncryption) { auto kp = this->m_algorithmEncryption->KeyGen(cc, makeSparse); kp.publicKey->SetKeyTag( kp.secretKey->GetKeyTag() ); return kp; } else { throw std::logic_error("KeyGen operation has not been enabled"); } } 
///////////////////////////////////////// // the three functions below are wrappers for things in LPPREAlgorithm (PRE) // LPEvalKey<Element> ReKeyGen(const LPPublicKey<Element> newKey, const LPPrivateKey<Element> origPrivateKey) const { if(this->m_algorithmPRE) { auto rk = this->m_algorithmPRE->ReKeyGen(newKey,origPrivateKey); rk->SetKeyTag( newKey->GetKeyTag() ); return rk; } else { throw std::logic_error("ReKeyGen operation has not been enabled"); } } LPEvalKey<Element> ReKeyGen(const LPPrivateKey<Element> newKey, const LPPrivateKey<Element> origPrivateKey) const { if (this->m_algorithmPRE) { auto rk = this->m_algorithmPRE->ReKeyGen(newKey,origPrivateKey); rk->SetKeyTag( newKey->GetKeyTag() ); return rk; } else { throw std::logic_error("ReKeyGen operation has not been enabled"); } } Ciphertext<Element> ReEncrypt(const LPEvalKey<Element> evalKey, const Ciphertext<Element> ciphertext) const { if(this->m_algorithmPRE) { auto ct = this->m_algorithmPRE->ReEncrypt(evalKey,ciphertext); ct->SetKeyTag( evalKey->GetKeyTag() ); return ct; } else { throw std::logic_error("ReEncrypt operation has not been enabled"); } } ///////////////////////////////////////// // the three functions below are wrappers for things in LPMultipartyAlgorithm (Multiparty) // // Wrapper for Multiparty Key Gen // FIXME check key ID for multiparty LPKeyPair<Element> MultipartyKeyGen(CryptoContext<Element> cc, const LPPublicKey<Element> pk1, bool makeSparse, bool PRE) { if(this->m_algorithmMultiparty) { auto k = this->m_algorithmMultiparty->MultipartyKeyGen(cc, pk1, makeSparse, PRE); k.publicKey->SetKeyTag( k.secretKey->GetKeyTag() ); return k; } else { throw std::logic_error("MultipartyKeyGen operation has not been enabled"); } } // Wrapper for Multiparty Key Gen // FIXME key IDs for multiparty LPKeyPair<Element> MultipartyKeyGen(CryptoContext<Element> cc, const vector<LPPrivateKey<Element>>& secretKeys, bool makeSparse) { if(this->m_algorithmMultiparty) { auto k = this->m_algorithmMultiparty->MultipartyKeyGen(cc, secretKeys, makeSparse); k.publicKey->SetKeyTag( k.secretKey->GetKeyTag() ); return k; } else { throw std::logic_error("MultipartyKeyGen operation has not been enabled"); } } // FIXME key IDs for multiparty Ciphertext<Element> MultipartyDecryptMain(const LPPrivateKey<Element> privateKey, const Ciphertext<Element> ciphertext) const { if(this->m_algorithmMultiparty) { auto ct = this->m_algorithmMultiparty->MultipartyDecryptMain(privateKey,ciphertext); ct->SetKeyTag( privateKey->GetKeyTag() ); return ct; } else { throw std::logic_error("MultipartyDecryptMain operation has not been enabled"); } } // FIXME key IDs for multiparty Ciphertext<Element> MultipartyDecryptLead(const LPPrivateKey<Element> privateKey, const Ciphertext<Element> ciphertext) const { if(this->m_algorithmMultiparty) { auto ct = this->m_algorithmMultiparty->MultipartyDecryptLead(privateKey,ciphertext); ct->SetKeyTag( privateKey->GetKeyTag() ); return ct; } else { throw std::logic_error("MultipartyDecryptLead operation has not been enabled"); } } DecryptResult MultipartyDecryptFusion(const vector<Ciphertext<Element>>& ciphertextVec, NativePoly *plaintext) const { if(this->m_algorithmMultiparty) { return this->m_algorithmMultiparty->MultipartyDecryptFusion(ciphertextVec,plaintext); } else { throw std::logic_error("MultipartyDecrypt operation has not been enabled"); } } ///////////////////////////////////////// // the three functions below are wrappers for things in LPSHEAlgorithm (SHE) // Ciphertext<Element> AddRandomNoise(const Ciphertext<Element> 
ciphertext) const { if (this->m_algorithmSHE) return this->m_algorithmSHE->AddRandomNoise(ciphertext); else { throw std::logic_error("AddRandomNoise operation has not been enabled"); } } Ciphertext<Element> EvalAdd(const Ciphertext<Element> ciphertext1, const Ciphertext<Element> ciphertext2) const { if (this->m_algorithmSHE) { auto ct = this->m_algorithmSHE->EvalAdd(ciphertext1, ciphertext2); return ct; } else { throw std::logic_error("EvalAdd operation has not been enabled"); } } Ciphertext<Element> EvalAdd(const Ciphertext<Element> ciphertext1, const Plaintext plaintext) const { if (this->m_algorithmSHE) { auto ct = this->m_algorithmSHE->EvalAdd(ciphertext1, plaintext); return ct; } else { throw std::logic_error("EvalAdd operation has not been enabled"); } } Ciphertext<Element> EvalSub(const Ciphertext<Element> ciphertext1, const Ciphertext<Element> ciphertext2) const { if (this->m_algorithmSHE) { auto ct = this->m_algorithmSHE->EvalSub(ciphertext1, ciphertext2); return ct; } else { throw std::logic_error("EvalSub operation has not been enabled"); } } Ciphertext<Element> EvalSub(const Ciphertext<Element> ciphertext1, const Plaintext plaintext) const { if (this->m_algorithmSHE) { auto ct = this->m_algorithmSHE->EvalSub(ciphertext1, plaintext); return ct; } else { throw std::logic_error("EvalSub operation has not been enabled"); } } Ciphertext<Element> EvalMult(const Ciphertext<Element> ciphertext1, const Ciphertext<Element> ciphertext2) const { if (this->m_algorithmSHE) { auto ct = this->m_algorithmSHE->EvalMult(ciphertext1, ciphertext2); return ct; } else { throw std::logic_error("EvalMult operation has not been enabled"); } } Ciphertext<Element> EvalMult(const Ciphertext<Element> ciphertext, const Plaintext plaintext) const { if (this->m_algorithmSHE) return this->m_algorithmSHE->EvalMult(ciphertext, plaintext); else { throw std::logic_error("EvalMult operation has not been enabled"); } } Ciphertext<Element> EvalMult(const Ciphertext<Element> ciphertext1, const Ciphertext<Element> ciphertext2, const LPEvalKey<Element> evalKey) const { if (this->m_algorithmSHE) { auto ct = this->m_algorithmSHE->EvalMult(ciphertext1, ciphertext2, evalKey); return ct; } else { throw std::logic_error("EvalMult operation has not been enabled"); } } Ciphertext<Element> EvalMultMany(const vector<Ciphertext<Element>>& ciphertext, const vector<LPEvalKey<Element>> &evalKeys) const { if (this->m_algorithmSHE){ return this->m_algorithmSHE->EvalMultMany(ciphertext, evalKeys); } else { throw std::logic_error("EvalMultMany operation has not been enabled"); } } Ciphertext<Element> EvalNegate(const Ciphertext<Element> ciphertext) const { if (this->m_algorithmSHE) { auto ct = this->m_algorithmSHE->EvalNegate(ciphertext); return ct; } else { throw std::logic_error("EvalNegate operation has not been enabled"); } } shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPublicKey<Element> publicKey, const LPPrivateKey<Element> origPrivateKey, const std::vector<usint> &indexList) const { if (this->m_algorithmSHE) { auto km = this->m_algorithmSHE->EvalAutomorphismKeyGen(publicKey,origPrivateKey,indexList); for( auto& k : *km ) k.second->SetKeyTag( publicKey->GetKeyTag() ); return km; } else throw std::logic_error("EvalAutomorphismKeyGen operation has not been enabled"); } Ciphertext<Element> EvalAutomorphism(const Ciphertext<Element> ciphertext, usint i, const std::map<usint, LPEvalKey<Element>> &evalKeys) const { if (this->m_algorithmSHE) { auto ct = this->m_algorithmSHE->EvalAutomorphism(ciphertext, 
i, evalKeys); return ct; } else throw std::logic_error("EvalAutomorphism operation has not been enabled"); } shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPrivateKey<Element> privateKey, const std::vector<usint> &indexList) const { if (this->m_algorithmSHE) { auto km = this->m_algorithmSHE->EvalAutomorphismKeyGen(privateKey, indexList); for( auto& k : *km ) k.second->SetKeyTag( privateKey->GetKeyTag() ); return km; } else throw std::logic_error("EvalAutomorphismKeyGen operation has not been enabled"); } shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumKeyGen( const LPPrivateKey<Element> privateKey, const LPPublicKey<Element> publicKey) const { if (this->m_algorithmSHE) { auto km = this->m_algorithmSHE->EvalSumKeyGen(privateKey,publicKey); for( auto& k : *km ) { k.second->SetKeyTag( privateKey->GetKeyTag() ); } return km; } else throw std::logic_error("EvalSumKeyGen operation has not been enabled"); } Ciphertext<Element> EvalSum(const Ciphertext<Element> ciphertext, usint batchSize, const std::map<usint, LPEvalKey<Element>> &evalKeys) const { if (this->m_algorithmSHE) { auto ct = this->m_algorithmSHE->EvalSum(ciphertext, batchSize, evalKeys); return ct; } else throw std::logic_error("EvalSum operation has not been enabled"); } Ciphertext<Element> EvalInnerProduct(const Ciphertext<Element> ciphertext1, const Ciphertext<Element> ciphertext2, usint batchSize, const std::map<usint, LPEvalKey<Element>> &evalSumKeys, const LPEvalKey<Element> evalMultKey) const { if (this->m_algorithmSHE) { auto ct = this->m_algorithmSHE->EvalInnerProduct(ciphertext1, ciphertext2, batchSize, evalSumKeys, evalMultKey); ct->SetKeyTag( evalSumKeys.begin()->second->GetKeyTag() ); return ct; } else throw std::logic_error("EvalInnerProduct operation has not been enabled"); } Ciphertext<Element> EvalInnerProduct(const Ciphertext<Element> ciphertext1, const Plaintext ciphertext2, usint batchSize, const std::map<usint, LPEvalKey<Element>> &evalSumKeys) const { if (this->m_algorithmSHE) return this->m_algorithmSHE->EvalInnerProduct(ciphertext1, ciphertext2, batchSize, evalSumKeys); else throw std::logic_error("EvalInnerProduct operation has not been enabled"); } shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegressBatched(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize, const std::map<usint, LPEvalKey<Element>> &evalSumKeys, const LPEvalKey<Element> evalMultKey) const { if (this->m_algorithmSHE) { string kID = evalMultKey->GetKeyTag(); auto ctm = this->m_algorithmSHE->EvalLinRegressBatched(x, y, batchSize, evalSumKeys, evalMultKey); for( size_t r = 0; r < ctm->GetRows(); r++ ) for( size_t c = 0; c < ctm->GetCols(); c++ ) (*ctm)(r,c).SetKeyTag(kID); return ctm; } else throw std::logic_error("EvalLinRegressionBatched operation has not been enabled"); } Ciphertext<Element> EvalCrossCorrelation(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize, usint indexStart, usint length, const std::map<usint, LPEvalKey<Element>> &evalSumKeys, const LPEvalKey<Element> evalMultKey) const { if (this->m_algorithmSHE) { auto ct = this->m_algorithmSHE->EvalCrossCorrelation(x, y, batchSize, indexStart, length, evalSumKeys, evalMultKey); // FIXME: mark with which key? 
return ct; } else throw std::logic_error("EvalCrossCorrelation operation has not been enabled"); } /** * EvalLinRegression - Computes the parameter vector for linear regression using the least squares method * @param x - matrix of regressors * @param y - vector of dependent variables * @return the parameter vector using (x^T x)^{-1} x^T y (using least squares method) */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegression(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y) const { if (this->m_algorithmSHE) { auto ctm = this->m_algorithmSHE->EvalLinRegression(x, y); // FIXME mark with which key?? return ctm; } else { throw std::logic_error("EvalLinRegression operation has not been enabled"); } } LPEvalKey<Element> KeySwitchGen( const LPPrivateKey<Element> originalPrivateKey, const LPPrivateKey<Element> newPrivateKey) const { if (this->m_algorithmSHE) { auto kp = this->m_algorithmSHE->KeySwitchGen(originalPrivateKey, newPrivateKey); kp->SetKeyTag( newPrivateKey->GetKeyTag() ); return kp; } else { throw std::logic_error("KeySwitchGen operation has not been enabled"); } } Ciphertext<Element> KeySwitch( const LPEvalKey<Element> keySwitchHint, const Ciphertext<Element> cipherText) const { if (this->m_algorithmSHE) { auto ct = this->m_algorithmSHE->KeySwitch(keySwitchHint, cipherText); return ct; } else { throw std::logic_error("KeySwitch operation has not been enabled"); } } LPEvalKey<Element> KeySwitchRelinGen(const LPPublicKey<Element> newKey, const LPPrivateKey<Element> origPrivateKey) const { if (this->m_algorithmSHE) { auto kp = this->m_algorithmSHE->KeySwitchRelinGen(newKey, origPrivateKey); kp->SetKeyTag( newKey->GetKeyTag() ); return kp; } else { throw std::logic_error("KeySwitchRelinGen operation has not been enabled"); } } Ciphertext<Element> KeySwitchRelin(const LPEvalKey<Element> evalKey, const Ciphertext<Element> ciphertext) const { if (this->m_algorithmSHE) { auto ct = this->m_algorithmSHE->KeySwitchRelin(evalKey, ciphertext); ct->SetKeyTag( evalKey->GetKeyTag() ); return ct; } else { throw std::logic_error("KeySwitchRelin operation has not been enabled"); } } LPEvalKey<Element> EvalMultKeyGen(const LPPrivateKey<Element> originalPrivateKey) const { if(this->m_algorithmSHE) { auto ek = this->m_algorithmSHE->EvalMultKeyGen(originalPrivateKey); ek->SetKeyTag( originalPrivateKey->GetKeyTag() ); return ek; } else { throw std::logic_error("EvalMultKeyGen operation has not been enabled"); } } vector<LPEvalKey<Element>> EvalMultKeysGen(const LPPrivateKey<Element> originalPrivateKey) const { if(this->m_algorithmSHE){ auto ek = this->m_algorithmSHE->EvalMultKeysGen(originalPrivateKey); for(size_t i=0; i<ek.size(); i++) ek[i]->SetKeyTag( originalPrivateKey->GetKeyTag() ); return ek; } else { throw std::logic_error("EvalMultKeysGen operation has not been enabled"); } } Ciphertext<Element> EvalMultAndRelinearize(const Ciphertext<Element> ct1, const Ciphertext<Element> ct2, const vector<LPEvalKey<Element>> &ek) const { if(this->m_algorithmSHE) return this->m_algorithmSHE->EvalMultAndRelinearize(ct1, ct2, ek); else { throw std::logic_error("EvalMultAndRelinearize operation has not been enabled"); } } ///////////////////////////////////////// // the functions below are wrappers for things in LPFHEAlgorithm (FHE) // // TODO: Add Bootstrap and any other FHE methods ///////////////////////////////////////// // the functions below are wrappers for things in LPSHEAlgorithm (SHE) // Ciphertext<Element> 
ModReduce(Ciphertext<Element> cipherText) const { if(this->m_algorithmLeveledSHE) { auto ct = this->m_algorithmLeveledSHE->ModReduce(cipherText); ct->SetKeyTag( cipherText->GetKeyTag() ); return ct; } else{ throw std::logic_error("ModReduce operation has not been enabled"); } } Ciphertext<Element> RingReduce(Ciphertext<Element> cipherText, const LPEvalKey<Element> keySwitchHint) const { if(this->m_algorithmLeveledSHE){ auto ct = this->m_algorithmLeveledSHE->RingReduce(cipherText,keySwitchHint); ct->SetKeyTag( keySwitchHint->GetKeyTag() ); return ct; } else{ throw std::logic_error("RingReduce operation has not been enabled"); } } bool CanRingReduce(usint ringDimension, const std::vector<BigInteger> &moduli, const double rootHermiteFactor) const { if (this->m_algorithmLeveledSHE) { return this->m_algorithmLeveledSHE->CanRingReduce(ringDimension, moduli, rootHermiteFactor); } else { throw std::logic_error("CanRingReduce operation has not been enabled"); } } Ciphertext<Element> ComposedEvalMult( const Ciphertext<Element> cipherText1, const Ciphertext<Element> cipherText2, const LPEvalKey<Element> quadKeySwitchHint) const { if(this->m_algorithmLeveledSHE){ auto ct = this->m_algorithmLeveledSHE->ComposedEvalMult(cipherText1,cipherText2,quadKeySwitchHint); ct->SetKeyTag( quadKeySwitchHint->GetKeyTag() ); return ct; } else{ throw std::logic_error("ComposedEvalMult operation has not been enabled"); } } Ciphertext<Element> LevelReduce(const Ciphertext<Element> cipherText1, const LPEvalKeyNTRU<Element> linearKeySwitchHint) const { if(this->m_algorithmLeveledSHE){ auto ct = this->m_algorithmLeveledSHE->LevelReduce(cipherText1,linearKeySwitchHint); ct->SetKeyTag( linearKeySwitchHint->GetKeyTag() ); return ct; } else{ throw std::logic_error("LevelReduce operation has not been enabled"); } } const LPEncryptionAlgorithm<Element>& getAlgorithm() const { return *m_algorithmEncryption; } protected: LPParameterGenerationAlgorithm<Element> *m_algorithmParamsGen; LPEncryptionAlgorithm<Element> *m_algorithmEncryption; LPPREAlgorithm<Element> *m_algorithmPRE; LPMultipartyAlgorithm<Element> *m_algorithmMultiparty; LPSHEAlgorithm<Element> *m_algorithmSHE; LPFHEAlgorithm<Element> *m_algorithmFHE; LPLeveledSHEAlgorithm<Element> *m_algorithmLeveledSHE; }; } // namespace lbcrypto ends #endif
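/*
 * Usage note (not part of the original header): a minimal sketch of how the
 * wrapper methods above are typically driven.  "SketchEvalMultFlow" is a
 * hypothetical helper written for illustration; only the wrapper calls shown
 * in this header (Enable, KeyGen, EvalMultKeyGen, Encrypt, EvalMult, Decrypt)
 * are taken from it.  Each wrapper throws std::logic_error unless the
 * corresponding feature was enabled first.
 */
template <class Element>
void SketchEvalMultFlow(CryptoContext<Element> cc,
                        LPPublicKeyEncryptionScheme<Element>& scheme,
                        const Element& pt1, const Element& pt2) {
    scheme.Enable(ENCRYPTION | SHE);

    LPKeyPair<Element> kp = scheme.KeyGen(cc, false /*makeSparse*/);

    // Relinearization key for ciphertext-ciphertext multiplication.
    LPEvalKey<Element> emk = scheme.EvalMultKeyGen(kp.secretKey);

    Ciphertext<Element> c1 = scheme.Encrypt(kp.publicKey, pt1);
    Ciphertext<Element> c2 = scheme.Encrypt(kp.publicKey, pt2);

    // EvalMult with an evaluation key keeps the result decryptable by kp.secretKey.
    Ciphertext<Element> prod = scheme.EvalMult(c1, c2, emk);

    NativePoly decoded;
    scheme.Decrypt(kp.secretKey, prod, &decoded);
}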
fill_nr_s8.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <[email protected]> */ #include <stdlib.h> #include <math.h> //#include <omp.h> #include "config.h" #include "cint.h" #include "cvhf.h" #include "nr_direct.h" #include "optimizer.h" #include "gto/gto.h" #define MAX(I,J) ((I) > (J) ? (I) : (J)) void int2e_optimizer(CINTOpt **opt, int *atm, int natm, int *bas, int nbas, double *env); /* * 8-fold symmetry, k>=l, k>=i>=j, */ static void fillnr_s8(int (*intor)(), int (*fprescreen)(), double *eri, int ish, int jsh, CVHFOpt *vhfopt, IntorEnvs *envs) { const int *atm = envs->atm; const int *bas = envs->bas; const double *env = envs->env; const int natm = envs->natm; const int nbas = envs->nbas; const int *ao_loc = envs->ao_loc; const CINTOpt *cintopt = envs->cintopt; const int nao = ao_loc[nbas]; const size_t nao2 = nao * nao; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; double *cache = eri + di * dj * nao2; int dims[4] = {nao, nao, dj, di}; int ksh, lsh, ij, k, l; int shls[4]; double *peri; shls[2] = jsh; shls[3] = ish; for (ksh = 0; ksh <= ish; ksh++) { for (lsh = 0; lsh <= ksh; lsh++) { shls[0] = lsh; shls[1] = ksh; peri = eri + ao_loc[ksh] * nao + ao_loc[lsh]; if ((*fprescreen)(shls, vhfopt, atm, bas, env)) { (*intor)(peri, dims, shls, atm, natm, bas, nbas, env, cintopt, cache); } else { for (ij = 0; ij < di*dj; ij++) { for (k = 0; k < ao_loc[ksh+1]-ao_loc[ksh]; k++) { for (l = 0; l < ao_loc[lsh+1]-ao_loc[lsh]; l++) { peri[k*nao+l] = 0; } } peri += nao2; } } } } } static void store_ij(int (*intor)(), double *eri, double *buf, int ish, int jsh, CVHFOpt *vhfopt, IntorEnvs *envs) { const int nbas = envs->nbas; const int *ao_loc = envs->ao_loc; const int nao = ao_loc[nbas]; const size_t nao2 = nao * nao; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; int i, j, k, l, i0, j0, kl; size_t ij0; double *peri, *pbuf; fillnr_s8(intor, vhfopt->fprescreen, buf, ish, jsh, vhfopt, envs); for (i0 = ao_loc[ish], i = 0; i < di; i++, i0++) { for (j0 = ao_loc[jsh], j = 0; j < dj; j++, j0++) { if (i0 >= j0) { ij0 = i0*(i0+1)/2 + j0; peri = eri + ij0*(ij0+1)/2; pbuf = buf + nao2 * (i*dj+j); for (kl = 0, k = 0; k < i0; k++) { for (l = 0; l <= k; l++, kl++) { peri[kl] = pbuf[k*nao+l]; } } // k == i0 for (l = 0; l <= j0; l++, kl++) { peri[kl] = pbuf[k*nao+l]; } } } } } void GTO2e_cart_or_sph(int (*intor)(), CINTOpt *cintopt, double *eri, int *ao_loc, int *atm, int natm, int *bas, int nbas, double *env) { const size_t nao = ao_loc[nbas]; IntorEnvs envs = {natm, nbas, atm, bas, env, NULL, ao_loc, NULL, cintopt, 1}; CVHFOpt *vhfopt; CVHFnr_optimizer(&vhfopt, intor, cintopt, ao_loc, atm, natm, bas, nbas, env); vhfopt->fprescreen = CVHFnr_schwarz_cond; int shls_slice[] = {0, nbas}; const int di = GTOmax_shell_dim(ao_loc, shls_slice, 1); const size_t cache_size = GTOmax_cache_size(intor, shls_slice, 1, atm, natm, bas, nbas, env); #pragma omp parallel { int i, j, ij; double *buf = 
malloc(sizeof(double) * (di*di*nao*nao + cache_size)); #pragma omp for nowait schedule(dynamic, 2) for (ij = 0; ij < nbas*(nbas+1)/2; ij++) { i = (int)(sqrt(2*ij+.25) - .5 + 1e-7); j = ij - (i*(i+1)/2); store_ij(intor, eri, buf, i, j, vhfopt, &envs); } free(buf); } CVHFdel_optimizer(&vhfopt); }
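/*
 * Side note (not from the original file): the parallel loop above flattens the
 * shell pair (i, j), i >= j, into the triangular index ij = i*(i+1)/2 + j and
 * recovers i with i = floor(sqrt(2*ij + 0.25) - 0.5); the +1e-7 guards against
 * sqrt() landing just below an exact integer.  A self-contained check of that
 * inverse mapping:
 */
#include <assert.h>
#include <math.h>

static void check_tril_index(int nbas)
{
    for (int i = 0; i < nbas; i++) {
        for (int j = 0; j <= i; j++) {
            int ij = i * (i + 1) / 2 + j;               /* forward mapping */
            int ir = (int)(sqrt(2 * ij + .25) - .5 + 1e-7);
            int jr = ij - ir * (ir + 1) / 2;            /* recover j from ij and i */
            assert(ir == i && jr == j);
        }
    }
}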
round_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: [email protected]
 */

#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include <math.h>

int ref_round_fp32(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, int num_thread)
{
    // fewer than 4 dims: round the tensor as one flat array
    if (input_tensor->dim_num < 4)
    {
        float* input_data = input_tensor->data;
        float* out_data = output_tensor->data;
        int total_size = input_tensor->elem_num;

        for (int i = 0; i < total_size; i++)
        {
            // read from the input tensor, write the rounded value to the output
            out_data[i] = round(input_data[i]);
        }

        return 0;
    }
    // 4 dims (NCHW): round channel by channel in parallel
    else if (input_tensor->dim_num == 4)
    {
        int w = input_tensor->dims[3];
        int h = input_tensor->dims[2];
        int channels = input_tensor->dims[1];
        int size = h * w;
        int c_step = h * w;

        float* input_data = input_tensor->data;
        float* out_data = output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            float* src = input_data + c_step * q;
            float* dst = out_data + c_step * q;

            for (int i = 0; i < size; i++)
            {
                dst[i] = round(src[i]);
            }
        }

        return 0;
    }

    return -1;
}

static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    // exec_node->inplace_map[0] = 0;
    // exec_node->inplace_map[1] = 0;
    // exec_node->inplace_map_num = 1;
    return 0;
}

static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    // exec_node->inplace_map_num = 0;
    return 0;
}

static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;
    struct ir_tensor* output_tensor;
    int layout = ir_graph->graph_layout;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    // inplace inference
    // if(input_tensor->data != output_tensor->data)
    // {
    //     TLOG_ERR("input and output are not the same mem\n");
    //     set_tengine_errno(EFAULT);
    //     return -1;
    // }

    int ret = ref_round_fp32(input_tensor, output_tensor, exec_graph->num_thread);
    if (ret != 0)
        return -1;

    return 0;
}

static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    return OPS_SCORE_CANDO;
}

static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

static int reg_round_hcl_ops(void* arg)
{
    return
register_builtin_node_ops(OP_ROUND, &hcl_node_ops); } static int unreg_round_hcl_ops(void* arg) { return unregister_builtin_node_ops(OP_ROUND, &hcl_node_ops); } AUTO_REGISTER_OPS(reg_round_hcl_ops); AUTO_UNREGISTER_OPS(unreg_round_hcl_ops);
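/*
 * Standalone sketch (assumed, not part of Tengine): the same rounding kernel
 * on raw buffers, showing the two paths ref_round_fp32 takes -- a flat loop
 * for tensors with fewer than 4 dims and a channel-parallel loop for NCHW.
 */
#include <stddef.h>
#include <math.h>

static void round_flat(const float* in, float* out, int n)
{
    for (int i = 0; i < n; i++)
        out[i] = roundf(in[i]);
}

static void round_nchw(const float* in, float* out, int channels, int h, int w, int num_thread)
{
    int c_step = h * w;   /* elements per channel plane */
#pragma omp parallel for num_threads(num_thread)
    for (int q = 0; q < channels; q++)
        round_flat(in + (size_t)c_step * q, out + (size_t)c_step * q, c_step);
}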
gi_labeling.h
/*
 *
 * Copyright (C) 2018 Attila Gyulassy <[email protected]>
 * All rights reserved.
 *
 * This software may be modified and distributed under the terms
 * of the BSD license.  See the LICENSE file for details.
 */

#ifndef VERTEX_LABELING_H
#define VERTEX_LABELING_H

#include <cstdio>           // FILE, fopen, fread, fwrite, printf
#include <unordered_map>    // used by ReMapIds
#include "gi_basic_types.h"

namespace GInt {

	template<typename LABEL_TYPE>
	class DenseLabeling {
	protected:
		LABEL_TYPE* m_labels;
		INDEX_TYPE m_num_labels;
	public:
		DenseLabeling(INDEX_TYPE num_labels) : m_num_labels(num_labels) {
			m_labels = new LABEL_TYPE[num_labels];
			//printf(" allocated space for %d values of size %d\n", num_labels, sizeof(LABEL_TYPE));
		}

		~DenseLabeling() {
			delete[] m_labels;
		}

		void SetLabel(INDEX_TYPE id, LABEL_TYPE label) {
			m_labels[id] = label;
		}

		LABEL_TYPE GetLabel(INDEX_TYPE id) const {
			return m_labels[id];
		}

		INDEX_TYPE GetNumLabels() const {
			return m_num_labels;
		}

		LABEL_TYPE& operator[](const INDEX_TYPE id) { return m_labels[id]; }
		const LABEL_TYPE& operator[](const INDEX_TYPE id) const { return m_labels[id]; }

		void SetAll(LABEL_TYPE label) {
			// loop index is INDEX_TYPE (not int) so large label counts do not truncate
#pragma omp parallel for schedule(static)
			for (INDEX_TYPE i = 0; i < m_num_labels; i++) {
				m_labels[i] = label;
			}
		}

		void CopyValues(const DenseLabeling<LABEL_TYPE>* other) {
#pragma omp parallel for schedule(static)
			for (INDEX_TYPE i = 0; i < m_num_labels; i++) {
				m_labels[i] = other->m_labels[i];
			}
		}

		template<typename T>
		void ReMapIds(T* output) {
			// key type is LABEL_TYPE: the keys are label values, not vertex indices
			std::unordered_map<LABEL_TYPE, T> unique_ids;
			T new_id = 0;
			for (INDEX_TYPE i = 0; i < m_num_labels; i++) {
				if (unique_ids.find(m_labels[i]) == unique_ids.end()) {
					T set_id;
					if (m_labels[i] < 0)
						set_id = -1;       // negative labels all map to -1
					else
						set_id = new_id++; // compact non-negative labels
					unique_ids[m_labels[i]] = set_id;
					printf("mapping id %lld to %lld\n", (long long) m_labels[i], (long long) set_id);
				}
				output[i] = unique_ids.at(m_labels[i]);
			}
			printf("remapped %lld ids\n", (long long) new_id);
		}

		void ReadFromFile(const char* filename) {
			FILE* fin = fopen(filename, "rb");
			fread(m_labels, sizeof(LABEL_TYPE), m_num_labels, fin);
			fclose(fin);
		}

		void OutputToFile(const char* filename) const {
			printf("writing file %s \n", filename);
			FILE* fout = fopen(filename, "wb");
			//printf("Sizeof label type: %d\n", sizeof(LABEL_TYPE));
			fwrite(m_labels, sizeof(LABEL_TYPE), m_num_labels, fout);
			fclose(fout);
		}

		void OutputToIntFile(const char* filename) const {
			printf("writing file %s \n", filename);
			FILE* fout = fopen(filename, "wb");
			//printf("Sizeof int type: %d\n", sizeof(int));
			for (INDEX_TYPE i = 0; i < m_num_labels; i++) {
				int tval = (int) m_labels[i];
				fwrite(&tval, sizeof(int), 1, fout);
			}
			fclose(fout);
		}

		void OutputToFloatFile(const char* filename) const {
			printf("writing file %s \n", filename);
			FILE* fout = fopen(filename, "wb");
			//printf("Sizeof float type: %d\n", sizeof(float));
			for (INDEX_TYPE i = 0; i < m_num_labels; i++) {
				float tval = (float) m_labels[i];
				fwrite(&tval, sizeof(float), 1, fout);
			}
			fclose(fout);
		}

		LABEL_TYPE* LabelArray() { return m_labels; }
	};
}

#endif
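/*
 * Usage sketch (not part of the header): allocate a labeling, initialize it
 * in parallel, relabel a single vertex, and dump it to disk.  Assumes
 * INDEX_TYPE from gi_basic_types.h is an integer type, as the class requires;
 * the file name is illustrative.
 */
#include "gi_labeling.h"

void label_example()
{
    GInt::DenseLabeling<int> labels(1000);
    labels.SetAll(-1);                 // OpenMP-parallel fill
    labels.SetLabel(42, 7);            // single-vertex update
    labels.OutputToFile("labels.raw"); // raw binary dump, one int per vertex
}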
copy.c
/* * ======================================================================================= * * Author: Jan Eitzinger (je), [email protected] * Copyright (c) 2020 RRZE, University Erlangen-Nuremberg * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * ======================================================================================= */ #include <timing.h> double copy( double * restrict a, double * restrict b, int N ) { double S, E; S = getTimeStamp(); #pragma omp parallel for schedule(static) for (int i=0; i<N; i++) { a[i] = b[i]; } E = getTimeStamp(); return E-S; }
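/*
 * Driver sketch (assumed, not part of the original file): times copy() and
 * reports effective bandwidth.  The kernel reads b and writes a, so it moves
 * 2 * N * sizeof(double) bytes per call (ignoring any write-allocate traffic).
 */
#include <stdio.h>
#include <stdlib.h>

extern "C" double copy(double* a, double* b, int N);

int main(void)
{
    int N = 20000000;                         /* ~152 MiB per array */
    double* a = (double*)malloc(N * sizeof(double));
    double* b = (double*)malloc(N * sizeof(double));
    for (int i = 0; i < N; i++) b[i] = (double)i;

    double t = copy(a, b, N);
    printf("copy: %.4f s, %.2f GB/s\n", t, 2.0 * N * sizeof(double) / t * 1e-9);

    free(a);
    free(b);
    return 0;
}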
GB_concat_sparse_template.c
//------------------------------------------------------------------------------ // GB_concat_sparse_template: concatenate a tile into a sparse matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // The tile A is hypersparse, sparse, or full, not bitmap. If C is iso, then // so is A, and the values are not copied here. { //-------------------------------------------------------------------------- // get C and the tile A //-------------------------------------------------------------------------- #ifndef GB_ISO_CONCAT const GB_CTYPE *restrict Ax = (GB_CTYPE *) A->x ; GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ; #endif //-------------------------------------------------------------------------- // copy the tile A into C //-------------------------------------------------------------------------- int tid ; #pragma omp parallel for num_threads(A_nthreads) schedule(static) for (tid = 0 ; tid < A_ntasks ; tid++) { int64_t kfirst = kfirst_Aslice [tid] ; int64_t klast = klast_Aslice [tid] ; for (int64_t k = kfirst ; k <= klast ; k++) { int64_t j = GBH (Ah, k) ; const int64_t pC_start = W [j] ; //------------------------------------------------------------------ // find the part of the kth vector A(:,j) for this task //------------------------------------------------------------------ int64_t pA_start, pA_end ; // as done by GB_get_pA, but also get p0 = Ap [k] const int64_t p0 = GBP (Ap, k, avlen) ; const int64_t p1 = GBP (Ap, k+1, avlen) ; if (k == kfirst) { // First vector for task tid; may only be partially owned. pA_start = pstart_Aslice [tid] ; pA_end = GB_IMIN (p1, pstart_Aslice [tid+1]) ; } else if (k == klast) { // Last vector for task tid; may only be partially owned. pA_start = p0 ; pA_end = pstart_Aslice [tid+1] ; } else { // task tid entirely owns this vector A(:,k). pA_start = p0 ; pA_end = p1 ; } //------------------------------------------------------------------ // append A(:,j) onto C(:,j) //------------------------------------------------------------------ GB_PRAGMA_SIMD for (int64_t pA = pA_start ; pA < pA_end ; pA++) { int64_t i = GBI (Ai, pA, avlen) ; // i = Ai [pA] int64_t pC = pC_start + pA - p0 ; Ci [pC] = cistart + i ; // Cx [pC] = Ax [pA] ; GB_COPY (pC, pA, A_iso) ; } } } done = true ; } #undef GB_CTYPE #undef GB_ISO_CONCAT
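//------------------------------------------------------------------------------
// Side sketch (not part of the template): the pA_start/pA_end selection above
// implements the standard sliced-vector ownership rule.  A task owns the
// contiguous entry range [task_lo, task_hi); a vector k spans [p0, p1); the
// task processes their intersection.  The kfirst/klast branches are this
// intersection specialized to the three cases (first, last, interior vector).
// A hypothetical standalone version:
//------------------------------------------------------------------------------
#include <cstdint>

static inline void task_vector_range(int64_t p0, int64_t p1,
                                     int64_t task_lo, int64_t task_hi,
                                     int64_t* pA_start, int64_t* pA_end)
{
    // intersect the vector's span with the task's slice of the entry range
    *pA_start = (p0 > task_lo) ? p0 : task_lo;
    *pA_end   = (p1 < task_hi) ? p1 : task_hi;
}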
sddmm.h
/*! * Copyright (c) 2020 by Contributors * \file array/cpu/sddmm.h * \brief SDDMM CPU kernel function header. */ #ifndef DGL_ARRAY_CPU_SDDMM_H_ #define DGL_ARRAY_CPU_SDDMM_H_ #include <dgl/array.h> #include <dgl/bcast.h> #include "../selector.h" namespace dgl { namespace aten { namespace cpu {
/*! * \brief CPU kernel of g-SDDMM on Csr format. * \param bcast Broadcast information. * \param csr The Csr matrix. * \param lhs The left hand side operand feature. * \param rhs The right hand side operand feature. * \param out The result feature on edges. * \note It uses a node-parallel strategy: different threads are responsible * for the computation of different nodes. */
template <typename IdType, typename DType, typename Op, int LhsTarget = 0, int RhsTarget = 2> void SDDMMCsr(const BcastOff& bcast, const CSRMatrix& csr, NDArray lhs, NDArray rhs, NDArray out) { const bool has_idx = !IsNullArray(csr.data); const IdType* indptr = csr.indptr.Ptr<IdType>(); const IdType* indices = csr.indices.Ptr<IdType>(); const IdType* edges = csr.data.Ptr<IdType>(); const DType* X = lhs.Ptr<DType>(); const DType* Y = rhs.Ptr<DType>(); const int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len, rhs_dim = bcast.rhs_len, reduce_size = bcast.reduce_size; DType* O = out.Ptr<DType>();
#pragma omp parallel for
for (IdType rid = 0; rid < csr.num_rows; ++rid) { const IdType row_start = indptr[rid], row_end = indptr[rid + 1]; for (IdType j = row_start; j < row_end; ++j) { const IdType cid = indices[j]; const IdType eid = has_idx? edges[j] : j; DType* out_off = O + eid * dim; for (int64_t k = 0; k < dim; ++k) { const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k; const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k; const DType* lhs_off = Op::use_lhs? X + Selector<LhsTarget>::Call(rid, eid, cid) * lhs_dim + lhs_add * reduce_size : nullptr; const DType* rhs_off = Op::use_rhs? Y + Selector<RhsTarget>::Call(rid, eid, cid) * rhs_dim + rhs_add * reduce_size : nullptr; out_off[k] = Op::Call(lhs_off, rhs_off, reduce_size); } } } }
/*! * \brief CPU kernel of g-SDDMM on Coo format. * \param bcast Broadcast information. * \param coo The COO matrix. * \param lhs The left hand side operand feature. * \param rhs The right hand side operand feature. * \param out The result feature on edges. * \note It uses an edge-parallel strategy: different threads are responsible * for the computation of different edges. */
template <typename IdType, typename DType, typename Op, int LhsTarget = 0, int RhsTarget = 2> void SDDMMCoo(const BcastOff& bcast, const COOMatrix& coo, NDArray lhs, NDArray rhs, NDArray out) { const bool has_idx = !IsNullArray(coo.data); const IdType* row = coo.row.Ptr<IdType>(); const IdType* col = coo.col.Ptr<IdType>(); const IdType* edges = coo.data.Ptr<IdType>(); const DType* X = lhs.Ptr<DType>(); const DType* Y = rhs.Ptr<DType>(); const int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len, rhs_dim = bcast.rhs_len, reduce_size = bcast.reduce_size; DType* O = out.Ptr<DType>(); const int64_t nnz = coo.row->shape[0];
#pragma omp parallel for
for (IdType i = 0; i < nnz; ++i) { const IdType rid = row[i]; const IdType cid = col[i]; const IdType eid = has_idx? edges[i] : i; DType* out_off = O + eid * dim; for (int64_t k = 0; k < dim; ++k) { const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k; const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k; const DType* lhs_off = Op::use_lhs ?
X + Selector<LhsTarget>::Call(rid, eid, cid) * lhs_dim + lhs_add * reduce_size : nullptr; const DType* rhs_off = Op::use_rhs ? Y + Selector<RhsTarget>::Call(rid, eid, cid) * rhs_dim + rhs_add * reduce_size : nullptr; out_off[k] = Op::Call(lhs_off, rhs_off, bcast.reduce_size); } } } namespace op { //////////////////////////////// binary operators on CPU //////////////////////////////// template <typename DType> struct Add { static constexpr bool use_lhs = true; static constexpr bool use_rhs = true; inline static DType Call(const DType* lhs_off, const DType* rhs_off, int64_t len = 1) { return *lhs_off + *rhs_off; } }; template <typename DType> struct Sub { static constexpr bool use_lhs = true; static constexpr bool use_rhs = true; inline static DType Call(const DType* lhs_off, const DType* rhs_off, int64_t len = 1) { return *lhs_off - *rhs_off; } }; template <typename DType> struct Mul { static constexpr bool use_lhs = true; static constexpr bool use_rhs = true; inline static DType Call(const DType* lhs_off, const DType* rhs_off, int64_t len = 1) { return *lhs_off * *rhs_off; } }; template <typename DType> struct Div { static constexpr bool use_lhs = true; static constexpr bool use_rhs = true; inline static DType Call(const DType* lhs_off, const DType* rhs_off, int64_t len = 1) { return *lhs_off / *rhs_off; } }; template <typename DType> struct CopyLhs { static constexpr bool use_lhs = true; static constexpr bool use_rhs = false; inline static DType Call(const DType* lhs_off, const DType*, int64_t len = 1) { return *lhs_off; } }; template <typename DType> struct CopyRhs { static constexpr bool use_lhs = false; static constexpr bool use_rhs = true; inline static DType Call(const DType* , const DType* rhs_off, int64_t len = 1) { return *rhs_off; } }; template <typename DType> struct Dot { static constexpr bool use_lhs = true; static constexpr bool use_rhs = true; inline static DType Call(const DType* lhs_off, const DType* rhs_off, int64_t len = 1) { DType rst = 0; for (int64_t l = 0; l < len; ++l) { rst += lhs_off[l] * rhs_off[l]; } return rst; } }; #define SWITCH_OP(op, Op, ...) \ do { \ if ((op) == "add") { \ typedef dgl::aten::cpu::op::Add<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "sub") { \ typedef dgl::aten::cpu::op::Sub<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "mul") { \ typedef dgl::aten::cpu::op::Mul<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "div") { \ typedef dgl::aten::cpu::op::Div<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "copy_lhs") { \ typedef dgl::aten::cpu::op::CopyLhs<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "copy_rhs") { \ typedef dgl::aten::cpu::op::CopyRhs<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "dot") { \ typedef dgl::aten::cpu::op::Dot<DType> Op; \ { __VA_ARGS__ } \ } else { \ LOG(FATAL) << "Unsupported SDDMM binary operator: " << op; \ } \ } while (0) } // namespace op } // namespace cpu } // namespace aten } // namespace dgl #endif // DGL_ARRAY_CPU_SDDMM_H_
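/* For orientation, a dependency-free sketch of SDDMMCsr above in its
 * simplest configuration: Dot operator, no broadcasting, null csr.data (so
 * the edge id is the CSR position), scalar output per edge with the
 * reduction running over the full feature width. The NDArray/BcastOff/
 * Selector plumbing reduces to this; the function name and raw-array
 * interface are illustrative, not part of the DGL API. */
#include <stdint.h>

void sddmm_csr_dot(const int64_t *indptr, const int64_t *indices,
                   int64_t num_rows, int64_t dim,
                   const double *X,  /* num_rows x dim row (source) features */
                   const double *Y,  /* num_cols x dim column (dest) features */
                   double *out)      /* one value per nonzero, i.e. per edge */
{
#pragma omp parallel for
    for (int64_t rid = 0; rid < num_rows; ++rid) {
        for (int64_t e = indptr[rid]; e < indptr[rid + 1]; ++e) {
            const double *x = X + rid * dim;
            const double *y = Y + indices[e] * dim;
            double acc = 0.0;
            for (int64_t k = 0; k < dim; ++k)
                acc += x[k] * y[k];
            out[e] = acc;
        }
    }
}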
helpme_standalone.h
// // WARNING! This file is automatically generated from the sources in the src directory. // Do not modify this source code directly as any changes will be overwritten // // original file: ../src/helpme.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. Simmonett // // ENDLICENSE #ifndef _HELPME_HELPME_H_ #define _HELPME_HELPME_H_ #if __cplusplus || DOXYGEN // C++ header #include <algorithm> #include <array> #include <cmath> #include <complex> #include <functional> #include <iostream> #include <memory> #ifdef _OPENMP #include <omp.h> #endif #include <stdexcept> #include <string> #include <tuple> #include <unistd.h> #include <vector> // original file: ../src/cartesiantransform.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. Simmonett // // ENDLICENSE #ifndef _HELPME_CARTESIANTRANSFORM_H_ #define _HELPME_CARTESIANTRANSFORM_H_ // original file: ../src/matrix.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. Simmonett // // ENDLICENSE #ifndef _HELPME_MATRIX_H_ #define _HELPME_MATRIX_H_ #include <algorithm> #include <complex> #include <fstream> #include <initializer_list> #include <iostream> #include <iomanip> #include <numeric> #include <stdexcept> #include <tuple> // original file: ../src/lapack_wrapper.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. Simmonett // // ENDLICENSE // // The code for Jacobi diagonalization is taken (with minimal modification) from // // http://www.mymathlib.com/c_source/matrices/eigen/jacobi_cyclic_method.c // #ifndef _HELPME_LAPACK_WRAPPER_H_ #define _HELPME_LAPACK_WRAPPER_H_ #include <cmath> #include <limits> namespace helpme { //////////////////////////////////////////////////////////////////////////////// // void Jacobi_Cyclic_Method // // (Real eigenvalues[], Real *eigenvectors, Real *A, int n) // // // // Description: // // Find the eigenvalues and eigenvectors of a symmetric n x n matrix A // // using the Jacobi method. Upon return, the input matrix A will have // // been modified. // // The Jacobi procedure for finding the eigenvalues and eigenvectors of a // // symmetric matrix A is based on finding a similarity transformation // // which diagonalizes A. The similarity transformation is given by a // // product of a sequence of orthogonal (rotation) matrices each of which // // annihilates an off-diagonal element and its transpose. The rotation // // effects only the rows and columns containing the off-diagonal element // // and its transpose, i.e. if a[i][j] is an off-diagonal element, then // // the orthogonal transformation rotates rows a[i][] and a[j][], and // // equivalently it rotates columns a[][i] and a[][j], so that a[i][j] = 0 // // and a[j][i] = 0. // // The cyclic Jacobi method considers the off-diagonal elements in the // // following order: (0,1),(0,2),...,(0,n-1),(1,2),...,(n-2,n-1). 
If // // the magnitude of the off-diagonal element is greater than a threshold, // // then a rotation is performed to annihilate that off-diagonal element. // // The process described above is called a sweep. After a sweep has been // // completed, the threshold is lowered and another sweep is performed // // with the new threshold. This process is repeated until the final // // sweep is performed with the final threshold. //
// The orthogonal transformation which annihilates the matrix element // // a[k][m], k != m, is Q = q[i][j], where q[i][j] = 0 if i != j, i,j != k // // i,j != m and q[i][j] = 1 if i = j, i,j != k, i,j != m, q[k][k] = // // q[m][m] = cos(phi), q[k][m] = -sin(phi), and q[m][k] = sin(phi), where // // the angle phi is determined by requiring a[k][m] -> 0. This condition // // on the angle phi is equivalent to // // cot(2 phi) = 0.5 * (a[k][k] - a[m][m]) / a[k][m] // // Since tan(2 phi) = 2 tan(phi) / (1 - tan(phi)^2), // // tan(phi)^2 + 2cot(2 phi) * tan(phi) - 1 = 0. // // Solving for tan(phi), choosing the solution with smallest magnitude, // // tan(phi) = - cot(2 phi) + sgn(cot(2 phi)) sqrt(cot(2phi)^2 + 1). // // Then cos(phi)^2 = 1 / (1 + tan(phi)^2) and sin(phi)^2 = 1 - cos(phi)^2 // // Finally by taking the sqrts and assigning the sign to the sin the same // // as that of the tan, the orthogonal transformation Q is determined. //
// Let A" be the matrix obtained from the matrix A by applying the // // similarity transformation Q, since Q is orthogonal, A" = Q'AQ, where Q' // // is the transpose of Q (which is the same as the inverse of Q). Then // // a"[i][j] = Q'[i][p] a[p][q] Q[q][j] = Q[p][i] a[p][q] Q[q][j], // // where repeated indices are summed over. // // If i is not equal to either k or m, then Q[i][j] is the Kronecker // // delta. So if both i and j are not equal to either k or m, // // a"[i][j] = a[i][j]. // // If i = k, j = k, // // a"[k][k] = // // a[k][k]*cos(phi)^2 + a[k][m]*sin(2 phi) + a[m][m]*sin(phi)^2 // // If i = k, j = m, // // a"[k][m] = a"[m][k] = 0 = // // a[k][m]*cos(2 phi) + 0.5 * (a[m][m] - a[k][k])*sin(2 phi) // // If i = k, j != k or m, // // a"[k][j] = a"[j][k] = a[k][j] * cos(phi) + a[m][j] * sin(phi) // // If i = m, j = k, a"[m][k] = 0 // // If i = m, j = m, // // a"[m][m] = // // a[m][m]*cos(phi)^2 - a[k][m]*sin(2 phi) + a[k][k]*sin(phi)^2 // // If i = m, j != k or m, // // a"[m][j] = a"[j][m] = a[m][j] * cos(phi) - a[k][j] * sin(phi) // // //
// If X is the matrix of normalized eigenvectors stored so that the ith // // column corresponds to the ith eigenvalue, then AX = X Lambda, where // // Lambda is the diagonal matrix with the ith eigenvalue stored at // // Lambda[i][i], i.e. X'AX = Lambda and X is orthogonal, the eigenvectors // // are normalized and orthogonal. So, X = Q1 Q2 ... Qs, where Qi is // // the ith orthogonal matrix, i.e. X can be recursively approximated by // // the recursion relation X" = X Q, where Q is the orthogonal matrix and // // the initial estimate for X is the identity matrix. // // If j = k, then x"[i][k] = x[i][k] * cos(phi) + x[i][m] * sin(phi), // // if j = m, then x"[i][m] = x[i][m] * cos(phi) - x[i][k] * sin(phi), and // // if j != k and j != m, then x"[i][j] = x[i][j]. // // //
// Arguments: // // Real eigenvalues // // Array of dimension n, which upon return contains the eigenvalues of // // the matrix A. // // Real* eigenvectors // // Matrix of eigenvectors, the ith column of which contains an // // eigenvector corresponding to the ith eigenvalue in the array // // eigenvalues.
// // Real* A // // Pointer to the first element of the symmetric n x n matrix A. The // // input matrix A is modified during the process. // // int n // // The dimension of the array eigenvalues, number of columns and rows // // of the matrices eigenvectors and A. // // // // Return Values: // // Function is of type void. // // // // Example: // // #define N // // Real A[N][N], Real eigenvalues[N], Real eigenvectors[N][N] // // // // (your code to initialize the matrix A ) // // // // JacobiCyclicDiagonalization(eigenvalues, (Real*)eigenvectors, // // (Real *) A, N); // ////////////////////////////////////////////////////////////////////////////////
template <typename Real> void JacobiCyclicDiagonalization(Real *eigenvalues, Real *eigenvectors, const Real *A, int n) { int i, j, k, m; Real *pAk, *pAm, *p_r, *p_e; Real threshold_norm; Real threshold; Real tan_phi, sin_phi, cos_phi, tan2_phi, sin2_phi, cos2_phi; Real sin_2phi, cos_2phi, cot_2phi; Real dum1; Real dum2; Real dum3; Real max;
// Take care of trivial cases if (n < 1) return; if (n == 1) { eigenvalues[0] = *A; *eigenvectors = 1; return; }
// Initialize the matrix of eigenvectors to the identity matrix. for (p_e = eigenvectors, i = 0; i < n; i++) for (j = 0; j < n; p_e++, j++) if (i == j) *p_e = 1; else *p_e = 0;
// Calculate the threshold and threshold_norm. for (threshold = 0, pAk = const_cast<Real *>(A), i = 0; i < (n - 1); pAk += n, i++) for (j = i + 1; j < n; j++) threshold += *(pAk + j) * *(pAk + j); threshold = sqrt(threshold + threshold); threshold_norm = threshold * std::numeric_limits<Real>::epsilon(); max = threshold + 1; while (threshold > threshold_norm) { threshold /= 10; if (max < threshold) continue; max = 0; for (pAk = const_cast<Real *>(A), k = 0; k < (n - 1); pAk += n, k++) { for (pAm = pAk + n, m = k + 1; m < n; pAm += n, m++) { if (std::abs(*(pAk + m)) < threshold) continue;
// Calculate the sin and cos of the rotation angle which // annihilates A[k][m]. cot_2phi = 0.5f * (*(pAk + k) - *(pAm + m)) / *(pAk + m); dum1 = sqrt(cot_2phi * cot_2phi + 1); if (cot_2phi < 0) dum1 = -dum1; tan_phi = -cot_2phi + dum1; tan2_phi = tan_phi * tan_phi; sin2_phi = tan2_phi / (1 + tan2_phi); cos2_phi = 1 - sin2_phi; sin_phi = sqrt(sin2_phi); if (tan_phi < 0) sin_phi = -sin_phi; cos_phi = sqrt(cos2_phi); sin_2phi = 2 * sin_phi * cos_phi; cos_2phi = cos2_phi - sin2_phi;
// Rotate columns k and m for both the matrix A // and the matrix of eigenvectors.
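// What follows applies the similarity transformation A" = Q'AQ restricted
// to rows/columns k and m: the two diagonal entries mix through the
// cos^2 / sin^2 / sin(2 phi) terms, the pivot a[k][m] (and its mirror
// a[m][k]) is annihilated exactly, and every other affected entry is a
// plane rotation of the pair (a[i][k], a[i][m]). The i < k and i < m tests
// below pick the upper-triangle copy of each element to read and update.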
p_r = const_cast<Real *>(A); dum1 = *(pAk + k); dum2 = *(pAm + m); dum3 = *(pAk + m); *(pAk + k) = dum1 * cos2_phi + dum2 * sin2_phi + dum3 * sin_2phi; *(pAm + m) = dum1 * sin2_phi + dum2 * cos2_phi - dum3 * sin_2phi; *(pAk + m) = 0; *(pAm + k) = 0; for (i = 0; i < n; p_r += n, i++) { if ((i == k) || (i == m)) continue; if (i < k) dum1 = *(p_r + k); else dum1 = *(pAk + i); if (i < m) dum2 = *(p_r + m); else dum2 = *(pAm + i); dum3 = dum1 * cos_phi + dum2 * sin_phi; if (i < k) *(p_r + k) = dum3; else *(pAk + i) = dum3; dum3 = -dum1 * sin_phi + dum2 * cos_phi; if (i < m) *(p_r + m) = dum3; else *(pAm + i) = dum3; } for (p_e = eigenvectors, i = 0; i < n; p_e += n, i++) { dum1 = *(p_e + k); dum2 = *(p_e + m); *(p_e + k) = dum1 * cos_phi + dum2 * sin_phi; *(p_e + m) = -dum1 * sin_phi + dum2 * cos_phi; } } for (i = 0; i < n; i++) if (i == k) continue; else if (max < std::abs(*(pAk + i))) max = std::abs(*(pAk + i)); } } for (pAk = const_cast<Real *>(A), k = 0; k < n; pAk += n, k++) eigenvalues[k] = *(pAk + k); } } // Namespace helpme #endif // Header guard // original file: ../src/string_utils.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. Simmonett // // ENDLICENSE #ifndef _HELPME_STRING_UTIL_H_ #define _HELPME_STRING_UTIL_H_ #include <complex> #include <iomanip> #include <iostream> #include <sstream> #include <string> namespace helpme { /*! * \brief makes a string representation of a floating point number. * \param width the width used to display the number. * \param precision the precision used to display the number. * \return the string representation of the floating point number. */ template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0> std::string formatNumber(const T &number, int width, int precision) { std::stringstream stream; stream.setf(std::ios::fixed, std::ios::floatfield); stream << std::setw(width) << std::setprecision(precision) << number; return stream.str(); } /*! * \brief makes a string representation of a complex number. * \param width the width used to display the real and the imaginary components. * \param precision the precision used to display the real and the imaginary components. * \return the string representation of the complex number. */ template <typename T, typename std::enable_if<!std::is_floating_point<T>::value, int>::type = 0> std::string formatNumber(const T &number, int width, int precision) { std::stringstream stream; stream.setf(std::ios::fixed, std::ios::floatfield); stream << "(" << std::setw(width) << std::setprecision(precision) << number.real() << ", " << std::setw(width) << std::setprecision(precision) << number.imag() << ")"; return stream.str(); } /*! * \brief makes a string representation of a multdimensional tensor, stored in a flat array. * \param data pointer to the start of the array holding the tensor information. * \param size the length of the array holding the tensor information. * \param rowDim the dimension of the fastest running index. * \param width the width of each individual floating point number. * \param precision used to display each floating point number. * \return the string representation of the tensor. 
*/ template <typename T> std::string stringify(T *data, size_t size, size_t rowDim, int width = 14, int precision = 8) { std::stringstream stream; for (size_t ind = 0; ind < size; ++ind) { stream << formatNumber(data[ind], width, precision); if (ind % rowDim == rowDim - 1) stream << std::endl; else stream << " "; } return stream.str(); } } // Namespace helpme #endif // Header guard // original file: ../src/memory.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. Simmonett // // ENDLICENSE #ifndef _HELPME_MEMORY_H_ #define _HELPME_MEMORY_H_ #include <stdexcept> #include <vector> #include <fftw3.h> namespace helpme { /*! * \brief FFTWAllocator a class to handle aligned allocation of memory using the FFTW libraries. * Code is adapted from http://www.josuttis.com/cppcode/myalloc.hpp.html. */ template <class T> class FFTWAllocator { public: // type definitions typedef T value_type; typedef T* pointer; typedef const T* const_pointer; typedef T& reference; typedef const T& const_reference; typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; // rebind allocator to type U template <class U> struct rebind { typedef FFTWAllocator<U> other; }; // return address of values pointer address(reference value) const { return &value; } const_pointer address(const_reference value) const { return &value; } /* constructors and destructor * - nothing to do because the allocator has no state */ FFTWAllocator() throw() {} FFTWAllocator(const FFTWAllocator&) throw() {} template <class U> FFTWAllocator(const FFTWAllocator<U>&) throw() {} ~FFTWAllocator() throw() {} // return maximum number of elements that can be allocated size_type max_size() const throw() { return std::numeric_limits<std::size_t>::max() / sizeof(T); } // allocate but don't initialize num elements of type T pointer allocate(size_type num, const void* = 0) { return static_cast<pointer>(fftw_malloc(num * sizeof(T))); } // initialize elements of allocated storage p with value value void construct(pointer p, const T& value) { // initialize memory with placement new new ((void*)p) T(value); } // destroy elements of initialized storage p void destroy(pointer p) {} // deallocate storage p of deleted elements void deallocate(pointer p, size_type num) { fftw_free(static_cast<void*>(p)); } }; // return that all specializations of this allocator are interchangeable template <class T1, class T2> bool operator==(const FFTWAllocator<T1>&, const FFTWAllocator<T2>&) throw() { return true; } template <class T1, class T2> bool operator!=(const FFTWAllocator<T1>&, const FFTWAllocator<T2>&) throw() { return false; } template <typename Real> using vector = std::vector<Real, FFTWAllocator<Real>>; } // Namespace helpme #endif // Header guard namespace helpme { /*! * A helper function to transpose a dense matrix in place, gratuitously stolen from * https://stackoverflow.com/questions/9227747/in-place-transposition-of-a-matrix */ template <class RandomIterator> void transposeMemoryInPlace(RandomIterator first, RandomIterator last, int m) { const int mn1 = (last - first - 1); const int n = (last - first) / m; std::vector<bool> visited(last - first); RandomIterator cycle = first; while (++cycle != last) { if (visited[cycle - first]) continue; int a = cycle - first; do { a = a == mn1 ? mn1 : (n * a) % mn1; std::swap(*(first + a), *cycle); visited[a] = true; } while ((first + a) != cycle); } } /*! 
* \brief The Matrix class is designed to serve as a convenient wrapper to simplify 2D matrix operations. * It assumes dense matrices with contiguious data and the fast running index being the right * (column) index. The underlying memory may have already been allocated elsewhere by C, Fortran * or Python, and is directly manipulated in place, saving an expensive copy operation. To provide * read-only access to such memory address, use a const template type. */ template <typename Real> class Matrix { protected: /// The number of rows in the matrix. size_t nRows_; /// The number of columns in the matrix. size_t nCols_; /// A vector to conveniently allocate data, if we really need to. helpme::vector<Real> allocatedData_; /// Pointer to the raw data, whose allocation may not be controlled by us. Real* data_; public: enum class SortOrder { Ascending, Descending }; inline const Real& operator()(int row, int col) const { return *(data_ + row * nCols_ + col); } inline const Real& operator()(const std::pair<int, int>& indices) const { return *(data_ + std::get<0>(indices) * nCols_ + std::get<1>(indices)); } inline Real& operator()(int row, int col) { return *(data_ + row * nCols_ + col); } inline Real& operator()(const std::pair<int, int>& indices) { return *(data_ + std::get<0>(indices) * nCols_ + std::get<1>(indices)); } inline const Real* operator[](int row) const { return data_ + row * nCols_; } inline Real* operator[](int row) { return data_ + row * nCols_; } Real* begin() const { return data_; } Real* end() const { return data_ + nRows_ * nCols_; } const Real* cbegin() const { return data_; } const Real* cend() const { return data_ + nRows_ * nCols_; } /*! * \brief The sliceIterator struct provides a read-only view of a sub-block of a matrix, with arbitrary size. 
*/ struct sliceIterator { Real *begin_, *end_, *ptr_; size_t stride_; sliceIterator(Real* start, Real* end, size_t stride) : begin_(start), end_(end), ptr_(start), stride_(stride) {} sliceIterator begin() const { return sliceIterator(begin_, end_, stride_); } sliceIterator end() const { return sliceIterator(end_, end_, 0); } sliceIterator cbegin() const { return sliceIterator(begin_, end_, stride_); } sliceIterator cend() const { return sliceIterator(end_, end_, 0); } bool operator!=(const sliceIterator& other) { return ptr_ != other.ptr_; } sliceIterator operator*=(Real val) { for (auto& element : *this) element *= val; return *this; } sliceIterator operator/=(Real val) { Real invVal = 1 / val; for (auto& element : *this) element *= invVal; return *this; } sliceIterator operator-=(Real val) { for (auto& element : *this) element -= val; return *this; } sliceIterator operator+=(Real val) { for (auto& element : *this) element += val; return *this; } sliceIterator operator++() { ptr_ += stride_; return *this; } const Real& operator[](size_t index) { return *(begin_ + index); } size_t size() const { return std::distance(begin_, end_) / stride_; } void assertSameSize(const sliceIterator& other) const { if (size() != other.size()) throw std::runtime_error("Slice operations only supported for slices of the same size."); } void assertContiguous(const sliceIterator& iter) const { if (iter.stride_ != 1) throw std::runtime_error( "Slice operations called on operation that is only allowed for contiguous data."); } Matrix<Real> operator-(const sliceIterator& other) const { assertSameSize(other); assertContiguous(*this); assertContiguous(other); Matrix ret(1, size()); std::transform(begin_, end_, other.begin_, ret[0], [](const Real& a, const Real& b) -> Real { return a - b; }); return ret; } sliceIterator operator-=(const sliceIterator& other) const { assertSameSize(other); assertContiguous(*this); assertContiguous(other); std::transform(begin_, end_, other.begin_, begin_, [](const Real& a, const Real& b) -> Real { return a - b; }); return *this; } sliceIterator operator+=(const sliceIterator& other) const { assertSameSize(other); assertContiguous(*this); assertContiguous(other); std::transform(begin_, end_, other.begin_, begin_, [](const Real& a, const Real& b) -> Real { return a + b; }); return *this; } Real& operator*() { return *ptr_; } }; /*! * \brief row returns a read-only iterator over a given row. * \param r the row to return. * \return the slice in memory corresponding to the rth row. */ sliceIterator row(size_t r) const { return sliceIterator(data_ + r * nCols_, data_ + (r + 1) * nCols_, 1); } /*! * \brief col returns a read-only iterator over a given column. * \param c the column to return. * \return the slice in memory corresponding to the cth column. */ sliceIterator col(size_t c) const { return sliceIterator(data_ + c, data_ + nRows_ * nCols_ + c, nCols_); } /*! * \return the number of rows in this matrix. */ size_t nRows() const { return nRows_; } /*! * \return the number of columns in this matrix. */ size_t nCols() const { return nCols_; } /*! * \brief Matrix Constructs an empty matrix. */ Matrix() : nRows_(0), nCols_(0) {} /*! * \brief Matrix Constructs a new matrix, allocating memory. * \param nRows the number of rows in the matrix. * \param nCols the number of columns in the matrix. */ Matrix(size_t nRows, size_t nCols) : nRows_(nRows), nCols_(nCols), allocatedData_(nRows * nCols, 0), data_(allocatedData_.data()) {} /*! * \brief Matrix Constructs a new matrix, allocating memory. 
* \param filename the ASCII file from which to read this matrix */ Matrix(const std::string& filename) { Real tmp; std::ifstream inFile(filename); if (!inFile) { std::string msg("Unable to open file "); msg += filename; throw std::runtime_error(msg); } inFile >> nRows_; inFile >> nCols_; while (inFile >> tmp) allocatedData_.push_back(tmp); inFile.close(); if (nRows_ * nCols_ != allocatedData_.size()) { allocatedData_.clear(); std::string msg("Inconsistent dimensions in "); msg += filename; msg += ". Amount of data inconsistent with declared size."; throw std::runtime_error(msg); } allocatedData_.shrink_to_fit(); data_ = allocatedData_.data(); }
/*! * \brief Matrix Constructs a new matrix, allocating memory and initializing values using the braced initializer. * \param data a braced initializer list of braced initializer lists containing the values to be stored in the * matrix. */ Matrix(std::initializer_list<std::initializer_list<Real>> data) { nRows_ = data.size(); nCols_ = nRows_ ? data.begin()->size() : 0; allocatedData_.reserve(nRows_ * nCols_); for (auto& row : data) { if (row.size() != nCols_) throw std::runtime_error("Inconsistent row dimensions in matrix specification."); allocatedData_.insert(allocatedData_.end(), row.begin(), row.end()); } data_ = allocatedData_.data(); }
/*! * \brief Matrix Constructs a new column vector, allocating memory and initializing values using the braced * initializer. \param data a braced initializer list containing the values to be stored * in the matrix. */ Matrix(std::initializer_list<Real> data) : allocatedData_(data), data_(allocatedData_.data()) { nRows_ = data.size(); nCols_ = 1; }
/*! * \brief Matrix Constructs a new matrix using already allocated memory. * \param ptr the already-allocated memory underlying this matrix. * \param nRows the number of rows in the matrix. * \param nCols the number of columns in the matrix. */ Matrix(Real* ptr, size_t nRows, size_t nCols) : nRows_(nRows), nCols_(nCols), data_(ptr) {}
/*! * \brief cast make a copy of this matrix, with its elements cast as a different type. * \tparam NewReal the type to cast each element to. * \return the copy of the matrix with the new type. */ template <typename NewReal> Matrix<NewReal> cast() const { Matrix<NewReal> newMat(nRows_, nCols_); NewReal* newPtr = newMat[0]; const Real* dataPtr = data_; for (size_t addr = 0; addr < nRows_ * nCols_; ++addr) *newPtr++ = static_cast<NewReal>(*dataPtr++); return newMat; }
/*! * \brief setConstant sets all elements of this matrix to a specified value. * \param value the value to set each element to. */ void setConstant(Real value) { std::fill(begin(), end(), value); } /*! * \brief setZero sets each element of this matrix to zero. */ void setZero() { setConstant(0); }
/*! * \brief isNearZero checks that each element in this matrix has an absolute value below some threshold. * \param threshold the value below which an element is considered zero. * \return whether all values are near zero or not. */ bool isNearZero(Real threshold = 1e-10f) const { return !std::any_of(cbegin(), cend(), [&](const Real& val) { return std::abs(val) > threshold; }); }
/*! * \brief inverse inverts this matrix, leaving the original matrix untouched. * \return the inverse of this matrix. */ Matrix inverse() const { assertSquare(); Matrix matrixInverse(nRows_, nRows_); if (nRows() == 3) { // 3x3 is a really common case, so treat it here as a special case.
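// Closed form via the adjugate: each entry assigned below is a transposed
// 2x2 cofactor of A, scaled by 1/det(A).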
Real determinant = data_[0] * (data_[4] * data_[8] - data_[7] * data_[5]) - data_[1] * (data_[3] * data_[8] - data_[5] * data_[6]) + data_[2] * (data_[3] * data_[7] - data_[4] * data_[6]); Real determinantInverse = 1 / determinant; matrixInverse.data_[0] = (data_[4] * data_[8] - data_[7] * data_[5]) * determinantInverse; matrixInverse.data_[1] = (data_[2] * data_[7] - data_[1] * data_[8]) * determinantInverse; matrixInverse.data_[2] = (data_[1] * data_[5] - data_[2] * data_[4]) * determinantInverse; matrixInverse.data_[3] = (data_[5] * data_[6] - data_[3] * data_[8]) * determinantInverse; matrixInverse.data_[4] = (data_[0] * data_[8] - data_[2] * data_[6]) * determinantInverse; matrixInverse.data_[5] = (data_[3] * data_[2] - data_[0] * data_[5]) * determinantInverse; matrixInverse.data_[6] = (data_[3] * data_[7] - data_[6] * data_[4]) * determinantInverse; matrixInverse.data_[7] = (data_[6] * data_[1] - data_[0] * data_[7]) * determinantInverse; matrixInverse.data_[8] = (data_[0] * data_[4] - data_[3] * data_[1]) * determinantInverse; } else { // Generic case; just use spectral decomposition, invert the eigenvalues, and stitch back together. // Note that this only works for symmetric matrices. Need to hook into Lapack for a general // inversion routine if this becomes a limitation. return this->applyOperation([](Real& element) { element = 1 / element; }); } return matrixInverse; }
/*! * \brief assertSymmetric checks that this matrix is symmetric within some threshold. * \param threshold the value below which a pair's difference is considered zero. */ void assertSymmetric(const Real& threshold = 1e-10f) const { assertSquare(); for (int row = 0; row < nRows_; ++row) { for (int col = 0; col < row; ++col) { if (std::abs(data_[row * nCols_ + col] - data_[col * nCols_ + row]) > threshold) throw std::runtime_error("Unexpected non-symmetric matrix found."); } } }
/*! * \brief applyOperationToEachElement modifies every element in the matrix by applying an operation. * \param function a unary operator describing the operation to perform. */ void applyOperationToEachElement(const std::function<void(Real&)>& function) { std::for_each(begin(), end(), function); }
/*! * \brief applyOperation applies an operation to this matrix using the spectral decomposition, * leaving the original untouched. Only for symmetric matrices, as coded. * \param function a unary operator describing the operation to perform. * \return the matrix transformed by the operator. */ Matrix applyOperation(const std::function<void(Real&)>& function) const { assertSymmetric(); auto eigenPairs = this->diagonalize(); Matrix evalsReal = std::get<0>(eigenPairs); Matrix evecs = std::get<1>(eigenPairs); evalsReal.applyOperationToEachElement(function); Matrix evecsT = evecs.transpose(); for (int row = 0; row < nRows_; ++row) { Real transformedEigenvalue = evalsReal[row][0]; std::for_each(evecsT.data_ + row * nCols_, evecsT.data_ + (row + 1) * nCols_, [&](Real& val) { val *= transformedEigenvalue; }); } return evecs * evecsT; }
/*! * \brief assertSameSize make sure that this Matrix has the same dimensions as another Matrix. * \param other the matrix to compare to. */ void assertSameSize(const Matrix& other) const { if (nRows_ != other.nRows_ || nCols_ != other.nCols_) throw std::runtime_error("Attempting to compare matrices of different sizes!"); }
/*! * \brief assertSquare make sure that this Matrix is square.
*/ void assertSquare() const { if (nRows_ != nCols_) throw std::runtime_error("Attempting to perform a square matrix operation on a non-square matrix!"); }
/*! * \brief multiply this matrix with another, returning a new matrix containing the product. * \param other the right hand side of the matrix product. * \return the product of this matrix with the matrix other. */ Matrix multiply(const Matrix& other) const { // TODO one fine day this should be replaced by GEMM calls, if matrix multiplies actually get used much. if (nCols_ != other.nRows_) throw std::runtime_error("Attempting to multiply matrices with incompatible dimensions."); Matrix product(nRows_, other.nCols_); Real* output = product.data_; for (int row = 0; row < nRows_; ++row) { const Real* rowPtr = data_ + row * nCols_; for (int col = 0; col < other.nCols_; ++col) { for (int link = 0; link < nCols_; ++link) { *output += rowPtr[link] * other.data_[link * other.nCols_ + col]; } ++output; } } return product; }
/*! * \brief operator * a convenient wrapper for the multiply function. * \param other the right hand side of the matrix product. * \return the product of this matrix with the matrix other. */ Matrix operator*(const Matrix& other) const { return this->multiply(other); }
/*! * \brief increment this matrix with another, returning a new matrix containing the sum. * \param other the right hand side of the matrix sum. * \return the sum of this matrix and the matrix other. */ Matrix incrementWith(const Matrix& other) { assertSameSize(other); std::transform(begin(), end(), other.begin(), begin(), [](const Real& a, const Real& b) -> Real { return a + b; }); return *this; }
/*! * \brief a wrapper around the incrementWith() function. * \param other the right hand side of the matrix sum. * \return the sum of this matrix and the matrix other. */ Matrix operator+=(const Matrix& other) { return this->incrementWith(other); }
/*! * \brief almostEquals checks that two matrices have all elements the same, within some specified tolerance. * \param other the matrix against which we're comparing. * \param tolerance the amount that each element is allowed to deviate by. * \return whether the two matrices are almost equal. */ template <typename T = Real, typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0> bool almostEquals(const Matrix& other, Real tolerance = 1e-6) const { // The floating point version assertSameSize(other); return std::equal(cbegin(), cend(), other.cbegin(), [&tolerance](Real a, Real b) -> bool { return (((a - b) < std::real(tolerance)) && ((a - b) > -std::real(tolerance))); }); } template <typename T = Real, typename std::enable_if<!std::is_floating_point<T>::value, int>::type = 0> bool almostEquals(const Matrix& other, Real tolerance = 1e-6) const { // The complex version assertSameSize(other); auto tol = std::real(tolerance); // This is a little confusing, but the type "Real" is actually some kind of std::complex<...>. return std::equal(cbegin(), cend(), other.cbegin(), [&tol](Real a, Real b) -> bool { return (((a.real() - b.real()) < tol) && ((a.real() - b.real()) > -tol) && ((a.imag() - b.imag()) < tol) && ((a.imag() - b.imag()) > -tol)); }); }
/*! * \brief dot computes the inner product of this matrix with another. * \param other the other matrix in the inner product, which must have the same dimensions. * \return the inner product. */ Real dot(const Matrix& other) const { assertSameSize(other); return std::inner_product(cbegin(), cend(), other.cbegin(), Real(0)); } /*!
* \brief write formatted matrix to a stream object. * \param os stream object to write to. * \return modified stream object. */ std::ostream& write(std::ostream& os) const { for (int row = 0; row < nRows_; ++row) os << stringify(data_ + row * nCols_, nCols_, nCols_); os << std::endl; return os; } /*! * \brief transposeInPlace transposes this matrix in place. */ void transposeInPlace() { transposeMemoryInPlace(begin(), end(), nCols_); std::swap(nCols_, nRows_); } /*! * \brief clone make a new copy of this matrix by deep copying the data. * \return the copy of this matrix. */ Matrix clone() const { Matrix newMatrix = Matrix(nRows_, nCols_); std::copy(cbegin(), cend(), newMatrix.begin()); return newMatrix; } /*! * \brief transpose this matrix, leaving the original untouched. * \return a transposed deep copy of this matrix. */ Matrix transpose() const { Matrix copy = this->clone(); copy.transposeInPlace(); return copy; } /*! * \brief diagonalize diagonalize this matrix, leaving the original untouched. Note that this assumes * that this matrix is real and symmetric. * \param order how to order the (eigenvalue,eigenvector) pairs, where the sort key is the eigenvalue. * \return a pair of corresponding <eigenvalue , eigenvectors> sorted according to the order variable. * The eigenvectors are stored by column. */ std::pair<Matrix<Real>, Matrix<Real>> diagonalize(SortOrder order = SortOrder::Ascending) const { assertSymmetric(); Matrix eigenValues(nRows_, 1); Matrix unsortedEigenVectors(nRows_, nRows_); Matrix sortedEigenVectors(nRows_, nRows_); JacobiCyclicDiagonalization<Real>(eigenValues[0], unsortedEigenVectors[0], cbegin(), nRows_); unsortedEigenVectors.transposeInPlace(); std::vector<std::pair<Real, const Real*>> eigenPairs; for (int val = 0; val < nRows_; ++val) eigenPairs.push_back({eigenValues[val][0], unsortedEigenVectors[val]}); std::sort(eigenPairs.begin(), eigenPairs.end()); if (order == SortOrder::Descending) std::reverse(eigenPairs.begin(), eigenPairs.end()); for (int val = 0; val < nRows_; ++val) { const auto& e = eigenPairs[val]; eigenValues.data_[val] = std::get<0>(e); std::copy(std::get<1>(e), std::get<1>(e) + nCols_, sortedEigenVectors[val]); } sortedEigenVectors.transposeInPlace(); return {std::move(eigenValues), std::move(sortedEigenVectors)}; } }; /*! * A helper function to allow printing of Matrix objects to a stream. */ template <typename Real> std::ostream& operator<<(std::ostream& os, Matrix<Real> const& m) { return m.write(os); } } // Namespace helpme #endif // Header guard #include <vector> namespace helpme { static inline int cartesianAddress(int lx, int ly, int lz) { int l = lx + ly + lz; return lz * (2 * l - lz + 3) / 2 + ly; } /*! * \brief makeCartesianRotationMatrix builds a rotation matrix for unique Cartesian * components with a given angular momentum. The algorithm used here is the simple * version (eq. 18) from D. M. Elking, J. Comp. Chem., 37 2067 (2016). It's definitely * not the fastest way to do it, but will be revisited if profiling shows it to be an issue. * \param angularMomentum the angular momentum of the rotation matrix desired. * \param transformer the matrix R to do the transform defined for a dipole as µ_new = R . µ_old. 
* \return the rotation matrix */ template <typename Real> Matrix<Real> makeCartesianRotationMatrix(int angularMomentum, const Matrix<Real> &transformer) { Real R00 = transformer[0][0]; Real R01 = transformer[0][1]; Real R02 = transformer[0][2]; Real R10 = transformer[1][0]; Real R11 = transformer[1][1]; Real R12 = transformer[1][2]; Real R20 = transformer[2][0]; Real R21 = transformer[2][1]; Real R22 = transformer[2][2]; int nComponents = (angularMomentum + 1) * (angularMomentum + 2) / 2; auto factorial = std::vector<int>(2 * angularMomentum + 1); factorial[0] = 1; for (int l = 1; l <= 2 * angularMomentum; ++l) factorial[l] = l * factorial[l - 1]; Matrix<Real> R(nComponents, nComponents); for (int nz = 0; nz <= angularMomentum; ++nz) { for (int ny = 0; ny <= angularMomentum - nz; ++ny) { int nx = angularMomentum - ny - nz; for (int pz = 0; pz <= nx; ++pz) { for (int py = 0; py <= nx - pz; ++py) { int px = nx - py - pz; for (int qz = 0; qz <= ny; ++qz) { for (int qy = 0; qy <= ny - qz; ++qy) { int qx = ny - qy - qz; for (int rz = 0; rz <= nz; ++rz) { for (int ry = 0; ry <= nz - rz; ++ry) { int rx = nz - ry - rz; int mx = px + qx + rx; int my = py + qy + ry; int mz = pz + qz + rz; int m = mx + my + mz; if (m == angularMomentum) { Real normx = factorial[mx] / (factorial[px] * factorial[qx] * factorial[rx]); Real normy = factorial[my] / (factorial[py] * factorial[qy] * factorial[ry]); Real normz = factorial[mz] / (factorial[pz] * factorial[qz] * factorial[rz]); Real Rx = std::pow(R00, px) * std::pow(R10, py) * std::pow(R20, pz); Real Ry = std::pow(R01, qx) * std::pow(R11, qy) * std::pow(R21, qz); Real Rz = std::pow(R02, rx) * std::pow(R12, ry) * std::pow(R22, rz); Real term = normx * normy * normz * Rx * Ry * Rz; R[cartesianAddress(mx, my, mz)][cartesianAddress(nx, ny, nz)] += term; } } } } } } } } } return R; } /*! * \brief matrixVectorProduct A naive implementation of matrix-vector products, avoiding BLAS requirements (for now). * \param transformer the transformation matrix. * \param inputVector the vector to be transformed. * \param outputVector the transformed vector. */ template <typename Real> void matrixVectorProduct(const Matrix<Real> &transformer, const Real *inputVector, Real *outputVector) { int dimension = transformer.nRows(); for (int row = 0; row < dimension; ++row) { outputVector[row] = std::inner_product(inputVector, inputVector + dimension, transformer[row], Real(0)); } } /*! * \brief cartesianTransform transforms a list of a cartesian quantities to a different basis. * Assumes a list of quantities are to be transformed (in place) and all angular momentum * components up to and including the specified maximum are present in ascending A.M. order. * \param maxAngularMomentum the maximum angular momentum of the incoming quantity. * \param transformer the matrix R to do the transform defined for a dipole as µ_new = R . µ_old. * \param transformee the quantity to be transformed, stored as nAtoms X nComponents, with * components being the fast running index. 
*/ template <typename Real> Matrix<Real> cartesianTransform(int maxAngularMomentum, const Matrix<Real> &transformer, const Matrix<Real> &transformee) { Matrix<Real> transformed = transformee.clone(); int offset = 1; int nAtoms = transformee.nRows(); for (int angularMomentum = 1; angularMomentum <= maxAngularMomentum; ++angularMomentum) { auto rotationMatrix = makeCartesianRotationMatrix(angularMomentum, transformer); for (int atom = 0; atom < nAtoms; ++atom) { const Real *inputData = transformee[atom]; Real *outputData = transformed[atom]; matrixVectorProduct(rotationMatrix, inputData + offset, outputData + offset); } offset += (angularMomentum + 1) * (angularMomentum + 2) / 2; } return transformed; } } // Namespace helpme #endif // Header guard // original file: ../src/fftw_wrapper.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. Simmonett // // ENDLICENSE #ifndef _HELPME_FFTW_WRAPPER_H_ #define _HELPME_FFTW_WRAPPER_H_ #include <complex> #include <iostream> #include <limits> #include <stdexcept> #include <type_traits> #include <fftw3.h> // #include "memory.h" namespace helpme { /*! * \brief The FFTWTypes class is a placeholder to lookup function names and types in FFTW parlance by template. */ template <typename Real> struct FFTWTypes { struct EmptyPlan { int unused; }; using Plan = int; using Complex = std::complex<int>; static Plan makePlan4(size_t, void *, void *, int) { return 0; }; static Plan makePlan5(size_t, void *, void *, int, int) { return 0; }; static void execPlan1(Plan){}; static void execPlan3(Plan, void *, void *){}; static constexpr bool isImplemented = false; static constexpr decltype(&makePlan4) MakeRealToComplexPlan = &makePlan4; static constexpr decltype(&makePlan4) MakeComplexToRealPlan = &makePlan4; static constexpr decltype(&makePlan5) MakeComplexToComplexPlan = &makePlan5; static constexpr decltype(&execPlan3) ExecuteRealToComplexPlan = &execPlan3; static constexpr decltype(&execPlan3) ExecuteComplexToRealPlan = &execPlan3; static constexpr decltype(&execPlan3) ExecuteComplexToComplexPlan = &execPlan3; static constexpr decltype(&execPlan1) DestroyPlan = &execPlan1; static constexpr decltype(&execPlan1) CleanupFFTW = &execPlan1; }; #if HAVE_FFTWF == 1 template <> struct FFTWTypes<float> { using Plan = fftwf_plan; using Complex = fftwf_complex; static constexpr bool isImplemented = true; static constexpr decltype(&fftwf_plan_dft_r2c_1d) MakeRealToComplexPlan = &fftwf_plan_dft_r2c_1d; static constexpr decltype(&fftwf_plan_dft_c2r_1d) MakeComplexToRealPlan = &fftwf_plan_dft_c2r_1d; static constexpr decltype(&fftwf_plan_dft_1d) MakeComplexToComplexPlan = &fftwf_plan_dft_1d; static constexpr decltype(&fftwf_execute_dft_r2c) ExecuteRealToComplexPlan = &fftwf_execute_dft_r2c; static constexpr decltype(&fftwf_execute_dft_c2r) ExecuteComplexToRealPlan = &fftwf_execute_dft_c2r; static constexpr decltype(&fftwf_execute_dft) ExecuteComplexToComplexPlan = &fftwf_execute_dft; static constexpr decltype(&fftwf_destroy_plan) DestroyPlan = &fftwf_destroy_plan; static constexpr decltype(&fftwf_cleanup) CleanupFFTW = &fftwf_cleanup; }; #endif // HAVE_FFTWF #if HAVE_FFTWD == 1 template <> struct FFTWTypes<double> { using Plan = fftw_plan; using Complex = fftw_complex; static constexpr bool isImplemented = true; static constexpr decltype(&fftw_plan_dft_r2c_1d) MakeRealToComplexPlan = &fftw_plan_dft_r2c_1d; static constexpr 
decltype(&fftw_plan_dft_c2r_1d) MakeComplexToRealPlan = &fftw_plan_dft_c2r_1d; static constexpr decltype(&fftw_plan_dft_1d) MakeComplexToComplexPlan = &fftw_plan_dft_1d; static constexpr decltype(&fftw_execute_dft_r2c) ExecuteRealToComplexPlan = &fftw_execute_dft_r2c; static constexpr decltype(&fftw_execute_dft_c2r) ExecuteComplexToRealPlan = &fftw_execute_dft_c2r; static constexpr decltype(&fftw_execute_dft) ExecuteComplexToComplexPlan = &fftw_execute_dft; static constexpr decltype(&fftw_destroy_plan) DestroyPlan = &fftw_destroy_plan; static constexpr decltype(&fftw_cleanup) CleanupFFTW = &fftw_cleanup; }; #endif // HAVE_FFTWD #if HAVE_FFTWL == 1 template <> struct FFTWTypes<long double> { using Plan = fftwl_plan; using Complex = fftwl_complex; static constexpr bool isImplemented = true; static constexpr decltype(&fftwl_plan_dft_r2c_1d) MakeRealToComplexPlan = &fftwl_plan_dft_r2c_1d; static constexpr decltype(&fftwl_plan_dft_c2r_1d) MakeComplexToRealPlan = &fftwl_plan_dft_c2r_1d; static constexpr decltype(&fftwl_plan_dft_1d) MakeComplexToComplexPlan = &fftwl_plan_dft_1d; static constexpr decltype(&fftwl_execute_dft_r2c) ExecuteRealToComplexPlan = &fftwl_execute_dft_r2c; static constexpr decltype(&fftwl_execute_dft_c2r) ExecuteComplexToRealPlan = &fftwl_execute_dft_c2r; static constexpr decltype(&fftwl_execute_dft) ExecuteComplexToComplexPlan = &fftwl_execute_dft; static constexpr decltype(&fftwl_destroy_plan) DestroyPlan = &fftwl_destroy_plan; static constexpr decltype(&fftwl_cleanup) CleanupFFTW = &fftwl_cleanup; }; #endif // HAVE_FFTWL /*! * \brief The FFTWWrapper class is a convenient wrapper to abstract away the details of different * precision modes for FFTW, where the types and function names differ. */ template <typename Real> class FFTWWrapper { using typeinfo = FFTWTypes<Real>; using Plan = typename typeinfo::Plan; using Complex = typename typeinfo::Complex; protected: /// An FFTW plan object, describing out of place complex to complex forward transforms. typename typeinfo::Plan forwardPlan_; /// An FFTW plan object, describing out of place complex to complex inverse transforms. typename typeinfo::Plan inversePlan_; /// An FFTW plan object, describing in place complex to complex forward transforms. typename typeinfo::Plan forwardInPlacePlan_; /// An FFTW plan object, describing in place complex to complex inverse transforms. typename typeinfo::Plan inverseInPlacePlan_; /// An FFTW plan object, describing out of place real to complex forward transforms. typename typeinfo::Plan realToComplexPlan_; /// An FFTW plan object, describing out of place complex to real inverse transforms. typename typeinfo::Plan complexToRealPlan_; /// The size of the real data. size_t fftDimension_; /// The flags to be passed to the FFTW plan creator, to determine startup cost. unsigned transformFlags_; public: FFTWWrapper() {} FFTWWrapper(size_t fftDimension) : fftDimension_(fftDimension), transformFlags_(FFTW_ESTIMATE) { if (!typeinfo::isImplemented) { throw std::runtime_error( "Attempting to call FFTW using a precision mode that has not been linked. 
" "Make sure that -DHAVE_FFTWF=1, -DHAVE_FFTWD=1 or -DHAVE_FFTWL=1 is added to the compiler flags" "for single, double and long double precision support, respectively."); } helpme::vector<Real> realTemp(fftDimension_); helpme::vector<std::complex<Real>> complexTemp1(fftDimension_); helpme::vector<std::complex<Real>> complexTemp2(fftDimension_); Real *realPtr = realTemp.data(); Complex *complexPtr1 = reinterpret_cast<Complex *>(complexTemp1.data()); Complex *complexPtr2 = reinterpret_cast<Complex *>(complexTemp2.data()); forwardPlan_ = typeinfo::MakeComplexToComplexPlan(fftDimension_, complexPtr1, complexPtr2, FFTW_FORWARD, transformFlags_); inversePlan_ = typeinfo::MakeComplexToComplexPlan(fftDimension_, complexPtr1, complexPtr2, FFTW_BACKWARD, transformFlags_); forwardInPlacePlan_ = typeinfo::MakeComplexToComplexPlan(fftDimension_, complexPtr1, complexPtr1, FFTW_FORWARD, transformFlags_); inverseInPlacePlan_ = typeinfo::MakeComplexToComplexPlan(fftDimension_, complexPtr1, complexPtr1, FFTW_BACKWARD, transformFlags_); realToComplexPlan_ = typeinfo::MakeRealToComplexPlan(fftDimension_, realPtr, complexPtr1, transformFlags_); complexToRealPlan_ = typeinfo::MakeComplexToRealPlan(fftDimension_, complexPtr1, realPtr, transformFlags_); } /*! * \brief transform call FFTW to do an out of place complex to real FFT. * \param inBuffer the location of the input data. * \param outBuffer the location of the output data. */ void transform(std::complex<Real> *inBuffer, Real *outBuffer) { typeinfo::ExecuteComplexToRealPlan(complexToRealPlan_, reinterpret_cast<Complex *>(inBuffer), outBuffer); } /*! * \brief transform call FFTW to do an out of place real to complex FFT. * \param inBuffer the location of the input data. * \param outBuffer the location of the output data. */ void transform(Real *inBuffer, std::complex<Real> *outBuffer) { typeinfo::ExecuteRealToComplexPlan(realToComplexPlan_, inBuffer, reinterpret_cast<Complex *>(outBuffer)); } /*! * \brief transform call FFTW to do an in place complex to complex FFT. * \param inPlaceBuffer the location of the input and output data. * \param direction either FFTW_FORWARD or FFTW_BACKWARD. */ void transform(std::complex<Real> *inPlaceBuffer, int direction) { Complex *inPlacePtr = reinterpret_cast<Complex *>(inPlaceBuffer); switch (direction) { case FFTW_FORWARD: typeinfo::ExecuteComplexToComplexPlan(forwardInPlacePlan_, inPlacePtr, inPlacePtr); break; case FFTW_BACKWARD: typeinfo::ExecuteComplexToComplexPlan(inverseInPlacePlan_, inPlacePtr, inPlacePtr); break; default: throw std::runtime_error("Invalid FFTW transform passed to in place transform()."); } } /*! * \brief transform call FFTW to do an out of place complex to complex FFT. * \param inBuffer the location of the input data. * \param outBuffer the location of the output data. * \param direction either FFTW_FORWARD or FFTW_BACKWARD. 
*/ void transform(std::complex<Real> *inBuffer, std::complex<Real> *outBuffer, int direction) { Complex *inPtr = reinterpret_cast<Complex *>(inBuffer); Complex *outPtr = reinterpret_cast<Complex *>(outBuffer); switch (direction) { case FFTW_FORWARD: typeinfo::ExecuteComplexToComplexPlan(forwardPlan_, inPtr, outPtr); break; case FFTW_BACKWARD: typeinfo::ExecuteComplexToComplexPlan(inversePlan_, inPtr, outPtr); break; default: throw std::runtime_error("Invalid FFTW transform passed to transform()."); } } }; } // Namespace helpme #endif // Header guard // original file: ../src/gamma.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. Simmonett // // ENDLICENSE #ifndef _HELPME_GAMMA_H_ #define _HELPME_GAMMA_H_ #include <cmath> #include <limits> /*! * \file gamma.h * \brief Contains C++ implementations of templated gamma and incomplete gamma functions, computed using recursion. */ namespace helpme { constexpr long double sqrtPi = 1.77245385090551602729816748334114518279754945612238712821381L; /*! * Compute upper incomplete gamma functions for positive half-integral s values using the recursion * \f$ \Gamma[\frac{\mathrm{twoS}}{2},x] = \Gamma[\frac{\mathrm{twoS}-2}{2},x] + x^{\frac{\mathrm{twoS}-2}{2}}e^{-x}\f$ */ template <typename Real, int twoS, bool isPositive> struct incompleteGammaRecursion { static Real compute(Real x) { return (0.5f * twoS - 1) * incompleteGammaRecursion<Real, twoS - 2, isPositive>::compute(x) + pow(x, (0.5f * twoS - 1)) * exp(-x); } }; /*! * Compute upper incomplete gamma functions for negative half-integral s values using the recursion * \f$ \Gamma[\frac{\mathrm{twoS}}{2},x] = \frac{2\Gamma[\frac{\mathrm{twoS}+2}{2},x] - * 2x^\frac{\mathrm{twoS}}{2}e^{-x}}{\mathrm{twoS}}\f$ */ template <typename Real, int twoS> struct incompleteGammaRecursion<Real, twoS, false> { static Real compute(Real x) { return (incompleteGammaRecursion<Real, twoS + 2, false>::compute(x) - pow(x, 0.5f * twoS) * exp(-x)) / (0.5f * twoS); } }; /// Specific value of incomplete gamma function. template <typename Real> struct incompleteGammaRecursion<Real, 2, true> { static Real compute(Real x) { return exp(-x); } }; /// Specific value of incomplete gamma function. template <typename Real> struct incompleteGammaRecursion<Real, 1, false> { static Real compute(Real x) { return sqrtPi * erfc(std::sqrt(x)); } }; /// Specific value of incomplete gamma function. template <typename Real> struct incompleteGammaRecursion<Real, 1, true> { static Real compute(Real x) { return sqrtPi * erfc(std::sqrt(x)); } }; /// Specific value of incomplete gamma function. template <typename Real> struct incompleteGammaRecursion<Real, 0, false> { static Real compute(Real x) { // Gamma(0,x) is (minus) the exponential integral of -x. 
This implementation was stolen from // http://www.mymathlib.com/c_source/functions/exponential_integrals/exponential_integral_Ei.c x = -x; if (x < -5.0L) return -(Real)Continued_Fraction_Ei(x); if (x == 0.0L) return std::numeric_limits<Real>::max(); if (x < 6.8L) return -(Real)Power_Series_Ei(x); if (x < 50.0L) return -(Real)Argument_Addition_Series_Ei(x); return -(Real)Continued_Fraction_Ei(x); } private: static constexpr long double epsilon = 10.0 * std::numeric_limits<long double>::epsilon(); //////////////////////////////////////////////////////////////////////////////// // static long double Continued_Fraction_Ei( long double x ) // // // // Description: // // For x < -5 or x > 50, the continued fraction representation of Ei // // converges fairly rapidly. // // // // The continued fraction expansion of Ei(x) is: // // Ei(x) = -exp(x) { 1/(-x+1-) 1/(-x+3-) 4/(-x+5-) 9/(-x+7-) ... }. // // // // // // Arguments: // // long double x // // The argument of the exponential integral Ei(). // // // // Return Value: // // The value of the exponential integral Ei evaluated at x. // //////////////////////////////////////////////////////////////////////////////// static long double Continued_Fraction_Ei(long double x) { long double Am1 = 1.0L; long double A0 = 0.0L; long double Bm1 = 0.0L; long double B0 = 1.0L; long double a = std::exp(x); long double b = -x + 1.0L; long double Ap1 = b * A0 + a * Am1; long double Bp1 = b * B0 + a * Bm1; int j = 1; a = 1.0L; while (std::fabs(Ap1 * B0 - A0 * Bp1) > epsilon * std::fabs(A0 * Bp1)) { if (std::fabs(Bp1) > 1.0L) { Am1 = A0 / Bp1; A0 = Ap1 / Bp1; Bm1 = B0 / Bp1; B0 = 1.0L; } else { Am1 = A0; A0 = Ap1; Bm1 = B0; B0 = Bp1; } a = -j * j; b += 2.0L; Ap1 = b * A0 + a * Am1; Bp1 = b * B0 + a * Bm1; j += 1; } return (-Ap1 / Bp1); } //////////////////////////////////////////////////////////////////////////////// // static long double Power_Series_Ei( long double x ) // // // // Description: // // For -5 < x < 6.8, the power series representation for (Ei(x) - gamma // // - ln|x|)/exp(x) is used, where gamma is Euler's gamma constant. // // Note that for x = 0.0, Ei is -inf. In which case -DBL_MAX is // // returned. // // // // The power series expansion of (Ei(x) - gamma - ln|x|) / exp(x) is // // - Sum(1 + 1/2 + ... + 1/j) (-x)^j / j!, where the Sum extends // // from j = 1 to inf. // // // // Arguments: // // long double x // // The argument of the exponential integral Ei(). // // // // Return Value: // // The value of the exponential integral Ei evaluated at x. // //////////////////////////////////////////////////////////////////////////////// static long double Power_Series_Ei(long double x) { long double xn = -x; long double Sn = -x; long double Sm1 = 0.0L; long double hsum = 1.0L; long double g = 0.5772156649015328606065121L; long double y = 1.0L; long double factorial = 1.0L; while (std::fabs(Sn - Sm1) > epsilon * std::fabs(Sm1)) { Sm1 = Sn; y += 1.0L; xn *= (-x); factorial *= y; hsum += (1.0 / y); Sn += hsum * xn / factorial; } return (g + std::log(std::fabs(x)) - std::exp(x) * Sn); } //////////////////////////////////////////////////////////////////////////////// // static long double Argument_Addition_Series_Ei(long double x) // // // // Description: // // For 6.8 < x < 50.0, the argument addition series is used to calculate // // Ei. // // // // The argument addition series for Ei(x) is: // // Ei(x+dx) = Ei(x) + exp(x) Sum j! 
[exp(j) expj(-dx) - 1] / x^(j+1), // // where the Sum extends from j = 0 to inf, |x| > |dx| and expj(y) is // // the exponential polynomial expj(y) = Sum y^k / k!, // // the Sum extending from k = 0 to k = j. // // // // Arguments: // // long double x // // The argument of the exponential integral Ei(). // // // // Return Value: // // The value of the exponential integral Ei evaluated at x. // //////////////////////////////////////////////////////////////////////////////// static long double Argument_Addition_Series_Ei(long double x) { static long double ei[] = { 1.915047433355013959531e2L, 4.403798995348382689974e2L, 1.037878290717089587658e3L, 2.492228976241877759138e3L, 6.071406374098611507965e3L, 1.495953266639752885229e4L, 3.719768849068903560439e4L, 9.319251363396537129882e4L, 2.349558524907683035782e5L, 5.955609986708370018502e5L, 1.516637894042516884433e6L, 3.877904330597443502996e6L, 9.950907251046844760026e6L, 2.561565266405658882048e7L, 6.612718635548492136250e7L, 1.711446713003636684975e8L, 4.439663698302712208698e8L, 1.154115391849182948287e9L, 3.005950906525548689841e9L, 7.842940991898186370453e9L, 2.049649711988081236484e10L, 5.364511859231469415605e10L, 1.405991957584069047340e11L, 3.689732094072741970640e11L, 9.694555759683939661662e11L, 2.550043566357786926147e12L, 6.714640184076497558707e12L, 1.769803724411626854310e13L, 4.669055014466159544500e13L, 1.232852079912097685431e14L, 3.257988998672263996790e14L, 8.616388199965786544948e14L, 2.280446200301902595341e15L, 6.039718263611241578359e15L, 1.600664914324504111070e16L, 4.244796092136850759368e16L, 1.126348290166966760275e17L, 2.990444718632336675058e17L, 7.943916035704453771510e17L, 2.111342388647824195000e18L, 5.614329680810343111535e18L, 1.493630213112993142255e19L, 3.975442747903744836007e19L, 1.058563689713169096306e20L}; int k = (int)(x + 0.5f); int j = 0; long double xx = (long double)k; long double dx = x - xx; long double xxj = xx; long double edx = std::exp(dx); long double Sm = 1.0L; long double Sn = (edx - 1.0L) / xxj; long double term = std::numeric_limits<double>::max(); long double factorial = 1.0L; long double dxj = 1.0L; while (std::fabs(term) > epsilon * std::fabs(Sn)) { j++; factorial *= (long double)j; xxj *= xx; dxj *= (-dx); Sm += (dxj / factorial); term = (factorial * (edx * Sm - 1.0L)) / xxj; Sn += term; } return ei[k - 7] + Sn * std::exp(xx); } }; /*! * Compute gamma function for positive half-integral s values using the recursion. * \f$ \Gamma[\frac{\mathrm{twoS}}{2}] = \Gamma[\frac{\mathrm{twoS}-2}{2}]\frac{\mathrm{twoS}-2}{2} \f$ */ template <typename Real, int twoS, bool isPositive> struct gammaRecursion { static constexpr Real value = gammaRecursion<Real, twoS - 2, isPositive>::value * (0.5f * twoS - 1); }; /*! * Compute gamma function for negative half-integral s values using the recursion. * \f$ \Gamma[\frac{\mathrm{twoS}}{2}] = \frac{2\Gamma[\frac{\mathrm{twoS}_2}{2}]}{\mathrm{twoS}} \f$ * Returns infinity (expressed as the largest value representable by Real) for \f$twoS = 0, -2, -4, -6, \ldots\f$ . */ template <typename Real, int twoS> struct gammaRecursion<Real, twoS, false> { static constexpr Real value = gammaRecursion<Real, twoS + 2, false>::value == std::numeric_limits<Real>::max() ? std::numeric_limits<Real>::max() : gammaRecursion<Real, twoS + 2, false>::value / (0.5f * twoS); }; /// Specific value of the Gamma function. 
template <typename Real> struct gammaRecursion<Real, 0, false> { static constexpr Real value = std::numeric_limits<Real>::max(); }; /// Specific value of the Gamma function. template <typename Real> struct gammaRecursion<Real, 1, true> { static constexpr Real value = sqrtPi; }; /// Specific value of the Gamma function. template <typename Real> struct gammaRecursion<Real, 1, false> { static constexpr Real value = sqrtPi; }; /// Specific value of the Gamma function. template <typename Real> struct gammaRecursion<Real, 2, true> { static constexpr Real value = 1; }; /// Specific value of the Gamma function. template <typename Real> struct gammaRecursion<Real, 2, false> { static constexpr Real value = 1; }; /*! * \class incompleteGammaComputer * \brief Computes the upper incomplete Gamma function. * \f$ \Gamma[s,x] = \int_x^\infty t^{s-1} e^{-t} \mathrm{d}t \f$ * In this code we only need half integral arguments for \f$s\f$, and only positive \f$x\f$ arguments. * \tparam Real the floating point type to use for arithmetic. * \tparam twoS twice the s value required. */ template <typename Real, int twoS> struct incompleteGammaComputer { /*! * \brief Computes the incomplete gamma function. * \param x value required. * \return \f$\Gamma[\frac{\mathrm{twoS}}{2}, x^2]\f$. */ static Real compute(Real x) { return incompleteGammaRecursion<Real, twoS, (twoS > 0)>::compute(x); } }; /*! * Compute upper incomplete gamma functions for positive half-integral s values using the recursion * \f$ \Gamma[\frac{\mathrm{twoS}}{2},x] = \Gamma[\frac{\mathrm{twoS}-2}{2},x] + x^{\frac{\mathrm{twoS}-2}{2}}e^{-x}\f$ */ template <typename Real, int twoS, bool isPositive> struct incompleteVirialGammaRecursion { static std::pair<Real, Real> compute(Real x) { Real gamma = incompleteGammaComputer<Real, twoS>::compute(x); return {gamma, (0.5f * twoS) * gamma + pow(x, (0.5f * twoS)) * exp(-x)}; } }; /*! * Compute upper incomplete gamma functions for negative half-integral s values using the recursion * \f$ \Gamma[\frac{\mathrm{twoS}}{2},x] = \frac{2\Gamma[\frac{\mathrm{twoS}+2}{2},x] - * 2x^\frac{\mathrm{twoS}}{2}e^{-x}}{\mathrm{twoS}}\f$ */ template <typename Real, int twoS> struct incompleteVirialGammaRecursion<Real, twoS, false> { static std::pair<Real, Real> compute(Real x) { Real gamma = incompleteGammaComputer<Real, twoS + 2>::compute(x); return {(gamma - pow(x, 0.5f * twoS) * exp(-x)) / (0.5f * twoS), gamma}; } }; /*! * \class incompleteGammaVirialComputer * \brief Computes the upper incomplete Gamma function for two different values: s and s+1. * \f$ \Gamma[s,x] = \int_x^\infty t^{s-1} e^{-t} \mathrm{d}t \f$ * In this code we only need half integral arguments for \f$s\f$, and only positive \f$x\f$ arguments. * \tparam Real the floating point type to use for arithmetic. * \tparam twoS twice the s value required. */ template <typename Real, int twoS> struct incompleteGammaVirialComputer { /*! * \brief Computes the incomplete gamma function for argument twoS and twoS+2. * \param x value required. * \return \f$\Gamma[\frac{\mathrm{twoS}}{2}, x]\f$ and \f$\Gamma[\frac{\mathrm{twoS+2}}{2}, x]\f$. */ static std::pair<Real, Real> compute(Real x) { return incompleteVirialGammaRecursion<Real, twoS, (twoS >= 0)>::compute(x); } }; /*! * \class gammaComputer * \brief Computes the Gamma function. * \f$ \Gamma[s] = \int_0^\infty t^{s-1} e^{-t} \mathrm{d}t \f$ * In this code we only need half integral values for the \f$s\f$ argument, so the input * argument \f$s\f$ will yield \f$\Gamma[\frac{s}{2}]\f$. 
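 * As a hypothetical compile-time check (not part of the library):
 * \code{.cpp}
 * // Gamma(3/2) = sqrt(pi)/2 ~= 0.8862, evaluated entirely at compile time.
 * constexpr double g = helpme::gammaComputer<double, 3>::value;
 * static_assert(g > 0.886 && g < 0.887, "Gamma(3/2)");
 * \endcode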
* \tparam Real the floating point type to use for arithmetic. * \tparam twoS twice the s value required. */ template <typename Real, int twoS> struct gammaComputer { /// The value of \f$\Gamma[\frac{\mathrm{twos}}{2}]\f$ static constexpr Real value = gammaRecursion<Real, twoS, (twoS > 0)>::value; }; /*! * \brief Computes the Gamma function using recursion instead of template metaprogramming. * \f$ \Gamma[s] = \int_0^\infty t^{s-1} e^{-t} \mathrm{d}t \f$ * In this code we only need half integral values for the \f$s\f$ argument, so the input * argument \f$s\f$ will yield \f$\Gamma[\frac{s}{2}]\f$. * \tparam Real the floating point type to use for arithmetic. * \param twoS twice the s value required. */ template <typename Real> Real nonTemplateGammaComputer(int twoS) { if (twoS == 1) { return sqrtPi; } else if (twoS == 2) { return 1; } else if (twoS <= 0 && twoS % 2 == 0) { return std::numeric_limits<Real>::max(); } else if (twoS > 0) { return nonTemplateGammaComputer<Real>(twoS - 2) * (0.5f * twoS - 1); } else { return nonTemplateGammaComputer<Real>(twoS + 2) / (0.5f * twoS); } } } // Namespace helpme #endif // Header guard // original file: ../src/gridsize.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. Simmonett // // ENDLICENSE #ifndef _HELPME_GRIDSIZE_H_ #define _HELPME_GRIDSIZE_H_ #include <algorithm> #include <cmath> #include <initializer_list> #include <vector> namespace helpme { // N.B. The templates here are just to avoid multiple definitions in the .so file. /*! * \brief allDivisors checks that a list of values are divisors of a given input value. * \param gridSize the gridSize to check for divisors. * \param requiredDivisors the list of divisors. * \return whether all listed values are divisors of gridSize. */ template <typename T> bool allDivisors(T gridSize, const std::initializer_list<T> &requiredDivisors) { for (const T &divisor : requiredDivisors) if (gridSize % divisor) return false; return true; } /*! * \brief findGridSize FFTW likes to have transformations with dimensions of the form * * a b c d e f * 2 3 5 7 11 13 * * where a,b,c and d are general and e+f is either 0 or 1. MKL has similar demands: * * https://software.intel.com/en-us/articles/fft-length-and-layout-advisor/ * http://www.fftw.org/fftw3_doc/Real_002ddata-DFTs.html * * This routine will compute the next largest grid size subject to the constraint that the * resulting size is a multiple of a given factor. * \param inputSize the minimum size of the grid. * \param requiredDivisors list of values that must be a factor of the output grid size. * \return the adjusted grid size. */ template <typename T> int findGridSize(T inputSize, const std::initializer_list<T> &requiredDivisors) { std::vector<int> primeFactors{2, 3, 5, 7}; T minDivisor = std::min(requiredDivisors); T currentSize = minDivisor * std::ceil(static_cast<float>(inputSize) / minDivisor); while (true) { // Now we know that the grid size is a multiple of requiredFactor, check // that it satisfies the prime factor requirements stated above. 
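        // Worked example (hypothetical): findGridSize(97, {4}) first tries
        // 100 = 4 * ceil(97/4); 100 = 2*2*5*5 has only allowed prime factors
        // and is divisible by 4, so 100 is returned on the first iteration.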
T remainder = currentSize; for (const int &factor : primeFactors) while (remainder > 1 && remainder % factor == 0) remainder /= factor; if ((remainder == 1 || remainder == 11 || remainder == 13) && allDivisors(currentSize, requiredDivisors)) return currentSize; currentSize += minDivisor; } } } // Namespace helpme #endif // Header guard // #include "matrix.h" // #include "memory.h" #if HAVE_MPI == 1 // original file: ../src/mpi_wrapper.h // BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. Simmonett // // ENDLICENSE #ifndef _HELPME_MPI_WRAPPER_H_ #define _HELPME_MPI_WRAPPER_H_ #include <mpi.h> #include <complex> #include <iomanip> #include <iostream> #include <stdexcept> namespace helpme { /*! * \brief The MPITypes struct abstracts away the MPI_Datatype types for different floating point modes * using templates to hide the details from the caller. */ template <typename Real> struct MPITypes { MPI_Datatype realType_; MPI_Datatype complexType_; MPITypes() { throw std::runtime_error("MPI wrapper has not been implemented for the requested floating point type."); } }; template <> MPITypes<float>::MPITypes() : realType_(MPI_FLOAT), complexType_(MPI_C_COMPLEX) {} template <> MPITypes<double>::MPITypes() : realType_(MPI_DOUBLE), complexType_(MPI_C_DOUBLE_COMPLEX) {} template <> MPITypes<long double>::MPITypes() : realType_(MPI_LONG_DOUBLE), complexType_(MPI_C_LONG_DOUBLE_COMPLEX) {} /*! * \brief The MPIWrapper struct is a lightweight C++ wrapper around the C MPI functions. Its main * purpose is to provide RAII semantics, ensuring that memory is correctly freed. It also * conveniently abstracts away the different MPI type descriptors for each floating point type. */ template <typename Real> struct MPIWrapper { MPITypes<Real> types_; /// The MPI communicator instance to use for all reciprocal space work. MPI_Comm mpiCommunicator_; /// The total number of MPI nodes involved in reciprocal space work. int numNodes_; /// The MPI rank of this node. int myRank_; /// The number of nodes in the X direction. int numNodesX_; /// The number of nodes in the Y direction. int numNodesY_; /// The number of nodes in the Z direction. int numNodesZ_; void assertNodePartitioningValid(int numNodes, int numNodesX, int numNodesY, int numNodesZ) const { if (numNodes != numNodesX * numNodesY * numNodesZ) throw std::runtime_error( "Communicator world size does not match the numNodesX, numNodesY, numNodesZ passed in."); } MPIWrapper() : mpiCommunicator_(0), numNodes_(0), myRank_(0) {} MPIWrapper(const MPI_Comm& communicator, int numNodesX, int numNodesY, int numNodesZ) : numNodesX_(numNodesX), numNodesY_(numNodesY), numNodesZ_(numNodesZ) { if (MPI_Comm_dup(communicator, &mpiCommunicator_) != MPI_SUCCESS) throw std::runtime_error("Problem calling MPI_Comm_dup in MPIWrapper constructor."); if (MPI_Comm_size(mpiCommunicator_, &numNodes_) != MPI_SUCCESS) throw std::runtime_error("Problem calling MPI_Comm_size in MPIWrapper constructor."); if (MPI_Comm_rank(mpiCommunicator_, &myRank_) != MPI_SUCCESS) throw std::runtime_error("Problem calling MPI_Comm_rank in MPIWrapper constructor."); assertNodePartitioningValid(numNodes_, numNodesX, numNodesY, numNodesZ); } ~MPIWrapper() { if (mpiCommunicator_) MPI_Comm_free(&mpiCommunicator_); } /*! * \brief barrier wait for all members of this communicator to reach this point. 
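     * \throws std::runtime_error if the underlying MPI_Barrier call fails.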
*/ void barrier() { if (MPI_Barrier(mpiCommunicator_) != MPI_SUCCESS) throw std::runtime_error("Problem in MPI Barrier call!"); } /*! * \brief split split this communicator into subgroups. * \param color the number identifying the subgroup the new communicator belongs to. * \param key the rank of the new communicator within the subgroup. * \return the new communicator. */ std::unique_ptr<MPIWrapper> split(int color, int key) { std::unique_ptr<MPIWrapper> newWrapper(new MPIWrapper); if (MPI_Comm_split(mpiCommunicator_, color, key, &newWrapper->mpiCommunicator_) != MPI_SUCCESS) throw std::runtime_error("Problem calling MPI_Comm_split in MPIWrapper split."); if (MPI_Comm_size(newWrapper->mpiCommunicator_, &newWrapper->numNodes_) != MPI_SUCCESS) throw std::runtime_error("Problem calling MPI_Comm_size in MPIWrapper split."); if (MPI_Comm_rank(newWrapper->mpiCommunicator_, &newWrapper->myRank_) != MPI_SUCCESS) throw std::runtime_error("Problem calling MPI_Comm_rank in MPIWrapper split."); return newWrapper; } /*! * \brief allToAll perform alltoall communication within this communicator. * \param inBuffer the buffer containing input data. * \param outBuffer the buffer to send results to. * \param dimension the number of elements to be communicated. */ void allToAll(std::complex<Real>* inBuffer, std::complex<Real>* outBuffer, int dimension) { if (MPI_Alltoall(inBuffer, 2 * dimension, types_.realType_, outBuffer, 2 * dimension, types_.realType_, mpiCommunicator_) != MPI_SUCCESS) throw std::runtime_error("Problem encountered calling MPI alltoall."); } /*! * \brief allToAll perform alltoall communication within this communicator. * \param inBuffer the buffer containing input data. * \param outBuffer the buffer to send results to. * \param dimension the number of elements to be communicated. */ void allToAll(Real* inBuffer, Real* outBuffer, int dimension) { if (MPI_Alltoall(inBuffer, dimension, types_.realType_, outBuffer, dimension, types_.realType_, mpiCommunicator_) != MPI_SUCCESS) throw std::runtime_error("Problem encountered calling MPI alltoall."); } /*! * \brief reduce performs a reduction, with summation as the operation. * \param inBuffer the buffer containing input data. * \param outBuffer the buffer to send results to, which will be sent to node 0. * \param dimension the number of elements to be reduced. */ void reduce(Real* inBuffer, Real* outBuffer, int dimension) { if (MPI_Reduce(inBuffer, outBuffer, dimension, types_.realType_, MPI_SUM, 0, mpiCommunicator_) != MPI_SUCCESS) throw std::runtime_error("Problem encountered calling MPI reduce."); } /*! * \brief operator << a convenience wrapper around ostream, to inject node info. */ friend std::ostream& operator<<(std::ostream& os, const MPIWrapper& obj) { os << "Node " << obj.myRank_ << " of " << obj.numNodes_ << ":" << std::endl; return os; } }; // Adapter to allow piping of streams into unique_ptr-held object template <typename Real> std::ostream& operator<<(std::ostream& os, const std::unique_ptr<MPIWrapper<Real>>& obj) { os << *obj; return os; } // A convenience macro to guarantee that each node prints in order. 
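// Hypothetical usage from inside a PMEInstance method, where mpiCommunicator_ is in scope:
//     PRINT("node-local energy: " << energy)
// Each rank prints in turn, with barrier() calls serializing the output.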
#define PRINT(out)                                                                                         \
    if (mpiCommunicator_) {                                                                                \
        for (int node = 0; node < mpiCommunicator_->numNodes_; ++node) {                                   \
            std::cout.setf(std::ios::fixed, std::ios::floatfield);                                         \
            if (node == mpiCommunicator_->myRank_)                                                         \
                std::cout << mpiCommunicator_ << std::setw(18) << std::setprecision(10) << out << std::endl; \
            mpiCommunicator_->barrier();                                                                   \
        };                                                                                                 \
    } else {                                                                                               \
        std::cout << std::setw(18) << std::setprecision(10) << out << std::endl;                           \
    }
}  // Namespace helpme
#endif  // Header guard
#else
typedef struct ompi_communicator_t *MPI_Comm;
#endif
// original file: ../src/powers.h
// BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
#ifndef _HELPME_POWERS_H_
#define _HELPME_POWERS_H_
#include <cmath>
/*!
 * \file powers.h
 * \brief Contains template functions to compute various quantities raised to an integer power.
 */
namespace helpme {
template <typename Real, int n>
struct raiseToIntegerPower {
    static Real pow(Real val) { return val * raiseToIntegerPower<Real, n - 1>::pow(val); }
};
/// Base recursion for the power.
template <typename Real>
struct raiseToIntegerPower<Real, 0> {
    static Real pow(Real) { return 1; }
};
/// n is positive and even case
template <typename Real, int n, bool nIsPositive, bool nIsEven>
struct normIntegerPowerComputer {
    static Real compute(Real val) { return raiseToIntegerPower<Real, n / 2>::pow(val); }
};
/// n is positive and odd case
template <typename Real, int n>
struct normIntegerPowerComputer<Real, n, true, false> {
    static Real compute(Real val) { return raiseToIntegerPower<Real, n>::pow(std::sqrt(val)); }
};
/// n is negative and even case
template <typename Real, int n>
struct normIntegerPowerComputer<Real, n, false, true> {
    static Real compute(Real val) { return raiseToIntegerPower<Real, -n / 2>::pow(1 / val); }
};
/// n is negative and odd case
template <typename Real, int n>
struct normIntegerPowerComputer<Real, n, false, false> {
    static Real compute(Real val) { return raiseToIntegerPower<Real, -n>::pow(1 / sqrt(val)); }
};
/*!
 * \brief Compute a quantity exponentiated by an integer power, using multiplication,
 *        at compile time. Negative exponents are handled by raising the reciprocal of
 *        the argument to the corresponding positive power.
 * \tparam Real the floating point type to use for arithmetic.
 * \tparam n the exponent to raise the value to.
 */
template <typename Real, int n>
struct raiseNormToIntegerPower {
    /*!
     * \brief compute computes the norm raised to the power n.
     * \param val the square of the norm to be exponentiated.
     * \return the norm raised to the integer power.
     */
    static Real compute(Real val) { return normIntegerPowerComputer<Real, n, (n >= 0), (n % 2 == 0)>::compute(val); }
};
}  // Namespace helpme
#endif  // Header guard
// original file: ../src/splines.h
// BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
#ifndef _HELPME_SPLINES_H_
#define _HELPME_SPLINES_H_
// #include "matrix.h"
/*!
 * \file splines.h
 * \brief Contains the C++ implementation of cardinal B-Splines.
 */
namespace helpme {
/*!
 * \class BSpline
 * \brief A class to compute cardinal B-splines. This code can compute arbitrary-order B-splines of
 *        arbitrary derivative level, subject to the usual constraint that an order m spline is
 *        differentiable m-2 times.
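 *
 *        A hypothetical usage sketch: an order-4 spline anchored at grid point 10,
 *        displaced 0.25 of a grid spacing past it, with first derivatives available:
 *        \code{.cpp}
 *        helpme::BSpline<double> spline(10, 0.25, 4, 1);
 *        const double *weights = spline[0];  // the 4 spline values
 *        const double *derivs = spline[1];   // their first derivatives
 *        \endcode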
* \tparam Real the floating point type to use for arithmetic. */ template <typename Real> class BSpline { protected: /// The order of this B-spline. short order_; /// The maximum derivative level for this B-spline. short derivativeLevel_; /// B-Splines with rows corresponding to derivative level, and columns to spline component. Matrix<Real> splines_; /// The grid point at which to start interpolation. short startingGridPoint_; /// Makes B-Spline array. inline void makeSplineInPlace(Real *array, const Real &val, const short &n) const { Real denom = (Real)1 / (n - 1); array[n - 1] = denom * val * array[n - 2]; for (short j = 1; j < n - 1; ++j) array[n - j - 1] = denom * ((val + j) * array[n - j - 2] + (n - j - val) * array[n - j - 1]); array[0] *= denom * (1 - val); } /// Takes BSpline derivative. inline void differentiateSpline(const Real *array, Real *dArray, const short &n) const { dArray[0] = -array[0]; for (short j = 1; j < n - 1; ++j) dArray[j] = array[j - 1] - array[j]; dArray[n - 1] = array[n - 2]; } /*! * \brief assertSplineIsSufficient ensures that the spline is large enough to be differentiable. * An mth order B-Spline is differentiable m-2 times. */ void assertSplineIsSufficient(int splineOrder, int derivativeLevel) const { if (splineOrder - derivativeLevel < 2) { std::string msg( "The spline order used is not sufficient for the derivative level requested." "Set the spline order to at least "); msg += std::to_string(derivativeLevel + 2); msg += " to run this calculation."; throw std::runtime_error(msg); } } public: /// The B-splines and their derivatives. See update() for argument details. BSpline(short start, Real value, short order, short derivativeLevel) : splines_(derivativeLevel + 1, order) { update(start, value, order, derivativeLevel); } /*! * \brief update computes information for BSpline, without reallocating memory unless needed. * \param start the grid point at which to start interpolation. * \param value the distance (in fractional coordinates) from the starting grid point. * \param order the order of the BSpline. * \param derivativeLevel the maximum level of derivative needed for this BSpline. */ void update(short start, Real value, short order, short derivativeLevel) { assertSplineIsSufficient(order, derivativeLevel); startingGridPoint_ = start; order_ = order; derivativeLevel_ = derivativeLevel; // The +1 is to account for the fact that we need to store entries up to and including the max. if (splines_.nRows() < derivativeLevel + 1 || splines_.nCols() != order) splines_ = Matrix<Real>(derivativeLevel + 1, order); splines_.setZero(); splines_(0, 0) = 1 - value; splines_(0, 1) = value; for (short m = 1; m < order_ - 1; ++m) { makeSplineInPlace(splines_[0], value, m + 2); if (m >= order_ - derivativeLevel_ - 2) { short currentDerivative = order_ - m - 2; for (short l = 0; l < currentDerivative; ++l) differentiateSpline(splines_[l], splines_[l + 1], m + 2 + currentDerivative); } } } BSpline() {} /*! * \brief The modulus of the B-Spline in Fourier space. * \param gridDim the dimension of the grid in the dimension this spline is to be used. * \return a gridDim long vector containing the inverse of the Fourier space spline moduli. 
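     *         These inverse moduli are later folded into the influence function used in the convolution.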
*/ helpme::vector<Real> invSplineModuli(short gridDim) { helpme::vector<Real> splineMods(gridDim, 0); Real prefac = 2 * M_PI / gridDim; for (int i = 0; i < gridDim; ++i) { Real real = 0; Real imag = 0; for (int j = 0; j < order_; ++j) { Real exparg = i * j * prefac; Real jSpline = splines_(0, j); real += jSpline * cos(exparg); imag += jSpline * sin(exparg); } splineMods[i] = real * real + imag * imag; } // Correct tiny values. constexpr Real EPS = 1e-7f; if (splineMods[0] < EPS) splineMods[0] = splineMods[1] / 2; for (int i = 0; i < gridDim - 1; ++i) if (splineMods[i] < EPS) splineMods[i] = (splineMods[i - 1] + splineMods[i + 1]) / 2; if (splineMods[gridDim - 1] < EPS) splineMods[gridDim - 1] = splineMods[gridDim - 2] / 2; // Invert, to avoid division later on. for (int i = 0; i < gridDim; ++i) splineMods[i] = 1 / splineMods[i]; return splineMods; } /*! * \brief Gets the grid point to start interpolating from. * \return the index of the first grid point this spline supports. */ short startingGridPoint() const { return startingGridPoint_; } /*! * \brief Returns the B-Spline, or derivative thereof. * \param deriv the derivative level of the spline to be returned. */ const Real *operator[](const int &deriv) const { return splines_[deriv]; } /*! * \brief Get read-only access to the full spline data. * \returns a const reference to the full spline data: row index is derivative, col index is spline component. */ const Matrix<Real> &splineData() const { return splines_; } }; } // Namespace helpme #endif // Header guard // #include "string_utils.h" /*! * \file helpme.h * \brief Contains the C++ implementation of a PME Instance, and related helper classes. */ namespace helpme { /*! * \brief nCartesian computes the total number of Cartesian components of a given angular momentum. * \param L the angular momentum. * \return total number of components up to and including angular momentum L. */ static int nCartesian(int L) { return (L + 1) * (L + 2) * (L + 3) / 6; } /*! * \brief cartAddress computes the address of a term with given quantum numbers in a Cartesian buffer. * \param lx the x quantum number. * \param ly the y quantum number. * \param lz the z quantum number. * \return the address of an {lx, ly, lz} quantity in a buffer that contains all lower angular momentum terms too. */ static int cartAddress(int lx, int ly, int lz) { int l = lx + ly + lz; return l * (l + 1) * (l + 2) / 6 + lz * (l * 2 - lz + 3) / 2 + ly; } // This is used to define function pointers in the constructor, and makes it easy to add new kernels. #define ENABLE_KERNEL_WITH_INVERSE_R_EXPONENT_OF(n) \ case n: \ convolveEVFxn_ = &convolveEVImpl<n>; \ cacheInfluenceFunctionFxn_ = &cacheInfluenceFunctionImpl<n>; \ slfEFxn_ = &slfEImpl<n>; \ dirEFxn_ = &dirEImpl<n>; \ adjEFxn_ = &adjEImpl<n>; \ dirEFFxn_ = &dirEFImpl<n>; \ adjEFFxn_ = &adjEFImpl<n>; \ break; /*! * \class splineCacheEntry * \brief A placeholder to encapsulate information about a given atom's splines */ template <typename Real> struct SplineCacheEntry { BSpline<Real> aSpline, bSpline, cSpline; int absoluteAtomNumber; SplineCacheEntry(int order, int derivativeLevel) : aSpline(0, 0, order, derivativeLevel), bSpline(0, 0, order, derivativeLevel), cSpline(0, 0, order, derivativeLevel), absoluteAtomNumber(-1) {} }; /*! * \class PMEInstance * \brief A class to encapsulate information related to a particle mesh Ewald calculation. 
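 *
 * A hypothetical usage sketch (the setup/run method names appear in error messages
 * later in this class; their exact signatures are not shown in this excerpt):
 * \code{.cpp}
 * helpme::PMEInstance<double> pme;
 * // pme.setup(...);              // kernel exponent, kappa, spline order, grid dimensions, ...
 * // pme.setLatticeVectors(...);  // unit cell
 * // double e = pme.runPME(...);  // parameters and coordinates
 * \endcode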
 *
 * By storing information related to a single PME calculation in this way, we allow multiple
 * instances to be created in calculations requiring multiple PMEs, e.g. for computing both
 * electrostatic and attractive dispersion terms using PME to handle long-range interactions.
 * \tparam Real the floating point type to use for arithmetic.
 */
template <typename Real>
class PMEInstance {
    using GridIterator = std::vector<std::vector<std::pair<short, short>>>;
    using Complex = std::complex<Real>;
    using Spline = BSpline<Real>;
    using RealMat = Matrix<Real>;
    using RealVec = helpme::vector<Real>;

   public:
    /*!
     * \brief The different conventions for orienting a lattice constructed from input parameters.
     */
    enum class LatticeType : int { XAligned = 0, ShapeMatrix = 1 };
    /*!
     * \brief The different conventions for numbering nodes.
     */
    enum class NodeOrder : int { ZYX = 0 };

   protected:
    /// The FFT grid dimensions in the {A,B,C} directions.
    int dimA_, dimB_, dimC_;
    /// The full A dimension after real->complex transformation.
    int complexDimA_;
    /// The locally owned A dimension after real->complex transformation.
    int myComplexDimA_;
    /// The order of the cardinal B-Spline used for interpolation.
    int splineOrder_;
    /// The actual number of threads per MPI instance, and the number requested previously.
    int nThreads_, requestedNumberOfThreads_;
    /// The exponent of the (inverse) interatomic distance used in this kernel.
    int rPower_;
    /// The scale factor to apply to all energies and derivatives.
    Real scaleFactor_;
    /// The attenuation parameter, whose units should be the inverse of those used to specify coordinates.
    Real kappa_;
    /// The lattice vectors.
    RealMat boxVecs_;
    /// The reciprocal lattice vectors.
    RealMat recVecs_;
    /// The scaled reciprocal lattice vectors, for transforming forces from scaled fractional coordinates.
    RealMat scaledRecVecs_;
    /// An iterator over angular momentum components.
    std::vector<std::array<short, 3>> angMomIterator_;
    /// The number of permutations of each multipole component.
    RealVec permutations_;
    /// From a given starting point on the {A,B,C} edge of the grid, lists all points to be handled, correctly wrapping
    /// around the end.
    GridIterator gridIteratorA_, gridIteratorB_, gridIteratorC_;
    /// The (inverse) bspline moduli to normalize the spreading / probing steps; these are folded into the convolution.
    RealVec splineModA_, splineModB_, splineModC_;
    /// The cached influence function involved in the convolution.
    RealVec cachedInfluenceFunction_;
    /// A function pointer to call the appropriate function to implement convolution with virial, templated to
    /// the rPower value.
    std::function<Real(int, int, int, int, int, int, int, Real, Complex *, const RealMat &, Real, Real, const Real *,
                       const Real *, const Real *, RealMat &, int)>
        convolveEVFxn_;
    /// A function pointer to call the appropriate function to implement caching of the influence function that appears
    /// in the convolution, templated to the rPower value.
    std::function<void(int, int, int, int, int, int, int, Real, RealVec &, const RealMat &, Real, Real, const Real *,
                       const Real *, const Real *, int)>
        cacheInfluenceFunctionFxn_;
    /// A function pointer to call the appropriate function to compute self energy, templated to the rPower value.
    std::function<Real(int, const RealMat &, Real, Real)> slfEFxn_;
    /// A function pointer to call the appropriate function to compute the direct energy, templated to the rPower
    /// value.
    std::function<Real(Real, Real)> dirEFxn_;
    /// A function pointer to call the appropriate function to compute the adjusted energy, templated to the rPower
    /// value.
    std::function<Real(Real, Real)> adjEFxn_;
    /// A function pointer to call the appropriate function to compute the direct energy and force, templated to the
    /// rPower value.
    std::function<std::tuple<Real, Real>(Real, Real, Real)> dirEFFxn_;
    /// A function pointer to call the appropriate function to compute the adjusted energy and force, templated to the
    /// rPower value.
    std::function<std::tuple<Real, Real>(Real, Real, Real)> adjEFFxn_;
#if HAVE_MPI == 1
    /// The communicator object that handles interactions with MPI.
    std::unique_ptr<MPIWrapper<Real>> mpiCommunicator_;
    /// The communicator object that handles interactions with MPI along this node's {A,B,C} pencils.
    std::unique_ptr<MPIWrapper<Real>> mpiCommunicatorA_, mpiCommunicatorB_, mpiCommunicatorC_;
#endif
    /// The number of nodes in the {A,B,C} dimensions.
    int numNodesA_, numNodesB_, numNodesC_;
    /// The rank of this node along the {A,B,C} dimensions.
    int rankA_, rankB_, rankC_;
    /// The first grid point that this node is responsible for in the {A,B,C} dimensions.
    int firstA_, firstB_, firstC_;
    /// The grid point beyond the last point that this node is responsible for in the {A,B,C} dimensions.
    int lastA_, lastB_, lastC_;
    /// The {X,Y,Z} dimensions of the locally owned chunk of the grid.
    int myDimA_, myDimB_, myDimC_;
    /// The subsets of a given dimension to be processed when doing a transform along another dimension.
    int subsetOfCAlongA_, subsetOfCAlongB_, subsetOfBAlongC_;
    /// The size of a cache line, in units of the size of the Real type, to allow better memory allocation policies.
    Real cacheLineSizeInReals_;
    /// The current unit cell parameters.
    Real cellA_, cellB_, cellC_, cellAlpha_, cellBeta_, cellGamma_;
    /// Whether the unit cell parameters have been changed, invalidating cached gF quantities.
    bool unitCellHasChanged_;
    /// Whether the kappa has been changed, invalidating kappa-dependent quantities.
    bool kappaHasChanged_;
    /// Whether any of the grid dimensions have changed.
    bool gridDimensionHasChanged_;
    /// Whether the spline order has changed.
    bool splineOrderHasChanged_;
    /// Whether the scale factor has changed.
    bool scaleFactorHasChanged_;
    /// Whether the power of R has changed.
    bool rPowerHasChanged_;
    /// Whether the parallel node setup has changed in any way.
    bool numNodesHasChanged_;
    /// The type of alignment scheme used for the lattice vectors.
    LatticeType latticeType_;
    /// Communication buffers for MPI parallelism.
    helpme::vector<Complex> workSpace1_, workSpace2_;
    /// FFTW wrappers to help with transformations in the {A,B,C} dimensions.
    FFTWWrapper<Real> fftHelperA_, fftHelperB_, fftHelperC_;
    /// The list of atoms, and their fractional coordinates, that will contribute to this node.
    std::vector<std::tuple<int, Real, Real, Real>> atomList_;
    /// The cached list of splines, which is stored as a member to make it persistent.
    std::vector<SplineCacheEntry<Real>> splineCache_;
    /*!
     * \brief A simple helper to compute factorials.
     * \param n the number whose factorial is to be taken.
     * \return n!
     */
    unsigned int factorial(unsigned int n) {
        unsigned int ret = 1;
        for (unsigned int i = 1; i <= n; ++i) ret *= i;
        return ret;
    }
    /*!
     * \brief makeGridIterator makes an iterator over the spline values that contribute to this node's grid
     *        in a given Cartesian dimension.
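     *        For example, on a periodic grid of dimension 16 owned entirely by one node, an
     *        order-4 spline whose first grid point is 14 contributes to points {14, 15, 0, 1},
     *        the last two wrapping around the boundary.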
The iterator is of the form (grid point, spline index) and is * sorted by increasing grid point, for cache efficiency. * \param dimension the dimension of the grid in the Cartesian dimension of interest. * \param first the first grid point in the Cartesian dimension to be handled by this node. * \param last the element past the last grid point in the Cartesian dimension to be handled by this node. * \return the vector of spline iterators for each starting grid point. */ GridIterator makeGridIterator(int dimension, int first, int last) const { GridIterator gridIterator; for (int gridStart = 0; gridStart < dimension; ++gridStart) { std::vector<std::pair<short, short>> splineIterator(splineOrder_); splineIterator.clear(); for (int splineIndex = 0; splineIndex < splineOrder_; ++splineIndex) { int gridPoint = (splineIndex + gridStart) % dimension; if (gridPoint >= first && gridPoint < last) splineIterator.push_back(std::make_pair(gridPoint - first, splineIndex)); } splineIterator.shrink_to_fit(); std::sort(splineIterator.begin(), splineIterator.end()); gridIterator.push_back(splineIterator); } gridIterator.shrink_to_fit(); return gridIterator; } /*! Make sure that the iterator over AM components is up to date. * \param angMom the angular momentum required for the iterator over multipole components. */ void updateAngMomIterator(int parameterAngMom) { auto L = parameterAngMom; size_t expectedNTerms = nCartesian(L); if (angMomIterator_.size() >= expectedNTerms) return; angMomIterator_.resize(expectedNTerms); permutations_.resize(expectedNTerms); for (short l = 0, count = 0; l <= L; ++l) { for (short lz = 0; lz <= l; ++lz) { for (short ly = 0; ly <= l - lz; ++ly) { short lx = l - ly - lz; angMomIterator_[count] = {{static_cast<short>(lx), static_cast<short>(ly), static_cast<short>(lz)}}; permutations_[count] = (Real)factorial(l) / (factorial(lx) * factorial(ly) * factorial(lz)); ++count; } } } } /*! * \brief updateInfluenceFunction builds the gF array cache, if the lattice vector has changed since the last * build of it. If the cell is unchanged, this does nothing. */ void updateInfluenceFunction() { if (unitCellHasChanged_ || kappaHasChanged_ || gridDimensionHasChanged_ || splineOrderHasChanged_ || scaleFactorHasChanged_ || numNodesHasChanged_) { cacheInfluenceFunctionFxn_(dimA_, dimB_, dimC_, myComplexDimA_, myDimB_ / numNodesC_, rankA_ * myComplexDimA_, rankB_ * myDimB_ + rankC_ * myDimB_ / numNodesC_, scaleFactor_, cachedInfluenceFunction_, recVecs_, cellVolume(), kappa_, &splineModA_[0], &splineModB_[0], &splineModC_[0], nThreads_); } } /*! * \brief filterAtomsAndBuildSplineCache builds a list of BSplines for only the atoms to be handled by this node. * \param splineDerivativeLevel the derivative level (parameter angular momentum + energy derivative level) of the * BSplines. \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. 
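     * \note Atoms whose splines touch none of this node's grid points are dropped here, so the
     *       cached spline list can be shorter than the input coordinate list.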
*/ void filterAtomsAndBuildSplineCache(int splineDerivativeLevel, const RealMat &coords) { assertInitialized(); atomList_.clear(); size_t nAtoms = coords.nRows(); for (int atom = 0; atom < nAtoms; ++atom) { const Real *atomCoords = coords[atom]; constexpr float EPS = 1e-6; Real aCoord = atomCoords[0] * recVecs_(0, 0) + atomCoords[1] * recVecs_(1, 0) + atomCoords[2] * recVecs_(2, 0) - EPS; Real bCoord = atomCoords[0] * recVecs_(0, 1) + atomCoords[1] * recVecs_(1, 1) + atomCoords[2] * recVecs_(2, 1) - EPS; Real cCoord = atomCoords[0] * recVecs_(0, 2) + atomCoords[1] * recVecs_(1, 2) + atomCoords[2] * recVecs_(2, 2) - EPS; // Make sure the fractional coordinates fall in the range 0 <= s < 1 aCoord -= floor(aCoord); bCoord -= floor(bCoord); cCoord -= floor(cCoord); short aStartingGridPoint = dimA_ * aCoord; short bStartingGridPoint = dimB_ * bCoord; short cStartingGridPoint = dimC_ * cCoord; const auto &aGridIterator = gridIteratorA_[aStartingGridPoint]; const auto &bGridIterator = gridIteratorB_[bStartingGridPoint]; const auto &cGridIterator = gridIteratorC_[cStartingGridPoint]; if (aGridIterator.size() && bGridIterator.size() && cGridIterator.size()) atomList_.emplace_back(atom, aCoord, bCoord, cCoord); } // Now we know how many atoms we loop over the dense list, redefining nAtoms accordingly. // The first stage above is to get the number of atoms, so we can avoid calling push_back // and thus avoid the many memory allocations. If the cache is too small, grow it by a // certain scale factor to try and minimize allocations in a not-too-wasteful manner. nAtoms = atomList_.size(); if (splineCache_.size() < nAtoms) { size_t newSize = static_cast<size_t>(1.2 * nAtoms); for (int atom = splineCache_.size(); atom < newSize; ++atom) splineCache_.emplace_back(splineOrder_, splineDerivativeLevel); } for (int atomListNum = 0; atomListNum < nAtoms; ++atomListNum) { const auto &entry = atomList_[atomListNum]; const int absoluteAtomNumber = std::get<0>(entry); const Real aCoord = std::get<1>(entry); const Real bCoord = std::get<2>(entry); const Real cCoord = std::get<3>(entry); short aStartingGridPoint = dimA_ * aCoord; short bStartingGridPoint = dimB_ * bCoord; short cStartingGridPoint = dimC_ * cCoord; auto &atomSplines = splineCache_[atomListNum]; atomSplines.absoluteAtomNumber = absoluteAtomNumber; atomSplines.aSpline.update(aStartingGridPoint, dimA_ * aCoord - aStartingGridPoint, splineOrder_, splineDerivativeLevel); atomSplines.bSpline.update(bStartingGridPoint, dimB_ * bCoord - bStartingGridPoint, splineOrder_, splineDerivativeLevel); atomSplines.cSpline.update(cStartingGridPoint, dimC_ * cCoord - cStartingGridPoint, splineOrder_, splineDerivativeLevel); } } /*! * \brief Spreads parameters onto the grid for a single atom * \param atom the absolute atom number. * \param realGrid pointer to the array containing the grid in CBA order * \param nComponents the number of angular momentum components in the parameters. * \param nForceComponents the number of angular momentum components in the parameters with one extra * level of angular momentum to permit evaluation of forces. * \param splineA the BSpline object for the A direction. * \param splineB the BSpline object for the B direction. * \param splineC the BSpline object for the C direction. * \param parameters the list of parameters associated with each atom (charges, C6 coefficients, multipoles, * etc...). 
     *        For a parameter with angular momentum L, a matrix of dimension nAtoms x nL is expected, where nL =
     *        (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
     *
     *        0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
     *
     *        i.e. generated by the python loops
     *        \code{.py}
     *        for L in range(maxAM+1):
     *            for Lz in range(0,L+1):
     *                for Ly in range(0, L - Lz + 1):
     *                    Lx = L - Ly - Lz
     *        \endcode
     */
    void spreadParametersImpl(const int &atom, Real *realGrid, const int &nComponents, const Spline &splineA,
                              const Spline &splineB, const Spline &splineC, const RealMat &parameters) {
        const auto &aGridIterator = gridIteratorA_[splineA.startingGridPoint()];
        const auto &bGridIterator = gridIteratorB_[splineB.startingGridPoint()];
        const auto &cGridIterator = gridIteratorC_[splineC.startingGridPoint()];
        int numPointsA = static_cast<int>(aGridIterator.size());
        int numPointsB = static_cast<int>(bGridIterator.size());
        int numPointsC = static_cast<int>(cGridIterator.size());
        const auto *iteratorDataA = aGridIterator.data();
        const auto *iteratorDataB = bGridIterator.data();
        const auto *iteratorDataC = cGridIterator.data();
        for (int component = 0; component < nComponents; ++component) {
            const auto &quanta = angMomIterator_[component];
            Real param = parameters(atom, component);
            const Real *splineValsA = splineA[quanta[0]];
            const Real *splineValsB = splineB[quanta[1]];
            const Real *splineValsC = splineC[quanta[2]];
            for (int pointC = 0; pointC < numPointsC; ++pointC) {
                const auto &cPoint = iteratorDataC[pointC];
                Real cValP = param * splineValsC[cPoint.second];
                for (int pointB = 0; pointB < numPointsB; ++pointB) {
                    const auto &bPoint = iteratorDataB[pointB];
                    Real cbValP = cValP * splineValsB[bPoint.second];
                    Real *cbRow = realGrid + cPoint.first * myDimB_ * myDimA_ + bPoint.first * myDimA_;
                    for (int pointA = 0; pointA < numPointsA; ++pointA) {
                        const auto &aPoint = iteratorDataA[pointA];
                        cbRow[aPoint.first] += cbValP * splineValsA[aPoint.second];
                    }
                }
            }
        }
    }
    /*!
     * \brief Probes the grid and computes the force for a single atom, specialized for zero parameter angular
     *        momentum.
     * \param potentialGrid pointer to the array containing the potential, in ZYX order.
     * \param splineA the BSpline object for the A direction.
     * \param splineB the BSpline object for the B direction.
     * \param splineC the BSpline object for the C direction.
     * \param parameter the parameter associated with the given atom.
     * \param forces a 3 vector of the forces for this atom, ordered in memory as {Fx, Fy, Fz}.
     */
    void probeGridImpl(const Real *potentialGrid, const Spline &splineA, const Spline &splineB, const Spline &splineC,
                       const Real &parameter, Real *forces) const {
        const auto &aGridIterator = gridIteratorA_[splineA.startingGridPoint()];
        const auto &bGridIterator = gridIteratorB_[splineB.startingGridPoint()];
        const auto &cGridIterator = gridIteratorC_[splineC.startingGridPoint()];
        // We unpack the vector to raw pointers, as profiling shows that using range-based for loops over vectors
        // causes a significant penalty in the innermost loop, primarily due to checking the loop stop condition.
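        // The Ex/Ey/Ez accumulators below gather the fractional-coordinate field components;
        // each term applies the differentiated spline in exactly one of the three dimensions.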
int numPointsA = static_cast<int>(aGridIterator.size()); int numPointsB = static_cast<int>(bGridIterator.size()); int numPointsC = static_cast<int>(cGridIterator.size()); const auto *iteratorDataA = aGridIterator.data(); const auto *iteratorDataB = bGridIterator.data(); const auto *iteratorDataC = cGridIterator.data(); const Real *splineStartA0 = splineA[0]; const Real *splineStartB0 = splineB[0]; const Real *splineStartC0 = splineC[0]; const Real *splineStartA1 = splineStartA0 + splineOrder_; const Real *splineStartB1 = splineStartB0 + splineOrder_; const Real *splineStartC1 = splineStartC0 + splineOrder_; Real Ex = 0, Ey = 0, Ez = 0; for (int pointC = 0; pointC < numPointsC; ++pointC) { const auto &cPoint = iteratorDataC[pointC]; const Real &splineC0 = splineStartC0[cPoint.second]; const Real &splineC1 = splineStartC1[cPoint.second]; for (int pointB = 0; pointB < numPointsB; ++pointB) { const auto &bPoint = iteratorDataB[pointB]; const Real &splineB0 = splineStartB0[bPoint.second]; const Real &splineB1 = splineStartB1[bPoint.second]; const Real *cbRow = potentialGrid + cPoint.first * myDimA_ * myDimB_ + bPoint.first * myDimA_; for (int pointA = 0; pointA < numPointsA; ++pointA) { const auto &aPoint = iteratorDataA[pointA]; const Real &splineA0 = splineStartA0[aPoint.second]; const Real &splineA1 = splineStartA1[aPoint.second]; const Real &gridVal = cbRow[aPoint.first]; Ey += gridVal * splineA0 * splineB1 * splineC0; Ez += gridVal * splineA0 * splineB0 * splineC1; Ex += gridVal * splineA1 * splineB0 * splineC0; } } } forces[0] -= parameter * (scaledRecVecs_[0][0] * Ex + scaledRecVecs_[0][1] * Ey + scaledRecVecs_[0][2] * Ez); forces[1] -= parameter * (scaledRecVecs_[1][0] * Ex + scaledRecVecs_[1][1] * Ey + scaledRecVecs_[1][2] * Ez); forces[2] -= parameter * (scaledRecVecs_[2][0] * Ex + scaledRecVecs_[2][1] * Ey + scaledRecVecs_[2][2] * Ez); } /*! * \brief Probes the grid and computes the force for a single atom, for arbitrary parameter angular momentum. * \param potentialGrid pointer to the array containing the potential, in ZYX order. * \param nPotentialComponents the number of components in the potential and its derivatives with one extra * level of angular momentum to permit evaluation of forces. * \param splineA the BSpline object for the A direction. * \param splineB the BSpline object for the B direction. * \param splineC the BSpline object for the C direction. * \param phiPtr a scratch array of length nPotentialComponents, to store the fractional potential. * N.B. Make sure that updateAngMomIterator() has been called first with the appropriate derivative * level for the requested potential derivatives. 
*/ void probeGridImpl(const Real *potentialGrid, const int &nPotentialComponents, const Spline &splineA, const Spline &splineB, const Spline &splineC, Real *phiPtr) { const auto &aGridIterator = gridIteratorA_[splineA.startingGridPoint()]; const auto &bGridIterator = gridIteratorB_[splineB.startingGridPoint()]; const auto &cGridIterator = gridIteratorC_[splineC.startingGridPoint()]; const Real *splineStartA = splineA[0]; const Real *splineStartB = splineB[0]; const Real *splineStartC = splineC[0]; for (const auto &cPoint : cGridIterator) { for (const auto &bPoint : bGridIterator) { const Real *cbRow = potentialGrid + cPoint.first * myDimA_ * myDimB_ + bPoint.first * myDimA_; for (const auto &aPoint : aGridIterator) { Real gridVal = cbRow[aPoint.first]; for (int component = 0; component < nPotentialComponents; ++component) { const auto &quanta = angMomIterator_[component]; const Real *splineValsA = splineStartA + quanta[0] * splineOrder_; const Real *splineValsB = splineStartB + quanta[1] * splineOrder_; const Real *splineValsC = splineStartC + quanta[2] * splineOrder_; phiPtr[component] += gridVal * splineValsA[aPoint.second] * splineValsB[bPoint.second] * splineValsC[cPoint.second]; } } } } } /*! * \brief Probes the grid and computes the force for a single atom, for arbitrary parameter angular momentum. * \param atom the absolute atom number. * \param potentialGrid pointer to the array containing the potential, in ZYX order. * \param nComponents the number of angular momentum components in the parameters. * \param nForceComponents the number of angular momentum components in the parameters with one extra * level of angular momentum to permit evaluation of forces. * \param splineA the BSpline object for the A direction. * \param splineB the BSpline object for the B direction. * \param splineC the BSpline object for the C direction. * \param phiPtr a scratch array of length nForceComponents, to store the fractional potential. * \param parameters the list of parameters associated with each atom (charges, C6 coefficients, multipoles, * etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL is expected, where nL = * (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. 
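     *        For a simple point charge (a single component with lx = ly = lz = 0), the body below
     *        reduces to fracForce = -q * {phi[1], phi[2], phi[3]}, i.e. minus the charge times the
     *        X, Y and Z derivatives of the fractional potential, which are then mapped to Cartesian
     *        forces through the scaled reciprocal lattice vectors.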
*/ void probeGridImpl(const int &atom, const Real *potentialGrid, const int &nComponents, const int &nForceComponents, const Spline &splineA, const Spline &splineB, const Spline &splineC, Real *phiPtr, const RealMat &parameters, Real *forces) { std::fill(phiPtr, phiPtr + nForceComponents, 0); probeGridImpl(potentialGrid, nForceComponents, splineA, splineB, splineC, phiPtr); Real fracForce[3] = {0, 0, 0}; for (int component = 0; component < nComponents; ++component) { Real param = parameters(atom, component); const auto &quanta = angMomIterator_[component]; short lx = quanta[0]; short ly = quanta[1]; short lz = quanta[2]; fracForce[0] -= param * phiPtr[cartAddress(lx + 1, ly, lz)]; fracForce[1] -= param * phiPtr[cartAddress(lx, ly + 1, lz)]; fracForce[2] -= param * phiPtr[cartAddress(lx, ly, lz + 1)]; } forces[0] += scaledRecVecs_[0][0] * fracForce[0] + scaledRecVecs_[0][1] * fracForce[1] + scaledRecVecs_[0][2] * fracForce[2]; forces[1] += scaledRecVecs_[1][0] * fracForce[0] + scaledRecVecs_[1][1] * fracForce[1] + scaledRecVecs_[1][2] * fracForce[2]; forces[2] += scaledRecVecs_[2][0] * fracForce[0] + scaledRecVecs_[2][1] * fracForce[1] + scaledRecVecs_[2][2] * fracForce[2]; } /*! * \brief assertInitialized makes sure that setup() has been called before running any calculations. */ void assertInitialized() const { if (!rPower_) throw std::runtime_error( "Either setup(...) or setup_parallel(...) must be called before computing anything."); } /*! * \brief makeBSplines construct the {x,y,z} B-Splines. * \param atomCoords a 3-vector containing the atom's coordinates. * \param derivativeLevel level of derivative needed for the splines. * \return a 3-tuple containing the {x,y,z} B-splines. */ std::tuple<Spline, Spline, Spline> makeBSplines(const Real *atomCoords, short derivativeLevel) const { // Subtract a tiny amount to make sure we're not exactly on the rightmost (excluded) // grid point. The calculation is translationally invariant, so this is valid. constexpr float EPS = 1e-6f; Real aCoord = atomCoords[0] * recVecs_(0, 0) + atomCoords[1] * recVecs_(1, 0) + atomCoords[2] * recVecs_(2, 0) - EPS; Real bCoord = atomCoords[0] * recVecs_(0, 1) + atomCoords[1] * recVecs_(1, 1) + atomCoords[2] * recVecs_(2, 1) - EPS; Real cCoord = atomCoords[0] * recVecs_(0, 2) + atomCoords[1] * recVecs_(1, 2) + atomCoords[2] * recVecs_(2, 2) - EPS; // Make sure the fractional coordinates fall in the range 0 <= s < 1 aCoord -= floor(aCoord); bCoord -= floor(bCoord); cCoord -= floor(cCoord); short aStartingGridPoint = dimA_ * aCoord; short bStartingGridPoint = dimB_ * bCoord; short cStartingGridPoint = dimC_ * cCoord; Real aDistanceFromGridPoint = dimA_ * aCoord - aStartingGridPoint; Real bDistanceFromGridPoint = dimB_ * bCoord - bStartingGridPoint; Real cDistanceFromGridPoint = dimC_ * cCoord - cStartingGridPoint; return std::make_tuple(Spline(aStartingGridPoint, aDistanceFromGridPoint, splineOrder_, derivativeLevel), Spline(bStartingGridPoint, bDistanceFromGridPoint, splineOrder_, derivativeLevel), Spline(cStartingGridPoint, cDistanceFromGridPoint, splineOrder_, derivativeLevel)); } /*! * \brief sanityChecks just makes sure that inputs have consistent dimensions, and that prerequisites are * initialized. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). * \param parameters the input parameters. * \param coordinates the input coordinates. 
     */
    void sanityChecks(int parameterAngMom, const RealMat &parameters, const RealMat &coordinates) {
        assertInitialized();
        if (parameters.nRows() == 0)
            throw std::runtime_error("Parameters have not been set yet! Call setParameters(...) before runPME(...);");
        if (coordinates.nRows() == 0)
            throw std::runtime_error(
                "Coordinates have not been set yet! Call setCoordinates(...) before runPME(...);");
        if (boxVecs_.isNearZero())
            throw std::runtime_error(
                "Lattice vectors have not been set yet! Call setLatticeVectors(...) before runPME(...);");
        if (coordinates.nRows() != parameters.nRows())
            throw std::runtime_error(
                "Inconsistent number of coordinates and parameters; there should be nAtoms of each.");
        if (parameters.nCols() != nCartesian(parameterAngMom))
            throw std::runtime_error(
                "Mismatch in the number of parameters provided and the parameter angular momentum");
    }
    /*!
     * \brief convolveEVImpl performs the reciprocal space convolution, returning the energy. We opt to not cache
     *        this the same way as the non-virial version because it's safe to assume that if the virial is requested
     *        the box is likely to change, which renders the cache useless.
     * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
     * \param nx the grid dimension in the x direction.
     * \param ny the grid dimension in the y direction.
     * \param nz the grid dimension in the z direction.
     * \param myNx the subset of the grid in the x direction to be handled by this node.
     * \param myNy the subset of the grid in the y direction to be handled by this node.
     * \param startX the starting grid point handled by this node in the X direction.
     * \param startY the starting grid point handled by this node in the Y direction.
     * \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof (e.g. the
     *        1 / [4 pi epsilon0] factor for Coulomb calculations).
     * \param gridPtr the Fourier space grid, with ordering YXZ.
     * \param boxInv the reciprocal lattice vectors.
     * \param volume the volume of the unit cell.
     * \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
     * \param xMods the Fourier space norms of the x B-Splines.
     * \param yMods the Fourier space norms of the y B-Splines.
     * \param zMods the Fourier space norms of the z B-Splines.
     * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ.
     *        This vector is incremented, not assigned.
     * \param nThreads the number of OpenMP threads to use.
     * \return the reciprocal space energy.
     */
    template <int rPower>
    static Real convolveEVImpl(int nx, int ny, int nz, int myNx, int myNy, int startX, int startY, Real scaleFactor,
                               Complex *gridPtr, const RealMat &boxInv, Real volume, Real kappa, const Real *xMods,
                               const Real *yMods, const Real *zMods, RealMat &virial, int nThreads) {
        Real energy = 0;
        bool nodeZero = startX == 0 && startY == 0;
        if (rPower > 3 && nodeZero) {
            // Kernels with rPower > 3 are absolutely convergent and should have the m=0 term present.
            // To compute it we need sum_ij c(i)c(j), which can be obtained from the structure factor norm.
            Real prefac = 2 * scaleFactor * M_PI * sqrtPi * pow(kappa, rPower - 3) /
                          ((rPower - 3) * gammaComputer<Real, rPower>::value * volume);
            energy += prefac * std::norm(gridPtr[0]);
        }
        // Ensure the m=0 term convolution product is zeroed for the backtransform; it's been accounted for above.
if (nodeZero) gridPtr[0] = Complex(0, 0); std::vector<Real> xMVals(myNx), yMVals(myNy), zMVals(nz); // Iterators to conveniently map {X,Y,Z} grid location to m_{X,Y,Z} value, where -1/2 <= m/dim < 1/2. for (int kx = 0; kx < myNx; ++kx) xMVals[kx] = startX + (kx + startX >= (nx + 1) / 2 ? kx - nx : kx); for (int ky = 0; ky < myNy; ++ky) yMVals[ky] = startY + (ky + startY >= (ny + 1) / 2 ? ky - ny : ky); for (int kz = 0; kz < nz; ++kz) zMVals[kz] = kz >= (nz + 1) / 2 ? kz - nz : kz; Real bPrefac = M_PI * M_PI / (kappa * kappa); Real volPrefac = scaleFactor * pow(M_PI, rPower - 1) / (sqrtPi * gammaComputer<Real, rPower>::value * volume); int halfNx = nx / 2 + 1; size_t nxz = myNx * nz; Real Vxx = 0, Vxy = 0, Vyy = 0, Vxz = 0, Vyz = 0, Vzz = 0; const Real *boxPtr = boxInv[0]; const Real *xMPtr = xMVals.data(); const Real *yMPtr = yMVals.data(); const Real *zMPtr = zMVals.data(); size_t nyxz = myNy * nxz; // Exclude m=0 cell. int start = (nodeZero ? 1 : 0); // Writing the three nested loops in one allows for better load balancing in parallel. #pragma omp parallel for reduction(+ : energy, Vxx, Vxy, Vyy, Vxz, Vyz, Vzz) num_threads(nThreads) for (size_t yxz = start; yxz < nyxz; ++yxz) { size_t xz = yxz % nxz; short ky = yxz / nxz; short kx = xz / nz; short kz = xz % nz; // We only loop over the first nx/2+1 x values; this // accounts for the "missing" complex conjugate values. Real permPrefac = kx + startX != 0 && kx + startX != halfNx - 1 ? 2 : 1; const Real &mx = xMPtr[kx]; const Real &my = yMPtr[ky]; const Real &mz = zMPtr[kz]; Real mVecX = boxPtr[0] * mx + boxPtr[1] * my + boxPtr[2] * mz; Real mVecY = boxPtr[3] * mx + boxPtr[4] * my + boxPtr[5] * mz; Real mVecZ = boxPtr[6] * mx + boxPtr[7] * my + boxPtr[8] * mz; Real mNormSq = mVecX * mVecX + mVecY * mVecY + mVecZ * mVecZ; Real mTerm = raiseNormToIntegerPower<Real, rPower - 3>::compute(mNormSq); Real bSquared = bPrefac * mNormSq; auto gammas = incompleteGammaVirialComputer<Real, 3 - rPower>::compute(bSquared); Real eGamma = std::get<0>(gammas); Real vGamma = std::get<1>(gammas); Complex &gridVal = gridPtr[yxz]; Real structFacNorm = std::norm(gridVal); Real totalPrefac = volPrefac * mTerm * yMods[ky + startY] * xMods[kx + startX] * zMods[kz]; Real influenceFunction = totalPrefac * eGamma; gridVal *= influenceFunction; Real eTerm = permPrefac * influenceFunction * structFacNorm; Real vTerm = permPrefac * vGamma * totalPrefac / mNormSq * structFacNorm; energy += eTerm; Vxx += vTerm * mVecX * mVecX; Vxy += vTerm * mVecX * mVecY; Vyy += vTerm * mVecY * mVecY; Vxz += vTerm * mVecX * mVecZ; Vyz += vTerm * mVecY * mVecZ; Vzz += vTerm * mVecZ * mVecZ; } energy /= 2; virial[0][0] -= Vxx - energy; virial[0][1] -= Vxy; virial[0][2] -= Vyy - energy; virial[0][3] -= Vxz; virial[0][4] -= Vyz; virial[0][5] -= Vzz - energy; return energy; } /*! * \brief cacheInfluenceFunctionImpl computes the influence function used in convolution, for later use. * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion). * \param nx the grid dimension in the x direction. * \param ny the grid dimension in the y direction. * \param nz the grid dimension in the z direction. * \param myNx the subset of the grid in the x direction to be handled by this node. * \param myNy the subset of the grid in the y direction to be handled by this node. * \param startX the starting grid point handled by this node in the X direction. * \param startY the starting grid point handled by this node in the Y direction.
* \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof (e.g. the * 1 / [4 pi epsilon0] for Coulomb calculations). * \param influenceFunction the vector that will hold the influence function, with ordering YXZ; it is resized * and assigned by this routine. * \param boxInv the reciprocal lattice vectors. * \param volume the volume of the unit cell. * \param kappa the attenuation parameter in units inverse of those used to specify coordinates. * \param xMods the Fourier space norms of the x B-Splines. * \param yMods the Fourier space norms of the y B-Splines. * \param zMods the Fourier space norms of the z B-Splines. * \param nThreads the number of OpenMP threads to use. */ template <int rPower> static void cacheInfluenceFunctionImpl(int nx, int ny, int nz, int myNx, int myNy, int startX, int startY, Real scaleFactor, RealVec &influenceFunction, const RealMat &boxInv, Real volume, Real kappa, const Real *xMods, const Real *yMods, const Real *zMods, int nThreads) { bool nodeZero = startX == 0 && startY == 0; size_t nxz = myNx * nz; size_t nyxz = myNy * nxz; influenceFunction.resize(nyxz); Real *gridPtr = influenceFunction.data(); if (nodeZero) gridPtr[0] = 0; std::vector<Real> xMVals(myNx), yMVals(myNy), zMVals(nz); // Iterators to conveniently map {X,Y,Z} grid location to m_{X,Y,Z} value, where -1/2 <= m/dim < 1/2. for (int kx = 0; kx < myNx; ++kx) xMVals[kx] = startX + (kx + startX >= (nx + 1) / 2 ? kx - nx : kx); for (int ky = 0; ky < myNy; ++ky) yMVals[ky] = startY + (ky + startY >= (ny + 1) / 2 ? ky - ny : ky); for (int kz = 0; kz < nz; ++kz) zMVals[kz] = kz >= (nz + 1) / 2 ? kz - nz : kz; Real bPrefac = M_PI * M_PI / (kappa * kappa); Real volPrefac = scaleFactor * pow(M_PI, rPower - 1) / (sqrtPi * gammaComputer<Real, rPower>::value * volume); const Real *boxPtr = boxInv[0]; // Exclude m=0 cell. int start = (nodeZero ? 1 : 0); // Writing the three nested loops in one allows for better load balancing in parallel. #pragma omp parallel for num_threads(nThreads) for (size_t yxz = start; yxz < nyxz; ++yxz) { size_t xz = yxz % nxz; short ky = yxz / nxz; short kx = xz / nz; short kz = xz % nz; Real mx = (Real)xMVals[kx]; Real my = (Real)yMVals[ky]; Real mz = (Real)zMVals[kz]; Real mVecX = boxPtr[0] * mx + boxPtr[1] * my + boxPtr[2] * mz; Real mVecY = boxPtr[3] * mx + boxPtr[4] * my + boxPtr[5] * mz; Real mVecZ = boxPtr[6] * mx + boxPtr[7] * my + boxPtr[8] * mz; Real mNormSq = mVecX * mVecX + mVecY * mVecY + mVecZ * mVecZ; Real mTerm = raiseNormToIntegerPower<Real, rPower - 3>::compute(mNormSq); Real bSquared = bPrefac * mNormSq; Real incompleteGammaTerm = incompleteGammaComputer<Real, 3 - rPower>::compute(bSquared); gridPtr[yxz] = volPrefac * incompleteGammaTerm * mTerm * yMods[ky + startY] * xMods[kx + startX] * zMods[kz]; } } /*! * \brief dirEImpl computes the kernel for the direct energy for a pair. * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion). * \param rSquared the square of the internuclear distance * \param kappaSquared the square of attenuation parameter in units inverse of those used to specify coordinates. * \return the energy kernel. */ template <int rPower> inline static Real dirEImpl(Real rSquared, Real kappaSquared) { Real denominator = raiseNormToIntegerPower<Real, rPower>::compute(rSquared); Real gammaTerm = incompleteGammaComputer<Real, rPower>::compute(rSquared * kappaSquared) / gammaComputer<Real, rPower>::value; return gammaTerm / denominator; } /*!
* \brief dirEFImpl computes the kernels for the direct energy and force for a pair. * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion). * \param rSquared the square of the internuclear distance * \param kappa the attenuation parameter in units inverse of those used to specify coordinates. * \param kappaSquared the square of attenuation parameter in units inverse of those used to specify coordinates. * \return a tuple containing the energy and force kernels, respectively. */ template <int rPower> inline static std::tuple<Real, Real> dirEFImpl(Real rSquared, Real kappa, Real kappaSquared) { Real rInv = 1 / rSquared; Real kappaToRPower = kappa; for (int i = 1; i < rPower; ++i) kappaToRPower *= kappa; Real denominator = raiseNormToIntegerPower<Real, rPower>::compute(rSquared); Real gammaTerm = incompleteGammaComputer<Real, rPower>::compute(rSquared * kappaSquared) / gammaComputer<Real, rPower>::value; Real eKernel = gammaTerm / denominator; Real fKernel = -rPower * eKernel * rInv - 2 * rInv * exp(-kappaSquared * rSquared) * kappaToRPower / gammaComputer<Real, rPower>::value; return std::make_tuple(eKernel, fKernel); } /*! * \brief adjEImpl computes the kernel for the adjusted energy for a pair. * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion). * \param rSquared the square of the internuclear distance * \param kappaSquared the square of attenuation parameter in units inverse of those used to specify coordinates. * \return the energy kernel. */ template <int rPower> inline static Real adjEImpl(Real rSquared, Real kappaSquared) { Real denominator = raiseNormToIntegerPower<Real, rPower>::compute(rSquared); Real gammaTerm = incompleteGammaComputer<Real, rPower>::compute(rSquared * kappaSquared) / gammaComputer<Real, rPower>::value; return (gammaTerm - 1) / denominator; } /*! * \brief adjEFImpl computes the kernels for the adjusted energy and force for a pair. * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion). * \param rSquared the square of the internuclear distance * \param kappa the attenuation parameter in units inverse of those used to specify coordinates. * \param kappaSquared the square of attenuation parameter in units inverse of those used to specify coordinates. * \return a tuple containing the energy and force kernels, respectively. */ template <int rPower> inline static std::tuple<Real, Real> adjEFImpl(Real rSquared, Real kappa, Real kappaSquared) { Real rInv = 1 / rSquared; Real kappaToRPower = kappa; for (int i = 1; i < rPower; ++i) kappaToRPower *= kappa; Real denominator = raiseNormToIntegerPower<Real, rPower>::compute(rSquared); Real gammaTerm = incompleteGammaComputer<Real, rPower>::compute(rSquared * kappaSquared) / gammaComputer<Real, rPower>::value; Real eKernel = (gammaTerm - 1) / denominator; Real fKernel = -rPower * eKernel * rInv - 2 * rInv * exp(-kappaSquared * rSquared) * kappaToRPower / gammaComputer<Real, rPower>::value; return std::make_tuple(eKernel, fKernel); } /*! * \brief slfEImpl computes the self energy due to particles feeling their own potential. * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion). * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for quadrupoles, * etc.). 
* \param parameters the list of parameters associated with each atom (charges, C6 coefficients, multipoles, * etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL is expected, where nL = * (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param kappa the attenuation parameter in units inverse of those used to specify coordinates. * \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof * (e.g. the 1 / [4 pi epsilon0] for Coulomb calculations). * \return the self energy. N.B. there is no self force associated with this term. */ template <int rPower> static Real slfEImpl(int parameterAngMom, const RealMat &parameters, Real kappa, Real scaleFactor) { if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet."); size_t nAtoms = parameters.nRows(); Real prefac = -scaleFactor * std::pow(kappa, rPower) / (rPower * gammaComputer<Real, rPower>::value); Real sumCoefs = 0; for (size_t atom = 0; atom < nAtoms; ++atom) { sumCoefs += parameters(atom, 0) * parameters(atom, 0); } return prefac * sumCoefs; } /*! * \brief common_init sets up information that is common to serial and parallel runs. */ void common_init(int rPower, Real kappa, int splineOrder, int dimA, int dimB, int dimC, Real scaleFactor, int nThreads) { kappaHasChanged_ = kappa != kappa_; rPowerHasChanged_ = rPower_ != rPower; gridDimensionHasChanged_ = dimA_ != dimA || dimB_ != dimB || dimC_ != dimC; splineOrderHasChanged_ = splineOrder_ != splineOrder; scaleFactorHasChanged_ = scaleFactor_ != scaleFactor; if (kappaHasChanged_ || rPowerHasChanged_ || gridDimensionHasChanged_ || splineOrderHasChanged_ || scaleFactorHasChanged_ || requestedNumberOfThreads_ != nThreads) { rPower_ = rPower; dimA_ = dimA; dimB_ = dimB; dimC_ = dimC; complexDimA_ = dimA / 2 + 1; myComplexDimA_ = myDimA_ / 2 + 1; splineOrder_ = splineOrder; requestedNumberOfThreads_ = nThreads; #ifdef _OPENMP nThreads_ = nThreads ? nThreads : omp_get_max_threads(); #else nThreads_ = 1; #endif scaleFactor_ = scaleFactor; kappa_ = kappa; cacheLineSizeInReals_ = static_cast<Real>(sysconf(_SC_PAGESIZE) / sizeof(Real)); // Helpers to perform 1D FFTs along each dimension. fftHelperA_ = FFTWWrapper<Real>(dimA_); fftHelperB_ = FFTWWrapper<Real>(dimB_); fftHelperC_ = FFTWWrapper<Real>(dimC_); // Grid iterators to correctly wrap the grid when using splines. gridIteratorA_ = makeGridIterator(dimA_, firstA_, lastA_); gridIteratorB_ = makeGridIterator(dimB_, firstB_, lastB_); gridIteratorC_ = makeGridIterator(dimC_, firstC_, lastC_); // Fourier space spline norms. Spline spline = Spline(0, 0, splineOrder_, 0); splineModA_ = spline.invSplineModuli(dimA_); splineModB_ = spline.invSplineModuli(dimB_); splineModC_ = spline.invSplineModuli(dimC_); // Set up function pointers by instantiating the appropriate evaluation functions. We could add many more // entries by default here, but don't right now to avoid code bloat. To add an extra rPower kernel is a // trivial cut and paste exercise; just add a new line with the desired 1/R power as the macro's argument. switch (rPower) { ENABLE_KERNEL_WITH_INVERSE_R_EXPONENT_OF(1); ENABLE_KERNEL_WITH_INVERSE_R_EXPONENT_OF(6); default: std::string msg("Bad rPower requested. 
To fix this, add the appropriate entry in "); msg += __FILE__; msg += ", line number "; msg += std::to_string(__LINE__ - 5); throw std::runtime_error(msg.c_str()); break; } subsetOfCAlongA_ = myDimC_ / numNodesA_; subsetOfCAlongB_ = myDimC_ / numNodesB_; subsetOfBAlongC_ = myDimB_ / numNodesC_; workSpace1_ = helpme::vector<Complex>(myDimC_ * myComplexDimA_ * myDimB_); workSpace2_ = helpme::vector<Complex>(myDimC_ * myComplexDimA_ * myDimB_); } } public: PMEInstance() : dimA_(0), dimB_(0), dimC_(0), splineOrder_(0), requestedNumberOfThreads_(-1), rPower_(0), scaleFactor_(0), kappa_(0), boxVecs_(3, 3), recVecs_(3, 3), scaledRecVecs_(3, 3), numNodesA_(1), numNodesB_(1), numNodesC_(1), cellA_(0), cellB_(0), cellC_(0), cellAlpha_(0), cellBeta_(0), cellGamma_(0) {} /*! * \brief cellVolume computes the volume of the unit cell. * \return volume in units consistent with those used to define the lattice vectors. */ Real cellVolume() { return boxVecs_(0, 0) * boxVecs_(1, 1) * boxVecs_(2, 2) - boxVecs_(0, 0) * boxVecs_(1, 2) * boxVecs_(2, 1) + boxVecs_(0, 1) * boxVecs_(1, 2) * boxVecs_(2, 0) - boxVecs_(0, 1) * boxVecs_(1, 0) * boxVecs_(2, 2) + boxVecs_(0, 2) * boxVecs_(1, 0) * boxVecs_(2, 1) - boxVecs_(0, 2) * boxVecs_(1, 1) * boxVecs_(2, 0); } /*! * \brief Sets the unit cell lattice vectors, with units consistent with those used to specify coordinates. * \param A the A lattice parameter in units consistent with the coordinates. * \param B the B lattice parameter in units consistent with the coordinates. * \param C the C lattice parameter in units consistent with the coordinates. * \param alpha the alpha lattice parameter in degrees. * \param beta the beta lattice parameter in degrees. * \param gamma the gamma lattice parameter in degrees. * \param latticeType how to arrange the lattice vectors. Options are * ShapeMatrix: enforce a symmetric representation of the lattice vectors [c.f. S. Nosé and M. L. Klein, * Mol. Phys. 50 1055 (1983)] particularly appendix C. * XAligned: make the A vector coincide with the X axis, the B vector fall in the XY plane, and the C vector * take the appropriate alignment to completely define the system. */ void setLatticeVectors(Real A, Real B, Real C, Real alpha, Real beta, Real gamma, LatticeType latticeType) { if (A != cellA_ || B != cellB_ || C != cellC_ || alpha != cellAlpha_ || beta != cellBeta_ || gamma != cellGamma_ || latticeType != latticeType_) { if (latticeType == LatticeType::ShapeMatrix) { RealMat HtH(3, 3); HtH(0, 0) = A * A; HtH(1, 1) = B * B; HtH(2, 2) = C * C; const float TOL = 1e-4f; // Check for angles very close to 90, to avoid noise from the eigensolver later on. HtH(0, 1) = HtH(1, 0) = std::abs(gamma - 90) < TOL ? 0 : A * B * cos(M_PI * gamma / 180); HtH(0, 2) = HtH(2, 0) = std::abs(beta - 90) < TOL ? 0 : A * C * cos(M_PI * beta / 180); HtH(1, 2) = HtH(2, 1) = std::abs(alpha - 90) < TOL ? 
0 : B * C * cos(M_PI * alpha / 180); auto eigenTuple = HtH.diagonalize(); RealMat evalsReal = std::get<0>(eigenTuple); RealMat evecs = std::get<1>(eigenTuple); for (int i = 0; i < 3; ++i) evalsReal(i, 0) = sqrt(evalsReal(i, 0)); boxVecs_.setZero(); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { for (int k = 0; k < 3; ++k) { boxVecs_(i, j) += evecs(i, k) * evecs(j, k) * evalsReal(k, 0); } } } recVecs_ = boxVecs_.inverse(); } else if (latticeType == LatticeType::XAligned) { boxVecs_(0, 0) = A; boxVecs_(0, 1) = 0; boxVecs_(0, 2) = 0; boxVecs_(1, 0) = B * cos(M_PI / 180 * gamma); boxVecs_(1, 1) = B * sin(M_PI / 180 * gamma); boxVecs_(1, 2) = 0; boxVecs_(2, 0) = C * cos(M_PI / 180 * beta); boxVecs_(2, 1) = (B * C * cos(M_PI / 180 * alpha) - boxVecs_(2, 0) * boxVecs_(1, 0)) / boxVecs_(1, 1); boxVecs_(2, 2) = sqrt(C * C - boxVecs_(2, 0) * boxVecs_(2, 0) - boxVecs_(2, 1) * boxVecs_(2, 1)); } else { throw std::runtime_error("Unknown lattice type in setLatticeVectors"); } recVecs_ = boxVecs_.inverse(); scaledRecVecs_ = recVecs_.clone(); scaledRecVecs_.row(0) *= dimA_; scaledRecVecs_.row(1) *= dimB_; scaledRecVecs_.row(2) *= dimC_; cellA_ = A; cellB_ = B; cellC_ = C; cellAlpha_ = alpha; cellBeta_ = beta; cellGamma_ = gamma; latticeType_ = latticeType; unitCellHasChanged_ = true; } else { unitCellHasChanged_ = false; } } /*! * \brief Performs the forward 3D FFT of the discretized parameter grid. * \param realGrid the array of discretized parameters (stored in CBA order, * with A being the fast running index) to be transformed. * \return Pointer to the transformed grid, which is stored in one of the buffers in BAC order. */ Complex *forwardTransform(Real *realGrid) { Real *realCBA; Complex *buffer1, *buffer2; if (realGrid == reinterpret_cast<Real *>(workSpace1_.data())) { realCBA = reinterpret_cast<Real *>(workSpace2_.data()); buffer1 = workSpace2_.data(); buffer2 = workSpace1_.data(); } else { // Mirrored assignment, so the scratch buffer is always distinct from the input grid. realCBA = reinterpret_cast<Real *>(workSpace1_.data()); buffer1 = workSpace1_.data(); buffer2 = workSpace2_.data(); } #if HAVE_MPI == 1 if (numNodesA_ > 1) { // Communicate A along columns mpiCommunicatorA_->allToAll(realGrid, realCBA, subsetOfCAlongA_ * myDimA_ * myDimB_); // Resort the data to end up with realGrid holding a full row of A data, for B pencil and C subset. for (int c = 0; c < subsetOfCAlongA_; ++c) { Real *outC = realGrid + c * myDimB_ * dimA_; for (int b = 0; b < myDimB_; ++b) { for (int chunk = 0; chunk < numNodesA_; ++chunk) { Real *inPtr = realCBA + (chunk * subsetOfCAlongA_ + c) * myDimB_ * myDimA_ + b * myDimA_; std::copy(inPtr, inPtr + myDimA_, outC + b * dimA_ + chunk * myDimA_); } } } } #endif // Each parallel node allocates buffers of length dimA/(2 numNodesA)+1 for A, leading to a total of // dimA/2 + numNodesA = complexDimA+numNodesA-1 if dimA is even // and // numNodesA (dimA-1)/2 + numNodesA = complexDimA + numNodesA/2-1 if dimA is odd // We just allocate the larger size here, remembering that the final padding values on the last node // will all be allocated to zero and will not contribute to the final answer.
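        // Worked example of the size arithmetic above (illustrative numbers, even case): dimA = 8, numNodesA = 2
        // gives per-node buffers of 8 / (2 * 2) + 1 = 3 complex values, 6 in total, matching
        // complexDimA + numNodesA - 1 = 5 + 2 - 1 = 6.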
helpme::vector<Complex> buffer(complexDimA_ + numNodesA_ - 1); // A transform, with instant sort to CAB ordering for each local block auto scratch = buffer.data(); for (int c = 0; c < subsetOfCAlongA_; ++c) { for (int b = 0; b < myDimB_; ++b) { Real *gridPtr = realGrid + c * myDimB_ * dimA_ + b * dimA_; fftHelperA_.transform(gridPtr, scratch); for (int chunk = 0; chunk < numNodesA_; ++chunk) { for (int a = 0; a < myComplexDimA_; ++a) { buffer1[(chunk * subsetOfCAlongA_ + c) * myComplexDimA_ * myDimB_ + a * myDimB_ + b] = scratch[chunk * myComplexDimA_ + a]; } } } } #if HAVE_MPI == 1 // Communicate A back to blocks if (numNodesA_ > 1) { mpiCommunicatorA_->allToAll(buffer1, buffer2, subsetOfCAlongA_ * myComplexDimA_ * myDimB_); std::swap(buffer1, buffer2); } // Communicate B along rows if (numNodesB_ > 1) { mpiCommunicatorB_->allToAll(buffer1, buffer2, subsetOfCAlongB_ * myComplexDimA_ * myDimB_); // Resort the data to end up with the buffer holding a full row of B data, for A pencil and C subset. for (int c = 0; c < subsetOfCAlongB_; ++c) { Complex *cPtr = buffer1 + c * myComplexDimA_ * dimB_; for (int a = 0; a < myComplexDimA_; ++a) { for (int chunk = 0; chunk < numNodesB_; ++chunk) { Complex *inPtr = buffer2 + (chunk * subsetOfCAlongB_ + c) * myComplexDimA_ * myDimB_ + a * myDimB_; std::copy(inPtr, inPtr + myDimB_, cPtr + a * dimB_ + chunk * myDimB_); } } } } #endif // B transform for (int c = 0; c < subsetOfCAlongB_; ++c) { Complex *cPtr = buffer1 + c * myComplexDimA_ * dimB_; for (int a = 0; a < myComplexDimA_; ++a) { fftHelperB_.transform(cPtr + a * dimB_, FFTW_FORWARD); } } #if HAVE_MPI == 1 if (numNodesB_ > 1) { for (int c = 0; c < subsetOfCAlongB_; ++c) { Complex *zPtr = buffer1 + c * myComplexDimA_ * dimB_; for (int a = 0; a < myComplexDimA_; ++a) { for (int chunk = 0; chunk < numNodesB_; ++chunk) { Complex *inPtr = zPtr + a * dimB_ + chunk * myDimB_; Complex *outPtr = buffer2 + (chunk * subsetOfCAlongB_ + c) * myComplexDimA_ * myDimB_ + a * myDimB_; std::copy(inPtr, inPtr + myDimB_, outPtr); } } } // Communicate B back to blocks mpiCommunicatorB_->allToAll(buffer2, buffer1, subsetOfCAlongB_ * myComplexDimA_ * myDimB_); } #endif // sort local blocks from CAB to BAC order for (int b = 0; b < myDimB_; ++b) { for (int a = 0; a < myComplexDimA_; ++a) { for (int c = 0; c < myDimC_; ++c) { buffer2[b * myComplexDimA_ * myDimC_ + a * myDimC_ + c] = buffer1[c * myComplexDimA_ * myDimB_ + a * myDimB_ + b]; } } } #if HAVE_MPI == 1 if (numNodesC_ > 1) { // Communicate C along columns mpiCommunicatorC_->allToAll(buffer2, buffer1, subsetOfBAlongC_ * myComplexDimA_ * myDimC_); for (int b = 0; b < subsetOfBAlongC_; ++b) { Complex *outPtrB = buffer2 + b * myComplexDimA_ * dimC_; for (int a = 0; a < myComplexDimA_; ++a) { Complex *outPtrBA = outPtrB + a * dimC_; for (int chunk = 0; chunk < numNodesC_; ++chunk) { Complex *inPtr = buffer1 + (chunk * subsetOfBAlongC_ + b) * myComplexDimA_ * myDimC_ + a * myDimC_; std::copy(inPtr, inPtr + myDimC_, outPtrBA + chunk * myDimC_); } } } } #endif // C transform for (int b = 0; b < subsetOfBAlongC_; ++b) { Complex *outPtrB = buffer2 + b * myComplexDimA_ * dimC_; for (int a = 0; a < myComplexDimA_; ++a) { Complex *outPtrBA = outPtrB + a * dimC_; fftHelperC_.transform(outPtrBA, FFTW_FORWARD); } } return buffer2; } /*! * \brief Performs the inverse 3D FFT. * \param convolvedGrid the complex array of discretized parameters convolved with the influence function * (stored in BAC order, with C being the fast running index) to be transformed. 
* \return Pointer to the potential grid, which is stored in one of the buffers in CBA order. */ Real *inverseTransform(Complex *convolvedGrid) { Complex *buffer1, *buffer2; // Setup scratch, taking care not to overwrite the convolved grid. if (convolvedGrid == workSpace1_.data()) { buffer1 = workSpace2_.data(); buffer2 = workSpace1_.data(); } else { buffer1 = workSpace1_.data(); buffer2 = workSpace2_.data(); } // C transform for (int y = 0; y < subsetOfBAlongC_; ++y) { for (int x = 0; x < myComplexDimA_; ++x) { int yx = y * myComplexDimA_ * dimC_ + x * dimC_; fftHelperC_.transform(convolvedGrid + yx, FFTW_BACKWARD); } } #if HAVE_MPI == 1 if (numNodesC_ > 1) { // Communicate C back to blocks for (int b = 0; b < subsetOfBAlongC_; ++b) { Complex *inPtrB = convolvedGrid + b * myComplexDimA_ * dimC_; for (int a = 0; a < myComplexDimA_; ++a) { Complex *inPtrBA = inPtrB + a * dimC_; for (int chunk = 0; chunk < numNodesC_; ++chunk) { Complex *inPtrBAC = inPtrBA + chunk * myDimC_; Complex *outPtr = buffer1 + (chunk * subsetOfBAlongC_ + b) * myComplexDimA_ * myDimC_ + a * myDimC_; std::copy(inPtrBAC, inPtrBAC + myDimC_, outPtr); } } } mpiCommunicatorC_->allToAll(buffer1, buffer2, subsetOfBAlongC_ * myComplexDimA_ * myDimC_); } #endif // sort local blocks from BAC to CAB order for (int B = 0; B < myDimB_; ++B) { for (int A = 0; A < myComplexDimA_; ++A) { for (int C = 0; C < myDimC_; ++C) { buffer1[C * myComplexDimA_ * myDimB_ + A * myDimB_ + B] = buffer2[B * myComplexDimA_ * myDimC_ + A * myDimC_ + C]; } } } #if HAVE_MPI == 1 // Communicate B along rows if (numNodesB_ > 1) { mpiCommunicatorB_->allToAll(buffer1, buffer2, subsetOfCAlongB_ * myComplexDimA_ * myDimB_); // Resort the data to end up with the buffer holding a full row of B data, for A pencil and C subset. for (int c = 0; c < subsetOfCAlongB_; ++c) { Complex *cPtr = buffer1 + c * myComplexDimA_ * dimB_; for (int a = 0; a < myComplexDimA_; ++a) { for (int chunk = 0; chunk < numNodesB_; ++chunk) { Complex *inPtr = buffer2 + (chunk * subsetOfCAlongB_ + c) * myComplexDimA_ * myDimB_ + a * myDimB_; std::copy(inPtr, inPtr + myDimB_, cPtr + a * dimB_ + chunk * myDimB_); } } } } #endif // B transform with instant sort of local blocks from CAB -> CBA order for (int c = 0; c < subsetOfCAlongB_; ++c) { for (int a = 0; a < myComplexDimA_; ++a) { int cx = c * myComplexDimA_ * dimB_ + a * dimB_; fftHelperB_.transform(buffer1 + cx, FFTW_BACKWARD); for (int b = 0; b < myDimB_; ++b) { for (int chunk = 0; chunk < numNodesB_; ++chunk) { int cb = (chunk * subsetOfCAlongB_ + c) * myDimB_ * myComplexDimA_ + b * myComplexDimA_; buffer2[cb + a] = buffer1[cx + chunk * myDimB_ + b]; } } } } #if HAVE_MPI == 1 // Communicate B back to blocks if (numNodesB_ > 1) { mpiCommunicatorB_->allToAll(buffer2, buffer1, subsetOfCAlongB_ * myComplexDimA_ * myDimB_); } else { std::swap(buffer1, buffer2); } // Communicate A along rows if (numNodesA_ > 1) { mpiCommunicatorA_->allToAll(buffer1, buffer2, subsetOfCAlongA_ * myComplexDimA_ * myDimB_); // Resort the data to end up with the buffer holding a full row of A data, for B pencil and C subset. 
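            // Illustrative picture of the resort below (assumed small sizes): with numNodesA_ = 2 and
            // myComplexDimA_ = 3, each (c, b) pair arrives as [chunk0: a0 a1 a2][chunk1: a3 a4 a5]; the copies
            // splice the chunks so cPtr holds the contiguous run a0..a5 for every B index.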
for (int c = 0; c < subsetOfCAlongA_; ++c) { Complex *cPtr = buffer1 + c * myDimB_ * complexDimA_; for (int b = 0; b < myDimB_; ++b) { for (int chunk = 0; chunk < numNodesA_; ++chunk) { Complex *inPtr = buffer2 + (chunk * subsetOfCAlongA_ + c) * myComplexDimA_ * myDimB_ + b * myComplexDimA_; std::copy(inPtr, inPtr + myComplexDimA_, cPtr + b * complexDimA_ + chunk * myComplexDimA_); } } } } #else std::swap(buffer1, buffer2); #endif // A transform Real *realGrid = reinterpret_cast<Real *>(buffer2); for (int cb = 0; cb < subsetOfCAlongA_ * myDimB_; ++cb) { fftHelperA_.transform(buffer1 + cb * complexDimA_, realGrid + cb * dimA_); } #if HAVE_MPI == 1 // Communicate A back to blocks if (numNodesA_ > 1) { Real *realGrid2 = reinterpret_cast<Real *>(buffer1); for (int c = 0; c < subsetOfCAlongA_; ++c) { Real *cPtr = realGrid + c * myDimB_ * dimA_; for (int b = 0; b < myDimB_; ++b) { for (int chunk = 0; chunk < numNodesA_; ++chunk) { Real *outPtr = realGrid2 + (chunk * subsetOfCAlongA_ + c) * myDimB_ * myDimA_ + b * myDimA_; Real *inPtr = cPtr + b * dimA_ + chunk * myDimA_; std::copy(inPtr, inPtr + myDimA_, outPtr); } } } mpiCommunicatorA_->allToAll(realGrid2, realGrid, subsetOfCAlongA_ * myDimB_ * myDimA_); } #endif return realGrid; } /*! * \brief convolveE A wrapper to determine the correct convolution function to call. * \param transformedGrid the pointer to the complex array holding the transformed grid in YXZ ordering. * \return the reciprocal space energy. */ Real convolveE(Complex *transformedGrid) { updateInfluenceFunction(); size_t myNy = myDimB_ / numNodesC_; size_t myNx = myComplexDimA_; size_t nz = dimC_; size_t nxz = myNx * nz; size_t nyxz = myNy * nxz; size_t halfNx = dimA_ / 2 + 1; bool iAmNodeZero = (rankA_ == 0 && rankB_ == 0 && rankC_ == 0); Real *influenceFunction = cachedInfluenceFunction_.data(); int startX = rankA_ * myComplexDimA_; Real energy = 0; if (rPower_ > 3 && iAmNodeZero) { // Kernels with rPower>3 are absolutely convergent and should have the m=0 term present. // To compute it we need sum_ij c(i)c(j), which can be obtained from the structure factor norm. Real prefac = 2 * scaleFactor_ * M_PI * sqrtPi * pow(kappa_, rPower_ - 3) / ((rPower_ - 3) * nonTemplateGammaComputer<Real>(rPower_) * cellVolume()); energy += prefac * std::norm(transformedGrid[0]); } // Zero the m=0 term before convolving; only node zero actually holds it. if (iAmNodeZero) transformedGrid[0] = Complex(0, 0); #pragma omp parallel for reduction(+ : energy) num_threads(nThreads_) for (size_t yxz = 0; yxz < nyxz; ++yxz) { size_t xz = yxz % nxz; int kx = startX + xz / nz; // We only loop over the first nx/2+1 x values; this // accounts for the "missing" complex conjugate values. Real permPrefac = kx != 0 && kx != halfNx - 1 ? 2 : 1; Real structFactorNorm = std::norm(transformedGrid[yxz]); energy += permPrefac * structFactorNorm * influenceFunction[yxz]; transformedGrid[yxz] *= influenceFunction[yxz]; } return energy / 2; } /*! * \brief convolveEV A wrapper to determine the correct convolution function to call, including virial. * \param transformedGrid the pointer to the complex array holding the transformed grid in YXZ ordering. * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ. * This vector is incremented, not assigned. * \return the reciprocal space energy.
*/ Real convolveEV(Complex *transformedGrid, RealMat &virial) { return convolveEVFxn_(dimA_, dimB_, dimC_, myComplexDimA_, myDimB_ / numNodesC_, rankA_ * myComplexDimA_, rankB_ * myDimB_ + rankC_ * myDimB_ / numNodesC_, scaleFactor_, transformedGrid, recVecs_, cellVolume(), kappa_, &splineModA_[0], &splineModB_[0], &splineModC_[0], virial, nThreads_); } /*! * \brief Spread the parameters onto the charge grid. Generally this shouldn't be called; * use the various computeE() methods instead. This is the more efficient version that filters * the atom list and uses pre-computed splines. Therefore, the splineCache_ * member must have been updated via a call to filterAtomsAndBuildSplineCache() first. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \return realGrid the array of discretized parameters (stored in CBA order). */ Real *spreadParameters(int parameterAngMom, const RealMat &parameters) { Real *realGrid = reinterpret_cast<Real *>(workSpace1_.data()); std::fill(workSpace1_.begin(), workSpace1_.end(), 0); updateAngMomIterator(parameterAngMom); size_t nAtoms = atomList_.size(); int nComponents = nCartesian(parameterAngMom); for (size_t relativeAtomNumber = 0; relativeAtomNumber < nAtoms; ++relativeAtomNumber) { const auto &entry = splineCache_[relativeAtomNumber]; const int &atom = entry.absoluteAtomNumber; const auto &splineA = entry.aSpline; const auto &splineB = entry.bSpline; const auto &splineC = entry.cSpline; spreadParametersImpl(atom, realGrid, nComponents, splineA, splineB, splineC, parameters); } return realGrid; } /*! * \brief Spread the parameters onto the charge grid. Generally this shouldn't be called; * use the various computeE() methods instead. This is the slower version of this call that recomputes * splines on demand and makes no assumptions about the integrity of the spline cache. * \param parameterAngMom the angular momentum of the parameters * (0 for charges, C6 coefficients, 2 for quadrupoles, etc.). * \param parameters the list of parameters associated with each atom (charges, C6 coefficients, multipoles, * etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL is expected, where nL = * (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \return realGrid the array of discretized parameters (stored in CBA order).
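 *
 * For orientation, a sketch of the reciprocal space pipeline this routine feeds (assembled from the
 * compute*Rec drivers further below; the names are member functions of this class, error handling omitted):
 * \code{.cpp}
 * auto realGrid = spreadParameters(parameterAngMom, parameters, coordinates);  // discretize onto the grid
 * auto gridAddress = forwardTransform(realGrid);                               // real space -> Fourier space
 * Real energy = convolveE(gridAddress);                                        // apply the influence function
 * const auto potentialGrid = inverseTransform(gridAddress);                    // Fourier space -> real space
 * probeGrid(potentialGrid, parameterAngMom, parameters, coordinates, forces);  // interpolate forces back
 * \endcode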
*/ Real *spreadParameters(int parameterAngMom, const RealMat &parameters, const RealMat &coordinates) { Real *realGrid = reinterpret_cast<Real *>(workSpace1_.data()); std::fill(workSpace1_.begin(), workSpace1_.end(), 0); updateAngMomIterator(parameterAngMom); int nComponents = nCartesian(parameterAngMom); size_t nAtoms = coordinates.nRows(); for (size_t atom = 0; atom < nAtoms; ++atom) { // Blindly reconstruct splines for this atom, assuming nothing about the validity of the cache. // Note that this incurs a somewhat steep cost due to repeated memory allocations. auto bSplines = makeBSplines(coordinates[atom], parameterAngMom); const auto &splineA = std::get<0>(bSplines); const auto &splineB = std::get<1>(bSplines); const auto &splineC = std::get<2>(bSplines); spreadParametersImpl(atom, realGrid, nComponents, splineA, splineB, splineC, parameters); } return realGrid; } /*! * \brief Probes the potential grid to get the forces. Generally this shouldn't be called; * use the various computeE() methods instead. This is the slower version of this call that recomputes * splines on demand and makes no assumptions about the integrity of the spline cache. * \param potentialGrid pointer to the array containing the potential, in ZYX order. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. */ void probeGrid(const Real *potentialGrid, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces) { updateAngMomIterator(parameterAngMom + 1); int nComponents = nCartesian(parameterAngMom); int nForceComponents = nCartesian(parameterAngMom + 1); RealMat fractionalPhis(1, nForceComponents); size_t nAtoms = parameters.nRows(); for (size_t atom = 0; atom < nAtoms; ++atom) { auto bSplines = makeBSplines(coordinates[atom], parameterAngMom + 1); auto splineA = std::get<0>(bSplines); auto splineB = std::get<1>(bSplines); auto splineC = std::get<2>(bSplines); probeGridImpl(atom, potentialGrid, nComponents, nForceComponents, splineA, splineB, splineC, fractionalPhis[0], parameters, forces[atom]); } } /*! * \brief Probes the potential grid to get the forces. Generally this shouldn't be called; * use the various computeE() methods instead. This is the faster version that uses * the filtered atom list and uses pre-computed splines. Therefore, the splineCache_ * member must have been updated via a call to filterAtomsAndBuildSplineCache() first. * * \param potentialGrid pointer to the array containing the potential, in ZYX order. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). 
For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. */ void probeGrid(const Real *potentialGrid, int parameterAngMom, const RealMat &parameters, RealMat &forces) { updateAngMomIterator(parameterAngMom + 1); int nComponents = nCartesian(parameterAngMom); int nForceComponents = nCartesian(parameterAngMom + 1); const Real *paramPtr = parameters[0]; // Find how many multiples of the cache line size are needed // to ensure that each thread hits a unique page. size_t rowSize = std::ceil(nForceComponents / cacheLineSizeInReals_) * cacheLineSizeInReals_; RealMat fractionalPhis(nThreads_, rowSize); size_t nAtoms = atomList_.size(); #pragma omp parallel for num_threads(nThreads_) for (size_t relativeAtomNumber = 0; relativeAtomNumber < nAtoms; ++relativeAtomNumber) { const auto &entry = splineCache_[relativeAtomNumber]; const int &atom = entry.absoluteAtomNumber; const auto &splineA = entry.aSpline; const auto &splineB = entry.bSpline; const auto &splineC = entry.cSpline; if (parameterAngMom) { #ifdef _OPENMP int threadID = omp_get_thread_num(); #else int threadID = 1; #endif Real *myScratch = fractionalPhis[threadID % nThreads_]; probeGridImpl(atom, potentialGrid, nComponents, nForceComponents, splineA, splineB, splineC, myScratch, parameters, forces[atom]); } else { probeGridImpl(potentialGrid, splineA, splineB, splineC, paramPtr[atom], forces[atom]); } } } /*! * \brief computeESlf computes the Ewald self interaction energy. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \return the self energy. */ Real computeESlf(int parameterAngMom, const RealMat &parameters) { assertInitialized(); return slfEFxn_(parameterAngMom, parameters, kappa_, scaleFactor_); } /*! * \brief computeEDir computes the direct space energy. This is provided mostly for debugging and testing * purposes; generally the host program should provide the pairwise interactions. \param pairList dense list of * atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. \param parameterAngMom the angular momentum of * the parameters (0 for charges, C6 coefficients, 2 for quadrupoles, etc.). \param parameters the list of * parameters associated with each atom (charges, C6 coefficients, multipoles, etc...). For a parameter with * angular momentum L, a matrix of dimension nAtoms x nL is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the * fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. 
generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \return the direct space energy. */ Real computeEDir(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates) { if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet."); sanityChecks(parameterAngMom, parameters, coordinates); Real energy = 0; Real kappaSquared = kappa_ * kappa_; size_t nPair = pairList.nRows(); for (int pair = 0; pair < nPair; ++pair) { short i = pairList(pair, 0); short j = pairList(pair, 1); auto deltaR = coordinates.row(j) - coordinates.row(i); // TODO: apply minimum image convention. Real rSquared = deltaR.dot(deltaR); energy += parameters(i, 0) * parameters(j, 0) * dirEFxn_(rSquared, kappaSquared); } return scaleFactor_ * energy; } /*! * \brief computeEFDir computes the direct space energy and force. This is provided mostly for debugging and * testing purposes; generally the host program should provide the pairwise interactions. * \param pairList dense list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \return the direct space energy. */ Real computeEFDir(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces) { if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet."); sanityChecks(parameterAngMom, parameters, coordinates); Real energy = 0; Real kappaSquared = kappa_ * kappa_; size_t nPair = pairList.nRows(); for (int pair = 0; pair < nPair; ++pair) { short i = pairList(pair, 0); short j = pairList(pair, 1); auto deltaR = coordinates.row(j) - coordinates.row(i); // TODO: apply minimum image convention. Real rSquared = deltaR.dot(deltaR); auto kernels = dirEFFxn_(rSquared, kappa_, kappaSquared); Real eKernel = std::get<0>(kernels); Real fKernel = std::get<1>(kernels); Real prefactor = scaleFactor_ * parameters(i, 0) * parameters(j, 0); energy += prefactor * eKernel; Real f = -prefactor * fKernel; auto force = deltaR.row(0); force *= f; forces.row(i) -= force; forces.row(j) += force; } return energy; } /*! * \brief computeEFVDir computes the direct space energy, force and virial. This is provided mostly for * debugging and testing purposes; generally the host program should provide the pairwise interactions. \param * pairList dense list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. 
\param parameterAngMom * the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for quadrupoles, etc.). \param * parameters the list of parameters associated with each atom (charges, C6 coefficients, multipoles, etc...). * For a parameter with angular momentum L, a matrix of dimension nAtoms x nL is expected, where nL = * (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ. * This vector is incremented, not assigned. * \return the direct space energy. */ Real computeEFVDir(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces, RealMat &virial) { if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet."); sanityChecks(parameterAngMom, parameters, coordinates); Real energy = 0; Real kappaSquared = kappa_ * kappa_; size_t nPair = pairList.nRows(); for (int pair = 0; pair < nPair; ++pair) { short i = pairList(pair, 0); short j = pairList(pair, 1); auto deltaR = coordinates.row(j) - coordinates.row(i); // TODO: apply minimum image convention. Real rSquared = deltaR.dot(deltaR); auto kernels = dirEFFxn_(rSquared, kappa_, kappaSquared); Real eKernel = std::get<0>(kernels); Real fKernel = std::get<1>(kernels); Real prefactor = scaleFactor_ * parameters(i, 0) * parameters(j, 0); energy += prefactor * eKernel; Real f = -prefactor * fKernel; RealMat dRCopy = deltaR.clone(); auto force = dRCopy.row(0); force *= f; forces.row(i) -= force; forces.row(j) += force; virial[0][0] += force[0] * deltaR[0][0]; virial[0][1] += 0.5f * (force[0] * deltaR[0][1] + force[1] * deltaR[0][0]); virial[0][2] += force[1] * deltaR[0][1]; virial[0][3] += 0.5f * (force[0] * deltaR[0][2] + force[2] * deltaR[0][0]); virial[0][4] += 0.5f * (force[1] * deltaR[0][2] + force[2] * deltaR[0][1]); virial[0][5] += force[2] * deltaR[0][2]; } return energy; } /*! * \brief computeEAdj computes the adjusted real space energy which extracts the energy for excluded pairs that * is present in reciprocal space. This is provided mostly for debugging and testing purposes; generally the * host program should provide the pairwise interactions. * \param pairList dense list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. 
generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \return the adjusted energy. */ Real computeEAdj(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates) { if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet."); sanityChecks(parameterAngMom, parameters, coordinates); Real energy = 0; Real kappaSquared = kappa_ * kappa_; size_t nPair = pairList.nRows(); for (int pair = 0; pair < nPair; ++pair) { short i = pairList(pair, 0); short j = pairList(pair, 1); auto deltaR = coordinates.row(j) - coordinates.row(i); // TODO: apply minimum image convention. Real rSquared = deltaR.dot(deltaR); energy += parameters(i, 0) * parameters(j, 0) * adjEFxn_(rSquared, kappaSquared); } return scaleFactor_ * energy; } /*! * \brief computeEFAdj computes the adjusted energy and force. This is provided mostly for debugging and * testing purposes; generally the host program should provide the pairwise interactions. \param pairList dense * list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. \param parameterAngMom the angular * momentum of the parameters (0 for charges, C6 coefficients, 2 for quadrupoles, etc.). \param parameters the * list of parameters associated with each atom (charges, C6 coefficients, multipoles, etc...). For a parameter * with angular momentum L, a matrix of dimension nAtoms x nL is expected, where nL = (L+1)*(L+2)*(L+3)/6 and * the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \return the adjusted energy. */ Real computeEFAdj(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces) { if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet."); sanityChecks(parameterAngMom, parameters, coordinates); Real energy = 0; Real kappaSquared = kappa_ * kappa_; size_t nPair = pairList.nRows(); for (int pair = 0; pair < nPair; ++pair) { short i = pairList(pair, 0); short j = pairList(pair, 1); auto deltaR = coordinates.row(j) - coordinates.row(i); // TODO: apply minimum image convention. Real rSquared = deltaR.dot(deltaR); auto kernels = adjEFFxn_(rSquared, kappa_, kappaSquared); Real eKernel = std::get<0>(kernels); Real fKernel = std::get<1>(kernels); Real prefactor = scaleFactor_ * parameters(i, 0) * parameters(j, 0); energy += prefactor * eKernel; Real f = -prefactor * fKernel; auto force = deltaR.row(0); force *= f; forces.row(i) -= force; forces.row(j) += force; } return energy; } /*! * \brief computeEFVAdj computes the adjusted energy, forces and virial. This is provided mostly for debugging * and testing purposes; generally the host program should provide the pairwise interactions. * \param pairList dense list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. 
* \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ. * This vector is incremented, not assigned. * \return the adjusted energy. */ Real computeEFVAdj(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces, RealMat &virial) { if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet."); sanityChecks(parameterAngMom, parameters, coordinates); Real energy = 0; Real kappaSquared = kappa_ * kappa_; size_t nPair = pairList.nRows(); for (int pair = 0; pair < nPair; ++pair) { short i = pairList(pair, 0); short j = pairList(pair, 1); auto deltaR = coordinates.row(j) - coordinates.row(i); // TODO: apply minimum image convention. Real rSquared = deltaR.dot(deltaR); auto kernels = adjEFFxn_(rSquared, kappa_, kappaSquared); Real eKernel = std::get<0>(kernels); Real fKernel = std::get<1>(kernels); Real prefactor = scaleFactor_ * parameters(i, 0) * parameters(j, 0); energy += prefactor * eKernel; Real f = -prefactor * fKernel; RealMat dRCopy = deltaR.clone(); auto force = dRCopy.row(0); force *= f; forces.row(i) -= force; forces.row(j) += force; virial[0][0] += force[0] * deltaR[0][0]; virial[0][1] += 0.5f * (force[0] * deltaR[0][1] + force[1] * deltaR[0][0]); virial[0][2] += force[1] * deltaR[0][1]; virial[0][3] += 0.5f * (force[0] * deltaR[0][2] + force[2] * deltaR[0][0]); virial[0][4] += 0.5f * (force[1] * deltaR[0][2] + force[2] * deltaR[0][1]); virial[0][5] += force[2] * deltaR[0][2]; } return energy; } /*! * \brief Runs a PME reciprocal space calculation, computing the potential and, optionally, its derivatives. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
* \param gridPoints the list of grid points at which the potential is needed; can be the same as the * coordinates. \param derivativeLevel the order of the potential derivatives required; 0 is the potential, 1 is * (minus) the field, etc. \param potential the array holding the potential. This is a matrix of dimensions * nAtoms x nD, where nD is the number of derivative components for the requested derivativeLevel. See the details of * the parameters argument for information about ordering of derivative components. N.B. this array is incremented * with the potential, not assigned, so take care to zero it first if only the current results are desired. */ void computePRec(int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, const RealMat &gridPoints, int derivativeLevel, RealMat &potential) { sanityChecks(parameterAngMom, parameters, coordinates); updateAngMomIterator(std::max(parameterAngMom, derivativeLevel)); // Note: we're calling the version of spread parameters that computes its own splines here. // This is quite inefficient, but allows the potential to be computed at arbitrary locations by // simply regenerating splines on demand in the probing stage. If this becomes too slow, it's // easy to write some logic to check whether gridPoints and coordinates are the same, and // handle that special case using spline caching machinery for efficiency. auto realGrid = spreadParameters(parameterAngMom, parameters, coordinates); auto gridAddress = forwardTransform(realGrid); convolveE(gridAddress); const auto potentialGrid = inverseTransform(gridAddress); auto fracPotential = potential.clone(); int nPotentialComponents = nCartesian(derivativeLevel); size_t nPoints = gridPoints.nRows(); for (size_t point = 0; point < nPoints; ++point) { auto bSplines = makeBSplines(gridPoints[point], derivativeLevel); auto splineA = std::get<0>(bSplines); auto splineB = std::get<1>(bSplines); auto splineC = std::get<2>(bSplines); probeGridImpl(potentialGrid, nPotentialComponents, splineA, splineB, splineC, fracPotential[point]); } potential += cartesianTransform(derivativeLevel, scaledRecVecs_, fracPotential); } /*! * \brief Runs a PME reciprocal space calculation, computing energies. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \return the reciprocal space energy. */ Real computeERec(int parameterAngMom, const RealMat &parameters, const RealMat &coordinates) { sanityChecks(parameterAngMom, parameters, coordinates); filterAtomsAndBuildSplineCache(parameterAngMom, coordinates); auto realGrid = spreadParameters(parameterAngMom, parameters); auto gridAddress = forwardTransform(realGrid); return convolveE(gridAddress); } /*! * \brief Runs a PME reciprocal space calculation, computing energies and forces.
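 *
 * A minimal usage sketch (hedged: it assumes a double-precision PMEInstance whose setup(...) mirrors the
 * common_init(...) argument list shown earlier, a nested LatticeType enum, and hypothetical charge,
 * coordinate, and force matrices supplied by the caller):
 * \code{.cpp}
 * PMEInstance<double> pme;
 * pme.setup(1, 0.3, 5, 32, 32, 32, 1.0, 0);  // rPower, kappa, splineOrder, dimA, dimB, dimC, scaleFactor, nThreads
 * pme.setLatticeVectors(20, 20, 20, 90, 90, 90, PMEInstance<double>::LatticeType::XAligned);
 * double eRec = pme.computeEFRec(0, charges, coords, forces);  // charges: nAtoms x 1; coords, forces: nAtoms x 3
 * \endcode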
* \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param energy pointer to the variable holding the energy; this is incremented, not assigned. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \return the reciprocal space energy. */ Real computeEFRec(int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces) { sanityChecks(parameterAngMom, parameters, coordinates); // Spline derivative level bumped by 1, for energy gradients. filterAtomsAndBuildSplineCache(parameterAngMom + 1, coordinates); auto realGrid = spreadParameters(parameterAngMom, parameters); auto gridAddress = forwardTransform(realGrid); Real energy = convolveE(gridAddress); const auto potentialGrid = inverseTransform(gridAddress); probeGrid(potentialGrid, parameterAngMom, parameters, forces); return energy; } /*! * \brief Runs a PME reciprocal space calculation, computing energies, forces and the virial. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param energy pointer to the variable holding the energy; this is incremented, not assigned. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ. * This vector is incremented, not assigned. * \return the reciprocal space energy. */ Real computeEFVRec(int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces, RealMat &virial) { sanityChecks(parameterAngMom, parameters, coordinates); // Spline derivative level bumped by 1, for energy gradients. filterAtomsAndBuildSplineCache(parameterAngMom + 1, coordinates); auto realGrid = spreadParameters(parameterAngMom, parameters); auto gridPtr = forwardTransform(realGrid); Real energy = convolveEV(gridPtr, virial); const auto potentialGrid = inverseTransform(gridPtr); probeGrid(potentialGrid, parameterAngMom, parameters, forces); return energy; } /*! 
* \brief Runs a full (direct and reciprocal space) PME calculation, computing the energy. The direct space * implementation here is not totally optimal, so this routine should primarily be used for testing and * debugging. * \param includedList dense list of included atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN,jN. * \param excludedList dense list of excluded atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param energy pointer to the variable holding the energy; this is incremented, not assigned. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \return the full PME energy. */ Real computeEAll(const Matrix<short> &includedList, const Matrix<short> &excludedList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates) { sanityChecks(parameterAngMom, parameters, coordinates); Real energy = computeERec(parameterAngMom, parameters, coordinates); energy += computeESlf(parameterAngMom, parameters); energy += computeEDir(includedList, parameterAngMom, parameters, coordinates); energy += computeEAdj(excludedList, parameterAngMom, parameters, coordinates); return energy; } /*! * \brief Runs a full (direct and reciprocal space) PME calculation, computing energies and forces. The direct * space implementation here is not totally optimal, so this routine should primarily be used for testing * and debugging. * \param includedList dense list of included atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. * \param excludedList dense list of excluded atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param energy pointer to the variable holding the energy; this is incremented, not assigned. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \return the full PME energy. 
*/ Real computeEFAll(const Matrix<short> &includedList, const Matrix<short> &excludedList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces) { sanityChecks(parameterAngMom, parameters, coordinates); Real energy = computeEFRec(parameterAngMom, parameters, coordinates, forces); energy += computeESlf(parameterAngMom, parameters); energy += computeEFDir(includedList, parameterAngMom, parameters, coordinates, forces); energy += computeEFAdj(excludedList, parameterAngMom, parameters, coordinates, forces); return energy; } /*! * \brief Runs a full (direct and reciprocal space) PME calculation, computing energies, forces and virials. * The direct space implementation here is not totally optimal, so this routine should primarily * be used for testing and debugging. * \param includedList dense list of included atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. * \param excludedList dense list of excluded atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param energy pointer to the variable holding the energy; this is incremented, not assigned. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ. * This vector is incremented, not assigned. * \return the full PME energy. */ Real computeEFVAll(const Matrix<short> &includedList, const Matrix<short> &excludedList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces, RealMat &virial) { sanityChecks(parameterAngMom, parameters, coordinates); Real energy = computeEFVRec(parameterAngMom, parameters, coordinates, forces, virial); energy += computeESlf(parameterAngMom, parameters); energy += computeEFVDir(includedList, parameterAngMom, parameters, coordinates, forces, virial); energy += computeEFVAdj(excludedList, parameterAngMom, parameters, coordinates, forces, virial); return energy; } /*! * \brief setup initializes this object for a PME calculation using only threading. * This may be called repeatedly without compromising performance. * \param rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive * dispersion). \param kappa the attenuation parameter in units inverse of those used to specify coordinates. * \param splineOrder the order of B-spline; must be at least (2 + max. multipole order + deriv. level needed). * \param dimA the dimension of the FFT grid along the A axis. * \param dimB the dimension of the FFT grid along the B axis. * \param dimC the dimension of the FFT grid along the C axis. 
* \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof (e.g. the * 1 / [4 pi epsilon0] for Coulomb calculations). * \param nThreads the maximum number of threads to use for each MPI instance; if set to 0 all available threads * are used. */ void setup(int rPower, Real kappa, int splineOrder, int dimA, int dimB, int dimC, Real scaleFactor, int nThreads) { numNodesHasChanged_ = numNodesA_ != 1 || numNodesB_ != 1 || numNodesC_ != 1; numNodesA_ = numNodesB_ = numNodesC_ = 1; rankA_ = rankB_ = rankC_ = 0; firstA_ = firstB_ = firstC_ = 0; dimA = findGridSize(dimA, {1}); dimB = findGridSize(dimB, {1}); dimC = findGridSize(dimC, {1}); lastA_ = dimA; lastB_ = dimB; lastC_ = dimC; myDimA_ = dimA; myDimB_ = dimB; myDimC_ = dimC; common_init(rPower, kappa, splineOrder, dimA, dimB, dimC, scaleFactor, nThreads); } /*! * \brief setup initializes this object for a PME calculation using MPI parallelism and threading. * This may be called repeatedly without compromising performance. * \param rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive * dispersion). \param kappa the attenuation parameter in units inverse of those used to specify coordinates. * \param splineOrder the order of B-spline; must be at least (2 + max. multipole order + deriv. level needed). * \param dimA the dimension of the FFT grid along the A axis. * \param dimB the dimension of the FFT grid along the B axis. * \param dimC the dimension of the FFT grid along the C axis. * \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof (e.g. the * 1 / [4 pi epsilon0] for Coulomb calculations). * \param nThreads the maximum number of threads to use for each MPI instance; if set to 0 all available threads * are used. \param communicator the MPI communicator for the reciprocal space calculation, which should already be * initialized. * \param numNodesA the number of nodes to be used for the A dimension. * \param numNodesB the number of nodes to be used for the B dimension. * \param numNodesC the number of nodes to be used for the C dimension.
*/ void setupParallel(int rPower, Real kappa, int splineOrder, int dimA, int dimB, int dimC, Real scaleFactor, int nThreads, const MPI_Comm &communicator, NodeOrder nodeOrder, int numNodesA, int numNodesB, int numNodesC) { numNodesHasChanged_ = numNodesA_ != numNodesA || numNodesB_ != numNodesB || numNodesC_ != numNodesC; #if HAVE_MPI == 1 mpiCommunicator_ = std::unique_ptr<MPIWrapper<Real>>(new MPIWrapper<Real>(communicator, numNodesA, numNodesB, numNodesC)); switch (nodeOrder) { case (NodeOrder::ZYX): rankA_ = mpiCommunicator_->myRank_ % numNodesA; rankB_ = (mpiCommunicator_->myRank_ % (numNodesB * numNodesA)) / numNodesA; rankC_ = mpiCommunicator_->myRank_ / (numNodesB * numNodesA); mpiCommunicatorA_ = mpiCommunicator_->split(rankC_ * numNodesB + rankB_, rankA_); mpiCommunicatorB_ = mpiCommunicator_->split(rankC_ * numNodesA + rankA_, rankB_); mpiCommunicatorC_ = mpiCommunicator_->split(rankB_ * numNodesA + rankA_, rankC_); break; default: throw std::runtime_error("Unknown NodeOrder in setupParallel."); } numNodesA_ = numNodesA; numNodesB_ = numNodesB; numNodesC_ = numNodesC; dimA = findGridSize(dimA, {numNodesA}); dimB = findGridSize(dimB, {numNodesB * numNodesC}); dimC = findGridSize(dimC, {numNodesA * numNodesC, numNodesB * numNodesC}); myDimA_ = dimA / numNodesA; myDimB_ = dimB / numNodesB; myDimC_ = dimC / numNodesC; firstA_ = rankA_ * myDimA_; firstB_ = rankB_ * myDimB_; firstC_ = rankC_ * myDimC_; lastA_ = rankA_ == numNodesA - 1 ? dimA : (rankA_ + 1) * myDimA_; lastB_ = rankB_ == numNodesB - 1 ? dimB : (rankB_ + 1) * myDimB_; lastC_ = rankC_ == numNodesC - 1 ? dimC : (rankC_ + 1) * myDimC_; common_init(rPower, kappa, splineOrder, dimA, dimB, dimC, scaleFactor, nThreads); #else // Have MPI throw std::runtime_error( "setupParallel called, but helpme was not compiled with MPI.
Make sure you compile with -DHAVE_MPI=1 " "in " "the list of compiler definitions."); #endif // Have MPI } }; } // Namespace helpme using PMEInstanceD = helpme::PMEInstance<double>; using PMEInstanceF = helpme::PMEInstance<float>; #else // C header #include <stddef.h> #if HAVE_MPI == 1 #include <mpi.h> #endif typedef enum { XAligned = 0, ShapeMatrix = 1 } LatticeType; typedef enum { ZYX = 0 } NodeOrder; typedef struct PMEInstance PMEInstance; extern struct PMEInstance *helpme_createD(); extern struct PMEInstance *helpme_createF(); extern void helpme_destroyD(struct PMEInstance *pme); extern void helpme_destroyF(struct PMEInstance *pme); extern void helpme_setupD(struct PMEInstance *pme, int rPower, double kappa, int splineOrder, int aDim, int bDim, int cDim, double scaleFactor, int nThreads); extern void helpme_setupF(struct PMEInstance *pme, int rPower, float kappa, int splineOrder, int aDim, int bDim, int cDim, float scaleFactor, int nThreads); #if HAVE_MPI == 1 extern void helpme_setup_parallelD(PMEInstance *pme, int rPower, double kappa, int splineOrder, int dimA, int dimB, int dimC, double scaleFactor, int nThreads, MPI_Comm communicator, NodeOrder nodeOrder, int numNodesA, int numNodesB, int numNodesC); extern void helpme_setup_parallelF(PMEInstance *pme, int rPower, float kappa, int splineOrder, int dimA, int dimB, int dimC, float scaleFactor, int nThreads, MPI_Comm communicator, NodeOrder nodeOrder, int numNodesA, int numNodesB, int numNodesC); #endif // HAVE_MPI extern void helpme_set_lattice_vectorsD(struct PMEInstance *pme, double A, double B, double C, double alpha, double beta, double gamma, LatticeType latticeType); extern void helpme_set_lattice_vectorsF(struct PMEInstance *pme, float A, float B, float C, float alpha, float beta, float gamma, LatticeType latticeType); extern double helpme_compute_E_recD(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, double *parameters, double *coordinates); extern float helpme_compute_E_recF(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, float *parameters, float *coordinates); extern double helpme_compute_EF_recD(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, double *parameters, double *coordinates, double *forces); extern float helpme_compute_EF_recF(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, float *parameters, float *coordinates, float *forces); extern double helpme_compute_EFV_recD(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, double *parameters, double *coordinates, double *forces, double *virial); extern float helpme_compute_EFV_recF(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, float *parameters, float *coordinates, float *forces, float *virial); extern void helpme_compute_P_recD(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, double *parameters, double *coordinates, size_t nGridPoints, double *gridPoints, int derivativeLevel, double *potential); extern void helpme_compute_P_recF(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, float *parameters, float *coordinates, size_t nGridPoints, float *gridPoints, int derivativeLevel, float *potential); #endif // C++/C #endif // Header guard
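/* --- Editorial sketch (not part of the library) --- A minimal driver for the C
 * interface declared above, assuming the declarations are installed as
 * "helpme.h" (the header name is an assumption) and that the caller's units are
 * self-consistent. The two-charge system and all numeric settings are
 * illustrative only. */
#include <stdio.h>
#include "helpme.h" /* assumed header name for the declarations above */

int main(void) {
    double charges[2] = {1.0, -1.0};                   /* nAtoms x 1 parameter matrix */
    double coords[6] = {0.0, 0.0, 0.0, 1.0, 0.0, 0.0}; /* {x1,y1,z1,x2,y2,z2} */
    PMEInstance *pme = helpme_createD();
    /* rPower=1 (Coulomb kernel), kappa=0.3, 5th-order splines, 32^3 grid,
     * unit scale factor, 1 thread. */
    helpme_setupD(pme, 1, 0.3, 5, 32, 32, 32, 1.0, 1);
    /* Cubic box of edge 20; angles in degrees. */
    helpme_set_lattice_vectorsD(pme, 20.0, 20.0, 20.0, 90.0, 90.0, 90.0, XAligned);
    double eRec = helpme_compute_E_recD(pme, 2, 0, charges, coords);
    printf("reciprocal space energy: %f\n", eRec);
    helpme_destroyD(pme);
    return 0;
}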
reduction-clauseModificado.c
#include <stdio.h> #include <stdlib.h> #ifdef _OPENMP #include <omp.h> #else #define omp_get_thread_num() 0 #endif int main(int argc, char **argv){ int i, n=20, a[20], suma=10; if(argc < 2){ fprintf(stderr,"Missing iteration count\n"); exit(-1); } n = atoi(argv[1]); if(n>20) { n=20; printf("n=%d\n",n); } for(i=0;i<n;i++) a[i] = i; #pragma omp parallel for reduction(+:suma) for(i=0;i<n;i++) suma += a[i]; printf("After 'parallel' suma=%d\n",suma); return 0; }
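/* --- Editorial sketch --- What the reduction(+:suma) clause above does under
 * the hood: each thread accumulates into a private copy initialized to the
 * identity of '+' (zero), and the private copies are combined into the shared
 * variable when the region ends. A hand-rolled equivalent for the same loop: */
#include <stdio.h>

int main(void) {
    int i, n = 20, a[20], suma = 10;
    for (i = 0; i < n; i++) a[i] = i;
    #pragma omp parallel shared(suma)
    {
        int local = 0;          /* private partial sum (identity of '+') */
        #pragma omp for
        for (i = 0; i < n; i++) /* loop variable is implicitly private */
            local += a[i];
        #pragma omp atomic
        suma += local;          /* one combine per thread */
    }
    printf("suma=%d\n", suma);  /* 10 + (0+1+...+19) = 200 */
    return 0;
}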
multibit_fmt_plug.c
/* * JtR format to crack password protected MultiBit Wallets. * * This software is Copyright (c) 2017, Dhiru Kholia <kholia at kth.se> and it * is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * All credit goes to Christopher Gurnee for making this work possible. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_multibit; #elif FMT_REGISTERS_H john_register_one(&fmt_multibit); #else #include <string.h> #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 2 #endif #endif #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "aes.h" #include "md5.h" #include "escrypt/crypto_scrypt.h" #include "jumbo.h" #include "memdbg.h" #include "unicode.h" #define FORMAT_NAME "MultiBit Wallet" #define FORMAT_LABEL "multibit" #define FORMAT_TAG "$multibit$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define ALGORITHM_NAME "MD5/scrypt AES 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1001 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN sizeof(uint32_t) #define PLAINTEXT_LENGTH 125 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 64 // just 4 is better for v2 salts static struct fmt_tests multibit_tests[] = { // Wallets created by MultiBit Classic 0.5.18 {"$multibit$1*0908a1bd44147709*c82b6d0409c1e46a4660ea6d4fa9ae12e4e234c98a71a51ced105c7e66a57ca3", "openwall"}, {"$multibit$1*2043ebb14b6d9670*24284a38a62b6a63fb0912ebc05aa9d26d6fd828134d20b9778d8d841f65f584", "openwall123"}, // MultiBit HD wallet 0.5.0 {"$multibit$2*081e3a1252c26731120d0d63783ae46f*8354d5b454e78fb15f81c9e6289ba9b8*081e3a1252c26731120d0d63783ae46f", "openwall"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked, cracked_count; static struct custom_salt { uint32_t type; unsigned char salt[16]; unsigned char block[32]; unsigned char iv[16]; unsigned char block2[16]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); if (omp_t > 1) { self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; } #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt); cracked_count = self->params.max_keys_per_crypt; } static void done(void) { MEM_FREE(cracked); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; int value, extra; if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += TAG_LENGTH; if ((p = strtokm(ctcopy, "*")) == NULL) // type goto err; if (!isdec(p)) goto err; value = atoi(p); if (value != 1 && value != 2) goto err; if (value == 1) { if ((p = strtokm(NULL, "*")) == NULL) // salt goto err; if (hexlenl(p, &extra) != 8 * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) // encrypted blocks goto err; if (hexlenl(p, &extra) != 32 * 2 || extra) goto err; } else if (value == 2) { if ((p = strtokm(NULL, "*")) == NULL) // iv goto err; if (hexlenl(p, &extra) != 16 * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) // encrypted block with iv goto err; if (hexlenl(p, &extra) != 16 * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) // encrypted block with hardcoded iv goto err; if 
(hexlenl(p, &extra) != 16 * 2 || extra) goto err; } MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { static struct custom_salt cs; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; memset(&cs, 0, SALT_SIZE); ctcopy += TAG_LENGTH; p = strtokm(ctcopy, "*"); cs.type = atoi(p); p = strtokm(NULL, "*"); if (cs.type == 1) { for (i = 0; i < 8; i++) cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; p = strtokm(NULL, "*"); for (i = 0; i < 32; i++) cs.block[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; } else if (cs.type == 2) { for (i = 0; i < 16; i++) cs.iv[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; p = strtokm(NULL, "*"); for (i = 0; i < 16; i++) cs.block[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; p = strtokm(NULL, "*"); for (i = 0; i < 16; i++) cs.block2[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; } MEM_FREE(keeptr); return &cs; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static void multibit_set_key(char *key, int index) { strnzcpy(saved_key[index], key, sizeof(*saved_key)); } static char *get_key(int index) { return saved_key[index]; } static int is_bitcoinj_protobuf_data(unsigned char *block) { unsigned char c; int i; // Does it look like a bitcoinj protobuf (newest Bitcoin for Android backup)? if (block[0] == '\x0a' && block[1] < 128 && !memcmp((const char*)block + 2, "org.", 4)) { // If it doesn't look like a lower alpha domain name of len >= 8 (e.g. 'bitcoin.'), fail (btcrecover) for (i = 6; i < 14; i++) { c = block[i]; if ((c > 'z') || ((c < 'a') && ((c != '.')))) return 0; } return 1; // success } return 0; } static int is_base58(unsigned char *buffer, int length) { unsigned char c; int i; for (i = 0; i < length; i++) { c = buffer[i]; if ((c > 'z') || (c < '1') || ((c > '9') && (c < 'A')) || ((c > 'Z') && (c < 'a'))) { return 0; } } return 1; // success } static const unsigned char *salt_hardcoded = (unsigned char*)"\x35\x51\x03\x80\x75\xa3\xb0\xc5"; static const unsigned char *iv_hardcoded = (unsigned char*)"\xa3\x44\x39\x1f\x53\x83\x11\xb3\x29\x54\x86\x16\xc4\x89\x72\x3e"; static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { unsigned char iv[16]; unsigned char key[32]; unsigned char outbuf[16]; AES_KEY aes_decrypt_key; int len = strlen(saved_key[index]); #ifdef _OPENMP if (cracked[index]) /* avoid false sharing of nearby elements */ #endif cracked[index] = 0; if (cur_salt->type == 1) { unsigned char c; MD5_CTX ctx; // key MD5_Init(&ctx); MD5_Update(&ctx, saved_key[index], len); MD5_Update(&ctx, cur_salt->salt, 8); MD5_Final(key, &ctx); // key + 16 MD5_Init(&ctx); MD5_Update(&ctx, key, 16); MD5_Update(&ctx, saved_key[index], len); MD5_Update(&ctx, cur_salt->salt, 8); MD5_Final(key + 16, &ctx); // iv MD5_Init(&ctx); MD5_Update(&ctx, key + 16, 16); MD5_Update(&ctx, saved_key[index], len); MD5_Update(&ctx, cur_salt->salt, 8); MD5_Final(iv, &ctx); AES_set_decrypt_key(key, 256, &aes_decrypt_key); AES_cbc_encrypt(cur_salt->block, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT); c = outbuf[0]; if (c == 'L' || c == 'K' || c == '5' || c == 'Q') { // Does it look like a base58 private key (MultiBit, MultiDoge, or oldest-format Android key backup)? 
(btcrecover) // check if bytes are in base58 set [1-9A-HJ-NP-Za-km-z] if (is_base58(outbuf + 1, 15)) { // decrypt second block AES_cbc_encrypt(cur_salt->block + 16, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT); if (is_base58(outbuf, 16)) cracked[index] = 1; } } else if (c == '#') { // Does it look like a KnC for Android key backup? if (memcmp((const char*)outbuf, "# KEEP YOUR PRIV", 8) == 0) // 8 should be enough cracked[index] = 1; } else if (c == '\x0a') { // Does it look like a bitcoinj protobuf (newest Bitcoin for Android backup)? (btcrecover)? if (is_bitcoinj_protobuf_data(outbuf)) cracked[index] = 1; } } else if (cur_salt->type == 2) { UTF16 password[PLAINTEXT_LENGTH * 2 + 1]; len = enc_to_utf16_be(password, PLAINTEXT_LENGTH, (const unsigned char*)saved_key[index], len + 1); if (len < 0) len = strlen16(password); crypto_scrypt((const unsigned char*)password, (len + 1) * 2, salt_hardcoded, 8, 16384, 8, 1, key, 32); // 1 AES_set_decrypt_key(key, 128 * 2, &aes_decrypt_key); memcpy(iv, cur_salt->iv, 16); AES_cbc_encrypt(cur_salt->block, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT); if (is_bitcoinj_protobuf_data(outbuf)) cracked[index] = 1; else { // 2 AES_set_decrypt_key(key, 128 * 2, &aes_decrypt_key); memcpy(iv, iv_hardcoded, 16); AES_cbc_encrypt(cur_salt->block2, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT); if (is_bitcoinj_protobuf_data(outbuf)) cracked[index] = 1; } } } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_multibit = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT, { NULL }, { FORMAT_TAG }, multibit_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, multibit_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
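/* --- Editorial sketch (not part of the plugin) --- The v1 "MultiBit Classic"
 * branch of crypt_all() above derives the AES-256 key and IV with three chained
 * MD5 digests, i.e. the classic OpenSSL EVP_BytesToKey construction. Standalone
 * restatement of that schedule; the function name is hypothetical, and OpenSSL's
 * MD5 stands in for the plugin's bundled md5.h (same interface): */
#include <stddef.h>
#include <openssl/md5.h>

static void multibit_v1_kdf(const unsigned char *pw, size_t pwlen,
                            const unsigned char salt[8],
                            unsigned char key[32], unsigned char iv[16]) {
    MD5_CTX ctx;
    MD5_Init(&ctx);              /* key[0..15]  = MD5(pw || salt) */
    MD5_Update(&ctx, pw, pwlen);
    MD5_Update(&ctx, salt, 8);
    MD5_Final(key, &ctx);
    MD5_Init(&ctx);              /* key[16..31] = MD5(key[0..15] || pw || salt) */
    MD5_Update(&ctx, key, 16);
    MD5_Update(&ctx, pw, pwlen);
    MD5_Update(&ctx, salt, 8);
    MD5_Final(key + 16, &ctx);
    MD5_Init(&ctx);              /* iv          = MD5(key[16..31] || pw || salt) */
    MD5_Update(&ctx, key + 16, 16);
    MD5_Update(&ctx, pw, pwlen);
    MD5_Update(&ctx, salt, 8);
    MD5_Final(iv, &ctx);
}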
mixed_tentusscher_myo_epi_2004_S2_9.c
// Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium) // (AP + max:dvdt) #include <stdio.h> #include "mixed_tentusscher_myo_epi_2004_S2_9.h" GET_CELL_MODEL_DATA(init_cell_model_data) { if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { static bool first_call = true; if(first_call) { print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n"); first_call = false; } // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } // Initial conditions for TenTusscher myocardium if (mapping[sv_id] == 0) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } // Initial conditions for TenTusscher epicardium else { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.5928834149027,0.00128330373181304,0.780307779232330,0.780055018733705,0.000174145408126877,0.485346186618098,0.00293516207326794,0.999998356983063,1.92561090443674e-08,1.88487092529666e-05,0.999772824420775,1.00713739870886,0.999995945796599,4.41779013989042e-05,0.492864370358447,10.0629845292030,139.540308692868}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = (uint32_t )i; for (int j = 0; j < num_steps; ++j) { if (mapping[i] == 0) solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]); else solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_myo(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = 
sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Myocardium cell real Gks=0.062; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Myocardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* 
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; // [!] 
Myocardium cell R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; } void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_epi(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] 
Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={14.2678658188600,0.000185186617039757,0.000132124407111086,0.000515886761168309,0.250188569257203,0.153314251022838,0.158501989253313,4.69616330756314,0.0144678840085242,1.89285514296658,1089.26406046390,0.000356678402399680,0.279508931563235,0.0134701701310225,0.00380118343938842,3.03411014370249e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total 
current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) 
sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
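/* --- Editorial sketch --- Both RHS_cpu_myo() and RHS_cpu_epi() above advance
 * the Hodgkin-Huxley-style gates with the Rush-Larsen scheme: over one step the
 * gate ODE dy/dt = (y_inf - y)/tau is treated as linear with frozen y_inf and
 * tau, and its exact solution replaces a forward-Euler update (this is what the
 * rDY_[1..10] lines compute; exp(-dt/tau) is precomputed for the constant-tau
 * fca and g gates). The helper name below is illustrative: */
#include <math.h>

static double rush_larsen_step(double y, double y_inf, double tau, double dt) {
    /* Exact solution of dy/dt = (y_inf - y)/tau over [t, t+dt];
     * unconditionally stable for any dt > 0. */
    return y_inf - (y_inf - y) * exp(-dt / tau);
}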
broadcast_reduce_customized-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015-2017 by Contributors * \file broadcast_reduce_customized-inl.h * \brief CPU-specific Function definition of broadcast and reduce operators */ #ifndef MXNET_OPERATOR_NUMPY_LINALG_BROADCAST_REDUCE_CUSTOMIZED_INL_H_ #define MXNET_OPERATOR_NUMPY_LINALG_BROADCAST_REDUCE_CUSTOMIZED_INL_H_ #include "../../tensor/broadcast_reduce-inl.h" namespace mxnet { namespace op { namespace broadcast { using namespace mshadow; using mxnet_op::dot; using mxnet_op::ravel; using mxnet_op::unravel; using mxnet_op::unravel_dot; template <typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP> MSHADOW_XINLINE void seq_reduce_assign_wr(const index_t idx, const size_t M, const bool addto, const DType* __restrict big, OType* small, const Shape<ndim>& bshape, const Shape<ndim>& sshape, const Shape<ndim>& rshape, const Shape<ndim>& rstride, Reducer* reducer) { Shape<ndim> coord = unravel(idx, sshape); index_t j = ravel(coord, bshape); AType val, residual; reducer->SetInitValue(val, residual); for (size_t k = 0; k < M; ++k) { coord = unravel(k, rshape); reducer->Reduce(val, AType(OP::Map(big[j + dot(coord, rstride)])), residual); } reducer->Finalize(val, residual); assign(&small[idx], addto, OType(val)); } template <typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP> void seq_reduce_compute_wr(const size_t N, const size_t M, const bool addto, const DType* big, OType* small, const Shape<ndim> bshape, const Shape<ndim> sshape, const Shape<ndim> rshape, const Shape<ndim> rstride, Reducer* reducer) { #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { seq_reduce_assign_wr<Reducer, ndim, AType, DType, OType, OP>( idx, M, addto, big, small, bshape, sshape, rshape, rstride, reducer); } } template <typename Reducer, int ndim, typename DType, typename OP, bool safe_acc = false> void ReduceWithReducer(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big, Reducer* reducer) { if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(), M = rshape.Size(); if (!safe_acc) { seq_reduce_compute_wr<Reducer, ndim, DType, DType, DType, OP>(N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, reducer); } else { MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, { typedef typename std::conditional<safe_acc, AType, DataType>::type AccType; MSHADOW_TYPE_SWITCH_WITH_BOOL(small.type_flag_, OType, { typedef typename 
std::conditional<safe_acc, OType, DataType>::type OutType; seq_reduce_compute_wr<Reducer, ndim, AccType, DataType, OutType, OP>( N, M, req == kAddTo, big.dptr<DataType>(), small.dptr<OutType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, reducer); }); }); } } template <typename Reducer, int ndim, typename DType, typename OP1, typename OP2> MSHADOW_XINLINE void seq_reduce_assign_wr(const index_t idx, const size_t M, const bool addto, const DType* __restrict big, const DType* __restrict lhs, const DType* __restrict rhs, DType* small, const Shape<ndim>& big_shape, const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0, const Shape<ndim>& small_shape, const Shape<ndim>& rshape, const Shape<ndim>& lhs_shape, const Shape<ndim>& rhs_shape, const Shape<ndim>& rstride, const Shape<ndim>& lhs_stride, const Shape<ndim>& rhs_stride, Reducer* reducer) { Shape<ndim> coord = unravel(idx, small_shape); const index_t idx_big0 = ravel(coord, big_shape); const index_t idx_lhs0 = ravel(coord, lhs_shape0); const index_t idx_rhs0 = ravel(coord, rhs_shape0); DType val, residual; reducer->SetInitValue(val, residual); for (size_t k = 0; k < M; ++k) { Shape<ndim> coord_big = unravel(k, rshape); index_t idx_big = idx_big0 + dot(coord_big, rstride); Shape<ndim> coord_lhs = unravel(k, lhs_shape); index_t idx_lhs = idx_lhs0 + dot(coord_lhs, lhs_stride); Shape<ndim> coord_rhs = unravel(k, rhs_shape); index_t idx_rhs = idx_rhs0 + dot(coord_rhs, rhs_stride); reducer->Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])), residual); } reducer->Finalize(val, residual); assign(&small[idx], addto, val); } template <typename Reducer, int ndim, typename DType, typename OP1, typename OP2> void seq_reduce_compute_wr(const size_t N, const size_t M, const bool addto, const DType* big, const DType* lhs, const DType* rhs, DType* small, const Shape<ndim> big_shape, const Shape<ndim> small_shape, const Shape<ndim> rshape, const Shape<ndim> rstride, const Shape<ndim> lhs_shape, const Shape<ndim> lhs_stride, const Shape<ndim> rhs_shape, const Shape<ndim> rhs_stride, const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0, Reducer* reducer) { #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { seq_reduce_assign_wr<Reducer, ndim, DType, OP1, OP2>(idx, M, addto, big, lhs, rhs, small, big_shape, lhs_shape0, rhs_shape0, small_shape, rshape, lhs_shape, rhs_shape, rstride, lhs_stride, rhs_stride, reducer); } } template <typename Reducer, int ndim, typename DType, typename OP1, typename OP2> void ReduceWithReducer(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big, const TBlob& lhs, const TBlob& rhs, Reducer* reducer) { if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(); size_t M = rshape.Size(); Shape<ndim> lhs_shape, lhs_stride; diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride); Shape<ndim> rhs_shape, rhs_stride; diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride); seq_reduce_compute_wr<Reducer, ndim, DType, OP1, OP2>(N, M, req == kAddTo, big.dptr<DType>(), lhs.dptr<DType>(), rhs.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, lhs_shape, lhs_stride, rhs_shape, rhs_stride, lhs.shape_.get<ndim>(), 
rhs.shape_.get<ndim>(), reducer); } } // namespace broadcast } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_NUMPY_LINALG_BROADCAST_REDUCE_CUSTOMIZED_INL_H_
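/* --- Editorial sketch (plain C, not the mshadow API) --- The kernels above
 * locate elements with unravel/ravel/dot: a flat index into the small (output)
 * tensor is unraveled into coordinates, re-raveled against the big (input)
 * shape, and each of the M reduced elements is then reached by adding
 * dot(coord, stride). Row-major restatement of the two index maps, with
 * broadcast axes of extent 1 contributing nothing, mirroring ravel() on bshape: */
#include <stddef.h>

static void unravel_c(size_t idx, const size_t *shape, int ndim, size_t *coord) {
    /* Flat index -> per-axis coordinates (row-major). */
    for (int i = ndim - 1; i >= 0; --i) {
        coord[i] = idx % shape[i];
        idx /= shape[i];
    }
}

static size_t ravel_c(const size_t *coord, const size_t *shape, int ndim) {
    /* Coordinates -> flat index; size-1 (broadcast) axes are skipped. */
    size_t idx = 0;
    for (int i = 0; i < ndim; ++i)
        idx = idx * shape[i] + (shape[i] == 1 ? 0 : coord[i]);
    return idx;
}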
critical-unrelated.c
/* * critical-unrelated.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run-race | FileCheck %s // REQUIRES: tsan #include <omp.h> #include <stdio.h> int main(int argc, char *argv[]) { int var = 0; #pragma omp parallel num_threads(2) shared(var) { #pragma omp critical { // Dummy region. } var++; } fprintf(stderr, "DONE\n"); } // CHECK: WARNING: ThreadSanitizer: data race // CHECK-NEXT: {{(Write|Read)}} of size 4 // CHECK-NEXT: #0 {{.*}}critical-unrelated.c:29 // CHECK: Previous write of size 4 // CHECK-NEXT: #0 {{.*}}critical-unrelated.c:29 // CHECK: DONE // CHECK: ThreadSanitizer: reported 1 warnings
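/* --- Editorial sketch --- The race reported above exists because var++ sits
 * outside the critical construct, so the two threads' read-modify-write cycles
 * can interleave; the empty critical region synchronizes nothing relevant.
 * One race-free variant (moving var++ inside the critical region works too): */
#include <stdio.h>

int main(void) {
    int var = 0;
    #pragma omp parallel num_threads(2) shared(var)
    {
        #pragma omp atomic
        var++;
    }
    printf("var=%d\n", var); /* always 2 */
    return 0;
}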
bfs_replicated.c
/* Copyright (C) 2010 The Trustees of Indiana University. */ /* */ /* Use, modification and distribution is subject to the Boost Software */ /* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */ /* http://www.boost.org/LICENSE_1_0.txt) */ /* */ /* Authors: Jeremiah Willcock */ /* Andrew Lumsdaine */ #define _GNU_SOURCE #include "common.h" #include "oned_csr.h" #include "onesided.h" #include <mpi.h> #include <stdint.h> #include <inttypes.h> #include <stdlib.h> #include <stddef.h> #include <string.h> #include <limits.h> #include <assert.h> static oned_csr_graph g; static unsigned long* g_in_queue; static unsigned long* g_in_queue_summary; static unsigned long* g_out_queue; static unsigned long* g_out_queue_summary; static unsigned long* g_visited; const int64_t ulong_bits = sizeof(unsigned long) * CHAR_BIT; const int64_t ulong_bits_squared = sizeof(unsigned long) * sizeof(unsigned long) * CHAR_BIT * CHAR_BIT; static void allocate_memory(void) { int64_t maxlocalverts = g.max_nlocalverts; int64_t local_queue_summary_size = (maxlocalverts + ulong_bits_squared - 1) / ulong_bits_squared; int64_t local_queue_size = local_queue_summary_size * ulong_bits; int64_t global_queue_summary_size = MUL_SIZE(local_queue_summary_size); int64_t global_queue_size = MUL_SIZE(local_queue_size); g_in_queue = (unsigned long*)xmalloc(global_queue_size * sizeof(unsigned long)); g_in_queue_summary = (unsigned long*)xmalloc(global_queue_summary_size * sizeof(unsigned long)); g_out_queue = (unsigned long*)xmalloc(local_queue_size * sizeof(unsigned long)); g_out_queue_summary = (unsigned long*)xmalloc(local_queue_summary_size * sizeof(unsigned long)); g_visited = (unsigned long*)xmalloc(local_queue_size * sizeof(unsigned long)); } static void deallocate_memory(void) { free(g_in_queue); g_in_queue = NULL; free(g_in_queue_summary); g_in_queue_summary = NULL; free(g_out_queue); g_out_queue = NULL; free(g_out_queue_summary); g_out_queue_summary = NULL; free(g_visited); g_visited = NULL; } void make_graph_data_structure(const tuple_graph* const tg) { convert_graph_to_oned_csr(tg, &g); allocate_memory(); /* Make sure all of the space is available */ deallocate_memory(); } void free_graph_data_structure(void) { free_oned_csr_graph(&g); /* deallocate_memory(); */ } int bfs_writes_depth_map(void) {return 1;} /* This version is the traditional level-synchronized BFS using two queues. A * bitmap is used to indicate which vertices have been visited. Messages are * sent and processed asynchronously throughout the code to hopefully overlap * communication with computation. */ void run_bfs(int64_t root, int64_t* pred) { allocate_memory(); const ptrdiff_t nlocalverts = g.nlocalverts; const size_t* const restrict rowstarts = g.rowstarts; const int64_t* const restrict column = g.column; int64_t maxlocalverts = g.max_nlocalverts; /* Set up the visited bitmap. 
*/ const int ulong_bits = sizeof(unsigned long) * CHAR_BIT; const int ulong_bits_squared = ulong_bits * ulong_bits; int64_t local_queue_summary_size = (maxlocalverts + ulong_bits_squared - 1) / ulong_bits_squared; int64_t local_queue_size = local_queue_summary_size * ulong_bits; int lg_local_queue_size = lg_int64_t(local_queue_size); int64_t global_queue_summary_size = MUL_SIZE(local_queue_summary_size); int64_t global_queue_size = MUL_SIZE(local_queue_size); #define SWIZZLE_VERTEX(c) ((VERTEX_OWNER(c) << lg_local_queue_size) * ulong_bits | VERTEX_LOCAL(c)) #if 0 int64_t* restrict column_swizzled = (int64_t*)xmalloc(nlocaledges * sizeof(int64_t)); { size_t i; for (i = 0; i < nlocaledges; ++i) { int64_t c = column[i]; column_swizzled[i] = SWIZZLE_VERTEX(c); } } #endif unsigned long* restrict in_queue = g_in_queue; memset(in_queue, 0, global_queue_size * sizeof(unsigned long)); unsigned long* restrict in_queue_summary = g_in_queue_summary; memset(in_queue_summary, 0, global_queue_summary_size * sizeof(unsigned long)); unsigned long* restrict out_queue = g_out_queue; unsigned long* restrict out_queue_summary = g_out_queue_summary; unsigned long* restrict visited = g_visited; memset(visited, 0, local_queue_size * sizeof(unsigned long)); #define SET_IN(v) do {int64_t vs = SWIZZLE_VERTEX(v); size_t word_idx = vs / ulong_bits; int bit_idx = vs % ulong_bits; unsigned long mask = (1UL << bit_idx); in_queue_summary[word_idx / ulong_bits] |= (1UL << (word_idx % ulong_bits)); in_queue[word_idx] |= mask;} while (0) #define TEST_IN(vs) (((in_queue_summary[vs / ulong_bits / ulong_bits] & (1UL << ((vs / ulong_bits) % ulong_bits))) != 0) && ((in_queue[vs / ulong_bits] & (1UL << (vs % ulong_bits))) != 0)) #define TEST_VISITED_LOCAL(v) ((visited[(v) / ulong_bits] & (1UL << ((v) % ulong_bits))) != 0) // #define SET_VISITED_LOCAL(v) do {size_t word_idx = (v) / ulong_bits; int bit_idx = (v) % ulong_bits; unsigned long mask = (1UL << bit_idx); __sync_fetch_and_or(&visited[word_idx], mask); __sync_fetch_and_or(&out_queue[word_idx], mask);} while (0) #define SET_VISITED_LOCAL(v) do {size_t word_idx = (v) / ulong_bits; int bit_idx = (v) % ulong_bits; unsigned long mask = (1UL << bit_idx); visited[word_idx] |= mask; out_queue[word_idx] |= mask;} while (0) SET_IN(root); {ptrdiff_t i; _Pragma("omp parallel for schedule(static)") for (i = 0; i < nlocalverts; ++i) pred[i] = -1;} if (VERTEX_OWNER(root) == rank) { pred[VERTEX_LOCAL(root)] = root; SET_VISITED_LOCAL(VERTEX_LOCAL(root)); } uint16_t cur_level = 0; while (1) { ++cur_level; #if 0 if (rank == 0) fprintf(stderr, "BFS level %" PRIu16 "\n", cur_level); #endif memset(out_queue, 0, local_queue_size * sizeof(unsigned long)); // memset(out_queue_summary, 0, local_queue_summary_size * sizeof(unsigned long)); ptrdiff_t i, ii; #if 0 #pragma omp parallel for schedule(static) for (i = 0; i < global_queue_summary_size; ++i) { unsigned long val = 0UL; int j; unsigned long mask = 1UL; for (j = 0; j < ulong_bits; ++j, mask <<= 1) { if (in_queue[i * ulong_bits + j]) val |= mask; } in_queue_summary[i] = val; } #endif unsigned long not_done = 0; #pragma omp parallel for schedule(static) reduction(|:not_done) for (ii = 0; ii < nlocalverts; ii += ulong_bits) { size_t i, i_end = ii + ulong_bits; if (i_end > nlocalverts) i_end = nlocalverts; for (i = ii; i < i_end; ++i) { if (!TEST_VISITED_LOCAL(i)) { size_t j, j_end = rowstarts[i + 1]; for (j = rowstarts[i]; j < j_end; ++j) { int64_t v1 = column[j]; int64_t v1_swizzled = SWIZZLE_VERTEX(v1); if (TEST_IN(v1_swizzled)) { pred[i] = (v1 & 
INT64_C(0xFFFFFFFFFFFF)) | ((int64_t)cur_level << 48); not_done |= 1; SET_VISITED_LOCAL(i); break; } } } } } #if 1 #pragma omp parallel for schedule(static) for (i = 0; i < local_queue_summary_size; ++i) { unsigned long val = 0UL; int j; unsigned long mask = 1UL; for (j = 0; j < ulong_bits; ++j, mask <<= 1) { unsigned long full_val = out_queue[i * ulong_bits + j]; visited[i * ulong_bits + j] |= full_val; if (full_val) val |= mask; } out_queue_summary[i] = val; // not_done |= val; } #endif MPI_Allreduce(MPI_IN_PLACE, &not_done, 1, MPI_UNSIGNED_LONG, MPI_BOR, MPI_COMM_WORLD); if (not_done == 0) break; MPI_Allgather(out_queue, local_queue_size, MPI_UNSIGNED_LONG, in_queue, local_queue_size, MPI_UNSIGNED_LONG, MPI_COMM_WORLD); MPI_Allgather(out_queue_summary, local_queue_summary_size, MPI_UNSIGNED_LONG, in_queue_summary, local_queue_summary_size, MPI_UNSIGNED_LONG, MPI_COMM_WORLD); } deallocate_memory(); } void get_vertex_distribution_for_pred(size_t count, const int64_t* vertex_p, int* owner_p, size_t* local_p) { const int64_t* restrict vertex = vertex_p; int* restrict owner = owner_p; size_t* restrict local = local_p; ptrdiff_t i; #pragma omp parallel for for (i = 0; i < (ptrdiff_t)count; ++i) { int64_t v = vertex[i]; owner[i] = VERTEX_OWNER(v); local[i] = VERTEX_LOCAL(v); } } int64_t vertex_to_global_for_pred(int v_rank, size_t v_local) { return VERTEX_TO_GLOBAL(v_rank, v_local); } size_t get_nlocalverts_for_pred(void) { return g.nlocalverts; }
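/* A self-contained sketch of the two-level bitmap idiom behind SET_IN/TEST_IN
 * above (function names are illustrative, not from bfs_replicated.c): one
 * summary bit covers a block of ulong_bits queue words, so an all-zero block
 * is rejected with a single word test before touching the fine-grained bits. */
#include <limits.h>
#include <stddef.h>
#include <stdio.h>

#define UB (sizeof(unsigned long) * CHAR_BIT)

static void bitmap2_set(unsigned long *bits, unsigned long *summary, size_t v) {
  size_t word = v / UB;
  bits[word] |= 1UL << (v % UB);               /* fine-grained bit */
  summary[word / UB] |= 1UL << (word % UB);    /* mark the word's block */
}

static int bitmap2_test(const unsigned long *bits, const unsigned long *summary,
                        size_t v) {
  size_t word = v / UB;
  if (!(summary[word / UB] & (1UL << (word % UB))))
    return 0; /* summary says the whole UB-word block is empty */
  return (bits[word] & (1UL << (v % UB))) != 0;
}

int main(void) {
  unsigned long bits[UB] = {0};   /* UB*UB bits: one summary word's coverage */
  unsigned long summary[1] = {0};
  bitmap2_set(bits, summary, 100);
  printf("%d %d\n", bitmap2_test(bits, summary, 100),
         bitmap2_test(bits, summary, 7)); /* prints: 1 0 */
  return 0;
}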
GB_unop__cimag_fp32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__cimag_fp32_fc32) // op(A') function: GB (_unop_tran__cimag_fp32_fc32) // C type: float // A type: GxB_FC32_t // cast: GxB_FC32_t cij = (aij) // unaryop: cij = cimagf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = cimagf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = (aij) ; \ Cx [pC] = cimagf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_CIMAG || GxB_NO_FP32 || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__cimag_fp32_fc32) ( float *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = cimagf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = cimagf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__cimag_fp32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
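/* A small standalone sketch of what the generated apply kernel above computes
 * (independent of GraphBLAS; plain C99 float complex stands in for
 * GxB_FC32_t): Cx[p] = cimagf(Ax[p]) for every entry, with an optional
 * bitmap Ab marking which entries are present. */
#include <complex.h>
#include <stdio.h>

static void apply_cimag(float *Cx, const float complex *Ax,
                        const signed char *Ab, long anz) {
  for (long p = 0; p < anz; p++) {
    if (Ab != NULL && !Ab[p]) continue; /* bitmap case: skip absent entries */
    Cx[p] = cimagf(Ax[p]);
  }
}

int main(void) {
  float complex A[3] = {1.0f + 2.0f * I, 3.0f - 4.0f * I, 5.0f * I};
  float C[3];
  apply_cimag(C, A, NULL, 3); /* full (non-bitmap) case */
  printf("%g %g %g\n", C[0], C[1], C[2]); /* prints: 2 -4 5 */
  return 0;
}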
GB_unop__lnot_int64_int64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__lnot_int64_int64 // op(A') function: GB_unop_tran__lnot_int64_int64 // C type: int64_t // A type: int64_t // cast: int64_t cij = aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int64_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CAST(z, aij) \ int64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int64_t z = aij ; \ Cx [pC] = !(z != 0) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__lnot_int64_int64 ( int64_t *Cx, // Cx and Ax may be aliased const int64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; int64_t z = aij ; Cx [p] = !(z != 0) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__lnot_int64_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
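/* A sketch of the GB_DISABLE idiom used by the generated kernels above
 * (generic pattern only, not the GraphBLAS API): a hard-coded kernel can be
 * compiled out, in which case it reports "no value" and the caller falls
 * back to a generic implementation. */
#include <stdio.h>

#define KERNEL_DISABLED 0 /* flip to 1 to compile the fast path out */

enum info { OK = 0, NO_VALUE = 1 };

static enum info fast_lnot(long *Cx, const long *Ax, long anz) {
#if KERNEL_DISABLED
  (void)Cx; (void)Ax; (void)anz;
  return NO_VALUE; /* caller must use the generic path */
#else
  for (long p = 0; p < anz; p++) Cx[p] = !(Ax[p] != 0); /* hard-coded lnot */
  return OK;
#endif
}

static void generic_lnot(long *Cx, const long *Ax, long anz) {
  for (long p = 0; p < anz; p++) Cx[p] = !(Ax[p] != 0);
}

int main(void) {
  long A[3] = {0, 7, -2}, C[3];
  if (fast_lnot(C, A, 3) == NO_VALUE) generic_lnot(C, A, 3);
  printf("%ld %ld %ld\n", C[0], C[1], C[2]); /* prints: 1 0 0 */
  return 0;
}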
gmapper.h
/* * This file should contain extern declarations for global variables in gmapper. * This is the only file that should be included by other modules (seeds, mapping, etc). */ #ifndef _GMAPPER_H #define _GMAPPER_H #ifdef __cplusplus //extern "C" { #endif #include "../gmapper/gmapper-definitions.h" #include "../common/debug.h" #include "../common/util.h" #include "../common/time_counter.h" #include "../common/gen-st.h" #undef EXTERN #undef STATIC #ifdef _MODULE_GMAPPER #include "../gmapper/gmapper-defaults.h" #define EXTERN(_type, _id, _init_val) _type _id = _init_val #define STATIC(_type, _id, _init_val) static _type _id = _init_val #else #define EXTERN(_type, _id, _init_val) extern _type _id #define STATIC(_type, _id, _init_val) #endif /* shrimp mode */ EXTERN(shrimp_mode_t, shrimp_mode, DEF_SHRIMP_MODE); EXTERN(shrimp_args_t, shrimp_args, {}); /* thread control */ EXTERN(int, num_threads, DEF_NUM_THREADS); EXTERN(int, chunk_size, DEF_CHUNK_SIZE); EXTERN(int, not_used, 0); /* parameters */ EXTERN(struct read_mapping_options_t *, unpaired_mapping_options[2], {}); EXTERN(int, n_unpaired_mapping_options[2], {}); EXTERN(struct readpair_mapping_options_t *, paired_mapping_options, NULL); EXTERN(int, n_paired_mapping_options, 0); EXTERN(int, mode_mirna, false); EXTERN(double, window_len, DEF_WINDOW_LEN); EXTERN(double, window_overlap, DEF_WINDOW_OVERLAP); EXTERN(int, match_mode, 0); EXTERN(int, num_outputs, DEF_NUM_OUTPUTS); EXTERN(int, max_alignments, DEF_MAX_ALIGNMENTS); EXTERN(int, num_tmp_outputs, 20 + DEF_NUM_OUTPUTS); EXTERN(int, anchor_width, DEF_ANCHOR_WIDTH); EXTERN(int, indel_taboo_len, DEF_INDEL_TABOO_LEN); EXTERN(uint32_t, list_cutoff, DEF_LIST_CUTOFF); EXTERN(bool, gapless_sw, DEF_GAPLESS_SW); EXTERN(bool, hash_filter_calls, DEF_HASH_FILTER_CALLS); EXTERN(int, longest_read_len, DEF_LONGEST_READ_LENGTH); EXTERN(bool, trim, false); EXTERN(int, trim_front, 0); EXTERN(int, trim_end, 0); EXTERN(bool, trim_first, true); EXTERN(bool, trim_second, true); EXTERN(bool, trim_illumina, false); EXTERN(char *, save_file, NULL); EXTERN(char *, load_file, NULL); EXTERN(char *, save_mmap, NULL); EXTERN(char *, load_mmap, NULL); EXTERN(unsigned int, progress, DEF_PROGRESS); EXTERN(bool, compute_mapping_qualities, true); EXTERN(bool, no_qv_check, false); //EXTERN(int, score_difference_mq_cutoff, 0); EXTERN(bool, all_contigs, false); EXTERN(bool, use_sanger_qvs, true); EXTERN(int, qual_vector_offset, 0); EXTERN(int, qual_delta, 33); EXTERN(int, min_avg_qv, 10); /* Flags */ EXTERN(bool, strata_flag, false); /* get only top scoring hits */ EXTERN(bool, Cflag, false); /* do complement only */ EXTERN(bool, Fflag, false); /* do positive (forward) only */ EXTERN(bool, Hflag, false); /* use hash table, not lookup */ EXTERN(bool, Pflag, false); /* pretty print results */ EXTERN(bool, Rflag, false); /* add read sequence to output*/ EXTERN(bool, Tflag, true); /* reverse sw full tie breaks */ EXTERN(bool, Dflag, false); /* print statistics for each thread */ EXTERN(bool, Eflag, true); /* output sam format */ EXTERN(bool, Xflag, false); /* print insert histogram */ EXTERN(bool, Yflag, false); /* print genome projection histogram */ EXTERN(bool, Vflag, true); /* automatic genome index trimming */ EXTERN(bool, Qflag, true); /* use fastq reads */ EXTERN(bool, Gflag, true); /* global alignment flag ! */ EXTERN(bool, Bflag, false); /* be like bfast - cs only! 
*/ EXTERN(bool, SQFflag, false); /* discard low quality kmers */ EXTERN(bool, extra_sam_fields, false); EXTERN(bool, single_best_mapping, false); EXTERN(bool, improper_mappings, true); EXTERN(bool, autodetect_input, true); EXTERN(bool, ignore_qvs, false); /* if input is fastq, ignore qvs in analysis */ //EXTERN(bool, hack, false); /* Scores */ EXTERN(int, match_score, DEF_LS_MATCH_SCORE); EXTERN(int, mismatch_score, DEF_LS_MISMATCH_SCORE); EXTERN(int, a_gap_open_score, DEF_LS_A_GAP_OPEN); EXTERN(int, a_gap_extend_score, DEF_LS_A_GAP_EXTEND); EXTERN(int, b_gap_open_score, DEF_LS_B_GAP_OPEN); EXTERN(int, b_gap_extend_score, DEF_LS_B_GAP_EXTEND); EXTERN(int, crossover_score, DEF_CS_XOVER_SCORE); EXTERN(double, score_alpha, 0.0); EXTERN(double, score_beta, 0.0); EXTERN(double, pr_mismatch, 0.0); EXTERN(double, pr_xover, 0.0); EXTERN(double, pr_del_open, 0.0); EXTERN(double, pr_del_extend, 0.0); EXTERN(double, pr_ins_open, 0.0); EXTERN(double, pr_ins_extend, 0.0); EXTERN(double, window_gen_threshold, DEF_WINDOW_GEN_THRESHOLD); EXTERN(double, sw_vect_threshold, DEF_SW_VECT_THRESHOLD); EXTERN(double, sw_full_threshold, DEF_SW_FULL_THRESHOLD); /* shrimp parameter/option parsing */ STATIC(struct option const, standard_options[], DEF_STANDARD_OPTIONS); STATIC(struct option const, colour_space_options[], DEF_COLOUR_SPACE_OPTIONS); STATIC(struct option const, letter_space_options[], DEF_LETTER_SPACE_OPTIONS); STATIC(size_t const, standard_entries, sizeof(standard_options)/sizeof(struct option)); STATIC(size_t const, letter_entries, sizeof(letter_space_options)/sizeof(struct option)); STATIC(size_t const, colour_entries, sizeof(colour_space_options)/sizeof(struct option)); /* pairing mode */ EXTERN(int, pair_mode, DEF_PAIR_MODE); EXTERN(int, min_insert_size, DEF_MIN_INSERT_SIZE); EXTERN(int, max_insert_size, DEF_MAX_INSERT_SIZE); EXTERN(double, insert_size_mean, DEF_INSERT_SIZE_MEAN); EXTERN(double, insert_size_stddev, DEF_INSERT_SIZE_STDDEV); EXTERN(llint, insert_histogram[100], {}); EXTERN(int, insert_histogram_bucket_size, 1); EXTERN(int, insert_histogram_load, 100); EXTERN(char *, reads_filename, NULL); EXTERN(char *, left_reads_filename, NULL); EXTERN(char *, right_reads_filename, NULL); EXTERN(bool, single_reads_file, true); STATIC(char const * const, pair_mode_string[5], DEF_PAIR_MODE_STRING); EXTERN(bool, pair_reverse[5][2], DEF_PAIR_REVERSE); /* seed management */ EXTERN(int, n_seeds, 0); EXTERN(struct seed_type *, seed, NULL); EXTERN(uint32_t * *, seed_hash_mask, NULL); EXTERN(int, max_seed_span, 0); EXTERN(int, min_seed_span, MAX_SEED_SPAN); EXTERN(int, avg_seed_span, 0); /* Thread output buffer */ EXTERN(char **, thread_output_buffer, NULL); EXTERN(size_t *, thread_output_buffer_sizes, NULL); EXTERN(char **, thread_output_buffer_filled, NULL); EXTERN(unsigned int *, thread_output_buffer_chunk, NULL); EXTERN(size_t, thread_output_buffer_initial, DEF_THREAD_OUTPUT_BUFFER_INITIAL); EXTERN(size_t, thread_output_buffer_increment, DEF_THREAD_OUTPUT_BUFFER_INCREMENT); EXTERN(size_t, thread_output_buffer_safety, DEF_THREAD_OUTPUT_BUFFER_SAFETY); EXTERN(unsigned int, thread_output_heap_capacity, DEF_THREAD_OUTPUT_HEAP_CAPACITY); /* SAM stuff */ EXTERN(FILE *, unaligned_reads_file, NULL); EXTERN(FILE *, aligned_reads_file, NULL); EXTERN(bool, sam_unaligned, false); EXTERN(bool, half_paired, true); //output reads in paired mode that only have one mapping EXTERN(bool, sam_r2, false); EXTERN(char *, sam_header_filename, NULL); EXTERN(char *, sam_read_group_name, NULL); EXTERN(char *, sam_sample_name, 
NULL); EXTERN(FILE *, sam_header_hd, NULL); EXTERN(FILE *, sam_header_sq, NULL); EXTERN(FILE *, sam_header_rg, NULL); EXTERN(FILE *, sam_header_pg, NULL); /* Statistics */ EXTERN(llint, nreads, 0); EXTERN(llint, nreads_mod, 0); EXTERN(llint, total_reads_matched, 0); EXTERN(llint, total_pairs_matched, 0); EXTERN(llint, total_reads_matched_conf, 0); EXTERN(llint, total_pairs_matched_conf, 0); EXTERN(llint, total_reads_dropped, 0); EXTERN(llint, total_pairs_dropped, 0); EXTERN(llint, total_single_matches, 0); EXTERN(llint, total_paired_matches, 0); EXTERN(llint, total_dup_single_matches, 0); /* number of duplicate hits */ EXTERN(llint, total_dup_paired_matches, 0); EXTERN(llint, load_genome_usecs, 0); EXTERN(llint, mapping_wallclock_usecs, 0); /* per-thread counts and statistics */ //EXTERN(llint, read_handle_usecs, 0); //EXTERN(llint, wait_ticks, 0); //EXTERN(llint, anchor_list_ticks, 0); //EXTERN(llint, region_counts_ticks, 0); //EXTERN(llint, mp_region_counts_ticks, 0); //EXTERN(llint, hit_list_ticks, 0); //EXTERN(llint, pass1_ticks, 0); //EXTERN(llint, get_vector_hits_ticks, 0); //EXTERN(llint, pass2_ticks, 0); //EXTERN(llint, duplicate_removal_ticks, 0); //EXTERN(stat_t, anchor_list_init_size, 0); //EXTERN(stat_t, n_big_gaps_anchor_list, 0); //EXTERN(stat_t, n_anchors_discarded, 0); EXTERN(int, anchor_list_big_gap, DEF_ANCHOR_LIST_BIG_GAP); // thread-private globals typedef struct tpg_t { llint read_handle_usecs; //llint wait_ticks; time_counter wait_tc; //llint anchor_list_ticks; time_counter anchor_list_tc; //llint region_counts_ticks; time_counter region_counts_tc; //llint mp_region_counts_ticks; time_counter mp_region_counts_tc; //llint hit_list_ticks; time_counter hit_list_tc; //llint pass1_ticks; time_counter pass1_tc; //llint get_vector_hits_ticks; time_counter get_vector_hits_tc; //llint pass2_ticks; time_counter pass2_tc; //llint duplicate_removal_ticks; time_counter duplicate_removal_tc; stat_t anchor_list_init_size; stat_t n_big_gaps_anchor_list; stat_t n_anchors_discarded; } tpg_t; EXTERN(tpg_t, tpg, {}); #pragma omp threadprivate(tpg) EXTERN(count_t, mem_genomemap, {}); EXTERN(count_t, mem_small, {}); EXTERN(count_t, mem_thread_buffer, {}); EXTERN(count_t, mem_mapping, {}); EXTERN(count_t, mem_sw, {}); /* genome map */ EXTERN(uint32_t ***, genomemap, NULL); EXTERN(uint32_t **, genomemap_len, NULL); EXTERN(uint32_t *, contig_offsets, NULL); /* offset info for genome contigs */ EXTERN(char **, contig_names, NULL); EXTERN(int, num_contigs, 0); EXTERN(uint32_t **, genome_contigs, NULL); /* genome -- always in letter */ EXTERN(uint32_t **, genome_contigs_rc, NULL); /* reverse complemets */ EXTERN(uint32_t **, genome_cs_contigs, NULL); EXTERN(uint32_t **, genome_cs_contigs_rc, NULL); EXTERN(int *, genome_initbp, NULL); EXTERN(uint32_t *, genome_len, NULL); EXTERN(bool, genome_is_rna, false); /* is genome RNA (has uracil)?*/ EXTERN(long long int, total_genome_size, 0); EXTERN(gen_st, contig_offsets_gen_st, {}); EXTERN(ptr_and_sz *, genomemap_block, NULL); EXTERN(ptr_and_sz, genome_contigs_block, {}); EXTERN(ptr_and_sz, genome_contigs_rc_block, {}); EXTERN(ptr_and_sz, genome_cs_contigs_block, {}); /* region handling */ EXTERN(bool, use_regions, DEF_USE_REGIONS); EXTERN(int, region_bits, DEF_REGION_BITS); EXTERN(int, region_overlap, DEF_REGION_OVERLAP); EXTERN(int, n_regions, (1 << (32 - DEF_REGION_BITS))); typedef uint16_t region_map_t; EXTERN(region_map_t *, region_map[2][2], {}); EXTERN(int, region_map_id, 0); EXTERN(int, region_map_id_bits, 13); //EXTERN(int, 
region_map_max_count, ((1 << 8) - 1)); #pragma omp threadprivate(region_map, region_map_id) /* contains inlined calls; uses gapless_sw and hash_filter_calls vars */ #include "../common/f1-wrapper.h" //void hit_free_sfrp(struct read_hit *); void read_free(struct read_entry *); void read_free_hit_list(struct read_entry *); void read_free_anchor_list(struct read_entry *); void read_free_full(struct read_entry *); /* pulled off the web; this may or may not be any good */ static inline uint32_t hash(uint32_t a) { a = (a+0x7ed55d16) + (a<<12); a = (a^0xc761c23c) ^ (a>>19); a = (a+0x165667b1) + (a<<5); a = (a+0xd3a2646c) ^ (a<<9); a = (a+0xfd7046c5) + (a<<3); a = (a^0xb55a4f09) ^ (a>>16); return a; } /* hash-based version or kmer -> map index function for larger seeds */ static inline uint32_t kmer_to_mapidx_hash(uint32_t *kmerWindow, int sn) { static uint32_t maxidx = ((uint32_t)1 << 2*HASH_TABLE_POWER) - 1; uint32_t mapidx = 0; int i; assert(seed_hash_mask != NULL); for (i = 0; i < BPTO32BW(max_seed_span); i++) mapidx = hash((kmerWindow[i] & seed_hash_mask[sn][i]) ^ mapidx); return mapidx & maxidx; } /* * Compress the given kmer into an index in 'readmap' according to the seed. * While not optimal, this is only about 20% of the spaced seed scan time. * * This is the original version for smaller seeds. * * XXX- This algorithm only considers bases 0-3, which implies overlap * when we have other bases (mainly uracil, but also wobble codes). * This won't affect sensitivity, but may cause extra S-W calls. */ static inline uint32_t kmer_to_mapidx_orig(uint32_t *kmerWindow, int sn) { bitmap_type a = seed[sn].mask[0]; uint32_t mapidx = 0; int i = 0; do { if ((a & 0x1) == 0x1) { mapidx <<= 2; mapidx |= ((kmerWindow[i/8] >> (i%8)*4) & 0x3); } a >>= 1; i++; } while (a != 0x0); assert(mapidx < power(4, seed[sn].weight)); return mapidx; } #define KMER_TO_MAPIDX(kmer, sn) (Hflag? kmer_to_mapidx_hash((kmer), (sn)) : kmer_to_mapidx_orig((kmer), (sn))) /* get contig number from absolute index */ static inline void get_contig_num(uint32_t idx, int * cn) { if (num_contigs < 100) { *cn = 0; while (*cn < num_contigs - 1 && idx >= contig_offsets[*cn + 1]) (*cn)++; } else { /* int l, r, m; l = 0; r = num_contigs; while (l + 1 < r) { m = (r + l)/2; if (idx < contig_offsets[m]) r = m; else l = m; } *cn = l; */ *cn = gen_st_search(&contig_offsets_gen_st, idx); } assert(contig_offsets[*cn] <= idx && idx < contig_offsets[*cn] + genome_len[*cn]); } #ifdef ENABLE_LOW_QUALITY_FILTER // #define AUTOMATICALLY_DISCARD_LOW_QUAL_POSITIONS #define INDIVIDUAL_QUALITY_THRESHOLD 3 /* 3 -> 50% chance to be right*/ #define AVERAGE_QUALITY_THRESHOLD 6 /* 6 -> 75% chance to be right*/ #define TOP_QUALITY_CUTOFF 10 /* 10 -> 90% chance to be right */ #ifdef AUTOMATICALLY_DISCARD_LOW_QUAL_POSITIONS #define UNTRUSTED_QUALITY (-128) #else #define UNTRUSTED_QUALITY 0 #endif static inline void read_quality_filter_preprocess (const char * original_qual, char * processed_qual) { int i, size = strlen(original_qual); for (i = 0; i < size; ++i) { processed_qual[i] = original_qual[i] - qual_delta; processed_qual[i] = (processed_qual[i] >= TOP_QUALITY_CUTOFF) ? TOP_QUALITY_CUTOFF : (processed_qual[i] < INDIVIDUAL_QUALITY_THRESHOLD) ? 
UNTRUSTED_QUALITY : processed_qual[i]; } } static inline bool is_low_quality_read_subsequence(const char * quality, const int position, const seed_type seed) { int i, subsequence_quality = 0; if (!quality) { return false; } for (i = 0; i < seed.span; ++i) { subsequence_quality += bitmap_extract(seed.mask, 1, seed.span - i - 1) * MAX(quality[position + i], UNTRUSTED_QUALITY); } if (subsequence_quality <= 0) { fprintf(stderr, "%d (pos %d, span %d) <<< ", subsequence_quality, position, seed.span); for (i = 0; i < seed.span; ++i) { fprintf (stderr, "%d ", quality[position + i]); } exit(1); } return (subsequence_quality >= AVERAGE_QUALITY_THRESHOLD * seed.weight); } #endif #ifdef __cplusplus //} /* extern "C" */ #endif #endif
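/* A tiny standalone sketch of the integer-mixing hash used by
 * kmer_to_mapidx_hash above: the same avalanche-style shift/add/xor rounds,
 * with the result masked down to 2*HASH_TABLE_POWER bits. The POWER value
 * here is an assumption for illustration, not gmapper's actual setting. */
#include <stdint.h>
#include <stdio.h>

static uint32_t mix32(uint32_t a) {
  a = (a + 0x7ed55d16) + (a << 12);
  a = (a ^ 0xc761c23c) ^ (a >> 19);
  a = (a + 0x165667b1) + (a << 5);
  a = (a + 0xd3a2646c) ^ (a << 9);
  a = (a + 0xfd7046c5) + (a << 3);
  a = (a ^ 0xb55a4f09) ^ (a >> 16);
  return a;
}

int main(void) {
  const int power = 12; /* hypothetical HASH_TABLE_POWER */
  const uint32_t maxidx = ((uint32_t)1 << (2 * power)) - 1;
  for (uint32_t k = 0; k < 4; k++)
    printf("%u -> %u\n", k, mix32(k) & maxidx); /* nearby keys scatter widely */
  return 0;
}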
GB_binop__lt_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__lt_uint8 // A.*B function (eWiseMult): GB_AemultB__lt_uint8 // A*D function (colscale): GB_AxD__lt_uint8 // D*A function (rowscale): GB_DxB__lt_uint8 // C+=B function (dense accum): GB_Cdense_accumB__lt_uint8 // C+=b function (dense accum): GB_Cdense_accumb__lt_uint8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lt_uint8 // C=scalar+B GB_bind1st__lt_uint8 // C=scalar+B' GB_bind1st_tran__lt_uint8 // C=A+scalar GB_bind2nd__lt_uint8 // C=A'+scalar GB_bind2nd_tran__lt_uint8 // C type: bool // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x < y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_UINT8 || GxB_NO_LT_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__lt_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__lt_uint8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__lt_uint8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__lt_uint8 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__lt_uint8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__lt_uint8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT 
C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__lt_uint8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__lt_uint8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = Bx [p] ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__lt_uint8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = Ax [p] ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB_bind1st_tran__lt_uint8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t 
*GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB_bind2nd_tran__lt_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
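/* A minimal sketch of the bind2nd idea used by GB_bind2nd__lt_uint8 above
 * (independent of GraphBLAS): the binary operator z = (x < y) becomes a unary
 * map by fixing its second argument to a scalar, then applying it entrywise. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void bind2nd_lt(bool *Cx, const uint8_t *Ax, uint8_t y, long anz) {
  for (long p = 0; p < anz; p++)
    Cx[p] = (Ax[p] < y); /* aij < y, with y fixed for the whole array */
}

int main(void) {
  uint8_t A[4] = {1, 5, 9, 3};
  bool C[4];
  bind2nd_lt(C, A, 5, 4);
  for (int p = 0; p < 4; p++) printf("%d", (int)C[p]); /* prints: 1001 */
  printf("\n");
  return 0;
}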
spectra.c
/** @file spectra.c Documented spectra module * * Julien Lesgourgues, 25.08.2010 * * This module computes the anisotropy and Fourier power spectra * \f$ C_l^{X}, P(k), ... \f$'s given the transfer and Bessel functions * (for anisotropy spectra), the source functions (for Fourier spectra) * and the primordial spectra. * * The following functions can be called from other modules: * * -# spectra_init() at the beginning (but after transfer_init()) * -# spectra_cl_at_l() at any time for computing \f$ C_l \f$ at any l * -# spectra_spectrum_at_z() at any time for computing P(k) at any z * -# spectra_spectrum_at_k_and z() at any time for computing P at any k and z * -# spectra_free() at the end */ #include "spectra.h" int spectra_bandpower(struct spectra * psp, int l1, int l2, double * TT_II, double * TT_RI, double * TT_RR ) { int l; int index_md; double * cl_tot; double ** cl_md; double ** cl_md_ic; class_alloc(cl_tot,psp->ct_size*sizeof(double),psp->error_message); class_alloc(cl_md,psp->md_size*sizeof(double*),psp->error_message); class_alloc(cl_md_ic,psp->md_size*sizeof(double*),psp->error_message); for (index_md=0;index_md<psp->md_size; index_md++) { class_alloc(cl_md[index_md],psp->ct_size*sizeof(double),psp->error_message); class_alloc(cl_md_ic[index_md],psp->ct_size*psp->ic_ic_size[index_md]*sizeof(double),psp->error_message); } *TT_RR=0.; *TT_RI=0.; *TT_II=0.; for (l=l1; l<=l2; l++) { class_call(spectra_cl_at_l(psp, (double)l, cl_tot, cl_md, cl_md_ic), psp->error_message, psp->error_message); *TT_RR += (double)(2*l+1)*cl_md_ic[psp->index_md_scalars][index_symmetric_matrix(0,0,psp->ic_size[psp->index_md_scalars])*psp->ct_size+psp->index_ct_tt]; *TT_RI += (double)(2*l+1)*cl_md_ic[psp->index_md_scalars][index_symmetric_matrix(0,1,psp->ic_size[psp->index_md_scalars])*psp->ct_size+psp->index_ct_tt]*2.; *TT_II += (double)(2*l+1)*cl_md_ic[psp->index_md_scalars][index_symmetric_matrix(1,1,psp->ic_size[psp->index_md_scalars])*psp->ct_size+psp->index_ct_tt]; } for (index_md=0;index_md<psp->md_size; index_md++) { free(cl_md[index_md]); free(cl_md_ic[index_md]); } free(cl_tot); free(cl_md); free(cl_md_ic); return _SUCCESS_; } /** * Anisotropy power spectra \f$ C_l\f$'s for all types, modes and initial conditions. * * This routine evaluates all the \f$C_l\f$'s at a given value of l by * interpolating in the pre-computed table. When relevant, it also * sums over all initial conditions for each mode, and over all modes. * * This function can be * called from whatever module at whatever time, provided that * spectra_init() has been called before, and spectra_free() has not * been called yet. * * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param l Input: multipole number * @param cl_tot Output: total \f$C_l\f$'s for all types (TT, TE, EE, etc..) * @param cl_md Output: \f$C_l\f$'s for all types (TT, TE, EE, etc..) decomposed mode by mode (scalar, tensor, ...) when relevant * @param cl_md_ic Output: \f$C_l\f$'s for all types (TT, TE, EE, etc..) 
decomposed by pairs of initial conditions (adiabatic, isocurvatures) for each mode (usually, only for the scalar mode) when relevant * @return the error status */ int spectra_cl_at_l( struct spectra * psp, double l, double * cl_tot, /* array with argument cl_tot[index_ct] (must be already allocated) */ double * * cl_md, /* array with argument cl_md[index_md][index_ct] (must be already allocated only if several modes) */ double * * cl_md_ic /* array with argument cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct] (must be already allocated for a given mode only if several ic's) */ ) { /** Summary: */ /** - define local variables */ int last_index; int index_md; int index_ic1,index_ic2,index_ic1_ic2; int index_ct; /** - (a) treat case in which there is only one mode and one initial condition. Then, only cl_tot needs to be filled. */ if ((psp->md_size == 1) && (psp->ic_size[0] == 1)) { index_md = 0; if ((int)l <= psp->l[psp->l_size[index_md]-1]) { /* interpolate at l */ class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ct_size, l, &last_index, cl_tot, psp->ct_size, psp->error_message), psp->error_message, psp->error_message); /* set to zero for the types such that l<l_max */ for (index_ct=0; index_ct<psp->ct_size; index_ct++) if ((int)l > psp->l_max_ct[index_md][index_ct]) cl_tot[index_ct]=0.; } else { for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_tot[index_ct]=0.; } } /** - (b) treat case in which there is only one mode with several initial condition. Fill cl_md_ic[index_md=0] and sum it to get cl_tot. */ if ((psp->md_size == 1) && (psp->ic_size[0] > 1)) { index_md = 0; for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_tot[index_ct]=0.; for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); if (((int)l <= psp->l[psp->l_size[index_md]-1]) && (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_)) { class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ic_ic_size[index_md]*psp->ct_size, l, &last_index, cl_md_ic[index_md], psp->ic_ic_size[index_md]*psp->ct_size, psp->error_message), psp->error_message, psp->error_message); for (index_ct=0; index_ct<psp->ct_size; index_ct++) if ((int)l > psp->l_max_ct[index_md][index_ct]) cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.; } else { for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.; } /* compute cl_tot by summing over cl_md_ic */ for (index_ct=0; index_ct<psp->ct_size; index_ct++) { if (index_ic1 == index_ic2) cl_tot[index_ct]+=cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]; else cl_tot[index_ct]+=2.*cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]; } } } } /** - (c) loop over modes */ if (psp->md_size > 1) { for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_tot[index_ct]=0.; for (index_md = 0; index_md < psp->md_size; index_md++) { /** - --> (c.1.) treat case in which the mode under consideration has only one initial condition. Fill cl_md[index_md]. 
*/ if (psp->ic_size[index_md] == 1) { if ((int)l <= psp->l[psp->l_size[index_md]-1]) { class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ct_size, l, &last_index, cl_md[index_md], psp->ct_size, psp->error_message), psp->error_message, psp->error_message); for (index_ct=0; index_ct<psp->ct_size; index_ct++) if ((int)l > psp->l_max_ct[index_md][index_ct]) cl_md[index_md][index_ct]=0.; } else { for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_md[index_md][index_ct]=0.; } } /** - --> (c.2.) treat case in which the mode under consideration has several initial conditions. Fill cl_md_ic[index_md] and sum it to get cl_md[index_md] */ if (psp->ic_size[index_md] > 1) { if ((int)l <= psp->l[psp->l_size[index_md]-1]) { /* interpolate all ic and ct */ class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ic_ic_size[index_md]*psp->ct_size, l, &last_index, cl_md_ic[index_md], psp->ic_ic_size[index_md]*psp->ct_size, psp->error_message), psp->error_message, psp->error_message); /* set to zero some of the components */ for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); for (index_ct=0; index_ct<psp->ct_size; index_ct++) { if (((int)l > psp->l_max_ct[index_md][index_ct]) || (psp->is_non_zero[index_md][index_ic1_ic2] == _FALSE_)) cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.; } } } } /* if l was too big, set anyway all components to zero */ else { for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); for (index_ct=0; index_ct<psp->ct_size; index_ct++) { cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.; } } } } /* sum up all ic for each mode */ for (index_ct=0; index_ct<psp->ct_size; index_ct++) { cl_md[index_md][index_ct]=0.; for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); if (index_ic1 == index_ic2) cl_md[index_md][index_ct]+=cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]; else cl_md[index_md][index_ct]+=2.*cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]; } } } } /** - --> (c.3.) add contribution of cl_md[index_md] to cl_tot */ for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_tot[index_ct]+=cl_md[index_md][index_ct]; } } return _SUCCESS_; } /** * Matter power spectrum for arbitrary redshift and for all initial conditions. * * This routine evaluates the matter power spectrum at a given value of z by * interpolating in the pre-computed table (if several values of z have been stored) * or by directly reading it (if it only contains values at z=0 and we want P(k,z=0)) * * * Can be called in two modes: linear or logarithmic. * * - linear: returns P(k) (units: \f$ Mpc^3\f$) * * - logarithmic: returns \f$\ln{P(k)}\f$ * * One little subtlety: in case of several correlated initial conditions, * the cross-correlation spectrum can be negative. 
Then, in logarithmic mode, * the non-diagonal elements contain the cross-correlation angle \f$ P_{12}/\sqrt{P_{11} P_{22}}\f$ * (from -1 to 1) instead of \f$\ln{P_{12}}\f$ * * This function can be * called from whatever module at whatever time, provided that * spectra_init() has been called before, and spectra_free() has not * been called yet. * * @param pba Input: pointer to background structure (used for converting z into tau) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param mode Input: linear or logarithmic * @param z Input: redshift * @param output_tot Output: total matter power spectrum P(k) in \f$ Mpc^3 \f$ (linear mode), or its logarithms (logarithmic mode) * @param output_ic Output: for each pair of initial conditions, matter power spectra P(k) in \f$ Mpc^3 \f$ (linear mode), or their logarithms and cross-correlation angles (logarithmic mode) * @return the error status */ int spectra_pk_at_z( struct background * pba, struct spectra * psp, enum linear_or_logarithmic mode, double z, double * output_tot, /* array with argument output_tot[index_k] (must be already allocated) */ double * output_ic /* array with argument output_tot[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] (must be already allocated only if more than one initial condition) */ ) { /** Summary: */ /** - define local variables */ int index_md; int last_index; int index_k; double tau,ln_tau; int index_ic1,index_ic2,index_ic1_ic2; index_md = psp->index_md_scalars; /** - first step: convert z into \f$\ln{\tau}\f$ */ class_call(background_tau_of_z(pba,z,&tau), pba->error_message, psp->error_message); class_test(tau <= 0., psp->error_message, "negative or null value of conformal time: cannot interpolate"); ln_tau = log(tau); /** - second step: for both modes (linear or logarithmic), store the spectrum in logarithmic format in the output array(s) */ /** - --> (a) if only values at tau=tau_today are stored and we want \f$ P(k,z=0)\f$, no need to interpolate */ if (psp->ln_tau_size == 1) { class_test(z != 0., psp->error_message, "asked z=%e but only P(k,z=0) has been tabulated",z); for (index_k=0; index_k<psp->ln_k_size; index_k++) if (psp->ic_size[index_md] == 1) { output_tot[index_k] = psp->ln_pk[index_k]; } else { for (index_ic1_ic2 = 0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) { output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] = psp->ln_pk[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2]; } } } /** - --> (b) if several values of tau have been stored, use interpolation routine to get spectra at correct redshift */ else { if (psp->ic_ic_size[index_md] == 1) { class_call(array_interpolate_spline(psp->ln_tau, psp->ln_tau_size, psp->ln_pk, psp->ddln_pk, psp->ln_k_size, ln_tau, &last_index, output_tot, psp->ln_k_size, psp->error_message), psp->error_message, psp->error_message); } else { class_call(array_interpolate_spline(psp->ln_tau, psp->ln_tau_size, psp->ln_pk, psp->ddln_pk, psp->ic_ic_size[index_md]*psp->ln_k_size, ln_tau, &last_index, output_ic, psp->ic_ic_size[index_md]*psp->ln_k_size, psp->error_message), psp->error_message, psp->error_message); } } /** - third step: if there are several initial conditions, compute the total P(k) and set back all uncorrelated coefficients to exactly zero. Check positivity of total P(k). 
*/ if (psp->ic_size[index_md] > 1) { for (index_k=0; index_k<psp->ln_k_size; index_k++) { output_tot[index_k] = 0.; for (index_ic1=0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); if (index_ic1 == index_ic2) { output_tot[index_k] += exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2]); } else { if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) { output_tot[index_k] += 2. * output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] * sqrt(exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md])]) * exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic2,index_ic2,psp->ic_size[index_md])])); } else output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] = 0.; } } } class_test(output_tot[index_k] <= 0., psp->error_message, "for k=%e, z=%e, the matrix of initial condition amplitudes was not positive definite, hence P(k)_total=%e results negative", exp(psp->ln_k[index_k]),z,output_tot[index_k]); } } /** - fourth step: depending on requested mode (linear or logarithmic), apply necessary transformation to the output arrays */ /** - --> (a) linear mode: if only one initial condition, convert output_pk to linear format; if several initial conditions, convert output_ic to linear format, output_tot is already in this format */ if (mode == linear) { if (psp->ic_size[index_md] == 1) { for (index_k=0; index_k<psp->ln_k_size; index_k++) { output_tot[index_k] = exp(output_tot[index_k]); } } else { for (index_k=0; index_k<psp->ln_k_size; index_k++) { for (index_ic1=0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md]); output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] = exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2]); } for (index_ic1=0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1+1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md])] = output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md])] *sqrt(output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md])] * output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic2,index_ic2,psp->ic_size[index_md])]); } } } } } /** - --> (b) logarithmic mode: if only one initial condition, nothing to be done; if several initial conditions, convert output_tot to logarithmic format, output_ic is already in this format */ else { if (psp->ic_size[index_md] > 1) { for (index_k=0; index_k<psp->ln_k_size; index_k++) { /* we have already checked above that output_tot was positive */ output_tot[index_k] = log(output_tot[index_k]); } } } return _SUCCESS_; } /** * Matter power spectrum for arbitrary wavenumber, redshift and initial condition. * * This routine evaluates the matter power spectrum at a given value of k and z by * interpolating in a table of all P(k)'s computed at this z by spectra_pk_at_z() (when kmin <= k <= kmax), * or eventually by using directly the primordial spectrum (when 0 <= k < kmin): * the latter case is an approximation, valid when kmin << comoving Hubble scale today. 
* Returns zero when k=0. Returns an error when k<0 or k > kmax. * * This function can be * called from whatever module at whatever time, provided that * spectra_init() has been called before, and spectra_free() has not * been called yet. * * @param pba Input: pointer to background structure (used for converting z into tau) * @param ppm Input: pointer to primordial structure (used only in the case 0 < k < kmin) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param k Input: wavenumber in 1/Mpc * @param z Input: redshift * @param pk_tot Output: total matter power spectrum P(k) in \f$ Mpc^3 \f$ * @param pk_ic Output: for each pair of initial conditions, matter power spectra P(k) in \f$ Mpc^3\f$ * @return the error status */ int spectra_pk_at_k_and_z( struct background * pba, struct primordial * ppm, struct spectra * psp, double k, double z, double * pk_tot, /* pointer to a single number (must be already allocated) */ double * pk_ic /* array of argument pk_ic[index_ic1_ic2] (must be already allocated only if several initial conditions) */ ) { /** Summary: */ /** - define local variables */ int index_md; int index_k; int last_index; int index_ic1,index_ic2,index_ic1_ic2; double * spectrum_at_z = NULL; double * spectrum_at_z_ic = NULL; double * spline; double * pk_primordial_k = NULL; double kmin; double * pk_primordial_kmin = NULL; index_md = psp->index_md_scalars; /** - first step: check that k is in valid range [0:kmax] (the test for z will be done when calling spectra_pk_at_z()) */ class_test((k < 0.) || (k > exp(psp->ln_k[psp->ln_k_size-1])), psp->error_message, "k=%e out of bounds [%e:%e]",k,0.,exp(psp->ln_k[psp->ln_k_size-1])); /** - deal with case 0 <= k < kmin */ if (k < exp(psp->ln_k[0])) { /** - --> (a) subcase k=0: then P(k)=0 */ if (k == 0.) 
{ if (psp->ic_size[index_md] == 1) { *pk_tot=0.; } else { for (index_ic1_ic2 = 0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) { pk_ic[index_ic1_ic2] = 0.; } } } /** - --> (b) subcase 0<k<kmin: in this case we know that on super-Hubble scales: * P(k) = [some number] * k * P_primordial(k) * so * P(k) = P(kmin) * (k P_primordial(k)) / (kmin P_primordial(kmin)) * (note that the result is accurate only if kmin is such that [a0 kmin] << H0) */ else { /* compute P(k,z) which contains P(kmin,z)*/ class_alloc(spectrum_at_z, psp->ln_k_size*sizeof(double), psp->error_message); if (psp->ic_size[index_md] > 1) { class_alloc(spectrum_at_z_ic, sizeof(double)*psp->ic_ic_size[index_md]*psp->ln_k_size, psp->error_message); } class_call(spectra_pk_at_z(pba, psp, linear, z, spectrum_at_z, spectrum_at_z_ic), psp->error_message, psp->error_message); /* compute P_primordial(k) */ class_alloc(pk_primordial_k, sizeof(double)*psp->ic_ic_size[index_md], psp->error_message); class_call(primordial_spectrum_at_k(ppm, index_md, linear, k, pk_primordial_k), ppm->error_message,psp->error_message); /* compute P_primordial(kmin) */ kmin = exp(psp->ln_k[0]); class_alloc(pk_primordial_kmin, sizeof(double)*psp->ic_ic_size[index_md], psp->error_message); class_call(primordial_spectrum_at_k(ppm, index_md, linear, kmin, pk_primordial_kmin), ppm->error_message, psp->error_message); /* apply above analytic approximation for P(k) */ index_k=0; if (psp->ic_size[index_md] == 1) { index_ic1_ic2 = 0; *pk_tot = spectrum_at_z[index_k] *k*pk_primordial_k[index_ic1_ic2] /kmin/pk_primordial_kmin[index_ic1_ic2]; } else { for (index_ic1_ic2 = 0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) { pk_ic[index_ic1_ic2] = spectrum_at_z_ic[index_ic1_ic2] *k*pk_primordial_k[index_ic1_ic2] /kmin/pk_primordial_kmin[index_ic1_ic2]; } } free(spectrum_at_z); if (psp->ic_size[index_md] > 1) free(spectrum_at_z_ic); free(pk_primordial_k); free(pk_primordial_kmin); } } /** - deal with case kmin <= k <= kmax */ else { /* compute P(k,z) (in logarithmic format for more accurate interpolation) */ class_alloc(spectrum_at_z, psp->ln_k_size*sizeof(double), psp->error_message); if (psp->ic_size[index_md] > 1) { class_alloc(spectrum_at_z_ic, sizeof(double)*psp->ic_ic_size[index_md]*psp->ln_k_size, psp->error_message); } class_call(spectra_pk_at_z(pba, psp, logarithmic, z, spectrum_at_z, spectrum_at_z_ic), psp->error_message, psp->error_message); /* get its second derivatives with spline, then interpolate, then convert to linear format */ class_alloc(spline, sizeof(double)*psp->ic_ic_size[index_md]*psp->ln_k_size, psp->error_message); if (psp->ic_size[index_md] == 1) { class_call(array_spline_table_lines(psp->ln_k, psp->ln_k_size, spectrum_at_z, 1, spline, _SPLINE_NATURAL_, psp->error_message), psp->error_message, psp->error_message); class_call(array_interpolate_spline(psp->ln_k, psp->ln_k_size, spectrum_at_z, spline, 1, log(k), &last_index, pk_tot, 1, psp->error_message), psp->error_message, psp->error_message); *pk_tot = exp(*pk_tot); } else { class_call(array_spline_table_lines(psp->ln_k, psp->ln_k_size, spectrum_at_z_ic, psp->ic_ic_size[index_md], spline, _SPLINE_NATURAL_, psp->error_message), psp->error_message, psp->error_message); class_call(array_interpolate_spline(psp->ln_k, psp->ln_k_size, spectrum_at_z_ic, spline, psp->ic_ic_size[index_md], log(k), &last_index, pk_ic, psp->ic_ic_size[index_md], psp->error_message), psp->error_message, psp->error_message); for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { 
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md]); pk_ic[index_ic1_ic2] = exp(pk_ic[index_ic1_ic2]); } for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1+1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) { pk_ic[index_ic1_ic2] = pk_ic[index_ic1_ic2]* sqrt(pk_ic[index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md])]* pk_ic[index_symmetric_matrix(index_ic2,index_ic2,psp->ic_size[index_md])]); } else { pk_ic[index_ic1_ic2] = 0.; } } } free(spectrum_at_z_ic); } free(spectrum_at_z); free(spline); } /** - last step: if more than one initial condition, sum over pk_ic to get pk_tot, and set back coefficients of non-correlated pairs to exactly zero. */ if (psp->ic_size[index_md] > 1) { *pk_tot = 0.; for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) { if (index_ic1 == index_ic2) *pk_tot += pk_ic[index_ic1_ic2]; else *pk_tot += 2.*pk_ic[index_ic1_ic2]; } else { pk_ic[index_ic1_ic2] = 0.; } } } class_test(*pk_tot <= 0., psp->error_message, "for k=%e, the matrix of initial condition amplitudes was not positive definite, hence P(k)_total comes out negative",k); } return _SUCCESS_; } /** * Non-linear total matter power spectrum for arbitrary redshift. * * This routine evaluates the non-linear matter power spectrum at a given value of z by * interpolating in the pre-computed table (if several values of z have been stored) * or by directly reading it (if it only contains values at z=0 and we want P(k,z=0)) * * * Can be called in two modes: linear or logarithmic. * * - linear: returns P(k) (units: Mpc^3) * * - logarithmic: returns ln(P(k)) * * This function can be * called from whatever module at whatever time, provided that * spectra_init() has been called before, and spectra_free() has not * been called yet.
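*
* A minimal usage sketch (hypothetical caller: here `ba` and `sp` stand for
* background and spectra structures already filled by the corresponding
* _init() routines, `errmsg` for a local error buffer, and the requested
* redshift is assumed to satisfy z <= z_max_pk):
*
* \code
* double * pk_nl;
* class_alloc(pk_nl, sp.ln_k_size*sizeof(double), errmsg);
* class_call(spectra_pk_nl_at_z(&ba, &sp, linear, 0.5, pk_nl),
*            sp.error_message, errmsg);
* free(pk_nl);
* \endcode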
* * @param pba Input: pointer to background structure (used for converting z into tau) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param mode Input: linear or logarithmic * @param z Input: redshift * @param output_tot Output: total matter power spectrum P(k) in \f$ Mpc^3\f$ (linear mode), or its logarithms (logarithmic mode) * @return the error status */ int spectra_pk_nl_at_z( struct background * pba, struct spectra * psp, enum linear_or_logarithmic mode, double z, double * output_tot /* array with argument output_tot[index_k] (must be already allocated) */ ) { /** Summary: */ /** - define local variables */ int last_index; int index_k; double tau,ln_tau; /** - first step: convert z into ln(tau) */ class_call(background_tau_of_z(pba,z,&tau), pba->error_message, psp->error_message); class_test(tau <= 0., psp->error_message, "negative or null value of conformal time: cannot interpolate"); ln_tau = log(tau); /** - second step: for both modes (linear or logarithmic), store the spectrum in logarithmic format in the output array(s) */ /** - --> (a) if only values at tau=tau_today are stored and we want P(k,z=0), no need to interpolate */ if (psp->ln_tau_size == 1) { class_test(z != 0., psp->error_message, "asked z=%e but only P(k,z=0) has been tabulated",z); for (index_k=0; index_k<psp->ln_k_size; index_k++) { output_tot[index_k] = psp->ln_pk_nl[index_k]; } } /** - --> (b) if several values of tau have been stored, use interpolation routine to get spectra at correct redshift */ else { class_call(array_interpolate_spline(psp->ln_tau, psp->ln_tau_size, psp->ln_pk_nl, psp->ddln_pk_nl, psp->ln_k_size, ln_tau, &last_index, output_tot, psp->ln_k_size, psp->error_message), psp->error_message, psp->error_message); } /** - third step: if needed, convert back to linear format */ if (mode == linear) { for (index_k=0; index_k<psp->ln_k_size; index_k++) { output_tot[index_k] = exp(output_tot[index_k]); } } return _SUCCESS_; } /** * Non-linear total matter power spectrum for arbitrary wavenumber and redshift. * * This routine evaluates the non-linear matter power spectrum at a given value of k and z by * interpolating in a table of all P(k)'s computed at this z by spectra_pk_nl_at_z() (with kmin <= k <= kmax). * Unlike its linear counterpart spectra_pk_at_k_and_z(), it has no analytic fallback below kmin: * it returns an error when k < kmin or k > kmax. * * This function can be * called from whatever module at whatever time, provided that * spectra_init() has been called before, and spectra_free() has not * been called yet.
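*
* A minimal usage sketch (same hypothetical caller conventions as in
* spectra_pk_nl_at_z(); `pm` stands for an initialized primordial structure,
* and k=0.1/Mpc is assumed to lie within [kmin:kmax]):
*
* \code
* double pk_nl;
* class_call(spectra_pk_nl_at_k_and_z(&ba, &pm, &sp, 0.1, 0., &pk_nl),
*            sp.error_message, errmsg);
* \endcode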
* * @param pba Input: pointer to background structure (used for converting z into tau) * @param ppm Input: pointer to primordial structure (not used here, since there is no analytic continuation below kmin in the non-linear case) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param k Input: wavenumber in 1/Mpc * @param z Input: redshift * @param pk_tot Output: total matter power spectrum P(k) in \f$ Mpc^3\f$ * @return the error status */ int spectra_pk_nl_at_k_and_z( struct background * pba, struct primordial * ppm, struct spectra * psp, double k, double z, double * pk_tot /* pointer to a single number (must be already allocated) */ ) { /** Summary: */ /** - define local variables */ int index_md; int last_index; double * spectrum_at_z = NULL; double * spline; index_md = psp->index_md_scalars; /** - check that k is in valid range [kmin:kmax] (the test for z will be done when calling spectra_pk_nl_at_z()) */ class_test((k < exp(psp->ln_k[0])) || (k > exp(psp->ln_k[psp->ln_k_size-1])), psp->error_message, "k=%e out of bounds [%e:%e]",k,exp(psp->ln_k[0]),exp(psp->ln_k[psp->ln_k_size-1])); /** - compute P(k,z) (in logarithmic format for more accurate interpolation) */ class_alloc(spectrum_at_z, psp->ln_k_size*sizeof(double), psp->error_message); class_call(spectra_pk_nl_at_z(pba, psp, logarithmic, z, spectrum_at_z), psp->error_message, psp->error_message); /** - get its second derivatives with spline, then interpolate, then convert to linear format */ class_alloc(spline, sizeof(double)*psp->ic_ic_size[index_md]*psp->ln_k_size, psp->error_message); class_call(array_spline_table_lines(psp->ln_k, psp->ln_k_size, spectrum_at_z, 1, spline, _SPLINE_NATURAL_, psp->error_message), psp->error_message, psp->error_message); class_call(array_interpolate_spline(psp->ln_k, psp->ln_k_size, spectrum_at_z, spline, 1, log(k), &last_index, pk_tot, 1, psp->error_message), psp->error_message, psp->error_message); *pk_tot = exp(*pk_tot); free(spectrum_at_z); free(spline); return _SUCCESS_; } /** * Matter transfer functions \f$ T_i(k) \f$ for arbitrary redshift and for all * initial conditions. * * This routine evaluates the matter transfer functions at a given value of z by * interpolating in the pre-computed table (if several values of z have been stored) * or by directly reading it (if it only contains values at z=0 and we want \f$ T_i(k,z=0)\f$) * * * This function can be * called from whatever module at whatever time, provided that * spectra_init() has been called before, and spectra_free() has not * been called yet.
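*
* A minimal usage sketch (same hypothetical caller conventions as above);
* note the size of the output array, which must hold all wavenumbers,
* initial conditions and transfer types at once:
*
* \code
* double * tk;
* class_alloc(tk,
*             sp.ln_k_size*sp.ic_size[sp.index_md_scalars]*sp.tr_size*sizeof(double),
*             errmsg);
* class_call(spectra_tk_at_z(&ba, &sp, 0., tk), sp.error_message, errmsg);
* free(tk);
* \endcode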
* * @param pba Input: pointer to background structure (used for converting z into tau) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param z Input: redshift * @param output Output: matter transfer functions * @return the error status */ int spectra_tk_at_z( struct background * pba, struct spectra * psp, double z, double * output /* array with argument output[(index_k*psp->ic_size[index_md]+index_ic)*psp->tr_size+index_tr] (must be already allocated) */ ) { /** Summary: */ /** - define local variables */ int index_md; int last_index; int index_k; int index_tr; double tau,ln_tau; int index_ic; index_md = psp->index_md_scalars; /** - first step: convert z into ln(tau) */ class_call(background_tau_of_z(pba,z,&tau), pba->error_message, psp->error_message); class_test(tau <= 0., psp->error_message, "negative or null value of conformal time: cannot interpolate"); ln_tau = log(tau); /** - second step: store the matter transfer functions in the output array */ /** - --> (a) if only values at tau=tau_today are stored and we want \f$ T_i(k,z=0)\f$, no need to interpolate */ if (psp->ln_tau_size == 1) { class_test(z != 0., psp->error_message, "asked z=%e but only T_i(k,z=0) has been tabulated",z); for (index_k=0; index_k<psp->ln_k_size; index_k++) for (index_tr=0; index_tr<psp->tr_size; index_tr++) for (index_ic = 0; index_ic < psp->ic_size[index_md]; index_ic++) output[(index_k*psp->ic_size[index_md]+index_ic)*psp->tr_size+index_tr] = psp->matter_transfer[(index_k*psp->ic_size[index_md]+index_ic)*psp->tr_size+index_tr]; } /** - --> (b) if several values of tau have been stored, use interpolation routine to get spectra at correct redshift */ else { class_call(array_interpolate_spline(psp->ln_tau, psp->ln_tau_size, psp->matter_transfer, psp->ddmatter_transfer, psp->ic_size[index_md]*psp->tr_size*psp->ln_k_size, ln_tau, &last_index, output, psp->ic_size[index_md]*psp->tr_size*psp->ln_k_size, psp->error_message), psp->error_message, psp->error_message); } return _SUCCESS_; } /** * Matter transfer functions \f$ T_i(k)\f$ for arbitrary wavenumber, redshift * and initial condition. * * This routine evaluates the matter transfer functions at a given * value of k and z by interpolating in a table of all \f$ T_i(k,z)\f$'s * computed at this z by spectra_tk_at_z() (when kmin <= k <= kmax). * Returns an error when k<kmin or k > kmax. * * This function can be called from whatever module at whatever time, * provided that spectra_init() has been called before, and * spectra_free() has not been called yet. * * @param pba Input: pointer to background structure (used for converting z into tau) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param k Input: wavenumber in 1/Mpc * @param z Input: redshift * @param output Output: matter transfer functions * @return the error status */ int spectra_tk_at_k_and_z( struct background * pba, struct spectra * psp, double k, double z, double * output /* array with argument output[index_ic*psp->tr_size+index_tr] (must be already allocated) */ ) { /** Summary: */ /** - define local variables */ int index_md; int last_index; double * tks_at_z; double * ddtks_at_z; index_md = psp->index_md_scalars; /** - check that k is in valid range [0:kmax] (the test for z will be done when calling spectra_tk_at_z()) */ class_test((k < 0.) 
|| (k > exp(psp->ln_k[psp->ln_k_size-1])), psp->error_message, "k=%e out of bounds [%e:%e]",k,0.,exp(psp->ln_k[psp->ln_k_size-1])); /** - compute T_i(k,z) */ class_alloc(tks_at_z, psp->ln_k_size*psp->tr_size*psp->ic_size[index_md]*sizeof(double), psp->error_message); class_call(spectra_tk_at_z(pba, psp, z, tks_at_z), psp->error_message, psp->error_message); /** - get its second derivatives w.r.t. k with spline, then interpolate */ class_alloc(ddtks_at_z, psp->ln_k_size*psp->tr_size*psp->ic_size[index_md]*sizeof(double), psp->error_message); class_call(array_spline_table_lines(psp->ln_k, psp->ln_k_size, tks_at_z, psp->tr_size*psp->ic_size[index_md], ddtks_at_z, _SPLINE_NATURAL_, psp->error_message), psp->error_message, psp->error_message); class_call(array_interpolate_spline(psp->ln_k, psp->ln_k_size, tks_at_z, ddtks_at_z, psp->tr_size*psp->ic_size[index_md], log(k), &last_index, output, psp->tr_size*psp->ic_size[index_md], psp->error_message), psp->error_message, psp->error_message); free(tks_at_z); free(ddtks_at_z); return _SUCCESS_; } /** * This routine initializes the spectra structure (in particular, * computes table of anisotropy and Fourier spectra \f$ C_l^{X}, P(k), ... \f$) * * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure (will provide H, Omega_m at redshift of interest) * @param ppt Input: pointer to perturbation structure * @param ptr Input: pointer to transfer structure * @param ppm Input: pointer to primordial structure * @param pnl Input: pointer to nonlinear structure * @param psp Output: pointer to initialized spectra structure * @return the error status */ int spectra_init( struct precision * ppr, struct background * pba, struct perturbs * ppt, struct primordial * ppm, struct nonlinear *pnl, struct transfers * ptr, struct spectra * psp ) { /** Summary: */ double TT_II,TT_RI,TT_RR; int l1,l2; /** - check that we really want to compute at least one spectrum */ if ((ppt->has_cls == _FALSE_) && (ppt->has_pk_matter == _FALSE_) && (ppt->has_density_transfers == _FALSE_) && (ppt->has_velocity_transfers == _FALSE_)) { psp->md_size = 0; if (psp->spectra_verbose > 0) printf("No spectra requested. 
Spectra module skipped.\n"); return _SUCCESS_; } else { if (psp->spectra_verbose > 0) printf("Computing unlensed linear spectra\n"); } /** - initialize indices and allocate some of the arrays in the spectra structure */ class_call(spectra_indices(pba,ppt,ptr,ppm,psp), psp->error_message, psp->error_message); /** - deal with \f$ C_l\f$'s, if any */ if (ppt->has_cls == _TRUE_) { class_call(spectra_cls(pba,ppt,ptr,ppm,psp), psp->error_message, psp->error_message); } else { psp->ct_size=0; } /** - deal with \f$ P(k,\tau)\f$ and \f$ T_i(k,\tau)\f$ */ if ((ppt->has_pk_matter == _TRUE_) || (ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_)) { class_call(spectra_k_and_tau(pba,ppt,psp), psp->error_message, psp->error_message); if (ppt->has_pk_matter == _TRUE_) { class_call(spectra_pk(pba,ppt,ppm,pnl,psp), psp->error_message, psp->error_message); } else { psp->ln_pk=NULL; } if ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_)) { class_call(spectra_matter_transfers(pba,ppt,psp), psp->error_message, psp->error_message); } else { psp->matter_transfer=NULL; } } else { psp->ln_k_size=0; } /* if there is one isocurvature mode, compute and store in the psp structure the isocurvature contribution to some bandpowers in different ranges of l, and the contribution to the primordial spectrum at different wavenumbers (used in the Planck analysis) */ if ((ppt->has_scalars == _TRUE_) && (ppt->has_cls == _TRUE_) && (ppt->ic_size[ppt->index_md_scalars] == 2)) { l1=2; l2=20; class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR), psp->error_message, psp->error_message); class_test(TT_II+TT_RI+TT_RR==0., psp->error_message, "should never happen"); psp->alpha_II_2_20=TT_II/(TT_II+TT_RI+TT_RR); psp->alpha_RI_2_20=TT_RI/(TT_II+TT_RI+TT_RR); psp->alpha_RR_2_20=TT_RR/(TT_II+TT_RI+TT_RR); l1=21; l2=200; class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR), psp->error_message, psp->error_message); class_test(TT_II+TT_RI+TT_RR==0., psp->error_message, "should never happen"); psp->alpha_II_21_200=TT_II/(TT_II+TT_RI+TT_RR); psp->alpha_RI_21_200=TT_RI/(TT_II+TT_RI+TT_RR); psp->alpha_RR_21_200=TT_RR/(TT_II+TT_RI+TT_RR); l1=201; l2=2500; class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR), psp->error_message, psp->error_message); class_test(TT_II+TT_RI+TT_RR==0., psp->error_message, "should never happen"); psp->alpha_II_201_2500=TT_II/(TT_II+TT_RI+TT_RR); psp->alpha_RI_201_2500=TT_RI/(TT_II+TT_RI+TT_RR); psp->alpha_RR_201_2500=TT_RR/(TT_II+TT_RI+TT_RR); l1=2; l2=2500; class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR), psp->error_message, psp->error_message); class_test(TT_II+TT_RI+TT_RR==0., psp->error_message, "should never happen"); psp->alpha_II_2_2500=TT_II/(TT_II+TT_RI+TT_RR); psp->alpha_RI_2_2500=TT_RI/(TT_II+TT_RI+TT_RR); psp->alpha_RR_2_2500=TT_RR/(TT_II+TT_RI+TT_RR); if (ppt->has_cdi==_TRUE_) { psp->alpha_kp=ppm->f_cdi*ppm->f_cdi /(1.+ppm->f_cdi*ppm->f_cdi); psp->alpha_k1=ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.002/ppm->k_pivot)) /(1.+ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.002/ppm->k_pivot))); psp->alpha_k2=ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.1/ppm->k_pivot)) /(1.+ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.1/ppm->k_pivot))); } if (ppt->has_nid==_TRUE_) { psp->alpha_kp=ppm->f_nid*ppm->f_nid /(1.+ppm->f_nid*ppm->f_nid); psp->alpha_k1=ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.002/ppm->k_pivot)) 
/(1.+ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.002/ppm->k_pivot))); psp->alpha_k2=ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.1/ppm->k_pivot)) /(1.+ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.1/ppm->k_pivot))); } if (ppt->has_niv==_TRUE_) { psp->alpha_kp=ppm->f_niv*ppm->f_niv /(1.+ppm->f_niv*ppm->f_niv); psp->alpha_k1=ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.002/ppm->k_pivot)) /(1.+ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.002/ppm->k_pivot))); psp->alpha_k2=ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.1/ppm->k_pivot)) /(1.+ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.1/ppm->k_pivot))); } } return _SUCCESS_; } /** * This routine frees all the memory space allocated by spectra_init(). * * To be called at the end of each run, only when no further calls to * spectra_cls_at_l(), spectra_pk_at_z(), spectra_pk_at_k_and_z() are needed. * * @param psp Input: pointer to spectra structure (which fields must be freed) * @return the error status */ int spectra_free( struct spectra * psp ) { int index_md; if (psp->md_size > 0) { if (psp->ct_size > 0) { for (index_md = 0; index_md < psp->md_size; index_md++) { free(psp->l_max_ct[index_md]); free(psp->cl[index_md]); free(psp->ddcl[index_md]); } free(psp->l); free(psp->l_size); free(psp->l_max_ct); free(psp->l_max); free(psp->cl); free(psp->ddcl); } if (psp->ln_k_size > 0) { free(psp->ln_tau); free(psp->ln_k); if (psp->ln_pk != NULL) { free(psp->ln_pk); if (psp->ln_tau_size > 1) { free(psp->ddln_pk); } if (psp->ln_pk_nl != NULL) { free(psp->ln_pk_nl); if (psp->ln_tau_size > 1) { free(psp->ddln_pk_nl); } } } if (psp->matter_transfer != NULL) { free(psp->matter_transfer); if (psp->ln_tau_size > 1) { free(psp->ddmatter_transfer); } } } } for (index_md=0; index_md < psp->md_size; index_md++) free(psp->is_non_zero[index_md]); free(psp->is_non_zero); free(psp->ic_size); free(psp->ic_ic_size); return _SUCCESS_; } /** * This routine defines indices and allocates tables in the spectra structure * * @param pba Input: pointer to background structure * @param ppt Input: pointer to perturbation structure * @param ptr Input: pointer to transfers structure * @param ppm Input: pointer to primordial structure * @param psp Input/output: pointer to spectra structure * @return the error status */ int spectra_indices( struct background * pba, struct perturbs * ppt, struct transfers * ptr, struct primordial * ppm, struct spectra * psp ){ int index_ct; int index_md; int index_ic1_ic2; int index_tr; psp->md_size = ppt->md_size; if (ppt->has_scalars == _TRUE_) psp->index_md_scalars = ppt->index_md_scalars; class_alloc(psp->ic_size, sizeof(int)*psp->md_size, psp->error_message); class_alloc(psp->ic_ic_size, sizeof(int)*psp->md_size, psp->error_message); class_alloc(psp->is_non_zero, sizeof(short *)*psp->md_size, psp->error_message); for (index_md=0; index_md < psp->md_size; index_md++) { psp->ic_size[index_md] = ppm->ic_size[index_md]; psp->ic_ic_size[index_md] = ppm->ic_ic_size[index_md]; class_alloc(psp->is_non_zero[index_md], sizeof(short)*psp->ic_ic_size[index_md], psp->error_message); for (index_ic1_ic2=0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) psp->is_non_zero[index_md][index_ic1_ic2] = ppm->is_non_zero[index_md][index_ic1_ic2]; } if (ppt->has_cls == _TRUE_) { /* types of C_l's relevant for both scalars and tensors: TT, EE, TE */ index_ct=0; if (ppt->has_cl_cmb_temperature == _TRUE_) { psp->has_tt = _TRUE_; psp->index_ct_tt=index_ct; index_ct++; } else { psp->has_tt = _FALSE_; } if 
(ppt->has_cl_cmb_polarization == _TRUE_) { psp->has_ee = _TRUE_; psp->index_ct_ee=index_ct; index_ct++; } else { psp->has_ee = _FALSE_; } if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_cmb_polarization == _TRUE_)) { psp->has_te = _TRUE_; psp->index_ct_te=index_ct; index_ct++; } else { psp->has_te = _FALSE_; } if (ppt->has_cl_cmb_polarization == _TRUE_) { psp->has_bb = _TRUE_; psp->index_ct_bb=index_ct; index_ct++; } else { psp->has_bb = _FALSE_; } /* types of C_l's relevant only for scalars: phi-phi, T-phi, E-phi, d-d, T-d */ if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_pp = _TRUE_; psp->index_ct_pp=index_ct; index_ct++; } else { psp->has_pp = _FALSE_; } if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_tp = _TRUE_; psp->index_ct_tp=index_ct; index_ct++; } else { psp->has_tp = _FALSE_; } psp->ct_size = index_ct; if ((ppt->has_cl_cmb_polarization == _TRUE_) && (ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_ep = _TRUE_; psp->index_ct_ep=index_ct; index_ct++; } else { psp->has_ep = _FALSE_; } if ((ppt->has_scalars == _TRUE_) && ((ppt->has_cl_number_count == _TRUE_) || (ppt->has_cl_lensing_potential == _TRUE_))) psp->d_size=ppt->selection_num; else psp->d_size=0; if ((ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_dd = _TRUE_; psp->index_ct_dd=index_ct; index_ct+=(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2; } else { psp->has_dd = _FALSE_; } /* the computation of C_l^Td would require a very good sampling of transfer functions over a wide range, and a huge computation time. In the current version, we prefer to switch it off, rather than either slowing down the code considerably, or producing very inaccurate spectra. if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_td = _TRUE_; psp->index_ct_td=index_ct; index_ct+=psp->d_size; } else { psp->has_td = _FALSE_; } */ psp->has_td = _FALSE_; if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_pd = _TRUE_; psp->index_ct_pd=index_ct; index_ct+=psp->d_size; } else { psp->has_pd = _FALSE_; } psp->has_td = _FALSE_; if ((ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_ll = _TRUE_; psp->index_ct_ll=index_ct; index_ct+=(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2; } else { psp->has_ll = _FALSE_; } /* the computation of C_l^Tl would require a very good sampling of transfer functions over a wide range, and a huge computation time. In the current version, we prefer to switch it off, rather than either slowing down the code considerably, or producing very inaccurate spectra. 
if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_tl = _TRUE_; psp->index_ct_tl=index_ct; index_ct+=psp->d_size; } else { psp->has_tl = _FALSE_; } */ psp->has_tl = _FALSE_; if ((ppt->has_cl_number_count == _TRUE_) && (ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_dl = _TRUE_; psp->index_ct_dl=index_ct; index_ct += psp->d_size*psp->d_size - (psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag); } else { psp->has_dl = _FALSE_; } psp->ct_size = index_ct; /* infer from input quantities the l_max for each mode and type, l_max_ct[index_md][index_type]. Maximize it over index_ct, and then over index_md. */ class_alloc(psp->l_max,sizeof(int*)*psp->md_size,psp->error_message); class_alloc(psp->l_max_ct,sizeof(int*)*psp->md_size,psp->error_message); for (index_md=0; index_md<psp->md_size; index_md++) { class_calloc(psp->l_max_ct[index_md],psp->ct_size,sizeof(int),psp->error_message); } if (ppt->has_scalars == _TRUE_) { /* spectra computed up to l_scalar_max */ if (psp->has_tt == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_tt] = ppt->l_scalar_max; if (psp->has_ee == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_ee] = ppt->l_scalar_max; if (psp->has_te == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_te] = ppt->l_scalar_max; if (psp->has_pp == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_pp] = ppt->l_scalar_max; if (psp->has_tp == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_tp] = ppt->l_scalar_max; if (psp->has_ep == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_ep] = ppt->l_scalar_max; /* spectra computed up to l_lss_max */ if (psp->has_dd == _TRUE_) for (index_ct=psp->index_ct_dd; index_ct<psp->index_ct_dd+(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max; if (psp->has_td == _TRUE_) for (index_ct=psp->index_ct_td; index_ct<psp->index_ct_td+psp->d_size; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max,ppt->l_lss_max); if (psp->has_pd == _TRUE_) for (index_ct=psp->index_ct_pd; index_ct<psp->index_ct_pd+psp->d_size; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max,ppt->l_lss_max); if (psp->has_ll == _TRUE_) for (index_ct=psp->index_ct_ll; index_ct<psp->index_ct_ll+(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max; if (psp->has_tl == _TRUE_) for (index_ct=psp->index_ct_tl; index_ct<psp->index_ct_tl+psp->d_size; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max,ppt->l_lss_max); if (psp->has_dl == _TRUE_) for (index_ct=psp->index_ct_dl; index_ct < psp->index_ct_dl+(psp->d_size*psp->d_size - (psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag)); index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max; } if (ppt->has_tensors == _TRUE_) { /* spectra computed up to l_tensor_max */ if (psp->has_tt == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_tt] = ppt->l_tensor_max; if (psp->has_ee == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_ee] = ppt->l_tensor_max; if (psp->has_te == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_te] = ppt->l_tensor_max; if (psp->has_bb == _TRUE_) 
psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_bb] = ppt->l_tensor_max; } /* maximizations */ psp->l_max_tot = 0.; for (index_md=0; index_md < psp->md_size; index_md++) { psp->l_max[index_md] = 0.; for (index_ct=0.; index_ct<psp->ct_size; index_ct++) psp->l_max[index_md] = MAX(psp->l_max[index_md],psp->l_max_ct[index_md][index_ct]); psp->l_max_tot = MAX(psp->l_max_tot,psp->l_max[index_md]); } } /* indices for species associated with a matter transfer function in Fourier space */ index_tr=0; class_define_index(psp->index_tr_delta_g,ppt->has_source_delta_g,index_tr,1); class_define_index(psp->index_tr_delta_b,ppt->has_source_delta_b,index_tr,1); class_define_index(psp->index_tr_delta_cdm,ppt->has_source_delta_cdm,index_tr,1); class_define_index(psp->index_tr_delta_dcdm,ppt->has_source_delta_dcdm,index_tr,1); class_define_index(psp->index_tr_delta_scf,ppt->has_source_delta_scf,index_tr,1); class_define_index(psp->index_tr_delta_fld,ppt->has_source_delta_fld,index_tr,1); class_define_index(psp->index_tr_delta_ur,ppt->has_source_delta_ur,index_tr,1); class_define_index(psp->index_tr_delta_dr,ppt->has_source_delta_dr,index_tr,1); class_define_index(psp->index_tr_delta_ncdm1,ppt->has_source_delta_ncdm,index_tr,pba->N_ncdm); class_define_index(psp->index_tr_delta_tot,ppt->has_density_transfers,index_tr,1); class_define_index(psp->index_tr_phi,ppt->has_source_phi,index_tr,1); class_define_index(psp->index_tr_psi,ppt->has_source_psi,index_tr,1); /* indices for species associated with a velocity transfer function in Fourier space */ class_define_index(psp->index_tr_theta_g,ppt->has_source_theta_g,index_tr,1); class_define_index(psp->index_tr_theta_b,ppt->has_source_theta_b,index_tr,1); class_define_index(psp->index_tr_theta_cdm,ppt->has_source_theta_cdm,index_tr,1); class_define_index(psp->index_tr_theta_dcdm,ppt->has_source_theta_dcdm,index_tr,1); class_define_index(psp->index_tr_theta_scf,ppt->has_source_theta_scf,index_tr,1); class_define_index(psp->index_tr_theta_fld,ppt->has_source_theta_fld,index_tr,1); class_define_index(psp->index_tr_theta_ur,ppt->has_source_theta_ur,index_tr,1); class_define_index(psp->index_tr_theta_dr,ppt->has_source_theta_dr,index_tr,1); class_define_index(psp->index_tr_theta_ncdm1,ppt->has_source_theta_ncdm,index_tr,pba->N_ncdm); class_define_index(psp->index_tr_theta_tot,ppt->has_velocity_transfers,index_tr,1); psp->tr_size = index_tr; return _SUCCESS_; } /** * This routine computes a table of values for all harmonic spectra \f$ C_l \f$'s, * given the transfer functions and primordial spectra. * * @param pba Input: pointer to background structure * @param ppt Input: pointer to perturbation structure * @param ptr Input: pointer to transfers structure * @param ppm Input: pointer to primordial structure * @param psp Input/Output: pointer to spectra structure * @return the error status */ int spectra_cls( struct background * pba, struct perturbs * ppt, struct transfers * ptr, struct primordial * ppm, struct spectra * psp ) { /** Summary: */ /** - define local variables */ int index_md; int index_ic1,index_ic2,index_ic1_ic2; int index_l; int index_ct; int cl_integrand_num_columns; double * cl_integrand; /* array with argument cl_integrand[index_k*cl_integrand_num_columns+1+psp->index_ct] */ double * transfer_ic1; /* array with argument transfer_ic1[index_tt] */ double * transfer_ic2; /* idem */ double * primordial_pk; /* array with argument primordial_pk[index_ic_ic]*/ /* This code can be optionally compiled with the openmp option for parallel computation. 
Inside parallel regions, the use of the command "return" is forbidden. For error management, instead of "return _FAILURE_", we will set the variable below to "abort = _TRUE_". This will lead to a "return _FAILURE_" just after leaving the parallel region. */ int abort; #ifdef _OPENMP /* instrumentation times */ double tstart, tstop; #endif /** - allocate pointers to arrays where results will be stored */ class_alloc(psp->l_size,sizeof(int)*psp->md_size,psp->error_message); class_alloc(psp->cl,sizeof(double *)*psp->md_size,psp->error_message); class_alloc(psp->ddcl,sizeof(double *)*psp->md_size,psp->error_message); psp->l_size_max = ptr->l_size_max; class_alloc(psp->l,sizeof(double)*psp->l_size_max,psp->error_message); /** - store values of l */ for (index_l=0; index_l < psp->l_size_max; index_l++) { psp->l[index_l] = (double)ptr->l[index_l]; } /** - loop over modes (scalar, tensors, etc). For each mode: */ for (index_md = 0; index_md < psp->md_size; index_md++) { /** - --> (a) store number of l values for this mode */ psp->l_size[index_md] = ptr->l_size[index_md]; /** - --> (b) allocate arrays where results will be stored */ class_alloc(psp->cl[index_md],sizeof(double)*psp->l_size[index_md]*psp->ct_size*psp->ic_ic_size[index_md],psp->error_message); class_alloc(psp->ddcl[index_md],sizeof(double)*psp->l_size[index_md]*psp->ct_size*psp->ic_ic_size[index_md],psp->error_message); cl_integrand_num_columns = 1+psp->ct_size*2; /* one for k, ct_size for each type, ct_size for each second derivative of each type */ /** - --> (c) loop over initial conditions */ for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); /* non-diagonal coefficients should be computed only if non-zero correlation */ if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) { /* initialize error management flag */ abort = _FALSE_; /* beginning of parallel region */ #pragma omp parallel \ shared(ptr,ppm,index_md,psp,ppt,cl_integrand_num_columns,index_ic1,index_ic2,abort) \ private(tstart,cl_integrand,primordial_pk,transfer_ic1,transfer_ic2,index_l,tstop) { #ifdef _OPENMP tstart = omp_get_wtime(); #endif class_alloc_parallel(cl_integrand, ptr->q_size*cl_integrand_num_columns*sizeof(double), psp->error_message); class_alloc_parallel(primordial_pk, psp->ic_ic_size[index_md]*sizeof(double), psp->error_message); class_alloc_parallel(transfer_ic1, ptr->tt_size[index_md]*sizeof(double), psp->error_message); class_alloc_parallel(transfer_ic2, ptr->tt_size[index_md]*sizeof(double), psp->error_message); #pragma omp for schedule (dynamic) /** - ---> loop over l values defined in the transfer module. For each l, compute the \f$ C_l\f$'s for all types (TT, TE, ...) by convolving primordial spectra with transfer functions.
This elementary task is assigned to spectra_compute_cl() */ for (index_l=0; index_l < ptr->l_size[index_md]; index_l++) { #pragma omp flush(abort) class_call_parallel(spectra_compute_cl(pba, ppt, ptr, ppm, psp, index_md, index_ic1, index_ic2, index_l, cl_integrand_num_columns, cl_integrand, primordial_pk, transfer_ic1, transfer_ic2), psp->error_message, psp->error_message); } /* end of loop over l */ #ifdef _OPENMP tstop = omp_get_wtime(); if (psp->spectra_verbose > 1) printf("In %s: time spent in parallel region (loop over l's) = %e s for thread %d\n", __func__,tstop-tstart,omp_get_thread_num()); #endif free(cl_integrand); free(primordial_pk); free(transfer_ic1); free(transfer_ic2); } /* end of parallel region */ if (abort == _TRUE_) return _FAILURE_; } else { /* set non-diagonal coefficients to zero if pair of ic's uncorrelated */ for (index_l=0; index_l < ptr->l_size[index_md]; index_l++) { for (index_ct=0; index_ct<psp->ct_size; index_ct++) { psp->cl[index_md] [(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct] = 0.; } } } } } /** - --> (d) now that for a given mode, all possible \f$ C_l\f$'s have been computed, compute second derivative of the array in which they are stored, in view of spline interpolation. */ class_call(array_spline_table_lines(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ic_ic_size[index_md]*psp->ct_size, psp->ddcl[index_md], _SPLINE_EST_DERIV_, psp->error_message), psp->error_message, psp->error_message); } return _SUCCESS_; } /** * This routine computes the \f$ C_l\f$'s for a given mode, pair of initial conditions * and multipole, but for all types (TT, TE...), by convolving the * transfer functions with the primordial spectra. * * @param pba Input: pointer to background structure * @param ppt Input: pointer to perturbation structure * @param ptr Input: pointer to transfers structure * @param ppm Input: pointer to primordial structure * @param psp Input/Output: pointer to spectra structure (result stored here) * @param index_md Input: index of mode under consideration * @param index_ic1 Input: index of first initial condition in the correlator * @param index_ic2 Input: index of second initial condition in the correlator * @param index_l Input: index of multipole under consideration * @param cl_integrand_num_columns Input: number of columns in cl_integrand * @param cl_integrand Input: an allocated workspace * @param primordial_pk Input: table of primordial spectrum values * @param transfer_ic1 Input: table of transfer function values for first initial condition * @param transfer_ic2 Input: table of transfer function values for second initial condition * @return the error status */ int spectra_compute_cl( struct background * pba, struct perturbs * ppt, struct transfers * ptr, struct primordial * ppm, struct spectra * psp, int index_md, int index_ic1, int index_ic2, int index_l, int cl_integrand_num_columns, double * cl_integrand, double * primordial_pk, double * transfer_ic1, double * transfer_ic2 ) { int index_q; int index_tt; int index_ct; int index_d1,index_d2; double k; double clvalue; int index_ic1_ic2; double transfer_ic1_temp=0.; double transfer_ic2_temp=0.; double * transfer_ic1_nc=NULL; double * transfer_ic2_nc=NULL; double factor; int index_q_spline=0; index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); if (ppt->has_cl_number_count == _TRUE_) { class_alloc(transfer_ic1_nc,psp->d_size*sizeof(double),psp->error_message); 
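/* idem for the second initial condition: one entry per redshift bin */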
class_alloc(transfer_ic2_nc,psp->d_size*sizeof(double),psp->error_message); } for (index_q=0; index_q < ptr->q_size; index_q++) { //q = ptr->q[index_q]; k = ptr->k[index_md][index_q]; cl_integrand[index_q*cl_integrand_num_columns+0] = k; class_call(primordial_spectrum_at_k(ppm,index_md,linear,k,primordial_pk), ppm->error_message, psp->error_message); /* above routine checks that k>0: no possible division by zero below */ for (index_tt=0; index_tt < ptr->tt_size[index_md]; index_tt++) { transfer_ic1[index_tt] = ptr->transfer[index_md] [((index_ic1 * ptr->tt_size[index_md] + index_tt) * ptr->l_size[index_md] + index_l) * ptr->q_size + index_q]; if (index_ic1 == index_ic2) { transfer_ic2[index_tt] = transfer_ic1[index_tt]; } else { transfer_ic2[index_tt] = ptr->transfer[index_md] [((index_ic2 * ptr->tt_size[index_md] + index_tt) * ptr->l_size[index_md] + index_l) * ptr->q_size + index_q]; } } /* define combinations of transfer functions */ if (ppt->has_cl_cmb_temperature == _TRUE_) { if (_scalars_) { transfer_ic1_temp = transfer_ic1[ptr->index_tt_t0] + transfer_ic1[ptr->index_tt_t1] + transfer_ic1[ptr->index_tt_t2]; transfer_ic2_temp = transfer_ic2[ptr->index_tt_t0] + transfer_ic2[ptr->index_tt_t1] + transfer_ic2[ptr->index_tt_t2]; } if (_vectors_) { transfer_ic1_temp = transfer_ic1[ptr->index_tt_t1] + transfer_ic1[ptr->index_tt_t2]; transfer_ic2_temp = transfer_ic2[ptr->index_tt_t1] + transfer_ic2[ptr->index_tt_t2]; } if (_tensors_) { transfer_ic1_temp = transfer_ic1[ptr->index_tt_t2]; transfer_ic2_temp = transfer_ic2[ptr->index_tt_t2]; } } if (ppt->has_cl_number_count == _TRUE_) { for (index_d1=0; index_d1<psp->d_size; index_d1++) { transfer_ic1_nc[index_d1] = 0.; transfer_ic2_nc[index_d1] = 0.; if (ppt->has_nc_density == _TRUE_) { transfer_ic1_nc[index_d1] += transfer_ic1[ptr->index_tt_density+index_d1]; transfer_ic2_nc[index_d1] += transfer_ic2[ptr->index_tt_density+index_d1]; } if (ppt->has_nc_rsd == _TRUE_) { transfer_ic1_nc[index_d1] += transfer_ic1[ptr->index_tt_rsd+index_d1] + transfer_ic1[ptr->index_tt_d0+index_d1] + transfer_ic1[ptr->index_tt_d1+index_d1]; transfer_ic2_nc[index_d1] += transfer_ic2[ptr->index_tt_rsd+index_d1] + transfer_ic2[ptr->index_tt_d0+index_d1] + transfer_ic2[ptr->index_tt_d1+index_d1]; } if (ppt->has_nc_lens == _TRUE_) { transfer_ic1_nc[index_d1] += psp->l[index_l]*(psp->l[index_l]+1.)*transfer_ic1[ptr->index_tt_nc_lens+index_d1]; transfer_ic2_nc[index_d1] += psp->l[index_l]*(psp->l[index_l]+1.)*transfer_ic2[ptr->index_tt_nc_lens+index_d1]; } if (ppt->has_nc_gr == _TRUE_) { transfer_ic1_nc[index_d1] += transfer_ic1[ptr->index_tt_nc_g1+index_d1] + transfer_ic1[ptr->index_tt_nc_g2+index_d1] + transfer_ic1[ptr->index_tt_nc_g3+index_d1] + transfer_ic1[ptr->index_tt_nc_g4+index_d1] + transfer_ic1[ptr->index_tt_nc_g5+index_d1]; transfer_ic2_nc[index_d1] += transfer_ic2[ptr->index_tt_nc_g1+index_d1] + transfer_ic2[ptr->index_tt_nc_g2+index_d1] + transfer_ic2[ptr->index_tt_nc_g3+index_d1] + transfer_ic2[ptr->index_tt_nc_g4+index_d1] + transfer_ic2[ptr->index_tt_nc_g5+index_d1]; } } } /* integrand of Cl's */ /* note: we must integrate C_l = int [4 pi dk/k calP(k) Delta1_l(q) Delta2_l(q)] where calP(k) is the dimensionless power spectrum equal to a constant in the scale-invariant case, and to P(k) = A_s k^(ns-1) otherwise and q=sqrt(k2+K) (scalars) or sqrt(k2+2K) (vectors) or sqrt(k2+3K) (tensors) In the literature, people often rewrite the integral in terms of q and absorb the Jacobian of the change of variables in a redefinition of the primodial spectrum. 
Let us illustrate this for scalars: dk/k = kdk/k2 = qdq/k2 = dq/q * (q/k)^2 = dq/q * [q2/(q2-K)] = q2dq * 1/[q(q2-K)] This factor 1/[q(q2-K)] is commonly absorbed in the definition of calP. Then one would have C_l = int [4 pi q2 dq {A_s k^(ns-1)/[q(q2-K)]} Delta1_l(q) Delta2_l(q)] Sometimes in the literature, the factor (k2-3K)=(q2-4K) present in the initial conditions of scalar transfer functions (if normalized to curvature R=1) is also absorbed in the definition of the power spectrum. Then the curvature power spectrum reads calP = (q2-4K)/[q(q2-K)] * A_s k^(ns-1). In CLASS we prefer to define calP = A_s k^(ns-1) like in the flat case, to have the factor (q2-4K) in the initial conditions, and the factor 1/[q(q2-K)] doesn't need to be there since we integrate over dk/k. For tensors, the change of variable described above gives a slightly different result: dk/k = kdk/k2 = qdq/k2 = dq/q * (q/k)^2 = dq/q * [q2/(q2-3K)] = q2dq * 1/[q(q2-3K)] But for tensors there are extra curvature-related correction factors to take into account. See the comments in the perturbation module, related to initial conditions for tensors. */ factor = 4. * _PI_ / k; if (psp->has_tt == _TRUE_) cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_tt]= primordial_pk[index_ic1_ic2] * transfer_ic1_temp * transfer_ic2_temp * factor; if (psp->has_ee == _TRUE_) cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_ee]= primordial_pk[index_ic1_ic2] * transfer_ic1[ptr->index_tt_e] * transfer_ic2[ptr->index_tt_e] * factor; if (psp->has_te == _TRUE_) cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_te]= primordial_pk[index_ic1_ic2] * 0.5*(transfer_ic1_temp * transfer_ic2[ptr->index_tt_e] + transfer_ic1[ptr->index_tt_e] * transfer_ic2_temp) * factor; if (_tensors_ && (psp->has_bb == _TRUE_)) cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_bb]= primordial_pk[index_ic1_ic2] * transfer_ic1[ptr->index_tt_b] * transfer_ic2[ptr->index_tt_b] * factor; if (_scalars_ && (psp->has_pp == _TRUE_)) cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_pp]= primordial_pk[index_ic1_ic2] * transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2[ptr->index_tt_lcmb] * factor; if (_scalars_ && (psp->has_tp == _TRUE_)) cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_tp]= primordial_pk[index_ic1_ic2] * 0.5*(transfer_ic1_temp * transfer_ic2[ptr->index_tt_lcmb] + transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2_temp) * factor; if (_scalars_ && (psp->has_ep == _TRUE_)) cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_ep]= primordial_pk[index_ic1_ic2] * 0.5*(transfer_ic1[ptr->index_tt_e] * transfer_ic2[ptr->index_tt_lcmb] + transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2[ptr->index_tt_e]) * factor; if (_scalars_ && (psp->has_dd == _TRUE_)) { index_ct=0; for (index_d1=0; index_d1<psp->d_size; index_d1++) { for (index_d2=index_d1; index_d2<=MIN(index_d1+psp->non_diag,psp->d_size-1); index_d2++) { cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_dd+index_ct]= primordial_pk[index_ic1_ic2] * transfer_ic1_nc[index_d1] * transfer_ic2_nc[index_d2] * factor; index_ct++; } } } if (_scalars_ && (psp->has_td == _TRUE_)) { for (index_d1=0; index_d1<psp->d_size; index_d1++) { cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_td+index_d1]= primordial_pk[index_ic1_ic2] * 0.5*(transfer_ic1_temp * transfer_ic2_nc[index_d1] + transfer_ic1_nc[index_d1] * transfer_ic2_temp) * factor; } } if (_scalars_ && (psp->has_pd == _TRUE_)) { for (index_d1=0; index_d1<psp->d_size; index_d1++) {
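/* cross-correlation of the CMB lensing potential with each density bin, symmetrized over the two transfer functions */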
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_pd+index_d1]= primordial_pk[index_ic1_ic2] * 0.5*(transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2_nc[index_d1] + transfer_ic1_nc[index_d1] * transfer_ic2[ptr->index_tt_lcmb]) * factor; } } if (_scalars_ && (psp->has_ll == _TRUE_)) { index_ct=0; for (index_d1=0; index_d1<psp->d_size; index_d1++) { for (index_d2=index_d1; index_d2<=MIN(index_d1+psp->non_diag,psp->d_size-1); index_d2++) { cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_ll+index_ct]= primordial_pk[index_ic1_ic2] * transfer_ic1[ptr->index_tt_lensing+index_d1] * transfer_ic2[ptr->index_tt_lensing+index_d2] * factor; index_ct++; } } } if (_scalars_ && (psp->has_tl == _TRUE_)) { for (index_d1=0; index_d1<psp->d_size; index_d1++) { cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_tl+index_d1]= primordial_pk[index_ic1_ic2] * 0.5*(transfer_ic1_temp * transfer_ic2[ptr->index_tt_lensing+index_d1] + transfer_ic1[ptr->index_tt_lensing+index_d1] * transfer_ic2_temp) * factor; } } if (_scalars_ && (psp->has_dl == _TRUE_)) { index_ct=0; for (index_d1=0; index_d1<psp->d_size; index_d1++) { for (index_d2=MAX(index_d1-psp->non_diag,0); index_d2<=MIN(index_d1+psp->non_diag,psp->d_size-1); index_d2++) { cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_dl+index_ct]= primordial_pk[index_ic1_ic2] * transfer_ic1_nc[index_d1] * transfer_ic2[ptr->index_tt_lensing+index_d2] * factor; index_ct++; } } } } for (index_ct=0; index_ct<psp->ct_size; index_ct++) { /* treat null spectra (C_l^BB of scalars, C_l^pp of tensors, etc. */ if ((_scalars_ && (psp->has_bb == _TRUE_) && (index_ct == psp->index_ct_bb)) || (_tensors_ && (psp->has_pp == _TRUE_) && (index_ct == psp->index_ct_pp)) || (_tensors_ && (psp->has_tp == _TRUE_) && (index_ct == psp->index_ct_tp)) || (_tensors_ && (psp->has_ep == _TRUE_) && (index_ct == psp->index_ct_ep)) || (_tensors_ && (psp->has_dd == _TRUE_) && (index_ct == psp->index_ct_dd)) || (_tensors_ && (psp->has_td == _TRUE_) && (index_ct == psp->index_ct_td)) || (_tensors_ && (psp->has_pd == _TRUE_) && (index_ct == psp->index_ct_pd)) || (_tensors_ && (psp->has_ll == _TRUE_) && (index_ct == psp->index_ct_ll)) || (_tensors_ && (psp->has_tl == _TRUE_) && (index_ct == psp->index_ct_tl)) || (_tensors_ && (psp->has_dl == _TRUE_) && (index_ct == psp->index_ct_dl)) ) { psp->cl[index_md] [(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct] = 0.; } /* for non-zero spectra, integrate over q */ else { /* spline the integrand over the whole range of k's */ class_call(array_spline(cl_integrand, cl_integrand_num_columns, ptr->q_size, 0, 1+index_ct, 1+psp->ct_size+index_ct, _SPLINE_EST_DERIV_, psp->error_message), psp->error_message, psp->error_message); /* Technical point: we will now do a spline integral over the whole range of k's, excepted in the closed (K>0) case. In that case, it is a bad idea to spline over the values of k corresponding to nu<nu_flat_approximation. In this region, nu values are integer values, so the steps dq and dk have some discrete jumps. This makes the spline routine less accurate than a trapezoidal integral with finer sampling. So, in the closed case, we set index_q_spline to ptr->index_q_flat_approximation, to tell the integration routine that below this index, it should treat the integral as a trapezoidal one. For testing, one is free to set index_q_spline to 0, to enforce spline integration everywhere, or to (ptr->q_size-1), to enforce trapezoidal integration everywhere. 
*/ if (pba->sgnK == 1) { index_q_spline = ptr->index_q_flat_approximation; } class_call(array_integrate_all_trapzd_or_spline(cl_integrand, cl_integrand_num_columns, ptr->q_size, index_q_spline, 0, 1+index_ct, 1+psp->ct_size+index_ct, &clvalue, psp->error_message), psp->error_message, psp->error_message); /* in the closed case, instead of an integral, we have a discrete sum. In practice, this does not matter: the previous routine does give a correct approximation of the discrete sum, both in the trapezoidal and spline regions. The only error comes from the first point: the previous routine assumes a weight for the first point which is too small compared to what it would be in an actual discrete sum. The line below corrects this problem in an exact way. */ if (pba->sgnK == 1) { clvalue += cl_integrand[1+index_ct] * ptr->q[0]/ptr->k[0][0]*sqrt(pba->K)/2.; } /* we have the correct C_l now. We can store it in the spectra structure. */ psp->cl[index_md] [(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct] = clvalue; } } if (ppt->has_cl_number_count == _TRUE_) { free(transfer_ic1_nc); free(transfer_ic2_nc); } return _SUCCESS_; } /** * This routine computes the values of k and tau at which the matter * power spectra \f$ P(k,\tau)\f$ and the matter transfer functions \f$ T_i(k,\tau)\f$ * will be stored. * * @param pba Input: pointer to background structure (for z to tau conversion) * @param ppt Input: pointer to perturbation structure (contain source functions) * @param psp Input/Output: pointer to spectra structure * @return the error status */ int spectra_k_and_tau( struct background * pba, struct perturbs * ppt, struct spectra * psp ) { /** Summary: */ /** - define local variables */ int index_k; int index_tau; double tau_min; /** - check the presence of scalar modes */ class_test((ppt->has_scalars == _FALSE_), psp->error_message, "you cannot ask for matter power spectrum since you turned off scalar modes"); /** - check the maximum redshift z_max_pk at which \f$P(k,z)\f$ and \f$ T_i(k,z)\f$ should be computable by interpolation. If it is equal to zero, only \f$ P(k,z=0)\f$ needs to be computed. If it is higher, we will store in a table various P(k,tau) at several values of tau generously encompassing the range 0<z<z_max_pk */ /* if z_max_pk<0, return error */ class_test((psp->z_max_pk < 0), psp->error_message, "asked for negative redshift z=%e",psp->z_max_pk); /* if z_max_pk=0, there is just one value to store */ if (psp->z_max_pk == 0.) { psp->ln_tau_size=1; } /* if z_max_pk>0, store several values (with a comfortable margin above z_max_pk) in view of interpolation */ else { /* find the first relevant value of tau (last value in the table tau_sampling before tau(z_max)) and infer the number of values of tau at which P(k) must be stored */ class_call(background_tau_of_z(pba,psp->z_max_pk,&tau_min), pba->error_message, psp->error_message); index_tau=0; class_test((tau_min <= ppt->tau_sampling[index_tau]), psp->error_message, "you asked for zmax=%e, i.e.
taumin=%e, smaller than or equal to the first possible value =%e; it should be strictly bigger for a successful interpolation",psp->z_max_pk,tau_min,ppt->tau_sampling[0]); while (ppt->tau_sampling[index_tau] < tau_min) { index_tau++; } index_tau--; class_test(index_tau<0, psp->error_message, "by construction, this should never happen, a bug must have been introduced somewhere"); /* whenever possible, take a few more values in order to avoid boundary effects in the interpolation */ if (index_tau>0) index_tau--; if (index_tau>0) index_tau--; if (index_tau>0) index_tau--; if (index_tau>0) index_tau--; psp->ln_tau_size=ppt->tau_size-index_tau; } /** - allocate and fill table of tau values at which \f$P(k,\tau)\f$ and \f$T_i(k,\tau)\f$ are stored */ class_alloc(psp->ln_tau,sizeof(double)*psp->ln_tau_size,psp->error_message); for (index_tau=0; index_tau<psp->ln_tau_size; index_tau++) { psp->ln_tau[index_tau]=log(ppt->tau_sampling[index_tau-psp->ln_tau_size+ppt->tau_size]); } /** - allocate and fill table of k values at which \f$ P(k,\tau)\f$ is stored */ psp->ln_k_size = ppt->k_size[ppt->index_md_scalars]; class_alloc(psp->ln_k,sizeof(double)*psp->ln_k_size,psp->error_message); for (index_k=0; index_k<psp->ln_k_size; index_k++) { class_test(ppt->k[ppt->index_md_scalars][index_k] <= 0., psp->error_message, "stop to avoid segmentation fault"); psp->ln_k[index_k]=log(ppt->k[ppt->index_md_scalars][index_k]); } return _SUCCESS_; } /** * This routine computes a table of values for all matter power spectra P(k), * given the source functions and primordial spectra. * * @param pba Input: pointer to background structure (will provide H, Omega_m at redshift of interest) * @param ppt Input: pointer to perturbation structure (contain source functions) * @param ppm Input: pointer to primordial structure * @param pnl Input: pointer to nonlinear structure * @param psp Input/Output: pointer to spectra structure * @return the error status */ int spectra_pk( struct background * pba, struct perturbs * ppt, struct primordial * ppm, struct nonlinear *pnl, struct spectra * psp ) { /** Summary: */ /** - define local variables */ int index_md; int index_ic1,index_ic2,index_ic1_ic2; int index_k; int index_tau; double * primordial_pk; /* array with argument primordial_pk[index_ic_ic] */ double source_ic1; double source_ic2; double ln_pk_tot; /** - check the presence of scalar modes */ class_test((ppt->has_scalars == _FALSE_), psp->error_message, "you cannot ask for matter power spectrum since you turned off scalar modes"); index_md = psp->index_md_scalars; /** - allocate temporary vectors where the primordial spectrum and the background quantities will be stored */ class_alloc(primordial_pk,psp->ic_ic_size[index_md]*sizeof(double),psp->error_message); /** - allocate and fill array of \f$P(k,\tau)\f$ values */ class_alloc(psp->ln_pk, sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_ic_size[index_md], psp->error_message); if (pnl->method != nl_none) { class_alloc(psp->ln_pk_nl, sizeof(double)*psp->ln_tau_size*psp->ln_k_size, psp->error_message); } else { psp->ln_pk_nl = NULL; } for (index_tau=0; index_tau < psp->ln_tau_size; index_tau++) { for (index_k=0; index_k<psp->ln_k_size; index_k++) { class_call(primordial_spectrum_at_k(ppm,index_md,logarithmic,psp->ln_k[index_k],primordial_pk), ppm->error_message, psp->error_message); ln_pk_tot = 0.; /* curvature primordial spectrum: P_R(k) = 1/(2pi^2) k^3 <R R> so, primordial curvature correlator: <R R> = (2pi^2) k^-3 P_R(k) so, delta_m correlator: P(k) = <delta_m delta_m> =
(2pi^2) k^-3 (source_m)^2 P_R(k) For isocurvature or cross adiabatic-isocurvature parts, replace one or two 'R' by 'S_i's */ /* part diagonal in initial conditions */ for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md]); source_ic1 = ppt->sources[index_md] [index_ic1 * ppt->tp_size[index_md] + ppt->index_tp_delta_m] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2] = log(2.*_PI_*_PI_/exp(3.*psp->ln_k[index_k]) *source_ic1*source_ic1 *exp(primordial_pk[index_ic1_ic2])); ln_pk_tot += psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2]; } /* part non-diagonal in initial conditions */ for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1+1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) { source_ic1 = ppt->sources[index_md] [index_ic1 * ppt->tp_size[index_md] + ppt->index_tp_delta_m] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; source_ic2 = ppt->sources[index_md] [index_ic2 * ppt->tp_size[index_md] + ppt->index_tp_delta_m] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2] = primordial_pk[index_ic1_ic2]*SIGN(source_ic1)*SIGN(source_ic2); ln_pk_tot += psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2]; } else { psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2] = 0.; } } } /* if non-linear corrections required, compute the total non-linear matter power spectrum */ if (pnl->method != nl_none) { psp->ln_pk_nl[index_tau * psp->ln_k_size + index_k] = ln_pk_tot + 2.*log(pnl->nl_corr_density[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]); } } } /**- if interpolation of \f$P(k,\tau)\f$ will be needed (as a function of tau), compute array of second derivatives in view of spline interpolation */ if (psp->ln_tau_size > 1) { class_alloc(psp->ddln_pk,sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_ic_size[index_md],psp->error_message); class_call(array_spline_table_lines(psp->ln_tau, psp->ln_tau_size, psp->ln_pk, psp->ic_ic_size[index_md]*psp->ln_k_size, psp->ddln_pk, _SPLINE_EST_DERIV_, psp->error_message), psp->error_message, psp->error_message); } /* compute sigma8 (mean variance today in sphere of radius 8/h Mpc */ class_call(spectra_sigma(pba,ppm,psp,8./pba->h,0.,&(psp->sigma8)), psp->error_message, psp->error_message); if (psp->spectra_verbose>0) fprintf(stdout," -> sigma8=%g (computed till k = %g h/Mpc)\n", psp->sigma8, exp(psp->ln_k[psp->ln_k_size-1])/pba->h); /**- if interpolation of \f$ P_{NL}(k,\tau)\f$ will be needed (as a function of tau), compute array of second derivatives in view of spline interpolation */ if (pnl->method != nl_none) { if (psp->ln_tau_size > 1) { class_alloc(psp->ddln_pk_nl,sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_ic_size[index_md],psp->error_message); class_call(array_spline_table_lines(psp->ln_tau, psp->ln_tau_size, psp->ln_pk_nl, psp->ln_k_size, psp->ddln_pk_nl, _SPLINE_EST_DERIV_, psp->error_message), psp->error_message, 
psp->error_message); } } free (primordial_pk); return _SUCCESS_; } /** * This routine computes sigma(R) given P(k) (does not check that k_max is large * enough) * * @param pba Input: pointer to background structure * @param ppm Input: pointer to primordial structure * @param psp Input: pointer to spectra structure * @param R Input: radius in Mpc * @param z Input: redshift * @param sigma Output: square root of the variance in a sphere of radius R (dimensionless) * @return the error status */ int spectra_sigma( struct background * pba, struct primordial * ppm, struct spectra * psp, double R, double z, double * sigma ) { double pk; double * pk_ic = NULL; double * array_for_sigma; int index_num; int index_k; int index_y; int index_ddy; int i; double k,W,x; if (psp->ic_ic_size[psp->index_md_scalars]>1) class_alloc(pk_ic, psp->ic_ic_size[psp->index_md_scalars]*sizeof(double), psp->error_message); i=0; index_k=i; i++; index_y=i; i++; index_ddy=i; i++; index_num=i; class_alloc(array_for_sigma, psp->ln_k_size*index_num*sizeof(double), psp->error_message); for (i=0;i<psp->ln_k_size;i++) { k=exp(psp->ln_k[i]); if (i == (psp->ln_k_size-1)) k *= 0.9999999; // to prevent rounding error leading to k being bigger than maximum value x=k*R; W=3./x/x/x*(sin(x)-x*cos(x)); class_call(spectra_pk_at_k_and_z(pba,ppm,psp,k,z,&pk,pk_ic), psp->error_message, psp->error_message); array_for_sigma[i*index_num+index_k]=k; array_for_sigma[i*index_num+index_y]=k*k*pk*W*W; } class_call(array_spline(array_for_sigma, index_num, psp->ln_k_size, index_k, index_y, index_ddy, _SPLINE_EST_DERIV_, psp->error_message), psp->error_message, psp->error_message); class_call(array_integrate_all_spline(array_for_sigma, index_num, psp->ln_k_size, index_k, index_y, index_ddy, sigma, psp->error_message), psp->error_message, psp->error_message); free(array_for_sigma); if (psp->ic_ic_size[psp->index_md_scalars]>1) free(pk_ic); *sigma = sqrt(*sigma/(2.*_PI_*_PI_)); return _SUCCESS_; } /** * This routine computes a table of values for all matter transfer functions \f$T_i(k,\tau)\f$, * given the source functions. 
* * @param pba Input: pointer to background structure (will provide density of each species) * @param ppt Input: pointer to perturbation structure (contain source functions) * @param psp Input/Output: pointer to spectra structure * @return the error status */ int spectra_matter_transfers( struct background * pba, struct perturbs * ppt, struct spectra * psp ) { /** Summary: */ /** - define local variables */ int index_md; int index_ic; int index_k; int index_tau; int last_index_back; double * pvecback_sp_long; /* array with argument pvecback_sp_long[pba->index_bg] */ double delta_i,theta_i,rho_i; double delta_rho_tot,rho_tot; double rho_plus_p_theta_tot,rho_plus_p_tot; int n_ncdm; /** - check the presence of scalar modes */ class_test((ppt->has_scalars == _FALSE_), psp->error_message, "you cannot ask for matter power spectrum since you turned off scalar modes"); index_md = psp->index_md_scalars; /** - allocate and fill array of \f$ T_i(k,\tau)\f$ values */ class_alloc(psp->matter_transfer,sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size,psp->error_message); /** - allocate temporary vectors where the background quantities will be stored */ class_alloc(pvecback_sp_long,pba->bg_size*sizeof(double),psp->error_message); for (index_tau=0 ; index_tau < psp->ln_tau_size; index_tau++) { class_call(background_at_tau(pba, ppt->tau_sampling[index_tau-psp->ln_tau_size+ppt->tau_size], /* for this last argument we could have passed exp(psp->ln_tau[index_tau]) but we would then lose precision in the exp(log(x)) operation */ pba->long_info, pba->inter_normal, &last_index_back, pvecback_sp_long), pba->error_message, psp->error_message); for (index_k=0; index_k<psp->ln_k_size; index_k++) { for (index_ic = 0; index_ic < psp->ic_size[index_md]; index_ic++) { delta_rho_tot=0.; rho_tot=0.; rho_plus_p_theta_tot=0.; rho_plus_p_tot=0.; /* T_g(k,tau) */ rho_i = pvecback_sp_long[pba->index_bg_rho_g]; if (ppt->has_source_delta_g == _TRUE_) { delta_i = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_g] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_g] = delta_i; delta_rho_tot += rho_i * delta_i; rho_tot += rho_i; } if (ppt->has_source_theta_g == _TRUE_) { theta_i = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_g] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_g] = theta_i; rho_plus_p_theta_tot += 4./3. * rho_i * theta_i; rho_plus_p_tot += 4./3. 
* rho_i; } /* T_b(k,tau) */ rho_i = pvecback_sp_long[pba->index_bg_rho_b]; if (ppt->has_source_delta_b == _TRUE_) { delta_i = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_b] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_b] = delta_i; delta_rho_tot += rho_i * delta_i; } rho_tot += rho_i; if (ppt->has_source_theta_b == _TRUE_) { theta_i = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_b] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_b] = theta_i; rho_plus_p_theta_tot += rho_i * theta_i; } rho_plus_p_tot += rho_i; /* T_cdm(k,tau) */ if (pba->has_cdm == _TRUE_) { rho_i = pvecback_sp_long[pba->index_bg_rho_cdm]; if (ppt->has_source_delta_cdm == _TRUE_) { delta_i = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_cdm] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_cdm] = delta_i; delta_rho_tot += rho_i * delta_i; } rho_tot += rho_i; if (ppt->has_source_theta_cdm == _TRUE_) { theta_i = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_cdm] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_cdm] = theta_i; rho_plus_p_theta_tot += rho_i * theta_i; } rho_plus_p_tot += rho_i; } /* T_dcdm(k,tau) */ if (pba->has_dcdm == _TRUE_) { rho_i = pvecback_sp_long[pba->index_bg_rho_dcdm]; if (ppt->has_source_delta_dcdm == _TRUE_) { delta_i = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_dcdm] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_dcdm] = delta_i; delta_rho_tot += rho_i * delta_i; } rho_tot += rho_i; if (ppt->has_source_theta_dcdm == _TRUE_) { theta_i = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_dcdm] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_dcdm] = theta_i; rho_plus_p_theta_tot += rho_i * theta_i; } rho_plus_p_tot += rho_i; } /* T_scf(k,tau) */ if (pba->has_scf == _TRUE_) { rho_i = pvecback_sp_long[pba->index_bg_rho_scf]; if (ppt->has_source_delta_scf == _TRUE_) { delta_i = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_scf] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_scf] = delta_i; delta_rho_tot += rho_i * delta_i; } rho_tot += rho_i; if (ppt->has_source_theta_scf == _TRUE_) { theta_i = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_scf] [(index_tau-psp->ln_tau_size+ppt->tau_size) * 
ppt->k_size[index_md] + index_k]; psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_scf] = theta_i; rho_plus_p_theta_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_scf]) * theta_i; } rho_plus_p_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_scf]); } /* T_fld(k,tau) */ if (pba->has_fld == _TRUE_) { rho_i = pvecback_sp_long[pba->index_bg_rho_fld]; if (ppt->has_source_delta_fld == _TRUE_) { delta_i = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_fld] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_fld] = delta_i; delta_rho_tot += rho_i * delta_i; } rho_tot += rho_i; if (ppt->has_source_theta_fld == _TRUE_) { theta_i = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_fld] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_fld] = theta_i; rho_plus_p_theta_tot += (1. + pba->w0_fld + pba->wa_fld * (1. - pvecback_sp_long[pba->index_bg_a] / pba->a_today)) * rho_i * theta_i; } rho_plus_p_tot += (1. + pba->w0_fld + pba->wa_fld * (1. - pvecback_sp_long[pba->index_bg_a] / pba->a_today)) * rho_i; } /* T_ur(k,tau) */ if (pba->has_ur == _TRUE_) { rho_i = pvecback_sp_long[pba->index_bg_rho_ur]; if (ppt->has_source_delta_ur == _TRUE_) { delta_i = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_ur] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_ur] = delta_i; delta_rho_tot += rho_i * delta_i; } rho_tot += rho_i; if (ppt->has_source_theta_ur == _TRUE_) { theta_i = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_ur] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_ur] = theta_i; rho_plus_p_theta_tot += 4./3. * rho_i * theta_i; } rho_plus_p_tot += 4./3. * rho_i; } /* T_dr(k,tau) */ if (pba->has_dr == _TRUE_) { rho_i = pvecback_sp_long[pba->index_bg_rho_dr]; if (ppt->has_source_delta_dr == _TRUE_) { delta_i = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_dr] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_dr] = delta_i; delta_rho_tot += rho_i * delta_i; } rho_tot += rho_i; if (ppt->has_source_theta_dr == _TRUE_) { theta_i = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_dr] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_dr] = theta_i; rho_plus_p_theta_tot += 4./3. * rho_i * theta_i; } rho_plus_p_tot += 4./3. 
* rho_i; } /* T_ncdm_i(k,tau) */ if (pba->has_ncdm == _TRUE_) { for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) { rho_i = pvecback_sp_long[pba->index_bg_rho_ncdm1+n_ncdm]; if (ppt->has_source_delta_ncdm == _TRUE_) { delta_i = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_ncdm1+n_ncdm] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_ncdm1+n_ncdm] = delta_i; delta_rho_tot += rho_i * delta_i; } rho_tot += rho_i; if (ppt->has_source_theta_ncdm == _TRUE_) { theta_i = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_ncdm1+n_ncdm] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_ncdm1+n_ncdm] = theta_i; rho_plus_p_theta_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_ncdm1+n_ncdm]) * theta_i; } rho_plus_p_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_ncdm1+n_ncdm]); } } if (ppt->has_source_phi == _TRUE_) { psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_phi] = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_phi] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; } if (ppt->has_source_psi == _TRUE_) { psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_psi] = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + ppt->index_tp_psi] [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]; } /* could include homogeneous component in rho_tot if uncommented (leave commented to match CMBFAST/CAMB definition) */ /* if (pba->has_lambda == _TRUE_) { */ /* rho_i = pvecback_sp_long[pba->index_bg_rho_lambda]; */ /* rho_tot += rho_i; */ /* } */ /* T_tot(k,tau) */ if (ppt->has_density_transfers == _TRUE_) { psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_tot] = delta_rho_tot/rho_tot; } if (ppt->has_velocity_transfers == _TRUE_) { psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_tot] = rho_plus_p_theta_tot/rho_plus_p_tot; } } } } /** - if interpolation of \f$ T_i(k,\tau)\f$ will be needed (as a function of tau), compute array of second derivatives in view of spline interpolation */ if (psp->ln_tau_size > 1) { class_alloc(psp->ddmatter_transfer,sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size,psp->error_message); class_call(array_spline_table_lines(psp->ln_tau, psp->ln_tau_size, psp->matter_transfer, psp->ic_size[index_md]*psp->ln_k_size*psp->tr_size, psp->ddmatter_transfer, _SPLINE_EST_DERIV_, psp->error_message), psp->error_message, psp->error_message); } free (pvecback_sp_long); return _SUCCESS_; } int spectra_output_tk_titles(struct background *pba, struct perturbs *ppt, enum file_format output_format, char titles[_MAXTITLESTRINGLENGTH_] ){ int n_ncdm; char tmp[40]; if (output_format == class_format) { class_store_columntitle(titles,"k (h/Mpc)",_TRUE_); if (ppt->has_density_transfers == _TRUE_) { class_store_columntitle(titles,"d_g",_TRUE_); class_store_columntitle(titles,"d_b",_TRUE_); 
class_store_columntitle(titles,"d_cdm",pba->has_cdm); class_store_columntitle(titles,"d_fld",pba->has_fld); class_store_columntitle(titles,"d_ur",pba->has_ur); if (pba->has_ncdm == _TRUE_) { for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) { sprintf(tmp,"d_ncdm[%d]",n_ncdm); class_store_columntitle(titles,tmp,_TRUE_); } } class_store_columntitle(titles,"d_dcdm",pba->has_dcdm); class_store_columntitle(titles,"d_dr",pba->has_dr); class_store_columntitle(titles,"d_scf",pba->has_scf); class_store_columntitle(titles,"d_tot",_TRUE_); class_store_columntitle(titles,"phi",ppt->has_source_phi); class_store_columntitle(titles,"psi",ppt->has_source_psi); } if (ppt->has_velocity_transfers == _TRUE_) { class_store_columntitle(titles,"t_g",_TRUE_); class_store_columntitle(titles,"t_b",_TRUE_); class_store_columntitle(titles,"t_cdm",((pba->has_cdm == _TRUE_) && (ppt->gauge != synchronous))); class_store_columntitle(titles,"t_fld",pba->has_fld); class_store_columntitle(titles,"t_ur",pba->has_ur); if (pba->has_ncdm == _TRUE_) { for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) { sprintf(tmp,"t_ncdm[%d]",n_ncdm); class_store_columntitle(titles,tmp,_TRUE_); } } class_store_columntitle(titles,"t_dcdm",pba->has_dcdm); class_store_columntitle(titles,"t_dr",pba->has_dr); class_store_columntitle(titles,"t__scf",pba->has_scf); class_store_columntitle(titles,"t_tot",_TRUE_); } } else if (output_format == camb_format) { class_store_columntitle(titles,"k (h/Mpc)",_TRUE_); class_store_columntitle(titles,"-T_cdm/k2",_TRUE_); class_store_columntitle(titles,"-T_b/k2",_TRUE_); class_store_columntitle(titles,"-T_g/k2",_TRUE_); class_store_columntitle(titles,"-T_ur/k2",_TRUE_); class_store_columntitle(titles,"-T_ncdm/k2",_TRUE_); class_store_columntitle(titles,"-T_tot/k2",_TRUE_); } return _SUCCESS_; } int spectra_output_tk_data( struct background * pba, struct perturbs * ppt, struct spectra * psp, enum file_format output_format, double z, int number_of_titles, double *data ) { int n_ncdm; double k, k_over_h, k2; double * tkfull=NULL; /* array with argument pk_ic[(index_k * psp->ic_size[index_md] + index_ic)*psp->tr_size+index_tr] */ double *tk; double *dataptr; int index_md=0; int index_ic; int index_k; int index_tr; int storeidx; if (psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size > 0){ class_alloc(tkfull, psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size*sizeof(double), psp->error_message); } /** - compute \f$T_i(k)\f$ for each k (if several ic's, compute it for each ic; if z_pk = 0, this is done by directly reading inside the pre-computed table; if not, this is done by interpolating the table at the correct value of tau. */ /* if z_pk = 0, no interpolation needed */ if (z == 0.) 
{ for (index_k=0; index_k<psp->ln_k_size; index_k++) { for (index_tr=0; index_tr<psp->tr_size; index_tr++) { for (index_ic=0; index_ic<psp->ic_size[index_md]; index_ic++) { tkfull[(index_k * psp->ic_size[index_md] + index_ic) * psp->tr_size + index_tr] = psp->matter_transfer[(((psp->ln_tau_size-1)*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + index_tr]; } } } } /* if 0 <= z_pk <= z_max_pk, interpolation needed, */ else { class_call(spectra_tk_at_z(pba, psp, z, tkfull), psp->error_message, psp->error_message); } /** - store data */ for (index_ic = 0; index_ic < psp->ic_size[index_md]; index_ic++) { for (index_k=0; index_k<psp->ln_k_size; index_k++) { storeidx = 0; dataptr = data+index_ic*(psp->ln_k_size*number_of_titles)+index_k*number_of_titles; tk = &(tkfull[(index_k * psp->ic_size[index_md] + index_ic) * psp->tr_size]); k = exp(psp->ln_k[index_k]); k2 = k*k; k_over_h = k/pba->h; class_store_double(dataptr, k_over_h, _TRUE_,storeidx); /* indices for species associated with a velocity transfer function in Fourier space */ if (output_format == class_format) { if (ppt->has_density_transfers == _TRUE_) { class_store_double(dataptr,tk[psp->index_tr_delta_g],ppt->has_source_delta_g,storeidx); class_store_double(dataptr,tk[psp->index_tr_delta_b],ppt->has_source_delta_b,storeidx); class_store_double(dataptr,tk[psp->index_tr_delta_cdm],ppt->has_source_delta_cdm,storeidx); class_store_double(dataptr,tk[psp->index_tr_delta_fld],ppt->has_source_delta_fld,storeidx); class_store_double(dataptr,tk[psp->index_tr_delta_ur],ppt->has_source_delta_ur,storeidx); if (pba->has_ncdm == _TRUE_){ for (n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){ class_store_double(dataptr,tk[psp->index_tr_delta_ncdm1+n_ncdm],ppt->has_source_delta_ncdm,storeidx); } } class_store_double(dataptr,tk[psp->index_tr_delta_dcdm],ppt->has_source_delta_dcdm,storeidx); class_store_double(dataptr,tk[psp->index_tr_delta_dr],ppt->has_source_delta_dr,storeidx); class_store_double(dataptr,tk[psp->index_tr_delta_scf],ppt->has_source_delta_scf,storeidx); class_store_double(dataptr,tk[psp->index_tr_delta_tot],_TRUE_,storeidx); class_store_double(dataptr,tk[psp->index_tr_phi],ppt->has_source_phi,storeidx); class_store_double(dataptr,tk[psp->index_tr_psi],ppt->has_source_psi,storeidx); } if (ppt->has_velocity_transfers == _TRUE_) { class_store_double(dataptr,tk[psp->index_tr_theta_g],ppt->has_source_theta_g,storeidx); class_store_double(dataptr,tk[psp->index_tr_theta_b],ppt->has_source_theta_b,storeidx); class_store_double(dataptr,tk[psp->index_tr_theta_cdm],ppt->has_source_theta_cdm,storeidx); class_store_double(dataptr,tk[psp->index_tr_theta_fld],ppt->has_source_theta_fld,storeidx); class_store_double(dataptr,tk[psp->index_tr_theta_ur],ppt->has_source_theta_ur,storeidx); if (pba->has_ncdm == _TRUE_){ for (n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){ class_store_double(dataptr,tk[psp->index_tr_theta_ncdm1+n_ncdm],ppt->has_source_theta_ncdm,storeidx); } } class_store_double(dataptr,tk[psp->index_tr_theta_dcdm],ppt->has_source_theta_dcdm,storeidx); class_store_double(dataptr,tk[psp->index_tr_theta_dr],ppt->has_source_theta_dr,storeidx); class_store_double(dataptr,tk[psp->index_tr_theta_scf],ppt->has_source_theta_scf,storeidx); class_store_double(dataptr,tk[psp->index_tr_theta_tot],_TRUE_,storeidx); } } else if (output_format == camb_format) { /* rescale and reorder the matter transfer functions following the CMBFAST/CAMB convention */ 
class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_cdm]/k2,ppt->has_source_delta_cdm,storeidx,0.0); class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_b]/k2,ppt->has_source_delta_b,storeidx,0.0); class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_g]/k2,ppt->has_source_delta_g,storeidx,0.0); class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_ur]/k2,ppt->has_source_delta_ur,storeidx,0.0); class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_ncdm1]/k2,ppt->has_source_delta_ncdm,storeidx,0.0); class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_tot]/k2,_TRUE_,storeidx,0.0); } } } //Necessary because the size could be zero (if psp->tr_size is zero) if (tkfull != NULL) free(tkfull); return _SUCCESS_; } int spectra_firstline_and_ic_suffix(struct perturbs *ppt, int index_ic, char first_line[_LINE_LENGTH_MAX_], FileName ic_suffix){ first_line[0]='\0'; ic_suffix[0]='\0'; if ((ppt->has_ad == _TRUE_) && (index_ic == ppt->index_ic_ad)) { strcpy(ic_suffix,"ad"); strcpy(first_line,"for adiabatic (AD) mode (normalized to initial curvature=1) "); } if ((ppt->has_bi == _TRUE_) && (index_ic == ppt->index_ic_bi)) { strcpy(ic_suffix,"bi"); strcpy(first_line,"for baryon isocurvature (BI) mode (normalized to initial entropy=1)"); } if ((ppt->has_cdi == _TRUE_) && (index_ic == ppt->index_ic_cdi)) { strcpy(ic_suffix,"cdi"); strcpy(first_line,"for CDM isocurvature (CDI) mode (normalized to initial entropy=1)"); } if ((ppt->has_nid == _TRUE_) && (index_ic == ppt->index_ic_nid)) { strcpy(ic_suffix,"nid"); strcpy(first_line,"for neutrino density isocurvature (NID) mode (normalized to initial entropy=1)"); } if ((ppt->has_niv == _TRUE_) && (index_ic == ppt->index_ic_niv)) { strcpy(ic_suffix,"niv"); strcpy(first_line,"for neutrino velocity isocurvature (NIV) mode (normalized to initial entropy=1)"); } return _SUCCESS_; }
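A minimal standalone sketch (not CLASS code) of the flat indexing used by the spectra tables above; the upper-triangle packing is assumed to match CLASS's index_symmetric_matrix macro, so treat the formula as illustrative:

#include <stdio.h>

/* pack the symmetric pair (i1,i2) into a row-major upper triangle */
static int sym_idx(int i1, int i2, int n) {
	if (i1 > i2) { int t = i1; i1 = i2; i2 = t; }
	return i2 + n * i1 - (i1 * (i1 + 1)) / 2;
}

int main(void) {
	int ln_k_size = 4, ic_size = 2;
	int ic_ic_size = ic_size * (ic_size + 1) / 2; /* 3 entries for a symmetric 2x2 block */
	int index_tau = 1, index_k = 2;
	/* same layout as ln_pk[(index_tau * ln_k_size + index_k) * ic_ic_size + index_ic1_ic2] */
	int flat = (index_tau * ln_k_size + index_k) * ic_ic_size + sym_idx(0, 1, ic_size);
	printf("flat index = %d\n", flat); /* (1*4+2)*3 + 1 = 19 */
	return 0;
}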
GB_unaryop__ainv_uint64_int32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_uint64_int32 // op(A') function: GB_tran__ainv_uint64_int32 // C type: uint64_t // A type: int32_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = -aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, aij) \ uint64_t z = (uint64_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_uint64_int32 ( uint64_t *Cx, // Cx and Ax may be aliased int32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_uint64_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
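Expanding the macros above by hand, each entry computes Cx [p] = -((uint64_t) Ax [p]); for an unsigned output type the additive inverse wraps modulo 2^64. A hedged standalone sketch (not part of GraphBLAS):

#include <stdint.h>
#include <stdio.h>

int main(void) {
	int32_t Ax[3] = { 3, 0, -2 };
	uint64_t Cx[3];
	for (int p = 0; p < 3; p++) {
		uint64_t z = (uint64_t) Ax[p]; /* GB_CASTING: sign-extend, then reinterpret */
		Cx[p] = -z;                    /* GB_OP: unsigned negation, wraps mod 2^64 */
	}
	/* prints 18446744073709551613 0 2 */
	printf("%llu %llu %llu\n", (unsigned long long) Cx[0],
	       (unsigned long long) Cx[1], (unsigned long long) Cx[2]);
	return 0;
}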
tool_not_available.c
// The OpenMP standard defines 3 ways of providing ompt_start_tool: // 1. "statically-linking the tool’s definition of ompt_start_tool into an OpenMP application" // RUN: %libomp-compile -DCODE -DTOOL && %libomp-run | FileCheck %s // Note: We should compile the tool without -fopenmp as other tools developer // would do. Otherwise this test may pass for the wrong reasons on Darwin. // RUN: %clang %flags -DTOOL -shared -fPIC %s -o %T/tool.so // 2. "introducing a dynamically-linked library that includes the tool’s definition of ompt_start_tool into the application’s address space" // 2.1 Link with tool during compilation // RUN: %libomp-compile -DCODE %no-as-needed-flag %T/tool.so && %libomp-run | FileCheck %s // 2.2 Link with tool during compilation, but AFTER the runtime // RUN: %libomp-compile -DCODE -lomp %no-as-needed-flag %T/tool.so && %libomp-run | FileCheck %s // 2.3 Inject tool via the dynamic loader // RUN: %libomp-compile -DCODE && %preload-tool %libomp-run | FileCheck %s // 3. "providing the name of a dynamically-linked library appropriate for the architecture and operating system used by the application in the tool-libraries-var ICV" // RUN: %libomp-compile -DCODE && env OMP_TOOL_LIBRARIES=%T/tool.so %libomp-run | FileCheck %s // REQUIRES: ompt /* * This file contains code for an OMPT shared library tool to be * loaded and the code for the OpenMP executable. * -DTOOL enables the code for the tool during compilation * -DCODE enables the code for the executable during compilation */ #ifdef CODE #include "stdio.h" #include "omp.h" #include "omp-tools.h" int main() { #pragma omp parallel num_threads(2) { #pragma omp master { int result = omp_control_tool(omp_control_tool_start, 0, NULL); printf("0: control_tool()=%d\n", result); } } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback // CHECK: {{^}}0: Do not initialize tool // CHECK: {{^}}0: control_tool()=-2 return 0; } #endif /* CODE */ #ifdef TOOL #include <omp-tools.h> #include "stdio.h" ompt_start_tool_result_t* ompt_start_tool( unsigned int omp_version, const char *runtime_version) { printf("0: Do not initialize tool\n"); return NULL; } #endif /* TOOL */
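For contrast, a hedged sketch of a tool that does attach (not part of this test): ompt_start_tool returns a non-NULL ompt_start_tool_result_t, and its initialize callback returns nonzero to keep the tool active; field order follows the OMPT interface of the OpenMP 5.0 specification.

#include <omp-tools.h>
#include <stdio.h>

static int my_initialize(ompt_function_lookup_t lookup, int initial_device_num,
                         ompt_data_t *tool_data) {
	printf("0: tool attached\n");
	return 1; /* nonzero keeps the tool active */
}
static void my_finalize(ompt_data_t *tool_data) {}
static ompt_start_tool_result_t result = {&my_initialize, &my_finalize, {0}};

ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
                                          const char *runtime_version) {
	return &result; /* returning NULL instead means "tool not available", as above */
}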
master-1.c
/* { dg-do compile } */ extern void bar(int); void foo (void) { #pragma omp master bar(0); #pragma omp master { bar(1); bar(2); } /* Yes, this is legal -- structured-block contains statement contains openmp-construct contains master-construct. */ #pragma omp master #pragma omp master #pragma omp master ; }
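A small companion sketch (not part of the gcc testsuite) of the construct's run-time meaning: only the master thread executes the block, and there is no implied barrier on entry or exit.

#include <omp.h>
#include <stdio.h>

int main(void) {
	#pragma omp parallel num_threads(4)
	{
		#pragma omp master
		printf("only thread %d runs this\n", omp_get_thread_num()); /* always 0 */
	}
	return 0;
}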
kernels.c
/*************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #include "common.h" void cpu_stencil(float c0,float c1, float *A0,float * Anext,const int nx, const int ny, const int nz) { int i; #pragma omp parallel for for(i=1;i<nx-1;i++) { int j,k; for(j=1;j<ny-1;j++) { for(k=1;k<nz-1;k++) { //i #pragma omp critical Anext[Index3D (nx, ny, i, j, k)] = (A0[Index3D (nx, ny, i, j, k + 1)] + A0[Index3D (nx, ny, i, j, k - 1)] + A0[Index3D (nx, ny, i, j + 1, k)] + A0[Index3D (nx, ny, i, j - 1, k)] + A0[Index3D (nx, ny, i + 1, j, k)] + A0[Index3D (nx, ny, i - 1, j, k)])*c1 - A0[Index3D (nx, ny, i, j, k)]*c0; } } } }
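Since every (i, j, k) iteration above writes a distinct Anext element, the critical section is not required for correctness and serializes the stencil; a hedged sketch of the same update without it (IDX3D is a hypothetical stand-in for Parboil's Index3D from common.h, assuming an x-fastest layout):

#include <stddef.h>

#define IDX3D(nx, ny, i, j, k) ((size_t)(k) * (nx) * (ny) + (size_t)(j) * (nx) + (size_t)(i))

void cpu_stencil_nocrit(float c0, float c1, float *A0, float *Anext,
                        int nx, int ny, int nz) {
	#pragma omp parallel for
	for (int i = 1; i < nx - 1; i++)
		for (int j = 1; j < ny - 1; j++)
			for (int k = 1; k < nz - 1; k++)
				/* each iteration touches a unique output cell, so no locking is needed */
				Anext[IDX3D(nx, ny, i, j, k)] =
					(A0[IDX3D(nx, ny, i, j, k + 1)] + A0[IDX3D(nx, ny, i, j, k - 1)] +
					 A0[IDX3D(nx, ny, i, j + 1, k)] + A0[IDX3D(nx, ny, i, j - 1, k)] +
					 A0[IDX3D(nx, ny, i + 1, j, k)] + A0[IDX3D(nx, ny, i - 1, j, k)]) * c1
					- A0[IDX3D(nx, ny, i, j, k)] * c0;
}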
reduction_teams.c
#include <stdio.h> #include <omp.h> #define N 1000000ll #define SUM (N * (N-1)/2) void checkHost(int gpu_error, int* errors, long long a){ int host_error = 0; if (a != SUM){ printf ("Host - Incorrect result = %lld, expected = %lld!\n", a, SUM); host_error = 1; (*errors)++; } if(!host_error && !gpu_error){ printf("-----> Success\n"); } else{ printf("-----> Failure\n"); } } void reduction(int num_teams, int num_threads, int* errors){ long long result = 0; int gpu_error = 0; int device_teams = 0; int device_threads = 0; #pragma omp target teams num_teams(num_teams) thread_limit(num_threads) map(tofrom: result) map(from:device_teams,device_threads) { long long a, i; a = 0; #pragma omp parallel for reduction(+:a) for (i = 0; i < N; i++) { a += i; device_threads = omp_get_num_threads(); } result = a; if (a != SUM && omp_get_team_num() <= 50){ //limit teams that print printf ("GPU - Incorrect result = %lld, expected = %lld!\n", a, SUM); gpu_error = 1; } device_teams = omp_get_num_teams(); } //end of target // Spec says you cannot have more teams than num_teams clause if ( device_teams > num_teams ) { (*errors)++; gpu_error++; printf("ERROR: num_teams requested:%d actual teams on device:%d\n", num_teams, device_teams); } // Spec says you cannot have more threads than thread_limit clause if ( device_threads > num_threads ) { (*errors)++; gpu_error++; printf("ERROR: num_threads limit:%d Actual threads on device:%d\n", num_threads, device_threads); } checkHost(gpu_error, errors, result); } int main (void) { int errors = 0; printf("\n---------- Multiple Teams ----------\n"); printf("\nRunning 2 Teams with 64 threads per team\n"); reduction(2, 64, &errors); printf("\nRunning 2 Teams with 128 threads per team\n"); reduction(2, 128, &errors); printf("\nRunning 2 Teams with 256 threads per team\n"); reduction(2, 256, &errors); printf("\nRunning 256 Teams with 256 threads per team (Limited to print first 50 teams)\n"); reduction(256, 256, &errors); printf("\nRunning 4096 Teams with 64 threads per team (Limited to print first 50 teams)\n"); reduction(4096, 64, &errors); printf("\nRunning 4096 Teams with 256 threads per team (Limited to print first 50 teams)\n"); reduction(4096, 256, &errors); if(!errors){ printf("\nRESULT: ALL RUNS SUCCESSFUL!\n"); return 0; } else{ printf("\nRESULT: FAILURES OCCURRED!\n"); return -1; } }
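The test above makes every team redundantly compute the full sum; a hedged companion sketch in which distribute splits the iteration space across teams and a single combined reduction collects the result:

#include <stdio.h>

int main(void) {
	const long long n = 1000000ll;
	long long a = 0;
	/* distribute splits iterations across teams; reduction(+:a) combines them */
	#pragma omp target teams distribute parallel for reduction(+:a) map(tofrom: a)
	for (long long i = 0; i < n; i++)
		a += i;
	printf("sum=%lld expected=%lld\n", a, n * (n - 1) / 2);
	return a != n * (n - 1) / 2;
}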
coreFLT.c
#ifdef DT32 #define flt float #define DT_CALC DT_FLOAT32 #define epsilon FLT_EPSILON #else #define flt double #define DT_CALC DT_FLOAT64 #define epsilon DBL_EPSILON #endif //#include <float.h> #include "core.h" #ifdef USING_TIMERS #include <time.h> #endif #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <limits.h> #include <math.h> #if defined(_OPENMP) #include <omp.h> #endif #ifdef USING_WASM #undef SIMD #define staticx #include <string.h> #include <stdbool.h> #include <nifti2_wasm.h> #include <emscripten.h> #include <ctype.h>//#include <cctype>// <ctype.h> for isspace() #include <stdbool.h> #else #define SIMD #define xmemcpy memcpy #define staticx static #include <nifti2_io.h> #define bandpass #define bwlabelx #define tensor_decomp //tensor_decomp support is optional #endif #ifdef SIMD //explicitly vectorize (SSE,AVX,Neon) #ifdef __x86_64__ #ifdef DT32 #define kSSE32 4 //128-bit SSE handles 4 32-bit floats per instruction #else #define kSSE64 2 //128-bit SSE handles 2 64-bit floats per instruction #endif #else #ifdef DT32 #include "sse2neon.h" #define kSSE32 4 //128-bit SSE handles 4 32-bit floats per instruction #else #undef SIMD #endif #endif #endif #ifndef USING_WASM #ifdef __x86_64__ #include <immintrin.h> #else #include "arm_malloc.h" #endif #endif #ifdef bandpass #include "bw.h" #endif #ifdef bwlabelx #include "bwlabel.h" #endif #ifdef tensor_decomp #include "tensor.h" #endif //#define TFCE //formerly we used Christian Gaser's tfce, new bespoke code handles connectivity //#ifdef TFCE //we now use in-built tfce function // #include "tfce_pthread.h" //#endif #ifdef SIMD #ifdef DT32 staticx void nifti_sqrt(flt *v, size_t n) { flt *vin = v; //#pragma omp parallel for for (size_t i = 0; i <= (n - kSSE32); i += kSSE32) { __m128 v4 = _mm_loadu_ps(vin); __m128 ma = _mm_sqrt_ps(v4); _mm_storeu_ps(vin, ma); vin += kSSE32; } int tail = (n % kSSE32); while (tail > 0) { v[n - tail] = sqrt(v[n - tail]); tail--; } } // nifti_sqrt() staticx void nifti_mul(flt *v, size_t n, flt slope1) { flt *vin = v; __m128 slope = _mm_set1_ps(slope1); //#pragma omp parallel for for (size_t i = 0; i <= (n - kSSE32); i += kSSE32) { __m128 v4 = _mm_loadu_ps(vin); __m128 m = _mm_mul_ps(v4, slope); _mm_storeu_ps(vin, m); vin += kSSE32; } int tail = (n % kSSE32); while (tail > 0) { v[n - tail] *= slope1; tail--; } } //nifti_mul() staticx void nifti_add(flt *v, int64_t n, flt intercept1) { //add, out = in + intercept if (intercept1 == 0.0f) return; flt *vin = v; __m128 intercept = _mm_set1_ps(intercept1); //#pragma omp parallel for for (int64_t i = 0; i <= (n - kSSE32); i += kSSE32) { __m128 v4 = _mm_loadu_ps(vin); __m128 ma = _mm_add_ps(v4, intercept); _mm_storeu_ps(vin, ma); vin += kSSE32; } int tail = (n % kSSE32); while (tail > 0) { v[n - tail] = v[n - tail] + intercept1; tail--; } } //nifti_add() staticx void nifti_fma(flt *v, int64_t n, flt slope1, flt intercept1) { //multiply+add, out = in * slope + intercept if ((slope1 == 1.0f) && (intercept1 == 0.0f)) return; flt *vin = v; __m128 intercept = _mm_set1_ps(intercept1); __m128 slope = _mm_set1_ps(slope1); //#pragma omp parallel for for (int64_t i = 0; i <= (n - kSSE32); i += kSSE32) { __m128 v4 = _mm_loadu_ps(vin); __m128 m = _mm_mul_ps(v4, slope); __m128 ma = _mm_add_ps(m, intercept); _mm_storeu_ps(vin, ma); vin += kSSE32; } int tail = (n % kSSE32); while (tail > 0) { v[n - tail] = (v[n - tail] * slope1) + intercept1; tail--; } } //nifti_fma() #else //if SIMD32 else SIMD64 staticx void nifti_sqrt(flt *v, size_t n) { flt *vin = v; //#pragma omp 
parallel for for (size_t i = 0; i <= (n - kSSE64); i += kSSE64) { __m128d v2 = _mm_loadu_pd(vin); __m128d ma = _mm_sqrt_pd(v2); _mm_storeu_pd(vin, ma); vin += kSSE64; } int tail = (n % kSSE64); while (tail > 0) { v[n - tail] = sqrt(v[n - tail]); tail--; } } // nifti_sqrt() staticx void nifti_mul(flt *v, size_t n, flt slope1) { flt *vin = v; __m128d slope = _mm_set1_pd(slope1); //#pragma omp parallel for for (size_t i = 0; i <= (n - kSSE64); i += kSSE64) { __m128d v2 = _mm_loadu_pd(vin); __m128d m = _mm_mul_pd(v2, slope); _mm_storeu_pd(vin, m); vin += kSSE64; } int tail = (n % kSSE64); while (tail > 0) { v[n - tail] *= slope1; tail--; } } //nifti_mul() staticx void nifti_add(flt *v, int64_t n, flt intercept1) { //add, out = in + intercept if (intercept1 == 0.0f) return; flt *vin = v; __m128d intercept = _mm_set1_pd(intercept1); //#pragma omp parallel for for (int64_t i = 0; i <= (n - kSSE64); i += kSSE64) { __m128d v2 = _mm_loadu_pd(vin); __m128d ma = _mm_add_pd(v2, intercept); _mm_storeu_pd(vin, ma); vin += kSSE64; } int tail = (n % kSSE64); while (tail > 0) { v[n - tail] = v[n - tail] + intercept1; tail--; } } //nifti_add() staticx void nifti_fma(flt *v, int64_t n, flt slope1, flt intercept1) { //multiply+add, out = in * slope + intercept if ((slope1 == 1.0f) && (intercept1 == 0.0f)) return; flt *vin = v; __m128d intercept = _mm_set1_pd(intercept1); __m128d slope = _mm_set1_pd(slope1); //#pragma omp parallel for for (int64_t i = 0; i <= (n - kSSE64); i += kSSE64) { __m128d v2 = _mm_loadu_pd(vin); __m128d m = _mm_mul_pd(v2, slope); __m128d ma = _mm_add_pd(m, intercept); _mm_storeu_pd(vin, ma); vin += kSSE64; } int tail = (n % kSSE64); while (tail > 0) { v[n - tail] = (v[n - tail] * slope1) + intercept1; tail--; } } //nifti_fma() #endif //end SIMD64 #else //if SIMD vectorized, else scalar staticx void nifti_sqrt(flt *v, size_t n) { //#pragma omp parallel for for (size_t i = 0; i < n; i++) v[i] = sqrt(v[i]); } //nifti_sqrt() staticx void nifti_mul(flt *v, size_t n, flt slope1) { //#pragma omp parallel for for (size_t i = 0; i < n; i++) v[i] *= slope1; } //nifti_mul() staticx void nifti_add(flt *v, size_t n, flt intercept1) { //#pragma omp parallel for for (size_t i = 0; i < n; i++) v[i] += intercept1; } //nifti_add() staticx void nifti_fma(flt *v, size_t n, flt slope1, flt intercept1) { //#pragma omp parallel for for (size_t i = 0; i < n; i++) v[i] = (v[i] * slope1) + intercept1; } //nifti_fma #endif //if vector SIMD else scalar staticx flt vx(flt *f, int p, int q) { flt ret = ((f[q] + q * q) - (f[p] + p * p)) / (2.0 * q - 2.0 * p); if (isnan(ret)) ret = INFINITY; return ret; } staticx inline void transposeXY( flt *img3Din, flt *img3Dout, int *nxp, int *nyp, int nz) { //transpose X and Y dimensions: rows <-> columns //Note: in future we could use SIMD to transpose values in tiles // https://stackoverflow.com/questions/16737298/what-is-the-fastest-way-to-transpose-a-matrix-in-c int nx = *nxp; int ny = *nyp; size_t vi = 0; //volume offset for (int z = 0; z < nz; z++) { int zo = z * nx * ny; for (int y = 0; y < ny; y++) { int xo = 0; for (int x = 0; x < nx; x++) { img3Dout[zo + xo + y] = img3Din[vi]; xo += ny; vi += 1; } } } *nxp = ny; *nyp = nx; } staticx inline void transposeXZ( flt *img3Din, flt *img3Dout, int *nxp, int ny, int *nzp) { //transpose X and Z dimensions: slices <-> columns int nx = *nxp; int nz = *nzp; int nyz = ny * nz; size_t vi = 0; //volume offset for (int z = 0; z < nz; z++) { for (int y = 0; y < ny; y++) { int yo = y * nz; int zo = 0; for (int x = 0; x < nx; x++) { 
img3Dout[z + yo + zo] = img3Din[vi]; zo += nyz; vi += 1; } } } *nxp = nz; *nzp = nx; } staticx void edt(flt *f, int n) { int q, p, k; flt s, dx; flt *d = (flt *)_mm_malloc((n+2) * sizeof(flt), 64); flt *z = (flt *)_mm_malloc((n+2) * sizeof(flt), 64); int *v = (int *)_mm_malloc((n+2) * sizeof(int), 64); /*# Find the lower envelope of a sequence of parabolas. # f...source data (returns the Y of the parabola vertex at X) # d...destination data (final distance values are written here) # z...temporary used to store X coords of parabola intersections # v...temporary used to store X coords of parabola vertices # i...resulting X coords of parabola vertices # n...number of pixels in "f" to process # Always add the first pixel to the enveloping set since it is # obviously lower than all parabolas processed so far.*/ k = 0; v[0] = 0; z[0] = -INFINITY; z[1] = INFINITY; for (q = 1; q < n; q++) { /* If the new parabola is lower than the right-most parabola in # the envelope, remove it from the envelope. To make this # determination, find the X coordinate of the intersection (s) # between the parabolas with vertices at (q,f[q]) and (p,f[p]).*/ p = v[k]; s = vx(f, p, q); //while (s <= z[k]) { while ((s <= z[k]) && (k > 0)) { k = k - 1; p = v[k]; s = vx(f, p, q); } //# Add the new parabola to the envelope. k = k + 1; v[k] = q; z[k] = s; z[k + 1] = INFINITY; } /*# Go back through the parabolas in the envelope and evaluate them # in order to populate the distance values at each X coordinate.*/ k = 0; for (q = 0; q < n; q++) { while (z[k + 1] < q) k = k + 1; dx = (q - v[k]); d[q] = dx * dx + f[v[k]]; } for (q = 0; q < n; q++) f[q] = d[q]; _mm_free(d); _mm_free(z); _mm_free(v); } //edt() staticx void edt1(flt *df, int n) { //first dimension is simple int q, prevX; flt prevY, v; prevX = 0; prevY = INFINITY; //forward for (q = 0; q < n; q++) { if (df[q] == 0) { prevX = q; prevY = 0; } else df[q] = sqr(q - prevX) + prevY; } //reverse prevX = n; prevY = INFINITY; for (q = (n - 1); q >= 0; q--) { v = sqr(q - prevX) + prevY; if (df[q] < v) { prevX = q; prevY = df[q]; } else df[q] = v; } } //edt1() staticx int nifti_edt(nifti_image *nim) { //https://github.com/neurolabusc/DistanceFields if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1)) return 1; if (nim->datatype != DT_CALC) return 1; flt *img = (flt *)nim->data; int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1); int nVol = nim->nvox / nvox3D; if ((nVol < 1) || ((nvox3D * nVol) != nim->nvox)) return 1; int nx = nim->nx; int ny = nim->ny; int nz = nim->nz; flt threshold = 0.0; for (size_t i = 0; i < nim->nvox; i++) { if (img[i] > threshold) img[i] = INFINITY; else img[i] = 0; } size_t nRow = MAX(nim->ny, 1); nRow *= MAX(nim->nz, 1); nRow *= MAX(nVol, 1); //EDT in left-right direction: one row of nx voxels per ny*nz*nVol rows flt *imgRow = img; for (int r = 0; r < nRow; r++) { edt1(imgRow, nx); imgRow += nx; } //EDT in anterior-posterior direction nRow = nim->nx * nim->nz; //transpose XYZ to YXZ and blur Y columns with XZ Rows for (int v = 0; v < nVol; v++) { //transpose each volume separately flt *img3D = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); //alloc for each volume to allow openmp size_t vo = v * nvox3D; //volume offset transposeXY(&img[vo], img3D, &nx, &ny, nz); //perform EDT for all "rows" flt *imgRow = img3D; for (int r = 0; r < nRow; r++) { edt(imgRow, nx); imgRow += nx; } transposeXY(img3D, &img[vo], &nx, &ny, nz); _mm_free(img3D); } //for each volume //EDT in head-foot direction nRow = nim->nx * nim->ny; //transpose XYZ to ZXY and blur Z columns with XY Rows #pragma omp parallel for for (int v = 0; v < nVol; v++) { //transpose each volume separately flt *img3D = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); //alloc for each volume to allow openmp size_t vo = v * nvox3D; //volume offset transposeXZ(&img[vo], img3D, &nx, ny, &nz); //perform EDT for all "rows" flt *imgRow = img3D; for (int r = 0; r < nRow; r++) { edt(imgRow, nx); imgRow += nx; } transposeXZ(img3D, &img[vo], &nx, ny, &nz); _mm_free(img3D); } //for each volume return 0; } //nifti_edt() //kernelWid influences width of kernel, use negative values for round, positive for ceil // kernelWid of 2.5 means the kernel will be (2 * ceil(2.5 * sigma))+1 voxels wide // kernelWid of -6.0 means the kernel will be (2 * round(6.0 * sigma))+1 voxels wide // 2.5 AFNI ceil(2.5) https://github.com/afni/afni/blob/25e77d564f2c67ff480fa99a7b8e48ec2d9a89fc/src/edt_blur.c#L1391 // -6 SPM round(6) https://github.com/spm/spm12/blob/3085dac00ac804adb190a7e82c6ef11866c8af02/spm_smooth.m#L97 // -6 FSL round(6) (estimated) // -3 opencv round(3) or round(4) depending on datatype https://github.com/opencv/opencv/blob/9c23f2f1a682faa9f0b2c2223a857c7d93ba65a6/modules/imgproc/src/smooth.cpp#L3782 //bioimagesuite floor(1.5) https://github.com/bioimagesuiteweb/bisweb/blob/210d678c92fd404287fe5766136379ec94750eb2/js/utilities/bis_imagesmoothreslice.js#L133 //Gaussian blur, both serial and parallel variants, https://github.com/neurolabusc/niiSmooth staticx void blurS(flt *img, int nx, int ny, flt xmm, flt Sigmamm, flt kernelWid) { //serial blur //make kernels if ((xmm == 0) || (nx < 2) || (ny < 1) || (Sigmamm <= 0.0)) return; //flt sigma = (FWHMmm/xmm)/sqrt(8*log(2)); flt sigma = (Sigmamm / xmm); //mm to vox //round(6*sigma), ceil(4*sigma) seems spot on larger than fslmaths //int cutoffvox = round(6*sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5 int cutoffvox; if (kernelWid < 0) cutoffvox = round(fabs(kernelWid) * sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5 else cutoffvox = ceil(kernelWid * sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5 //printf(".Blur Cutoff (%g) %d\n", 4*sigma, cutoffvox); //validated on SPM12's 1.5mm isotropic mask_ICV.nii (discrete jump in number of non-zero voxels) //fslmaths mask -s 2.26 f6.nii //Blur Cutoff (6.02667) 7 //fslmaths mask -s 2.24 f4.nii //Blur Cutoff (5.97333) 6 cutoffvox = MAX(cutoffvox, 1); flt *k = (flt *)_mm_malloc((cutoffvox + 1) * sizeof(flt), 64); //FIR Gaussian flt expd = 2 * sigma * sigma; for (int i = 0; i <= cutoffvox; i++) k[i] = exp(-1.0f * (i * i) / expd); //calculate start, end for each voxel in int *kStart = (int *)_mm_malloc(nx * sizeof(int), 64); //-cutoff except left left columns, e.g. 0, -1, -2... 
cutoffvox int *kEnd = (int *)_mm_malloc(nx * sizeof(int), 64); //+cutoff except right columns flt *kWeight = (flt *)_mm_malloc(nx * sizeof(flt), 64); //ensure sum of kernel = 1.0 for (int i = 0; i < nx; i++) { kStart[i] = MAX(-cutoffvox, -i); //do not read below 0 kEnd[i] = MIN(cutoffvox, nx - i - 1); //do not read beyond final columnn if ((i > 0) && (kStart[i] == (kStart[i - 1])) && (kEnd[i] == (kEnd[i - 1]))) { //reuse weight kWeight[i] = kWeight[i - 1]; continue; } flt wt = 0.0f; for (int j = kStart[i]; j <= kEnd[i]; j++) wt += k[abs(j)]; kWeight[i] = 1 / wt; //printf("%d %d->%d %g\n", i, kStart[i], kEnd[i], kWeight[i]); } //apply kernel to each row flt *tmp = _mm_malloc(nx * sizeof(flt), 64); //input values prior to blur for (int y = 0; y < ny; y++) { //printf("-+ %d:%d\n", y, ny); xmemcpy(tmp, img, nx * sizeof(flt)); for (int x = 0; x < nx; x++) { flt sum = 0; for (int i = kStart[x]; i <= kEnd[x]; i++) sum += tmp[x + i] * k[abs(i)]; img[x] = sum * kWeight[x]; } img += nx; } //blurX //free kernel _mm_free(tmp); _mm_free(k); _mm_free(kStart); _mm_free(kEnd); _mm_free(kWeight); } //blurS() #if defined(_OPENMP) staticx void blurP(flt *img, int nx, int ny, flt xmm, flt FWHMmm, flt kernelWid) { //parallel blur //make kernels if ((xmm == 0) || (nx < 2) || (ny < 1) || (FWHMmm <= 0.0)) return; //flt sigma = (FWHMmm/xmm)/sqrt(8*log(2)); flt sigma = (FWHMmm / xmm); //mm to vox int cutoffvox; if (kernelWid < 0) cutoffvox = round(fabs(kernelWid) * sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5 else cutoffvox = ceil(kernelWid * sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5 cutoffvox = MAX(cutoffvox, 1); flt *k = (flt *)_mm_malloc((cutoffvox + 1) * sizeof(flt), 64); //FIR Gaussian flt expd = 2 * sigma * sigma; for (int i = 0; i <= cutoffvox; i++) k[i] = exp(-1.0f * (i * i) / expd); //calculate start, end for each voxel in int *kStart = (int *)_mm_malloc(nx * sizeof(int), 64); //-cutoff except left left columns, e.g. 0, -1, -2... 
cutoffvox int *kEnd = (int *)_mm_malloc(nx * sizeof(int), 64); //+cutoff except right columns flt *kWeight = (flt *)_mm_malloc(nx * sizeof(flt), 64); //ensure sum of kernel = 1.0 for (int i = 0; i < nx; i++) { kStart[i] = MAX(-cutoffvox, -i); //do not read below 0 kEnd[i] = MIN(cutoffvox, nx - i - 1); //do not read beyond final columnn if ((i > 0) && (kStart[i] == (kStart[i - 1])) && (kEnd[i] == (kEnd[i - 1]))) { //reuse weight kWeight[i] = kWeight[i - 1]; continue; } flt wt = 0.0f; for (int j = kStart[i]; j <= kEnd[i]; j++) wt += k[abs(j)]; kWeight[i] = 1 / wt; //printf("%d %d->%d %g\n", i, kStart[i], kEnd[i], kWeight[i]); } //apply kernel to each row #pragma omp parallel for for (int y = 0; y < ny; y++) { flt *tmp = _mm_malloc(nx * sizeof(flt), 64); //input values prior to blur flt *imgx = img; imgx += (nx * y); xmemcpy(tmp, imgx, nx * sizeof(flt)); for (int x = 0; x < nx; x++) { flt sum = 0; for (int i = kStart[x]; i <= kEnd[x]; i++) sum += tmp[x + i] * k[abs(i)]; imgx[x] = sum * kWeight[x]; } _mm_free(tmp); } //free kernel _mm_free(k); _mm_free(kStart); _mm_free(kEnd); _mm_free(kWeight); } //blurP #endif // if OPENMP: blurP (parallel blur) is multi-threaded staticx int nifti_smooth_gauss(nifti_image *nim, flt SigmammX, flt SigmammY, flt SigmammZ, flt kernelWid) { //https://github.com/afni/afni/blob/699775eba3c58c816d13947b81cf3a800cec606f/src/edt_blur.c int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1); if ((nvox3D < 2) || (nim->nx < 1) || (nim->ny < 1) || (nim->nz < 1) || (nim->datatype != DT_CALC)) { printfx("Image size too small for Gaussian blur.\n"); return 1; } if (nim->datatype != DT_CALC) return 1; if ((SigmammX == 0) && (SigmammY == 0) && (SigmammZ == 0)) return 0; //all done: no smoothing, e.g. small kernel for difference of Gaussian if (SigmammX < 0) //negative values for voxels, not mm SigmammX = -SigmammX * nim->dx; if (SigmammY < 0) //negative values for voxels, not mm SigmammY = -SigmammY * nim->dy; if (SigmammZ < 0) //negative values for voxels, not mm SigmammZ = -SigmammZ * nim->dz; flt *img = (flt *)nim->data; int nVol = nim->nvox / nvox3D; if ((nVol < 1) || ((nvox3D * nVol) != nim->nvox)) return 1; int nx = nim->nx; int ny = nim->ny; int nz = nim->nz; if ((SigmammX <= 0.0) || (nx < 2)) goto DO_Y_BLUR; //BLUR X size_t nRow = MAX(nim->ny, 1); nRow *= MAX(nim->nz, 1); nRow *= MAX(nVol, 1); #if defined(_OPENMP) if (omp_get_max_threads() > 1) blurP(img, nim->nx, nRow, nim->dx, SigmammX, kernelWid); else blurS(img, nim->nx, nRow, nim->dx, SigmammX, kernelWid); #else blurS(img, nim->nx, nRow, nim->dx, SigmammX, kernelWid); #endif DO_Y_BLUR: //BLUR Y if ((SigmammY <= 0.0) || (ny < 2)) goto DO_Z_BLUR; nRow = nim->nx * nim->nz; //transpose XYZ to YXZ and blur Y columns with XZ Rows #pragma omp parallel for for (int v = 0; v < nVol; v++) { //transpose each volume separately flt *img3D = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); //alloc for each volume to allow openmp size_t vo = v * nvox3D; //volume offset transposeXY(&img[vo], img3D, &nx, &ny, nz); blurS(img3D, nim->ny, nRow, nim->dy, SigmammY, kernelWid); transposeXY(img3D, &img[vo], &nx, &ny, nz); _mm_free(img3D); } //for each volume DO_Z_BLUR: //BLUR Z: if ((SigmammZ <= 0.0) || (nim->nz < 2)) return 0; //all done! 
nRow = nim->nx * nim->ny; //transpose XYZ to ZXY and blur Z columns with XY Rows #pragma omp parallel for for (int v = 0; v < nVol; v++) { //transpose each volume separately //printf("volume %d uses thread %d\n", v, omp_get_thread_num()); flt *img3D = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); //alloc for each volume to allow openmp size_t vo = v * nvox3D; //volume offset transposeXZ(&img[vo], img3D, &nx, ny, &nz); blurS(img3D, nim->nz, nRow, nim->dz, SigmammZ, kernelWid); transposeXZ(img3D, &img[vo], &nx, ny, &nz); _mm_free(img3D); } //for each volume return 0; } // nifti_smooth_gauss() staticx int nifti_robust_range(nifti_image *nim, flt *pct2, flt *pct98, int ignoreZeroVoxels) { //https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=fsl;31f309c1.1307 // robust range is essentially the 2nd and 98th percentiles // "but ensuring that the majority of the intensity range is captured, even for binary images." // fsl uses 1000 bins, also limits for volumes less than 100 voxels [email protected] 20190107 //fslstats trick -r // 0.000000 1129.141968 //niimath >fslstats trick -R // 0.000000 2734.000000 *pct2 = 0.0; *pct98 = 1.0; if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) return 1; flt *f32 = (flt *)nim->data; flt mn = INFINITY; flt mx = -INFINITY; size_t nZero = 0; size_t nNan = 0; for (size_t i = 0; i < nim->nvox; i++) { if (isnan(f32[i])) { nNan++; continue; } if (f32[i] == 0.0) { nZero++; if (ignoreZeroVoxels) continue; } mn = fmin(f32[i], mn); mx = fmax(f32[i], mx); } if ((nZero > 0) && (mn > 0.0) && (!ignoreZeroVoxels)) mn = 0.0; if (mn > mx) return 0; //all NaN if (mn == mx) { *pct2 = mn; *pct98 = mx; return 0; } if (!ignoreZeroVoxels) nZero = 0; nZero += nNan; size_t n2pct = round((nim->nvox - nZero) * 0.02); if ((n2pct < 1) || (mn == mx) || ((nim->nvox - nZero) < 100)) { //T Hanayik mentioned issue with very small volumes *pct2 = mn; *pct98 = mx; return 0; } #define nBins 1001 flt scl = (nBins - 1) / (mx - mn); int hist[nBins]; for (int i = 0; i < nBins; i++) hist[i] = 0; if (ignoreZeroVoxels) { for (int i = 0; i < nim->nvox; i++) { if (isnan(f32[i])) continue; if (f32[i] == 0.0) continue; hist[(int)round((f32[i] - mn) * scl)]++; } } else { for (int i = 0; i < nim->nvox; i++) { if (isnan(f32[i])) continue; hist[(int)round((f32[i] - mn) * scl)]++; } } size_t n = 0; size_t lo = 0; while (n < n2pct) { n += hist[lo]; //if (lo < 10) // printf("%zu %zu %zu %d\n",lo, n, n2pct, ignoreZeroVoxels); lo++; } lo--; //remove final increment n = 0; int hi = nBins; while (n < n2pct) { hi--; n += hist[hi]; } /*if ((lo+1) < hi) { size_t nGray = 0; for (int i = lo+1; i < hi; i++ ) { nGray += hist[i]; //printf("%d %d\n", i, hist[i]); } float fracGray = (float)nGray/(float)(nim->nvox - nZero); printf("histogram[%d..%d] = %zu %g\n", lo, hi, nGray, fracGray); }*/ if (lo == hi) { //MAJORITY are not black or white int ok = -1; while (ok != 0) { if (lo > 0) { lo--; if (hist[lo] > 0) ok = 0; } if ((ok != 0) && (hi < (nBins - 1))) { hi++; if (hist[hi] > 0) ok = 0; } if ((lo == 0) && (hi == (nBins - 1))) ok = 0; } //while not ok }//if lo == hi *pct2 = (lo) / scl + mn; *pct98 = (hi) / scl + mn; //printf("full range %g..%g (voxels 0 or NaN =%zu) robust range %g..%g\n", mn, mx, nZero, *pct2, *pct98); return 0; } staticx flt* padImg3D( flt *imgIn, int *nx, int *ny, int *nz) { //create an image with new first and last columns, rows, slices int nxIn = (* nx); int nxOut = (* nx) + 2; int nyOut = (* ny) + 2; int nzOut = (* nz) + 2; int nvox3D = nxOut * nyOut * nzOut; flt *imgOut= (flt *)_mm_malloc(nvox3D * 
sizeof(flt), 64); memset(imgOut, 0, nvox3D * sizeof(flt)); //zero array flt *imgOutP = imgOut; flt *imgInP = imgIn; imgOutP += 1; for (int z = 0; z < nzOut; z++) for (int y = 0; y < nyOut; y++) { if ((z > 0) && (y > 0) && (z < (nzOut - 1)) && (y < (nyOut - 1))) { xmemcpy(imgOutP, imgInP, nxIn * sizeof(flt)); //dest, src, count imgInP += nxIn; } imgOutP += nxOut; } *nx = nxOut; *ny = nyOut; *nz = nzOut; return imgOut; } staticx int nifti_binarize(nifti_image *nim, flt threshold) { //binarize image at the given threshold if (nim->nvox < 1) return 1; flt *inimg = (flt *)nim->data; for (int i = 0; i < nim->nvox; i++) { if (isnan(inimg[i])) continue; inimg[i] = (inimg[i] < threshold) ? 0.0 : 1.0; } nim->scl_inter = 0.0; nim->scl_slope = 1.0; nim->cal_min = 0.0; nim->cal_max = 1.0; return 0; } staticx flt brightest_voxel(nifti_image *nim) { if (nim->nvox < 1) return 0.0; flt *img = (flt *)nim->data; flt mx = -INFINITY; //in case 1st voxel is NaN for (int i = 0; i < nim->nvox; i++) { if (isnan(img[i])) continue; mx = MAX(mx, img[i]); } return mx; } staticx flt darkest_voxel(nifti_image *nim) { if (nim->nvox < 1) return 0.0; flt *img = (flt *)nim->data; flt mn = INFINITY; //in case 1st voxel is NaN for (int i = 0; i < nim->nvox; i++) { if (isnan(img[i])) continue; mn = MIN(mn, img[i]); } return mn; } staticx int nifti_mask_below(nifti_image *nim, flt threshold, int isZeroFill) { //if isZeroFill set dark voxels to zero //else set dark voxels to darkest if (nim->nvox < 1) return 1; flt *inimg = (flt *)nim->data; flt fill = 0.0; if (!isZeroFill) fill = darkest_voxel(nim); for (int i = 0; i < nim->nvox; i++) { if ((isnan(inimg[i])) || (inimg[i] >= threshold)) continue; inimg[i] = fill; } return 0; } staticx int nifti_mask_below_dilate(nifti_image *nim, flt threshold, int isZeroFill) { //mask dark voxels to zero ONLY if surrounded by other dark voxels // this 'feathers' the edges of bright objects, capturing partial volumes // isZeroFill determines if masked voxels are set to zero or the global darkest value if (nim->nvox < 1) return 1; if ((nim->nx < 3) || (nim->ny < 3) || (nim->nz < 3)) return nifti_mask_below(nim, threshold, isZeroFill); flt *inimg = (flt *)nim->data; uint8_t *vxs = (uint8_t *)_mm_malloc(nim->nvox * sizeof(uint8_t), 64); memset(vxs, 0, nim->nvox * sizeof(uint8_t)); for (int i = 0; i < nim->nvox; i++) { if ((isnan(inimg[i])) || (inimg[i] >= threshold)) vxs[i] = 1; } size_t nx = nim->nx; size_t nxy = nx * nim->ny; size_t nvox3D = nxy * MAX(nim->nz, 1); size_t nVol = nim->nvox / nvox3D; for (int v = 0; v < nVol; v++) { uint8_t *vxs2 = (uint8_t *)_mm_malloc(nvox3D * sizeof(uint8_t), 64); uint8_t *tmp = vxs + (v * nvox3D); xmemcpy(vxs2, tmp, nvox3D * sizeof(uint8_t)); //dest,src,bytes size_t iv = (v * nvox3D); for (int z = 1; z < (nim->nz - 1); z++) { for (int y = 1; y < (nim->ny - 1); y++) { size_t iyz = (z * nxy) + (y * nim->nx); for (int x = 1; x < (nx - 1); x++) { size_t vx = iyz + x; if (vxs[vx + iv] == 1) continue; if ((vxs2[vx - 1] == 1) || (vxs2[vx + 1] == 1) || (vxs2[vx - nx] == 1) || (vxs2[vx + nx] == 1) || (vxs2[vx - nxy] == 1) || (vxs2[vx + nxy] == 1)) vxs[vx + iv] = 1; } //x } //y } //z _mm_free(vxs2); } //v flt fill = 0.0; if (!isZeroFill) fill = darkest_voxel(nim); for (size_t i = 0; i < nim->nvox; i++) { if (vxs[i] == 0) inimg[i] = fill; } _mm_free(vxs); return 0; } staticx int nifti_c2h(nifti_image *nim) { //c2h: Cormack to Hounsfield // https://github.com/neurolabusc/Clinical/blob/master/clinical_c2h.m flt kUninterestingDarkUnits = 900.0; //e.g. 
-1000..-100 flt kInterestingMidUnits = 200.0; //e.g. unenhanced CT: -100..+100 flt kScaleRatio = 10; flt kMax = kInterestingMidUnits * (kScaleRatio+1); if (nim->nvox < 1) return 1; flt mn = darkest_voxel(nim); if (mn < 0.0) { printfx("Negative brightnesses impossible in the Cormack scale.\n"); return 1; } flt *img = (flt *)nim->data; for (int i = 0; i < nim->nvox; i++) { if (isnan(img[i])) continue; flt boost = img[i] - kUninterestingDarkUnits; boost = MAX(boost, 0.0); boost = MIN(boost, kInterestingMidUnits * kScaleRatio); boost = boost * ((kScaleRatio - 1.0) / kScaleRatio); img[i] = img[i] - boost - 1024.0; } return 0; } // nifti_c2h() staticx int nifti_h2c(nifti_image *nim) { //h2c: Hounsfield to Cormack // https://github.com/neurolabusc/Clinical/blob/master/clinical_h2c.m flt kUninterestingDarkUnits = 900.0; //e.g. -1000..-100 flt kInterestingMidUnits = 200.0; //e.g. unenhanced CT: -100..+100 flt kMin = -1024.0; //some GE scanners place artificial rim around air flt kScaleRatio = 10; if (nim->nvox < 1) return 1; flt mn = darkest_voxel(nim); flt mx = brightest_voxel(nim); if ((mx < 100) || (mn > -500)) { printfx("Image not in Hounsfield units: Intensity range %g..%g\n", mn, mx); return 1; } flt *img = (flt *)nim->data; if (mn < kMin) {//some GE scanners place artificial rim around air for (int i = 0; i < nim->nvox; i++) { if ((isnan(img[i])) || (img[i] >= kMin)) continue; img[i] = kMin; } mn = kMin; } for (int i = 0; i < nim->nvox; i++) { if (isnan(img[i])) continue; img[i] -= mn; //translate so min value is 0 flt boost = img[i] - kUninterestingDarkUnits; boost = MAX(boost, 0.0); boost = MIN(boost, kInterestingMidUnits); boost = boost * (kScaleRatio - 1.0); img[i] += boost; } return 0; } // nifti_h2c() staticx int nifti_otsu(nifti_image *nim, int mode, int makeBinary) { //binarize image using Otsu's method //mode is 1..5 corresponding to 3/4, 2/3, 1/2 1/3 and 1/4 compartments made dark //makeBinary: -1 replace dark with darkest, 0 = replace dark with 0, 1 = binary (0 or 1) if ((nim->nvox < 1) || (nim->datatype != DT_CALC)) return 1; //Create histogram of intensity frequency // hist[0..kOtsuBins-1]: each bin is number of pixels with this intensity flt mn, mx; if (nifti_robust_range(nim, &mn, &mx, 0) != 0) return 1; if (mn >= mx) return 1; //no variability #define kOtsuBins 256 flt *inimg = (flt *)nim->data; flt scl = (kOtsuBins - 1) / (mx - mn); //create histogram int hist[kOtsuBins]; for (int i = 0; i < kOtsuBins; i++) hist[i] = 0; for (int i = 0; i < nim->nvox; i++) { if (isnan(inimg[i])) continue; int idx = (int)round((inimg[i] - mn) * scl); idx = MIN(idx, kOtsuBins - 1); idx = MAX(idx, 0); hist[idx]++; } //attenuate influence of zero intensity: zero bin clamped to most frequent non-zero bin // int idx0 = (int)round((0.0 - mn) * scl); // int mostFrequentNot0 = 0; // for (int i = 0; i < kOtsuBins; i++) { // if (i == idx0) continue; // if (hist[i] > mostFrequentNot0) mostFrequentNot0 = hist[i]; // } // hist[idx0] = MIN(hist[idx0], mostFrequentNot0); //compute Otsu int thresh = nii_otsu(hist, kOtsuBins, mode); flt threshold = (thresh / scl) + mn; //printf("range %g..%g Otsu threshold %g\n", mn, mx, threshold); if (makeBinary == 1) return nifti_binarize(nim, threshold); return nifti_mask_below_dilate(nim, threshold, (makeBinary == 0)); } // nifti_otsu() staticx int nifti_unsharp(nifti_image *nim, flt SigmammX, flt SigmammY, flt SigmammZ, flt amount) { //https://github.com/afni/afni/blob/699775eba3c58c816d13947b81cf3a800cec606f/src/edt_blur.c if ((nim->nvox < 1) || (nim->nx < 2) || 
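// nifti_unsharp below applies the classic unsharp mask, sharpened = original +
// (original - blurred) * amount, clamped to the input range. A one-voxel sketch of
// that update (hypothetical helper, excluded from the build):
#if 0
static flt unsharp_voxel(flt orig, flt blurred, flt amount, flt mn, flt mx) {
	flt sharpened = orig + (orig - blurred) * amount; //boost by the blur residual
	sharpened = MAX(sharpened, mn); //keep in original range
	return MIN(sharpened, mx);
}
#endif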
(nim->ny < 2) || (nim->nz < 1)) return 1; if (nim->datatype != DT_CALC) return 1; if (amount == 0.0) return 0; flt *inimg = (flt *)nim->data; void *indat = (void *)nim->data; flt mn = INFINITY; //better that inimg[0] in case NaN flt mx = -INFINITY; for (int i = 0; i < nim->nvox; i++) { mn = MIN(mn, inimg[i]); mx = MAX(mx, inimg[i]); } if (mn >= mx) return 0; //no variability size_t nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1); size_t nVol = nim->nvox / nvox3D; if ((nvox3D * nVol) != nim->nvox) return 1; //process each 3D volume independently: reduce memory pressure nim->nvox = nvox3D; flt *simg = (flt *)_mm_malloc(nim->nvox * sizeof(flt), 64); //output image memset(simg, 0, nim->nvox * sizeof(flt)); //zero array nim->data = (void *) simg; for (int v = 0; v < nVol; v++) { xmemcpy(simg, inimg, nim->nvox * sizeof(flt)); nifti_smooth_gauss(nim, SigmammX, SigmammY, SigmammZ, 2.5); //2.5: a relatively narrow kernel for speed for (int i = 0; i < nim->nvox; i++) { //sharpened = original + (original - blurred) * amount inimg[i] += (inimg[i] - simg[i]) * amount; //keep in original range inimg[i] = MAX(inimg[i], mn); inimg[i] = MIN(inimg[i], mx); } inimg += nim->nvox; } _mm_free(simg); //return original data nim->data = indat; //nim->nvox = nvox3D * nVol; return 0; } //nifti_unsharp() staticx int nifti_crop(nifti_image *nim, int tmin, int tsize) { if (tsize == 0) { printfx("tsize must not be 0\n"); return 1; } if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) return 1; int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1); if ((nvox3D < 1) || ((nim->nvox % nvox3D) != 0)) return 1; int nvol = (nim->nvox / nvox3D); //in if (nvol < 2) { printfx("crop only appropriate for 4D volumes"); return 1; } if (tmin >= nvol) { printfx("tmin must be from 0..%d, not %d\n", nvol - 1, tmin); return 1; } int tminVol = MAX(0, tmin); int tFinalVol = tminVol + tsize - 1; //e.g. 
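// The tmin/tsize bookkeeping in nifti_crop clamps [tmin, tmin+tsize-1] into
// [0, nvol-1], with a negative tsize meaning 'through the final volume'. A sketch of
// just that range computation (hypothetical helper, excluded from the build):
#if 0
static void crop_range(int tmin, int tsize, int nvol, int *first, int *last) {
	*first = MAX(0, tmin);
	*last = (tsize < 0) ? (nvol - 1) : (*first + tsize - 1);
	*last = MIN(*last, nvol - 1);
	//e.g. tmin=2, tsize=3, nvol=10 keeps volumes 2..4; tmin=2, tsize=-1 keeps 2..9
}
#endif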
	// e.g. if tmin=0 and tsize=1, tFinal=0
	if (tsize < 0) {
		tFinalVol = INT_MAX;
	}
	tFinalVol = MIN(tFinalVol, nvol - 1);
	if ((tminVol == 0) && (tFinalVol == (nvol - 1)))
		return 0;
	int nvolOut = tFinalVol - tminVol + 1;
	flt *imgIn = (flt *)nim->data;
	nim->nvox = nvox3D * nvolOut;
	void *dat = (void *)calloc(1, nim->nvox * sizeof(flt));
	flt *imgOut = (flt *)dat;
	imgIn += tminVol * nvox3D;
	xmemcpy(imgOut, imgIn, nim->nvox * sizeof(flt));
	free(nim->data);
	nim->data = dat;
	if (nvolOut == 1)
		nim->ndim = 3;
	else
		nim->ndim = 4;
	//nim->dim[4] = nvolOut;
	nim->nt = nvolOut;
#ifndef USING_WASM
	nim->nu = 1;
	nim->nv = 1;
	nim->nw = 1;
#endif
	return 0;
}

staticx void nifti_add2(flt *v, size_t n, flt intercept1) {
	//#pragma omp parallel for
	for (size_t i = 0; i < n; i++)
		v[i] += intercept1;
} //nifti_add2()

staticx int nifti_rescale(nifti_image *nim, double scale, double intercept) {
	//linear transform of data
	if (nim->nvox < 1)
		return 1;
	flt scl = scale;
	flt inter = intercept;
	flt *f32 = (flt *)nim->data;
	if (intercept == 0.0) {
		if (scale == 1.0)
			return 0; //nothing to do
		nifti_mul(f32, nim->nvox, scl);
		return 0;
	} else if (scale == 1.0) {
		nifti_add(f32, nim->nvox, intercept);
		return 0;
	}
	nifti_fma(f32, nim->nvox, scl, inter);
	//for (size_t i = 0; i < nim->nvox; i++)
	//	f32[i] = (f32[i] * scl) + inter;
	return 0;
}

#ifndef USING_WASM
staticx int nifti_tfceS(nifti_image *nim, double H, double E, int c, int x, int y, int z, double tfce_thresh) {
	if (nim->nvox < 1)
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	if ((x < 0) || (x >= nim->nx) || (y < 0) || (y >= nim->ny) || (z < 0) || (z >= nim->nz)) {
		printfx("tfceS x/y/z must be in range 0..%" PRId64 "/0..%" PRId64 "/0..%" PRId64 "\n", nim->nx - 1, nim->ny - 1, nim->nz - 1);
		return 1; //an out-of-range seed would read outside the volume
	}
	if (!neg_determ(nim))
		x = nim->nx - x - 1;
	int seed = x + (y * nim->nx) + (z * nim->nx * nim->ny);
	flt *inimg = (flt *)nim->data;
	if (inimg[seed] < H) {
		printfx("seed voxel does not reach the specified threshold\n");
		return 1;
	}
	size_t nvox3D = nim->nx * nim->ny * nim->nz;
	if (nim->nvox > nvox3D) {
		printfx("tfceS not suitable for 4D data.\n");
		return 1;
	}
	//printf("peak %g\n", inimg[seed]);
	int numk = c;
	if ((c != 6) && (c != 18) && (c != 26)) {
		printfx("suitable values for c are 6, 18 or 26\n");
		numk = 6;
	}
	//set up kernel to search for neighbors.
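// The neighbour kernels used by the TFCE and morphology routines store, per element,
// a flat voxel offset plus the x (and y) displacement so left-right and
// anterior-posterior wraps can be rejected later. A sketch of the 6-connectivity
// variant under the offset/dx/dy layout that make_kernel_gauss uses further below
// (hypothetical helper, excluded from the build):
#if 0
static int make_face_kernel(int32_t *k, int nx, int nxy) {
	int numk = 6; //faces only: +-x, +-y, +-z
	int j = 0;
	for (int z = -1; z <= 1; z++)
		for (int y = -1; y <= 1; y++)
			for (int x = -1; x <= 1; x++) {
				if (abs(x) + abs(y) + abs(z) != 1)
					continue; //skip edges, corners and the centre
				k[j] = x + (y * nx) + (z * nxy); //flat offset
				k[j + numk] = x; //for the left-right wrap check
				k[j + numk + numk] = y; //for the anterior-posterior wrap check
				j++;
			}
	return j; //always 6
}
#endif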
Since we already included sides, we do not worry about A<->P and L<->R wrap int32_t *k = (int32_t *)_mm_malloc(3 * numk * sizeof(int32_t), 64); //kernel: offset, x, y int mxDx = 1; //connectivity 6: faces only if (numk == 18) mxDx = 2; //connectivity 18: faces+edges if (numk == 26) mxDx = 3; //connectivity 26: faces+edges+corners int j = 0; for (int z = -1; z <= 1; z++) for (int y = -1; y <= 1; y++) for (int x = -1; x <= 1; x++) { int dx = abs(x) + abs(y) + abs(z); if ((dx > mxDx) || (dx == 0)) continue; k[j] = x + (y * nim->nx) + (z * nim->nx * nim->ny); k[j + numk] = x; //avoid left-right wrap k[j + numk + numk] = x; //avoid anterior-posterior wrap j++; } //for x flt mx = (inimg[0]); for (size_t i = 0; i < nvox3D; i++) mx = MAX((inimg[i]), mx); double dh = mx / 100.0; flt *outimg = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); //output image int32_t *q = (int32_t *)_mm_malloc(nvox3D * sizeof(int32_t), 64); //queue with untested seed uint8_t *vxs = (uint8_t *)_mm_malloc(nvox3D * sizeof(uint8_t), 64); memset(outimg, 0, nvox3D * sizeof(flt)); //zero array //for (int i = 0; i < nvox3D; i++) // outimg[i] = 0.0; int n_steps = (int)ceil(mx / dh); //for (int step=0; step<n_steps; step++) { for (int step = n_steps - 1; step >= 0; step--) { flt thresh = (step + 1) * dh; memset(vxs, 0, nvox3D * sizeof(uint8_t)); for (int i = 0; i < nvox3D; i++) if (inimg[i] >= thresh) vxs[i] = 1; //survives, unclustered int qlo = 0; int qhi = 0; q[qhi] = seed; //add starting voxel as seed in queue vxs[seed] = 0; //do not find again! while (qhi >= qlo) { //first in, first out queue //retire one seed, add 0..6, 0..18 or 0..26 new ones (depending on connectivity) for (int j = 0; j < numk; j++) { int jj = q[qlo] + k[j]; if ((jj < 0) || (jj >= nvox3D)) continue; //voxel in volume if (vxs[jj] == 0) continue; //already found or did not survive threshold int dx = x + k[j + numk]; if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right int dy = y + k[j + numk + numk]; if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior //add new seed: vxs[jj] = 0; //do not find again! qhi++; q[qhi] = jj; } qlo++; } //while qhi >= qlo: continue until all seeds tested flt valToAdd = pow(qhi + 1, E) * pow(thresh, H); //"supporting section", Dark Gray in Figure 1 for (int j = 0; j <= qhi; j++) outimg[q[j]] += valToAdd; //printf("step %d thresh %g\n", step, outimg[seed]); if (outimg[seed] >= tfce_thresh) break; } //for each step if (outimg[seed] < tfce_thresh) printfx("it doesn't reach to specified threshold (%g < %g)\n", outimg[seed], tfce_thresh); for (size_t i = 0; i < nvox3D; i++) if (outimg[i] == 0.0) inimg[i] = 0.0; _mm_free(q); _mm_free(vxs); _mm_free(outimg); _mm_free(k); return 0; } #endif staticx int nifti_tfce(nifti_image *nim, double H, double E, int c) { //https://www.fmrib.ox.ac.uk/datasets/techrep/tr08ss1/tr08ss1.pdf if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) return 1; int nvox3D = nim->nx * nim->ny * nim->nz; int nvol = nim->nvox / nvox3D; int numk = c; if ((c != 6) && (c != 18) && (c != 26)) { printfx("suitable values for c are 6, 18 or 26\n"); numk = 6; } //set up kernel to search for neighbors. 
Since we already included sides, we do not worry about A<->P and L<->R wrap int32_t *k = (int32_t *)_mm_malloc(3 * numk * sizeof(int32_t), 64); //kernel: offset, x, y int mxDx = 1; //connectivity 6: faces only if (numk == 18) mxDx = 2; //connectivity 18: faces+edges if (numk == 26) mxDx = 3; //connectivity 26: faces+edges+corners int j = 0; for (int z = -1; z <= 1; z++) for (int y = -1; y <= 1; y++) for (int x = -1; x <= 1; x++) { int dx = abs(x) + abs(y) + abs(z); if ((dx > mxDx) || (dx == 0)) continue; k[j] = x + (y * nim->nx) + (z * nim->nx * nim->ny); k[j + numk] = x; //avoid left-right wrap k[j + numk + numk] = x; //avoid anterior-posterior wrap j++; } //for x //omp notes: here we compute each volume independently. // Christian Gaser computes the step loop in parallel, which accelerates 3D cases // This code is very quick on 3D, so this does not seem crucial, and avoids critical sections #pragma omp parallel for for (int vol = 0; vol < nvol; vol++) { //identify clusters flt *inimg = (flt *)nim->data; inimg += vol * nvox3D; flt mx = (inimg[0]); for (size_t i = 0; i < nvox3D; i++) mx = MAX((inimg[i]), mx); double dh = mx / 100.0; flt *outimg = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); //output image int32_t *q = (int32_t *)_mm_malloc(nvox3D * sizeof(int32_t), 64); //queue with untested seed uint8_t *vxs = (uint8_t *)_mm_malloc(nvox3D * sizeof(uint8_t), 64); memset(outimg, 0, nvox3D * sizeof(flt)); //zero array //for (int i = 0; i < nvox3D; i++) // outimg[i] = 0.0; int n_steps = (int)ceil(mx / dh); for (int step = 0; step < n_steps; step++) { flt thresh = (step + 1) * dh; memset(vxs, 0, nvox3D * sizeof(uint8_t)); for (int i = 0; i < nvox3D; i++) if (inimg[i] >= thresh) vxs[i] = 1; //survives, unclustered int i = 0; for (int z = 0; z < nim->nz; z++) for (int y = 0; y < nim->ny; y++) for (int x = 0; x < nim->nx; x++) { if (vxs[i] == 0) { i++; continue; } //voxel did not survive or already clustered int qlo = 0; int qhi = 0; q[qhi] = i; //add starting voxel as seed in queue vxs[i] = 0; //do not find again! while (qhi >= qlo) { //first in, first out queue //retire one seed, add 0..6, 0..18 or 0..26 new ones (depending on connectivity) for (int j = 0; j < numk; j++) { int jj = q[qlo] + k[j]; if ((jj < 0) || (jj >= nvox3D)) continue; //voxel in volume if (vxs[jj] == 0) continue; //already found or did not survive threshold int dx = x + k[j + numk]; if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right int dy = y + k[j + numk + numk]; if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior //add new seed: vxs[jj] = 0; //do not find again! 
qhi++; q[qhi] = jj; } qlo++; } //while qhi >= qlo: continue until all seeds tested flt valToAdd = pow(qhi + 1, E) * pow(thresh, H); //"supporting section", Dark Gray in Figure 1 for (int j = 0; j <= qhi; j++) outimg[q[j]] += valToAdd; i++; } //for each voxel } //for each step for (int i = 0; i < nvox3D; i++) inimg[i] = outimg[i]; _mm_free(q); _mm_free(vxs); _mm_free(outimg); } _mm_free(k); return 0; } //nifti_tfce() staticx int nifti_grid(nifti_image *nim, double v, int spacing) { if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2)) return 1; if (nim->datatype != DT_CALC) return 1; size_t nxy = (nim->nx * nim->ny); size_t nzt = nim->nvox / nxy; flt *f32 = (flt *)nim->data; flt fv = v; #pragma omp parallel for for (size_t i = 0; i < nzt; i++) { //for each 2D slices size_t so = i * nxy; //slice offset int z = (i % nim->nz); if ((nim->nz > 1) && ((z % spacing) == 0)) { //whole slice is grid for (size_t j = 0; j < nxy; j++) f32[so++] = fv; continue; } for (size_t y = 0; y < nim->ny; y++) for (size_t x = 0; x < nim->nx; x++) { if ((x % spacing) == 0) f32[so] = fv; so++; } so = i * nxy; //slice offset for (size_t y = 0; y < nim->ny; y++) for (size_t x = 0; x < nim->nx; x++) { if ((y % spacing) == 0) f32[so] = fv; so++; } } //for i: each 2D slice return 0; } staticx int nifti_rem(nifti_image *nim, double v, int isFrac) { //remainder (modulo) : fslmaths /*fmod(0.45, 2) = 0.45 : 0 fmod(0.9, 2) = 0.9 : 0 fmod(1.35, 2) = 1.35 : 1 fmod(1.8, 2) = 1.8 : 1 fmod(-0.45, 2) = -0.45 : 0 fmod(-0.9, 2) = -0.9 : 0 fmod(-1.35, 2) = -1.35 : -1 fmod(-1.8, 2) = -1.8 : -1 */ if (nim->datatype != DT_CALC) return 1; if (nim->nvox < 1) return 1; if (v == 0.0) { printfx("Exception: '-rem 0' does not make sense\n"); return 1; } flt fv = v; flt *f32 = (flt *)nim->data; if (isFrac) { for (size_t i = 0; i < nim->nvox; i++) f32[i] = fmod(f32[i], fv); } else { for (size_t i = 0; i < nim->nvox; i++) { //printf("fmod(%g, %g) = %g : %g\n", f32[i], fv, fmod(f32[i],fv), trunc(fmod(f32[i],fv)) ); f32[i] = trunc(fmod(f32[i], fv)); } } return 0; } staticx int nifti_thr(nifti_image *nim, double v, int modifyBrightVoxels, float newIntensity) { if (nim->nvox < 1) return 1; if (nim->datatype == DT_CALC) { flt fv = v; flt *f32 = (flt *)nim->data; if (modifyBrightVoxels) { for (size_t i = 0; i < nim->nvox; i++) if (f32[i] > fv) f32[i] = newIntensity; } else { for (size_t i = 0; i < nim->nvox; i++) if (f32[i] < fv) f32[i] = newIntensity; } return 0; } printfx("nifti_thr: Unsupported datatype %d\n", nim->datatype); return 1; } // nifti_thr() staticx int nifti_max(nifti_image *nim, double v, int useMin) { if (nim->nvox < 1) return 1; if (nim->datatype == DT_CALC) { flt fv = v; flt *f32 = (flt *)nim->data; if (useMin) { for (size_t i = 0; i < nim->nvox; i++) f32[i] = fmin(f32[i], fv); } else { for (size_t i = 0; i < nim->nvox; i++) f32[i] = fmax(f32[i], fv); } return 0; } printfx("nifti_max: Unsupported datatype %d\n", nim->datatype); return 1; } // nifti_max() staticx int nifti_inm(nifti_image *nim, double M) { //https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=fsl;bf9d21d2.1610 //With '-inm <value>', every voxel in the input volume is multiplied by <value> / M // where M is the mean across all voxels. 
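// The TFCE functions above follow Smith & Nichols (2009): each voxel accumulates the
// sum over heights h of e(h)^E * h^H * dh, where e(h) is the extent of the cluster
// containing the voxel at height h and dh is the constant step mx/100. The per-step
// increment is exactly the valToAdd term; restated as a sketch (excluded from the build):
#if 0
static flt tfce_increment(int clusterSize, flt thresh, double E, double H) {
	//clusterSize corresponds to qhi + 1: the voxels surviving at this height
	return pow(clusterSize, E) * pow(thresh, H); //the 'supporting section'
}
#endif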
	//n.b.: regardless of the -inm description above, the mean appears to only include voxels > 0
	if (nim->nvox < 1)
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
	if ((nvox3D < 1) || ((nim->nvox % nvox3D) != 0))
		return 1;
	int nvol = nim->nvox / nvox3D;
	flt *f32 = (flt *)nim->data;
#pragma omp parallel for
	for (int v = 0; v < nvol; v++) {
		size_t vi = v * nvox3D;
		double sum = 0.0;
#define gt0
#ifdef gt0
		int n = 0;
		for (size_t i = 0; i < nvox3D; i++) {
			if (f32[vi + i] > 0.0f) {
				n++;
				sum += f32[vi + i];
			}
		}
		if (sum == 0.0)
			continue;
		double ave = sum / n;
#else
		for (int i = 0; i < nvox3D; i++)
			sum += f32[vi + i];
		if (sum == 0.0)
			continue;
		double ave = sum / nvox3D;
#endif
		//printf("%g %g\n", ave, M);
		flt scale = M / ave;
		for (int i = 0; i < nvox3D; i++)
			f32[vi + i] *= scale;
	}
	return 0;
} // nifti_inm()

staticx int nifti_ing(nifti_image *nim, double M) {
	//https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=fsl;bf9d21d2.1610
	//With '-ing <value>', every voxel is multiplied by <value> / M,
	// where M is the single mean across the entire 4D dataset (unlike -inm, which rescales each volume separately)
	//n.b.: regardless of description, mean appears to only include voxels > 0
	if (nim->nvox < 1)
		return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	flt *f32 = (flt *)nim->data;
	double sum = 0.0;
	int n = 0;
	for (size_t i = 0; i < nim->nvox; i++) {
		if (f32[i] > 0.0f) {
			n++;
			sum += f32[i];
		}
	}
	if (sum == 0)
		return 0;
	double ave = sum / n;
	flt scale = M / ave;
#pragma omp parallel for
	for (int i = 0; i < nim->nvox; i++)
		f32[i] *= scale;
	return 0;
} //nifti_ing()

staticx int compare(const void *a, const void *b) {
	flt fa = *(const flt *)a;
	flt fb = *(const flt *)b;
	return (fa > fb) - (fa < fb);
}

staticx void dtrend(flt *xx, int npt, int pt0) {
	//linear detrend; pt0 selects the anchor: 0 = zero mean, 1 = first point set to zero, 2 = final point set to zero
	double t1, t3, t10, x0, x1;
	int ii;
	if (npt < 2 || xx == NULL)
		return;
	x0 = xx[0];
	x1 = 0.0;
	for (ii = 1; ii < npt; ii++) {
		x0 += xx[ii];
		x1 += xx[ii] * ii;
	}
	t1 = npt * x0;
	t3 = 1.0 / npt;
	t10 = npt * npt;
	double f0 = (double)(2.0 / (npt + 1.0) * t3 * (2.0 * t1 - 3.0 * x1 - x0));
	double f1 = (double)(-6.0 / (t10 - 1.0) * t3 * (-x0 - 2.0 * x1 + t1));
	//printf("%.8g %.8g %g\n", f0, f1, xx[0]);
	if (pt0 == 1)
		f0 = xx[0];
	if (pt0 == 2)
		f0 = xx[npt - 1] - (f1 * (npt - 1));
	for (ii = 0; ii < npt; ii++)
		xx[ii] -= (f0 + f1 * ii);
}

staticx int nifti_detrend_linear(nifti_image *nim) {
	if (nim->datatype != DT_CALC)
		return 1;
	size_t nvox3D = nim->nx * nim->ny * MAX(1, nim->nz);
	if (nvox3D < 1)
		return 1;
	int nvol = nim->nvox / nvox3D;
	if ((nvox3D * nvol) != nim->nvox)
		return 1;
	if (nvol < 2) {
		printfx("detrend requires a 4D image with at least two volumes\n");
		return 1;
	}
	flt *img = (flt *)nim->data;
#pragma omp parallel for
	for (size_t i = 0; i < nvox3D; i++) {
		flt *data = (flt *)_mm_malloc(nvol * sizeof(flt), 64);
		//load one voxel across all timepoints
		int j = 0;
		for (size_t v = i; v < nim->nvox; v += nvox3D) {
			data[j] = img[v];
			j++;
		}
		//detrend
		dtrend(data, nvol, 0);
		//save one voxel across all timepoints
		j = 0;
		for (size_t v = i; v < nim->nvox; v += nvox3D) {
			img[v] = data[j];
			j++;
		}
		_mm_free(data);
	}
	return 0;
}

#ifdef bandpass
//https://github.com/QtSignalProcessing/QtSignalProcessing/blob/master/src/iir.cpp
//https://github.com/rkuchumov/day_plot_diagrams/blob/8df48af431dc76b1656a627f1965d83e8693ddd7/data.c
//https://scipy-cookbook.readthedocs.io/items/ButterworthBandpass.html
// Sample rate and desired cutoff frequencies (in Hz).
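// For the Butterworth path below, cutoffs in Hz become normalized frequencies
// (fractions of the Nyquist rate fs/2) before filter design; for fMRI, fs = 1/TR.
// A sketch of that conversion, matching the 2.0*cutoff/fs terms passed to
// butter_design below (hypothetical helper, excluded from the build):
#if 0
static double normalized_cutoff(double cutoff_hz, double TRsec) {
	double fs = 1.0 / TRsec; //sample rate in Hz
	return 2.0 * cutoff_hz / fs; //fraction of Nyquist
	//e.g. TR = 2 s gives fs = 0.5 Hz (Nyquist 0.25 Hz): a 0.1 Hz cutoff -> 0.4
}
#endif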
// double highcut = 1250; // double lowcut = 500; // double samp_rate = 5000; //[b,a] = butter(2, [0.009, 0.08]); //https://afni.nimh.nih.gov/afni/community/board/read.php?1,84373,137180#msg-137180 //Power 2011, Satterthwaite 2013, Carp 2011, Power's reply to Carp 2012 // https://github.com/lindenmp/rs-fMRI/blob/master/func/ButterFilt.m //https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html /* The function butterworth_filter() emulates Jan Simon's FiltFiltM it uses Gustafsson’s method and padding to reduce ringing at start/end https://www.mathworks.com/matlabcentral/fileexchange/32261-filterm?focused=5193423&tab=function Copyright (c) 2011, Jan Simon All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.*/ staticx int butterworth_filter(flt *img, int nvox3D, int nvol, double fs, double highcut, double lowcut) { //sample rate, low cut and high cut are all in Hz //this attempts to emulate performance of https://www.mathworks.com/matlabcentral/fileexchange/32261-filterm // specifically, prior to the forward and reverse pass the coefficients are estimated by a forward and reverse pass int order = 2; if (order <= 0) return 1; if ((highcut <= 0.0) && (lowcut <= 0.0)) return 1; if (fs <= 0.0) return 1; if ((lowcut > 0.0) && (highcut > 0.0)) printfx("butter bandpass lowcut=%g highcut=%g fs=%g order=%d (effectively %d due to filtfilt)\n", lowcut, highcut, fs, order, 2 * order); else if (highcut > 0.0) printfx("butter lowpass highcut=%g fs=%g order=%d (effectively %d due to filtfilt)\n", highcut, fs, order, 2 * order); else if (lowcut > 0.0) printfx("butter highpass lowcut=%g fs=%g order=%d (effectively %d due to filtfilt)\n", lowcut, fs, order, 2 * order); else { printfx("Butterworth parameters do not make sense\n"); return 1; } double *a; double *b; double *IC; int nX = nvol; int nA = 0; nA = butter_design(order, 2.0 * lowcut / fs, 2.0 * highcut / fs, &a, &b, &IC); int nEdge = 3 * (nA - 1); if ((nA < 1) || (nX <= nEdge)) { printfx("filter requires at least %d samples\n", nEdge); _mm_free(a); _mm_free(b); _mm_free(IC); return 1; } #pragma omp parallel for for (int vx = 0; vx < nvox3D; vx++) { double *X = (double *)_mm_malloc(nX * sizeof(double), 64); size_t vo = vx; flt mn = INFINITY; flt mx = -INFINITY; for (int j = 0; j < nX; j++) { X[j] = img[vo]; mn = MIN(mn, X[j]); mx = MAX(mx, X[j]); vo += 
nvox3D; } if (mn < mx) { //some variability double *Xi = (double *)_mm_malloc(nEdge * sizeof(double), 64); for (int i = 0; i < nEdge; i++) Xi[nEdge - i - 1] = X[0] - (X[i + 1] - X[0]); double *CC = (double *)_mm_malloc((nA - 1) * sizeof(double), 64); for (int i = 0; i < (nA - 1); i++) CC[i] = IC[i] * Xi[0]; double *Xf = (double *)_mm_malloc(nEdge * sizeof(double), 64); for (int i = 0; i < nEdge; i++) Xf[i] = X[nX - 1] - (X[nX - 2 - i] - X[nX - 1]); Filt(Xi, nEdge, a, b, nA - 1, CC); //filter head Filt(X, nX, a, b, nA - 1, CC); //filter array Filt(Xf, nEdge, a, b, nA - 1, CC); //filter tail //reverse for (int i = 0; i < (nA - 1); i++) CC[i] = IC[i] * Xf[nEdge - 1]; FiltRev(Xf, nEdge, a, b, nA - 1, CC); //filter tail FiltRev(X, nX, a, b, nA - 1, CC); //filter array _mm_free(Xi); _mm_free(Xf); _mm_free(CC); } else { //else no variability: set all voxels to zero for (int j = 0; j < nX; j++) X[j] = 0; } //save data to 4D array vo = vx; for (int j = 0; j < nX; j++) { img[vo] = X[j]; vo += nvox3D; } _mm_free(X); } //for vx _mm_free(b); _mm_free(a); _mm_free(IC); return 0; } staticx int nifti_bandpass(nifti_image *nim, double hp_hz, double lp_hz, double TRsec) { if (nim->datatype != DT_CALC) return 1; size_t nvox3D = nim->nx * nim->ny * MAX(1, nim->nz); if (TRsec <= 0.0) TRsec = nim->nt; //pixdim[4]; if (TRsec <= 0) { printfx("Unable to determine sample rate\n"); return 1; } if (nvox3D < 1) return 1; int nvol = nim->nvox / nvox3D; if ((nvox3D * nvol) != nim->nvox) return 1; if (nvol < 1) { printfx("bandpass requires 4D datasets\n"); return 1; } return butterworth_filter((flt *)nim->data, nvox3D, nvol, 1 / TRsec, hp_hz, lp_hz); } #endif //#define DEBUG_ENABLED #ifdef DEBUG_ENABLED staticx int xyzt2txyz(nifti_image *nim) { size_t nxyz = nim->nx * nim->ny * nim->nz; size_t nt = nim->nt; if ((nim->nvox < 1) || (nim->nx < 1) || (nim->ny < 1) || (nim->nz < 1) || (nt < 2)) return 1; if (nim->datatype != DT_CALC) return 1; flt *img = (flt *)nim->data; flt *inimg = (flt *)_mm_malloc(nxyz * nt * sizeof(flt), 64); //alloc for each volume to allow openmp xmemcpy(inimg, img, nim->nvox * sizeof(flt)); size_t i = 0; #pragma omp parallel for for (size_t x = 0; x < nxyz; x++) { for (size_t t = 0; t < nt; t++) { img[i] = inimg[x + t * nxyz]; i++; } } _mm_free(inimg); return 0; } staticx int txyz2xyzt(nifti_image *nim) { size_t nxyz = nim->nx * nim->ny * nim->nz; size_t nt = nim->nt; if ((nim->nvox < 1) || (nim->nx < 1) || (nim->ny < 1) || (nim->nz < 1) || (nt < 2)) return 1; if (nim->datatype != DT_CALC) return 1; flt *img = (flt *)nim->data; flt *inimg = (flt *)_mm_malloc(nxyz * nt * sizeof(flt), 64); //alloc for each volume to allow openmp xmemcpy(inimg, img, nim->nvox * sizeof(flt)); size_t i = 0; #pragma omp parallel for for (size_t x = 0; x < nxyz; x++) { for (size_t t = 0; t < nt; t++) { img[x + t * nxyz] = inimg[i]; i++; } } _mm_free(inimg); return 0; } staticx int nifti_bptf(nifti_image *nim, double hp_sigma, double lp_sigma, int demean) { //Spielberg Matlab code: https://cpb-us-w2.wpmucdn.com/sites.udel.edu/dist/7/4542/files/2016/09/fsl_temporal_filt-15sywxn.m //5.0.7 highpass temporal filter removes the mean component https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/WhatsNew#anchor1 // http://www.fast.u-psud.fr/ezyfit/html/ezfit.html //gauss fitting functions: y = a*exp(-(x-x0)^2/(2*s^2)) // regression formula (https://www.mathsisfun.com/data/least-squares-regression.html) modulated by weight if (nim->datatype != DT_CALC) return 1; if ((hp_sigma <= 0) && (lp_sigma <= 0)) return 0; size_t nvox3D = nim->nx * nim->ny 
* MAX(1, nim->nz); if (nvox3D < 1) return 1; int nvol = nim->nvox / nvox3D; if ((nvox3D * nvol) != nim->nvox) return 1; if (nvol < 1) { printfx("bptf requires 4D datasets\n"); return 1; } int *hpStart, *hpEnd; double *hpSumX, *hpDenom, *hpSumWt, *hp, *hp0; if (hp_sigma > 0) { //initialize high-pass reusables //Spielberg's code uses 8*sigma, does not match current fslmaths: //tested with fslmaths freq4d -bptf 10 -1 nhp //cutoff ~3: most difference: 4->0.0128902 3->2.98023e-08 2->-0.0455322 1->0.379412 int cutoffhp = ceil(3 * hp_sigma); //to do: check this! ~3 hp = (double *)_mm_malloc((cutoffhp + 1 + cutoffhp) * sizeof(double), 64); //-cutoffhp..+cutoffhp hp0 = hp + cutoffhp; //convert from 0..(2*cutoffhp) to -cutoffhp..+cutoffhp for (int k = -cutoffhp; k <= cutoffhp; k++) //for each index in kernel hp0[k] = exp(-sqr(k) / (2 * sqr(hp_sigma))); hpStart = (int *)_mm_malloc(nvol * sizeof(int), 64); hpEnd = (int *)_mm_malloc(nvol * sizeof(int), 64); hpSumX = (double *)_mm_malloc(nvol * sizeof(double), 64); // hpDenom = (double *)_mm_malloc(nvol * sizeof(double), 64); // N*Sum(x^2) - (Sum(x))^2 hpSumWt = (double *)_mm_malloc(nvol * sizeof(double), 64); //sum of weight, N for (int v = 0; v < nvol; v++) { //linear regression with "gauss" fitting hpStart[v] = MAX(0, v - cutoffhp); hpEnd[v] = MIN(nvol - 1, v + cutoffhp); double sumX = 0.0; double sumX2 = 0.0; double sumWt = 0.0; for (int k = hpStart[v]; k <= hpEnd[v]; k++) { //for each index in kernel int x = k - v; double wt = hp0[x]; //kernel weight sumX += wt * x; sumX2 += wt * x * x; sumWt += wt; } hpSumX[v] = sumX; hpDenom[v] = (sumWt * sumX2) - sqr(sumX); // N*Sum(x^2) - (Sum(x))^2 if (hpDenom[v] == 0.0) hpDenom[v] = 1.0; //should never happen, x is known index hpDenom[v] = 1.0 / hpDenom[v]; //use reciprocal so we can use faster multiplication later hpSumWt[v] = sumWt; } //for each volume } //high-pass reusables //low-pass AFTER high-pass: fslmaths freq4d -bptf 45 5 fbp int *lpStart, *lpEnd; double *lpSumWt, *lp, *lp0; if (lp_sigma > 0) { //initialize low-pass reusables //simple Gaussian blur in time domain //freq4d -bptf -1 5 flp // fslmaths rest -bptf -1 5 flp // 3->0.00154053 4->3.5204e-05 5->2.98023e-07, 6->identical // Spielberg's code uses 8*sigma, so we will use that, even though precision seems excessive int cutofflp = ceil(8 * lp_sigma); //to do: check this! at least 6 lp = (double *)_mm_malloc((cutofflp + 1 + cutofflp) * sizeof(double), 64); //-cutofflp..+cutofflp lp0 = lp + cutofflp; //convert from 0..(2*cutofflp) to -cutofflp..+cutofflp for (int k = -cutofflp; k <= cutofflp; k++) //for each index in kernel lp0[k] = exp(-sqr(k) / (2 * sqr(lp_sigma))); lpStart = (int *)_mm_malloc(nvol * sizeof(int), 64); lpEnd = (int *)_mm_malloc(nvol * sizeof(int), 64); lpSumWt = (double *)_mm_malloc(nvol * sizeof(double), 64); //sum of weight, N for (int v = 0; v < nvol; v++) { lpStart[v] = MAX(0, v - cutofflp); lpEnd[v] = MIN(nvol - 1, v + cutofflp); double sumWt = 0.0; for (int k = lpStart[v]; k <= lpEnd[v]; k++) //for each index in kernel sumWt += lp0[k - v]; //kernel weight if (sumWt == 0.0) sumWt = 1.0; //will never happen lpSumWt[v] = 1.0 / sumWt; //use reciprocal so we can use faster multiplication later } //for each volume } //low-pass reusables //https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=FSL;5b8cace9.0902 //if TR=2s and 100 second cutoff is requested choose "-bptf 50 -1" //The 'cutoff' is defined as the FWHM of the filter, so if you ask for //100s that means 50 Trs, so the sigma, or HWHM, is 25 TRs. 
// -bptf <hp_sigma> <lp_sigma> xyzt2txyz(nim); flt *img = (flt *)nim->data; #pragma omp parallel for for (size_t i = 0; i < nvox3D; i++) { //read input data flt *imgIn = (flt *)_mm_malloc((nvol) * sizeof(flt), 64); flt *imgOut = img + (i * nvol); xmemcpy(imgIn, imgOut, nvol * sizeof(flt)); if (hp_sigma > 0) { double sumOut = 0.0; for (int v = 0; v < nvol; v++) { //each volume double sumY = 0.0; double sumXY = 0.0; for (int k = hpStart[v]; k <= hpEnd[v]; k++) { //for each index in kernel int x = k - v; double wt = hp0[x]; flt y = imgIn[k]; sumY += wt * y; sumXY += wt * x * y; } double n = hpSumWt[v]; double m = ((n * sumXY) - (hpSumX[v] * sumY)) * hpDenom[v]; //slope double b = (sumY - (m * hpSumX[v])) / n; //intercept imgOut[v] = imgIn[v] - b; sumOut += imgOut[v]; } //for each volume //"fslmaths -bptf removes timeseries mean (for FSL 5.0.7 onward)" n.b. except low-pass double mean = sumOut / (double)nvol; //de-mean AFTER high-pass if (demean) { for (int v = 0; v < nvol; v++) //each volume imgOut[v] -= mean; } } //hp_sigma > 0 if (lp_sigma > 0) { //low pass does not de-mean data //if BOTH low-pass and high-pass, apply low pass AFTER high pass: // fslmaths freq4d -bptf 45 5 fbp // difference 1.86265e-08 //still room for improvement: // fslmaths /Users/chris/src/rest -bptf 45 5 fbp // r=1.0 identical voxels 73% max difference 0.000488281 if (hp_sigma > 0) xmemcpy(imgIn, imgOut, nvol * sizeof(flt)); for (int v = 0; v < nvol; v++) { //each volume double sum = 0.0; for (int k = lpStart[v]; k <= lpEnd[v]; k++) //for each index in kernel sum += imgIn[k] * lp0[k - v]; imgOut[v] = sum * lpSumWt[v]; } // for each volume } //lp_sigma > 0 _mm_free(imgIn); } txyz2xyzt(nim); if (hp_sigma > 0) { //initialize high-pass reuseables _mm_free(hp); _mm_free(hpStart); _mm_free(hpEnd); _mm_free(hpSumX); _mm_free(hpDenom); _mm_free(hpSumWt); } if (lp_sigma > 0) { //initialize high-pass reuseables _mm_free(lp); _mm_free(lpStart); _mm_free(lpEnd); _mm_free(lpSumWt); } return 0; } // nifti_bptf() #else staticx int nifti_bptf(nifti_image *nim, double hp_sigma, double lp_sigma, int demean) { //Spielberg Matlab code: https://cpb-us-w2.wpmucdn.com/sites.udel.edu/dist/7/4542/files/2016/09/fsl_temporal_filt-15sywxn.m //5.0.7 highpass temporal filter removes the mean component https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/WhatsNew#anchor1 // http://www.fast.u-psud.fr/ezyfit/html/ezfit.html //gauss fitting functions: y = a*exp(-(x-x0)^2/(2*s^2)) // regression formula (https://www.mathsisfun.com/data/least-squares-regression.html) modulated by weight if (nim->datatype != DT_CALC) return 1; if ((hp_sigma <= 0) && (lp_sigma <= 0)) return 0; size_t nvox3D = nim->nx * nim->ny * MAX(1, nim->nz); if (nvox3D < 1) return 1; int nvol = nim->nvox / nvox3D; if ((nvox3D * nvol) != nim->nvox) return 1; if (nvol < 1) { printfx("bptf requires 4D datasets\n"); return 1; } int *hpStart, *hpEnd; double *hpSumX, *hpDenom, *hpSumWt, *hp, *hp0; if (hp_sigma > 0) { //initialize high-pass reusables //Spielberg's code uses 8*sigma, does not match current fslmaths: //tested with fslmaths freq4d -bptf 10 -1 nhp //cutoff ~3: most difference: 4->0.0128902 3->2.98023e-08 2->-0.0455322 1->0.379412 int cutoffhp = ceil(3 * hp_sigma); //to do: check this! 
~3 hp = (double *)_mm_malloc((cutoffhp + 1 + cutoffhp) * sizeof(double), 64); //-cutoffhp..+cutoffhp hp0 = hp + cutoffhp; //convert from 0..(2*cutoffhp) to -cutoffhp..+cutoffhp for (int k = -cutoffhp; k <= cutoffhp; k++) //for each index in kernel hp0[k] = exp(-sqr(k) / (2 * sqr(hp_sigma))); hpStart = (int *)_mm_malloc(nvol * sizeof(int), 64); hpEnd = (int *)_mm_malloc(nvol * sizeof(int), 64); hpSumX = (double *)_mm_malloc(nvol * sizeof(double), 64); hpDenom = (double *)_mm_malloc(nvol * sizeof(double), 64); // N*Sum(x^2) - (Sum(x))^2 hpSumWt = (double *)_mm_malloc(nvol * sizeof(double), 64); //sum of weight, N for (int v = 0; v < nvol; v++) { //linear regression with "gauss" fitting hpStart[v] = MAX(0, v - cutoffhp); hpEnd[v] = MIN(nvol - 1, v + cutoffhp); double sumX = 0.0; double sumX2 = 0.0; double sumWt = 0.0; for (int k = hpStart[v]; k <= hpEnd[v]; k++) { //for each index in kernel int x = k - v; double wt = hp0[x]; //kernel weight sumX += wt * x; sumX2 += wt * x * x; sumWt += wt; } hpSumX[v] = sumX; hpDenom[v] = (sumWt * sumX2) - sqr(sumX); // N*Sum(x^2) - (Sum(x))^2 if (hpDenom[v] == 0.0) hpDenom[v] = 1.0; //should never happen, x is known index hpDenom[v] = 1.0 / hpDenom[v]; //use reciprocal so we can use faster multiplication later hpSumWt[v] = sumWt; } //for each volume } //high-pass reusables //low-pass AFTER high-pass: fslmaths freq4d -bptf 45 5 fbp int *lpStart, *lpEnd; double *lpSumWt, *lp, *lp0; if (lp_sigma > 0) { //initialize low-pass reusables //simple Gaussian blur in time domain //freq4d -bptf -1 5 flp // fslmaths rest -bptf -1 5 flp // 3->0.00154053 4->3.5204e-05 5->2.98023e-07, 6->identical // Spielberg's code uses 8*sigma, so we will use that, even though precision seems excessive int cutofflp = ceil(8 * lp_sigma); //to do: check this! at least 6 lp = (double *)_mm_malloc((cutofflp + 1 + cutofflp) * sizeof(double), 64); //-cutofflp..+cutofflp lp0 = lp + cutofflp; //convert from 0..(2*cutofflp) to -cutofflp..+cutofflp for (int k = -cutofflp; k <= cutofflp; k++) //for each index in kernel lp0[k] = exp(-sqr(k) / (2 * sqr(lp_sigma))); lpStart = (int *)_mm_malloc(nvol * sizeof(int), 64); lpEnd = (int *)_mm_malloc(nvol * sizeof(int), 64); lpSumWt = (double *)_mm_malloc(nvol * sizeof(double), 64); //sum of weight, N for (int v = 0; v < nvol; v++) { lpStart[v] = MAX(0, v - cutofflp); lpEnd[v] = MIN(nvol - 1, v + cutofflp); double sumWt = 0.0; for (int k = lpStart[v]; k <= lpEnd[v]; k++) //for each index in kernel sumWt += lp0[k - v]; //kernel weight if (sumWt == 0.0) sumWt = 1.0; //will never happen lpSumWt[v] = 1.0 / sumWt; //use reciprocal so we can use faster multiplication later } //for each volume } //low-pass reusables //https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=FSL;5b8cace9.0902 //if TR=2s and 100 second cutoff is requested choose "-bptf 50 -1" //The 'cutoff' is defined as the FWHM of the filter, so if you ask for //100s that means 50 Trs, so the sigma, or HWHM, is 25 TRs. 
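// The high-pass above subtracts, at each timepoint, the intercept of a
// Gaussian-weighted linear fit over neighbouring volumes; the cached hpSumX, hpDenom
// and hpSumWt terms are the standard weighted-least-squares sums. The closed form,
// restated with names mirroring the code (sketch, excluded from the build):
#if 0
static void wls_fit(double N, double sumX, double sumX2, double sumY, double sumXY, double *m, double *b) {
	//N = Sum(w); slope m = (N*Sum(wxy) - Sum(wx)*Sum(wy)) / (N*Sum(wx^2) - Sum(wx)^2)
	double denom = (N * sumX2) - (sumX * sumX); //its reciprocal is cached as hpDenom above
	*m = ((N * sumXY) - (sumX * sumY)) / denom;
	*b = (sumY - (*m * sumX)) / N; //intercept: the component removed from each volume
}
#endif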
// -bptf <hp_sigma> <lp_sigma> flt *img = (flt *)nim->data; #pragma omp parallel for for (size_t i = 0; i < nvox3D; i++) { //read input data flt *imgIn = (flt *)_mm_malloc((nvol) * sizeof(flt), 64); flt *imgOut = (flt *)_mm_malloc((nvol) * sizeof(flt), 64); int j = 0; for (size_t v = i; v < nim->nvox; v += nvox3D) { imgIn[j] = img[v]; j++; } if (hp_sigma > 0) { double sumOut = 0.0; for (int v = 0; v < nvol; v++) { //each volume double sumY = 0.0; double sumXY = 0.0; for (int k = hpStart[v]; k <= hpEnd[v]; k++) { //for each index in kernel int x = k - v; double wt = hp0[x]; flt y = imgIn[k]; sumY += wt * y; sumXY += wt * x * y; } double n = hpSumWt[v]; double m = ((n * sumXY) - (hpSumX[v] * sumY)) * hpDenom[v]; //slope double b = (sumY - (m * hpSumX[v])) / n; //intercept imgOut[v] = imgIn[v] - b; sumOut += imgOut[v]; } //for each volume //"fslmaths -bptf removes timeseries mean (for FSL 5.0.7 onward)" n.b. except low-pass double mean = sumOut / (double)nvol; //de-mean AFTER high-pass if (demean) { for (int v = 0; v < nvol; v++) //each volume imgOut[v] -= mean; } } //hp_sigma > 0 if (lp_sigma > 0) { //low pass does not de-mean data //if BOTH low-pass and high-pass, apply low pass AFTER high pass: // fslmaths freq4d -bptf 45 5 fbp // difference 1.86265e-08 //still room for improvement: // fslmaths /Users/chris/src/rest -bptf 45 5 fbp // r=1.0 identical voxels 73% max difference 0.000488281 if (hp_sigma > 0) xmemcpy(imgIn, imgOut, nvol * sizeof(flt)); for (int v = 0; v < nvol; v++) { //each volume double sum = 0.0; for (int k = lpStart[v]; k <= lpEnd[v]; k++) //for each index in kernel sum += imgIn[k] * lp0[k - v]; imgOut[v] = sum * lpSumWt[v]; } // for each volume } //lp_sigma > 0 //write filtered data j = 0; for (size_t v = i; v < nim->nvox; v += nvox3D) { img[v] = imgOut[j]; j++; } _mm_free(imgIn); _mm_free(imgOut); } if (hp_sigma > 0) { //initialize high-pass reuseables _mm_free(hp); _mm_free(hpStart); _mm_free(hpEnd); _mm_free(hpSumX); _mm_free(hpDenom); _mm_free(hpSumWt); } if (lp_sigma > 0) { //initialize high-pass reuseables _mm_free(lp); _mm_free(lpStart); _mm_free(lpEnd); _mm_free(lpSumWt); } return 0; } // nifti_bptf() #endif staticx int nifti_demean(nifti_image *nim) { if (nim->datatype != DT_CALC) return 1; size_t nvox3D = nim->nx * nim->ny * MAX(1, nim->nz); if (nvox3D < 1) return 1; int nvol = nim->nvox / nvox3D; if ((nvox3D * nvol) != nim->nvox) return 1; if (nvol < 1) { printfx("demean requires 4D datasets\n"); return 1; } flt *img = (flt *)nim->data; #pragma omp parallel for for (size_t i = 0; i < nvox3D; i++) { double sum = 0.0; for (size_t v = i; v < nim->nvox; v += nvox3D) sum += img[v]; double mean = sum / nvol; for (size_t v = i; v < nim->nvox; v += nvox3D) img[v] -= mean; } return 0; } #ifndef USING_WASM staticx int nifti_dim_reduce(nifti_image *nim, enum eDimReduceOp op, int dim, int percentage) { //e.g. 
	// e.g. nifti_dim_reduce(nim, Tmean, 4) reduces the 4th dimension, saving the mean
	//int nReduce = nim->dim[dim];
	int nReduce = 0;
	if (dim == 1)
		nReduce = nim->nx;
	if (dim == 2)
		nReduce = nim->ny;
	if (dim == 3)
		nReduce = nim->nz;
	if (dim == 4)
		nReduce = nim->nt;
	if ((nReduce <= 1) || (dim < 1) || (dim > 4))
		return 0; //nothing to reduce, fslmaths does not generate an error
	if ((nim->nvox < 1) || (nim->nx < 1) || (nim->ny < 1) || (nim->nz < 1))
		return 1;
	//size_t nvox3D = nim->nx * nim->ny * nim->nz;
	//int nvol = nim->nvox / nvox3D;
	//if ((nvox3D * nvol) != nim->nvox) return 1;
	if (nim->datatype != DT_CALC)
		return 1;
	if (nim->ndim > 4)
		printfx("dimension reduction collapsing %" PRId64 "D down to 4D\n", nim->ndim);
	int dims[8], indims[8];
	for (int i = 0; i < 8; i++)
		dims[i] = 0;
	dims[1] = nim->nx;
	dims[2] = nim->ny;
	dims[3] = nim->nz;
	//for (int i = 0; i < 4; i++)
	//	dims[i] = MAX(nim->dim[i], 1);
	//XYZT limits to 4 dimensions, so collapse dims [4,5,6,7]
	dims[4] = nim->nvox / (dims[1] * dims[2] * dims[3]);
	for (int i = 5; i < 8; i++)
		dims[i] = 1;
	for (int i = 0; i < 8; i++)
		indims[i] = dims[i];
	if ((dims[1] * dims[2] * dims[3] * dims[4]) != nim->nvox)
		return 1; //e.g. data in dim 5..7!
	dims[dim] = 1;
	if (dim == 4)
		dims[0] = 3; //reduce 4D to 3D
	size_t nvox = dims[1] * dims[2] * dims[3] * dims[4];
	flt *i32 = (flt *)nim->data;
	void *dat = (void *)calloc(1, nim->nvox * sizeof(flt));
	flt *o32 = (flt *)dat;
	int collapseStep; //e.g. if we collapse the 4th dimension, we collapse across voxels separated by X*Y*Z
	if (dim == 1)
		collapseStep = 1; //collapse by columns
	else if (dim == 2)
		collapseStep = indims[1]; //collapse by rows
	else if (dim == 3)
		collapseStep = indims[1] * indims[2]; //collapse by slices
	else
		collapseStep = indims[1] * indims[2] * indims[3]; //collapse by volumes
	int xy = dims[1] * dims[2];
	int xyz = xy * dims[3];
	if ((op == Tmedian) || (op == Tstd) || (op == Tperc) || (op == Tar1)) {
		//for an even number of items, there are two options for the median; consider 4 ranked volumes:
		// mean of 2nd and 3rd: problem, this can return values not present in the data
		// 2nd value.
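// The median convention used below: itm = nReduce * 0.5 indexes the sorted samples,
// so an even count returns the upper of the two middle values, a value actually
// present in the data. A sketch with a worked example (excluded from the build):
#if 0
static flt upper_median(const flt *sorted, int n) {
	return sorted[(int)(n * 0.5)];
	//n = 4, sorted {2, 5, 7, 9}: index 2 -> 7 (upper middle value)
	//n = 5, sorted {2, 5, 7, 9, 11}: index 2 -> 7 (true median)
}
#endif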
Representative //here we use the latter approach //int itm = ((nReduce-1) * 0.5); int itm = (nReduce * 0.5); //seems correct tested with odd and even number of volumes if (op == Tperc) { double frac = ((double)percentage) / 100.0; //itm = ((nReduce-1) * frac); itm = ((nReduce)*frac); itm = MAX(itm, 0); itm = MIN(itm, nReduce - 1); } #pragma omp parallel for for (size_t i = 0; i < nvox; i++) { flt *vxls = (flt *)_mm_malloc((nReduce) * sizeof(flt), 64); size_t inPos = i; if (dim < 4) { //i is in output space, convert to input space, allows single loop for OpenMP int T = (i / xyz); //volume int r = i % (xyz); int Z = (r / xy); //slice r = r % (xy); int Y = (r / dims[1]); //row int X = r % dims[1]; inPos = X + (Y * indims[1]) + (Z * indims[1] * indims[2]) + (T * indims[1] * indims[2] * indims[3]); } for (int v = 0; v < nReduce; v++) { vxls[v] = i32[inPos]; inPos += collapseStep; } if ((op == Tstd) || (op == Tar1)) { //computed in cache, far fewer operations than Welford //note 64-bit double precision even if 32-bit DT_CALC //neither precision gives identical results // double precision attenuates catastrophic cancellation double sum = 0.0; for (int v = 0; v < nReduce; v++) sum += vxls[v]; double mean = sum / nReduce; double sumSqr = 0.0; for (int v = 0; v < nReduce; v++) sumSqr += sqr(vxls[v] - mean); if (op == Tstd) o32[i] = sqrt(sumSqr / (nReduce - 1)); else { //Tar1 if (sumSqr == 0.0) { o32[i] = 0.0; continue; } for (int v = 0; v < nReduce; v++) vxls[v] = vxls[v] - mean; //demean double r = 0.0; for (int v = 1; v < nReduce; v++) r += (vxls[v] * vxls[v - 1]) / sumSqr; o32[i] = r; } } else { //Tperc or Tmedian qsort(vxls, nReduce, sizeof(flt), compare); o32[i] = vxls[itm]; } _mm_free(vxls); } //for i: each voxel } else { #pragma omp parallel for for (size_t i = 0; i < nvox; i++) { size_t inPos = i; //ok if dim==4 if (dim < 4) { //i is in output space, convert to input space, allows single loop for OpenMP int T = (i / xyz); //volume int r = i % (xyz); int Z = (r / xy); //slice r = r % (xy); int Y = (r / dims[1]); //row int X = r % dims[1]; inPos = X + (Y * indims[1]) + (Z * indims[1] * indims[2]) + (T * indims[1] * indims[2] * indims[3]); } double sum = 0.0; flt mx = i32[inPos]; flt mn = mx; int mxn = 0; //flt sd = 0.0; //flt mean = 0.0; for (int v = 0; v < nReduce; v++) { flt f = i32[inPos]; sum += f; if (f > mx) { mx = f; mxn = v; } mn = MIN(mn, f); //Welford https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance // 2-pass method faster //flt delta = f - mean; //mean = mean + delta / (v+1); //sd = sd + delta*(f- mean); inPos += collapseStep; } if (op == Tmean) o32[i] = sum / nReduce; //mean else if (op == Tmax) o32[i] = mx; //max else if (op == Tmaxn) o32[i] = mxn; //maxn else if (op == Tmin) o32[i] = mn; //min } } //if opel nim->nvox = nvox; //for (int i = 0; i < 4; i++) // nim->dim[i] = dims[i]; nim->ndim = dims[0]; nim->nx = dims[1]; nim->ny = dims[2]; nim->nz = dims[3]; nim->nt = dims[4]; nim->nu = dims[5]; nim->nv = dims[6]; nim->nw = dims[7]; free(nim->data); nim->data = dat; return 0; } //Tar1 #endif staticx int *make_kernel_gauss(nifti_image *nim, int *nkernel, double sigmamm) { sigmamm = fabs(sigmamm); if (sigmamm == 0.0) return NULL; double mmCutoff = sigmamm * 6.0; //maximum extent int x = (2 * floor(mmCutoff / nim->dx)) + 1; int y = (2 * floor(mmCutoff / nim->dy)) + 1; int z = (2 * floor(mmCutoff / nim->dz)) + 1; int xlo = (int)(-x / 2); int ylo = (int)(-y / 2); int zlo = (int)(-z / 2); //betterthanfsl // fsl computes gaussian for all values in cube // from first 
// principles, a spherical filter has less bias
// since weighting is very low at these edge voxels, it has little impact on
// "-fmean", however with other filters like "dilM", fsl's solution works like
// a "box" filter, not a "sphere" filter
// default is to clone fsl
#ifdef betterthanfsl //true sphere at cutoff
	//first pass: determine number of surviving voxels (n)
	int n = 0;
	for (int zi = zlo; zi < (zlo + z); zi++)
		for (int yi = ylo; yi < (ylo + y); yi++)
			for (int xi = xlo; xi < (xlo + x); xi++) {
				flt dx = (xi * nim->dx);
				flt dy = (yi * nim->dy);
				flt dz = (zi * nim->dz);
				flt dist = sqrt(dx * dx + dy * dy + dz * dz);
				if (dist > mmCutoff)
					continue;
				n++;
			}
	*nkernel = n;
	int kernelWeight = (int)((double)INT_MAX / (double)n); //requires <limits.h>
	int *kernel = (int *)_mm_malloc((n * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight
	double *wt = (double *)_mm_malloc((n) * sizeof(double), 64); //precise weight: temporary
	//second pass: fill surviving voxels
	int i = 0;
	double expd = 2.0 * sigmamm * sigmamm;
	for (int zi = zlo; zi < (zlo + z); zi++)
		for (int yi = ylo; yi < (ylo + y); yi++)
			for (int xi = xlo; xi < (xlo + x); xi++) {
				flt dx = (xi * nim->dx);
				flt dy = (yi * nim->dy);
				flt dz = (zi * nim->dz);
				flt dist = sqrt(dx * dx + dy * dy + dz * dz);
				if (dist > mmCutoff)
					continue;
				kernel[i] = xi + (yi * nim->nx) + (zi * nim->nx * nim->ny);
				kernel[i + n] = xi; //left-right wrap detection
				kernel[i + n + n] = yi; //anterior-posterior wrap detection
				//kernel[i+n+n+n] = kernelWeight; //kernel height
				wt[i] = exp(-1.0 * (dist * dist) / expd);
				i++;
			}
#else
	int n = x * y * z;
	*nkernel = n;
	int *kernel = (int *)_mm_malloc((n * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight
	double *wt = (double *)_mm_malloc((n) * sizeof(double), 64); //precise weight: temporary
	int i = 0;
	double expd = 2.0 * sigmamm * sigmamm;
	for (int zi = zlo; zi < (zlo + z); zi++)
		for (int yi = ylo; yi < (ylo + y); yi++)
			for (int xi = xlo; xi < (xlo + x); xi++) {
				flt dx = (xi * nim->dx);
				flt dy = (yi * nim->dy);
				flt dz = (zi * nim->dz);
				flt dist = sqrt(dx * dx + dy * dy + dz * dz);
				//if (dist > mmCutoff) continue; //<- fsl fills all
				kernel[i] = xi + (yi * nim->nx) + (zi * nim->nx * nim->ny);
				kernel[i + n] = xi; //left-right wrap detection
				kernel[i + n + n] = yi; //anterior-posterior wrap detection
				//kernel[i+n+n+n] = kernelWeight; //kernel height
				wt[i] = exp(-1.0 * (dist * dist) / expd);
				i++;
			}
#endif
	double sum = 0.0;
	for (int i = 0; i < n; i++)
		sum += wt[i]; //sum of entire gaussian is 1
	double scale = 1.0 / sum;
	scale *= (double)INT_MAX; //we use integer scaling: in future faster to typecast integer as flt (if int=32bit) or double (if int=64bit)
	for (int i = 0; i < n; i++)
		kernel[i + n + n + n] = wt[i] * scale;
	_mm_free(wt);
	return kernel;
} //make_kernel_gauss()

staticx flt calmax(nifti_image *nim) {
	if ((nim->nvox < 1) || (nim->datatype != DT_CALC))
		return 0.0;
	flt *in32 = (flt *)nim->data;
	flt mx = in32[0];
	for (size_t i = 0; i < nim->nvox; i++)
		mx = MAX(mx, in32[i]);
	return mx;
}

staticx flt calmin(nifti_image *nim) {
	if ((nim->nvox < 1) || (nim->datatype != DT_CALC))
		return 0.0;
	flt *in32 = (flt *)nim->data;
	flt mn = in32[0];
	for (size_t i = 0; i < nim->nvox; i++)
		mn = MIN(mn, in32[i]);
	return mn;
}

#ifndef USING_WASM
staticx int nifti_tensor_2(nifti_image *nim, int lower2upper) {
	int nvox3D = nim->nx * nim->ny * nim->nz;
	if ((nim->datatype != DT_CALC) || (nvox3D < 1))
		return 1;
	int nVol = (int)(nim->nvox / nvox3D);
	if (nVol != 6) {
		printfx("nifti_tensor_2: input must have precisely 6 volumes (not 
%d)\n", nVol); return 1; } //3dAFNItoNIFTI does not set intent_code to NIFTI_INTENT_SYMMATRIX, so check dimensions if ((lower2upper) && (nim->nt == 6)) printfx("nifti_tensor_2: check images (header suggests already in upper triangle format)\n"); if ((!lower2upper) && (nim->nt == 6)) printfx("nifti_tensor_2: check images (header suggests already in lower triangle format)\n"); //lower xx xy yy xz yz zz //upper xx xy xz yy yz zz //swap volumes 3 and 4 flt *in32 = (flt *)nim->data; flt *tmp = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); flt *v3 = in32 + (2 * nvox3D); flt *v4 = in32 + (3 * nvox3D); xmemcpy(tmp, v4, nvox3D * sizeof(flt)); xmemcpy(v4, v3, nvox3D * sizeof(flt)); xmemcpy(v3, tmp, nvox3D * sizeof(flt)); _mm_free(tmp); if (lower2upper) { //FSL uses non-standard upper triangle //nim->dim[0] = 4; //for (int i = 4; i < 8; i++) // nim->dim[i] = 1; //nim->dim[4] = 6; nim->ndim = 4; nim->nt = 6; nim->nu = 1; nim->nv = 1; nim->nw = 1; } else { //upper2lower //lower is NIfTI default, used by AFNI, Camino, ANTS nim->intent_code = NIFTI_INTENT_SYMMATRIX; /*! To store an NxN symmetric matrix at each voxel: - dataset must have a 5th dimension - intent_code must be NIFTI_INTENT_SYMMATRIX - dim[5] must be N*(N+1)/2 - intent_p1 must be N (in float format) - the matrix values A[i][[j] are stored in row-order: - A[0][0] - A[1][0] A[1][1] - A[2][0] A[2][1] A[2][2] - etc.: row-by-row*/ //nim->dim[0] = 5; //for (int i = 4; i < 8; i++) // nim->dim[i] = 1; //nim->dim[5] = 6; nim->ndim = 5; nim->nt = 1; nim->nu = 6; nim->nv = 1; nim->nw = 1; } return 0; } #endif staticx int nifti_tensor_decomp(nifti_image *nim, int isUpperTriangle) { // MD= (Dxx+Dyy+Dzz)/3 //https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software // dtifit produces upper-triangular order: xx xy xz yy yz zz //MD = 1/3*(Dxx+Dyy+Dzz) //FA= sqrt(3/2)*sqrt(((Dx-MD)^2+(Dy-MD)^2+(Dz-MD^2))/(Dx^2+Dy^2+Dz^2)) //fslmaths tensor.nii -tensor_decomp bork.nii // 3dDTeig -uddata -sep_dsets -prefix AFNIdwi.nii tensor.nii //3dDTeig expects LOWER diagonal order unless -uddata // Dxx,Dxy,Dyy,Dxz,Dyz,Dzz // https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dDTeig.html //dxx, dxy, dyy, dxz, dyz, dzz // 3dDTeig -uddata -prefix AFNIdwi.nii tensor.nii // fslmaths tensor.nii -tensor_decomp bork.nii // Creates 5*3D and 3*4D files for a total of 14 volumes L1,L2,L3,V1(3),V2(3),V3(3),FA,MD #ifdef tensor_decomp if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1)) return 1; if (nim->datatype != DT_CALC) return 1; int nvox3D = nim->nx * nim->ny * nim->nz; int nVol = (int)(nim->nvox / nvox3D); if (nVol != 6) { printfx("nifti_tensor_decomp: input must have precisely 6 volumes (not %d)\n", nVol); return 1; } flt *in32 = (flt *)nim->data; //detect if data is upper or lower triangle // The "YY" component should be brighter (strongly positive) than the off axis XZ #define detectUpperOrLower #ifdef detectUpperOrLower double sumV3 = 0.0; //3rd volume, YY for lower, XZ for upper double sumV4 = 0.0; //4th volume, XZ for lower, YY for upper flt *v32 = in32 + (nvox3D * 2); //offset to 3rd volume for (size_t i = 0; i < nvox3D; i++) sumV3 += v32[i]; v32 = in32 + (nvox3D * 3); //offset to 4th volume for (size_t i = 0; i < nvox3D; i++) sumV4 += v32[i]; if ((sumV4 > sumV3) && (!isUpperTriangle)) printfx("nifti_tensor_decomp: check results, input looks like UPPER triangle.\n"); if ((sumV4 < sumV3) && (isUpperTriangle)) printfx("nifti_tensor_decomp: check results, input looks like LOWER triangle.\n"); #endif flt *out32 = (flt 
*)_mm_malloc(14 * nvox3D * sizeof(flt), 64); for (size_t i = 0; i < nvox3D; i++) { //n.b. in6 and out14 are ALWAYS float regradless of DT32, e.g. single even if DT=double float *in6 = (float *)_mm_malloc(6 * sizeof(float), 64); float *out14 = (float *)_mm_malloc(14 * sizeof(float), 64); size_t iv = i; for (int v = 0; v < 6; v++) { in6[v] = in32[iv]; iv += nvox3D; } EIG_tsfunc(0.0, 0.0, 0, in6, 0.0, 0.0, NULL, 0, out14, isUpperTriangle); size_t ov = i; for (int v = 0; v < 14; v++) { out32[ov] = out14[v]; ov += nvox3D; } _mm_free(out14); _mm_free(in6); } free(nim->data); // Creates 5*3D and 3*4D files for a total of 14 volumes L1(0),L2(1),L3(2),V1(3,4,5),V2(6,7,8),V3(9,10,11),FA(12),MD(13) flt *outv; //save 4D images nim->cal_min = -1; nim->cal_max = 1; nim->nvox = nvox3D * 3; nim->ndim = 4; nim->nt = 3; nim->nu = 1; nim->nv = 1; nim->nw = 1; //nim->dim[0] = 4; //nim->dim[4] = 3; //for (int i = 5; i < 8; i++) // nim->dim[i] = 1; //void * dat = (void *)calloc(1, 3*nvox3D * sizeof(flt)) ; //nim->data = dat; //flt * fa32 = (flt *) dat; //save V1 outv = out32 + (nvox3D * 3); nim->data = (void *)outv; nifti_save(nim, "_V1"); //save V2 outv = out32 + (nvox3D * 6); //xmemcpy(fa32, outv, 3*nvox3D*sizeof(flt)); nim->data = (void *)outv; nifti_save(nim, "_V2"); //save V3 outv = out32 + (nvox3D * 9); //xmemcpy(fa32, outv, 3*nvox3D*sizeof(flt)); nim->data = (void *)outv; nifti_save(nim, "_V3"); //release 4D memory //free(dat); //save 3D images nim->cal_min = 0; nim->cal_max = 0; nim->nvox = nvox3D * 1; nim->ndim = 3; nim->nt = 1; //nim->dim[0] = 3; //nim->dim[4] = 1; //save L1 outv = out32; //xmemcpy(fa32, outv, nvox3D*sizeof(flt)); nim->data = (void *)outv; nim->cal_max = calmax(nim); nifti_save(nim, "_L1"); //save L2 outv = out32 + (nvox3D * 1); //xmemcpy(fa32, outv, nvox3D*sizeof(flt)); nim->data = (void *)outv; nim->cal_max = calmax(nim); nifti_save(nim, "_L2"); //save L3 outv = out32 + (nvox3D * 2); //xmemcpy(fa32, outv, nvox3D*sizeof(flt)); nim->data = (void *)outv; nim->cal_max = calmax(nim); nifti_save(nim, "_L3"); //save MD outv = out32 + (nvox3D * 13); //xmemcpy(fa32, outv, nvox3D*sizeof(flt)); nim->data = (void *)outv; nim->cal_min = calmin(nim); nim->cal_max = calmax(nim); nifti_save(nim, "_MD"); //single volume data void *dat = (void *)calloc(1, nvox3D * sizeof(flt)); nim->data = dat; flt *fa32 = (flt *)dat; //save MO //MODE https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=FSL;4fbed3d1.1103 // compute MO (MODE) from L1, L2, L3, MD //e1=l1-MD, e2=l2-MD, e3=l3-MD; //n = (e1 + e2 - 2*e3)*(2*e1 - e2 - e3)*(e1 - 2*e2 + e3); //d = (e1*e1 + e2*e2 + e3*e3 - e1*e2 - e2*e3 - e1*e3); //d = 2*d*d*d; //mode = n/d; //something is wrong with this formula. // a. Ennis 2006 includes a sqrt that can not be factored out // b. 
results differ from fslmaths nim->cal_min = -1; nim->cal_max = 1; flt *L1 = out32; flt *L2 = out32 + (nvox3D * 1); flt *L3 = out32 + (nvox3D * 2); flt *MD = out32 + (nvox3D * 13); for (size_t i = 0; i < nvox3D; i++) { flt e1 = L1[i] - MD[i]; flt e2 = L2[i] - MD[i]; flt e3 = L3[i] - MD[i]; flt n = (e1 + e2 - 2 * e3) * (2 * e1 - e2 - e3) * (e1 - 2 * e2 + e3); flt d = (e1 * e1 + e2 * e2 + e3 * e3 - e1 * e2 - e2 * e3 - e1 * e3); d = sqrt(d); //Correlation r = 0.999746 d = 2 * d * d * d; //d = sqrt(d); //Correlation r = 0.990319 if (d != 0) d = n / d; //mode d = MIN(d, 1.0); d = MAX(d, -1.0); fa32[i] = d; } nifti_save(nim, "_MO"); //save FA outv = out32 + (nvox3D * 12); xmemcpy(fa32, outv, nvox3D * sizeof(flt)); nim->cal_min = 0; nim->cal_max = 1; nifti_save(nim, "_FA"); //keep FA in memory nim->cal_max = 0; _mm_free(out32); return 0; #else printfx("not compiled to support tensor_decomp\n"); return 1; #endif } //nifti_tensor_decomp() staticx void kernel3D_dilall(nifti_image *nim, int *kernel, int nkernel, int vol) { int nVox3D = nim->nx * nim->ny * nim->nz; flt *f32 = (flt *)nim->data; f32 += (nVox3D * vol); flt *inf32 = (flt *)_mm_malloc(nVox3D * sizeof(flt), 64); xmemcpy(inf32, f32, nVox3D * sizeof(flt)); int nxy = nim->nx * nim->ny; size_t nZero = 1; while (nZero > 0) { nZero = 0; for (int z = 0; z < nim->nz; z++) { int i = (z * nxy) - 1; //offset for (int y = 0; y < nim->ny; y++) { for (int x = 0; x < nim->nx; x++) { i++; if (f32[i] != 0.0) continue; int nNot0 = 0; flt sum = 0.0f; for (size_t k = 0; k < nkernel; k++) { size_t vx = i + kernel[k]; if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] == 0.0)) continue; //next handle edge cases int dx = x + kernel[k + nkernel]; if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right int dy = y + kernel[k + nkernel + nkernel]; if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior nNot0++; sum += inf32[vx]; } //for k if (nNot0 > 0) f32[i] = sum / nNot0; nZero++; } //for x } //for y } //for z xmemcpy(inf32, f32, nVox3D * sizeof(flt)); //printf("n=0: %zu\n", nZero); } //nZero > 0 _mm_free(inf32); } //kernel3D_dilall() staticx int kernel3D(nifti_image *nim, enum eOp op, int *kernel, int nkernel, int vol) { int nVox3D = nim->nx * nim->ny * nim->nz; flt *f32 = (flt *)nim->data; f32 += (nVox3D * vol); flt *inf32 = (flt *)_mm_malloc(nVox3D * sizeof(flt), 64); xmemcpy(inf32, f32, nVox3D * sizeof(flt)); int nxy = nim->nx * nim->ny; #ifndef USING_WASM //WASM does not support qsort if (op == fmediank) { flt *vxls = (flt *)_mm_malloc((nkernel) * sizeof(flt), 64); for (int z = 0; z < nim->nz; z++) { int i = (z * nxy) - 1; //offset for (int y = 0; y < nim->ny; y++) { for (int x = 0; x < nim->nx; x++) { i++; int nOK = 0; for (size_t k = 0; k < nkernel; k++) { size_t vx = i + kernel[k]; if ((vx < 0) || (vx >= nVox3D)) continue; //next handle edge cases int dx = x + kernel[k + nkernel]; if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right int dy = y + kernel[k + nkernel + nkernel]; if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior vxls[nOK] = inf32[vx]; nOK++; } //for k qsort(vxls, nOK, sizeof(flt), compare); int itm = (nOK * 0.5); f32[i] = vxls[itm]; } //for x } //for y } //for z _mm_free(vxls); } else #endif //WASM does not support qsort if (op == dilMk) { for (int z = 0; z < nim->nz; z++) { int i = (z * nxy) - 1; //offset for (int y = 0; y < nim->ny; y++) { for (int x = 0; x < nim->nx; x++) { i++; if (f32[i] != 0.0) continue; int nNot0 = 0; flt sum = 0.0f; for (size_t k = 0; k < nkernel; k++) { size_t vx = i + 
kernel[k];
						if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] == 0.0)) continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
						nNot0++;
						sum += inf32[vx];
					} //for k
					if (nNot0 > 0) f32[i] = sum / nNot0;
				} //for x
			} //for y
		} //for z
	} else if (op == dilDk) { //maximum - fslmaths 6.0.1 emulation, not really MODE: max non-zero
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					if (f32[i] != 0.0) continue;
					//flt mx = -INFINITY;
					flt mx = NAN;
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D)) continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
						flt v = inf32[vx];
						if (v == 0.0) continue;
						mx = MAX(mx, inf32[vx]); //with dilD an input voxel of 0
					} //for k
					//https://stackoverflow.com/questions/570669/checking-if-a-double-or-float-is-nan-in-c
					// f != f will be true only if f is NaN
					if (!(mx != mx)) f32[i] = mx;
				} //for x
			} //for y
		} //for z
	} else if (op == dilFk) { //maximum - fslmaths 6.0.1 appears to use "dilF" when the user requests "dilD"
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					flt mx = f32[i];
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] <= mx)) continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
						mx = MAX(mx, inf32[vx]);
						//if (mx < 0) continue; //with dilF, do not make a zero voxel darker than 0
					} //for k
					f32[i] = mx;
				} //for x
			} //for y
		} //for z
	} else if (op == dilallk) { // -dilall : apply -dilM repeatedly until the entire FOV is covered
		kernel3D_dilall(nim, kernel, nkernel, vol);
	} else if (op == eroFk) { //Minimum filtering of all voxels
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D)) continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
						f32[i] = MIN(f32[i], inf32[vx]);
					} //for k
				} //for x
			} //for y
		} //for z
	} else if (op == fmeank) { //Mean filtering, kernel weighted (conventionally used with gauss kernel) //u22a
		flt *kwt = (flt *)_mm_malloc(nkernel * sizeof(flt), 64);
		for (int k = 0; k < nkernel; k++)
			kwt[k] = ((double)kernel[k + nkernel + nkernel + nkernel] / (double)INT_MAX);
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					flt sum = 0.0f;
					flt wt = 0.0f;
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D)) continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
						sum += (inf32[vx] * kwt[k]);
						wt += kwt[k];
					} //for k
					f32[i] = sum / wt;
				} //for x
			} //for y
		} //for z
		_mm_free(kwt);
	} else if (op == fmeanzerok) { //Mean filtering, kernel weighted (negative and positive samples sum to zero: laplacian kernel) //u22a
		flt *kwt = (flt *)_mm_malloc(nkernel * sizeof(flt), 64);
		for (int k = 0; k < nkernel; k++)
			kwt[k] = ((double)kernel[k + nkernel + nkernel + nkernel] / (double)INT_MAX);
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					flt sumPos = 0.0f;
					flt wtPos = 0.0f;
					flt sumNeg = 0.0f;
					flt wtNeg = 0.0f;
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D)) continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
						if (kwt[k] > 0.0) {
							sumPos += (inf32[vx] * kwt[k]);
							wtPos += kwt[k];
						} else {
							sumNeg += (inf32[vx] * kwt[k]);
							wtNeg += -kwt[k];
						}
					} //for k
					flt val = 0.0;
					if (wtPos > 0.0) val += sumPos / wtPos;
					if (wtNeg > 0.0) val += sumNeg / wtNeg;
					f32[i] = val;
				} //for x
			} //for y
		} //for z
		_mm_free(kwt);
	} else if (op == fmeanuk) { //Mean filtering, kernel weighted, un-normalized (gives edge effects)
		flt *kwt = (flt *)_mm_malloc(nkernel * sizeof(flt), 64);
		for (int k = 0; k < nkernel; k++)
			kwt[k] = ((double)kernel[k + nkernel + nkernel + nkernel] / (double)INT_MAX);
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					flt sum = 0.0f;
					//flt wt = 0.0f;
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D)) continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
						sum += (inf32[vx] * kwt[k]);
						//wt += kwt[k];
					} //for k
					//f32[i] = sum / wt;
					f32[i] = sum;
				} //for x
			} //for y
		} //for z
		_mm_free(kwt);
	} else if (op == erok) {
		for (int z = 0; z < nim->nz; z++) {
			int i = (z * nxy) - 1; //offset
			for (int y = 0; y < nim->ny; y++) {
				for (int x = 0; x < nim->nx; x++) {
					i++;
					if (f32[i] == 0.0) continue;
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] != 0.0)) continue;
						//next handle edge cases
						int dx = x + kernel[k + nkernel];
						if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
						int dy = y + kernel[k + nkernel + nkernel];
						if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
						f32[i] = 0.0;
					} //for k
				} //for x
			} //for y
		} //for z
	} else {
		printfx("kernel3D: Unsupported operation\n");
		_mm_free(inf32);
		return 1;
	}
	_mm_free(inf32);
	return 0;
} //kernel3D

staticx int nifti_kernel(nifti_image *nim, enum eOp op, int *kernel, int nkernel) {
	if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1)) return 1;
	if (nim->datatype != DT_CALC) return 1;
	int nVox3D = nim->nx * nim->ny * nim->nz;
	int nVol = (int)(nim->nvox / nVox3D);
	if (nVol < 1) return 1;
	if ((nkernel < 1) || (kernel == NULL)) return 1;
	for (int v = 0; v < nVol; v++) {
		int ok = kernel3D(nim, op, kernel, nkernel, v);
		if (ok != 0) return ok;
	}
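	/*
	 * Aside (illustration only, not part of the build): every kernel used by
	 * kernel3D is stored as four parallel int planes of length nkernel --
	 * [flat voxel offset][dx][dy][weight] -- so each pass can reject flat
	 * offsets that silently wrapped across a row or slice boundary by
	 * re-checking dx/dy against the volume bounds; the fmean* ops divide the
	 * weight plane by INT_MAX. A minimal sketch of a 6-neighbor kernel in
	 * this layout (make_kernel6 is a hypothetical name):
	 *
	 *   static int make_kernel6(int nx, int ny, int k[6 * 4]) {
	 *       const int dx[6] = {-1, 1, 0, 0, 0, 0};
	 *       const int dy[6] = {0, 0, -1, 1, 0, 0};
	 *       const int dz[6] = {0, 0, 0, 0, -1, 1};
	 *       for (int i = 0; i < 6; i++) {
	 *           k[i] = dx[i] + (dy[i] * nx) + (dz[i] * nx * ny); //flat offset
	 *           k[i + 6] = dx[i];    //left-right wrap detection
	 *           k[i + 12] = dy[i];   //anterior-posterior wrap detection
	 *           k[i + 18] = INT_MAX; //unit weight, INT_MAX-scaled
	 *       }
	 *       return 6; //nkernel
	 *   }
	 */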
	return 0;
}

staticx int nifti_zero_crossing(nifti_image *nim, int orient) {
	//https://homepages.inf.ed.ac.uk/rbf/HIPR2/zeros.htm
	// implements: A better technique is to consider points on both sides of the threshold boundary, and choose the one with the lowest absolute magnitude of the Laplacian, which will hopefully be closest to the zero crossing.
	//we will define edges as voxels with zero crossings
	// orient refers to slice direction: 1=x=Sagittal, 2=y=Coronal, 3=z=Axial, else 3D
	int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
	int nVol = nim->nvox / nvox3D;
	flt *inimg = (flt *)nim->data;
#pragma omp parallel for
	for (int v = 0; v < nVol; v++) {
		int nx = nim->nx;
		int ny = nim->ny;
		int nz = nim->nz;
		flt *vol = inimg + ((int64_t)v * nvox3D); //this thread's volume within the 4D array
		flt *img = padImg3D(vol, &nx, &ny, &nz);
		memset(vol, 0, nvox3D * sizeof(flt)); //zero this volume only: zeroing the whole 4D array here would race with the other threads
		flt *out = vol;
		int xi = 1;
		int yj = nx;
		int zk = nx * ny;
		//orient: only look for edges in 2D, ignore one dimension
		if (orient == 1) xi = yj;
		if (orient == 2) yj = 1;
		if (orient == 3) zk = 1;
		int nxy = nx * ny;
		for (int z = 1; z < (nz - 1); z++)
			for (int y = 1; y < (ny - 1); y++)
				for (int x = 1; x < (nx - 1); x++) {
					int64_t i = x + (y * nx) + (z * nxy);
					flt val = img[i];
					flt ival = -val;
					//logic: opposite polarities cause negative sign: pos*neg = neg; pos*pos=pos; neg*neg=pos
					//check six neighbors that share a face
					if ((val > 0.0) && ((img[i - xi] <= ival) || (img[i + xi] <= ival) || (img[i - yj] <= ival) || (img[i + yj] <= ival) || (img[i - zk] <= ival) || (img[i + zk] <= ival)))
						out[0] = 1.0;
					if ((val < 0.0) && ((img[i - xi] > ival) || (img[i + xi] > ival) || (img[i - yj] > ival) || (img[i + yj] > ival) || (img[i - zk] > ival) || (img[i + zk] > ival)))
						out[0] = 1.0;
					out++;
				}
		_mm_free(img);
	}
	nim->scl_inter = 0.0;
	nim->scl_slope = 1.0;
	nim->cal_min = 0.0;
	nim->cal_max = 1.0;
	return 0;
} //nifti_zero_crossing

#ifdef USING_TIMERS
double clockMsec() { //return milliseconds from the monotonic clock (arbitrary origin)
	struct timespec _t;
	clock_gettime(CLOCK_MONOTONIC, &_t);
	return _t.tv_sec * 1000.0 + (_t.tv_nsec / 1.0e6);
}

long timediff(double startTimeMsec, double endTimeMsec) {
	return round(endTimeMsec - startTimeMsec);
}
#endif

staticx int nifti_dog(nifti_image *nim, flt SigmammPos, flt SigmammNeg, int orient) {
	//Difference of Gaussians (DoG): difference ratio of 1.6 approximates a Laplacian of Gaussian
	// https://homepages.inf.ed.ac.uk/rbf/HIPR2/log.htm
	flt kKernelWid = 2.5; //ceil(2.5)
	int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
	if ((nvox3D < 3) || (nim->nx < 1) || (nim->ny < 1) || (nim->nz < 1) || (nim->datatype != DT_CALC)) {
		printfx("Image dimensions too small for Difference of Gaussian.\n");
		return 1;
	}
	if (SigmammPos == SigmammNeg) {
		printfx("Difference of Gaussian requires two different sigma values.\n");
		return 1;
	}
	if ((SigmammPos < 0) || (SigmammNeg < 0)) {
		printfx("Difference of Gaussian requires positive values of sigma.\n");
		return 1;
	}
#ifdef USING_TIMERS
	double startTime = clockMsec();
#endif
	flt sigmaMn = MIN(SigmammNeg, SigmammPos);
	flt sigmaMx = MAX(SigmammNeg, SigmammPos);
	//Optimization: use results from narrow blur (sigmaMn) as inputs for wide blur (sigmaMx)
	//consider desired blurs of 2mm and 3.2mm, we can instead compute 2mm and 2.5mm
	//only about 10% faster for difference ratio of 2.0, but also removes one copy
	//https://computergraphics.stackexchange.com/questions/256/is-doing-multiple-gaussian-blurs-the-same-as-doing-one-larger-blur
	sigmaMx = sqrt((sigmaMx * sigmaMx) - (sigmaMn * sigmaMn));
	flt *inimg = (flt *)nim->data;
	int nVol = nim->nvox / nvox3D;
	int64_t nvox4D = nvox3D * nVol;
	int ret =
nifti_smooth_gauss(nim, sigmaMn, sigmaMn, sigmaMn, kKernelWid); if (ret != 0) { printfx("Gaussian smooth failed.\n"); return ret; } flt *imgMn = (flt *)_mm_malloc(nvox4D * sizeof(flt), 64); for (int64_t i = 0; i < nvox4D; i++) imgMn[i] = inimg[i]; ret = nifti_smooth_gauss(nim, sigmaMx, sigmaMx, sigmaMx, kKernelWid); if (SigmammPos > SigmammNeg) { for (int64_t i = 0; i < nvox4D; i++) inimg[i] = inimg[i] - imgMn[i]; } else { for (int64_t i = 0; i < nvox4D; i++) inimg[i] = imgMn[i] - inimg[i]; } _mm_free(imgMn); if (orient >= 0) ret = nifti_zero_crossing(nim, orient); #ifdef USING_TIMERS printfx("DoG time: %ld ms\n", timediff(startTime, clockMsec())); #endif return ret; } // nifti_dog() /* staticx int nifti_dogNew(nifti_image *nim, flt Sigmamm, flt SigmammNeg, int isEdge) { //Only one Gaussian blur - faster in theory, but slower in practice (kernel reads out of cache, kernel must be adjusted for edges) #ifdef USING_TIMERS double startTime = clockMsec(); #endif flt kKernelWid = 2.5; //ceil(2.5) int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1); if ((nvox3D < 3) || (nim->nx < 1) || (nim->ny < 1) || (nim->nz < 1) || (nim->datatype != DT_CALC)) { printfx("Image dimensions too small for Difference of Gaussian.\n"); return 1; } int ret = nifti_smooth_gauss(nim, Sigmamm, Sigmamm, Sigmamm, kKernelWid); if (ret != 0) { printfx("Gaussian smooth failed.\n"); return ret; } int nkernel = 0; //number of voxels in kernel int *kernel = NULL; kernel = make_kernel(nim, &nkernel, 3, 3, 3); //https://en.wikipedia.org/wiki/Discrete_Laplace_operator //27 point stencil // [2 3 2; 3 6 3; 2 3 2]; // [3 6 3; 6 -88 6; 3 6 3]; // [2 3 2; 3 6 3; 2 3 2]; int kernelWeight = floor(INT_MAX / 88.0); int i = nkernel + nkernel + nkernel; //slice below kernel[i] = -2 * kernelWeight; i++; kernel[i] = -3 * kernelWeight; i++; kernel[i] = -2 * kernelWeight; i++; kernel[i] = -3 * kernelWeight; i++; kernel[i] = -6 * kernelWeight; i++; kernel[i] = -3 * kernelWeight; i++; kernel[i] = -2 * kernelWeight; i++; kernel[i] = -3 * kernelWeight; i++; kernel[i] = -2 * kernelWeight; i++; //current slice kernel[i] = -3 * kernelWeight; i++; kernel[i] = -6 * kernelWeight; i++; kernel[i] = -3 * kernelWeight; i++; kernel[i] = -6 * kernelWeight; i++; kernel[i] = 88 * kernelWeight; i++; kernel[i] = -6 * kernelWeight; i++; kernel[i] = -3 * kernelWeight; i++; kernel[i] = -6 * kernelWeight; i++; kernel[i] = -3 * kernelWeight; i++; //slice above kernel[i] = -2 * kernelWeight; i++; kernel[i] = -3 * kernelWeight; i++; kernel[i] = -2 * kernelWeight; i++; kernel[i] = -3 * kernelWeight; i++; kernel[i] = -6 * kernelWeight; i++; kernel[i] = -3 * kernelWeight; i++; kernel[i] = -2 * kernelWeight; i++; kernel[i] = -3 * kernelWeight; i++; kernel[i] = -2 * kernelWeight; i++; enum eOp op = fmeanzerok; //u22 ret = nifti_kernel(nim, op, kernel, nkernel); _mm_free(kernel); if (isEdge) ret = nifti_zero_crossing(nim, 0); #ifdef USING_TIMERS printfx("DoG time: %ld ms\n", timediff(startTime, clockMsec())); #endif return ret; }*/ staticx int nifti_roi(nifti_image *nim, int xmin, int xsize, int ymin, int ysize, int zmin, int zsize, int tmin, int tsize) { // "fslmaths LAS -roi 3 32 0 40 0 40 0 5 f " int nt = nim->nvox / (nim->nx * nim->ny * nim->nz); if ((nim->nvox < 1) || (nt < 1)) return 1; if (nim->datatype != DT_CALC) return 1; flt *f32 = (flt *)nim->data; //if (neg_determ(nim)) // do something profound; //determinants do not seem to influence "-roi"? 
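/*
 * Usage sketch, mirroring the fslmaths call quoted above: the arguments are
 * "-roi xmin xsize ymin ysize zmin zsize tmin tsize", and every voxel
 * outside the inclusive box [min, min+size-1] on each axis is zeroed, e.g.
 *
 *   niimath LAS.nii -roi 3 32 0 40 0 40 0 5 out.nii
 *
 * keeps x=3..34, y=0..39, z=0..39, t=0..4; a voxel survives only when
 * xOK && yOK && zOK && tOK in the loop below.
 */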
int xmax = xmin + xsize - 1; int ymax = ymin + ysize - 1; int zmax = zmin + zsize - 1; int tmax = tmin + tsize - 1; //printf("%d..%d", zmin, zmax); size_t i = 0; for (int t = 0; t < nt; t++) { int tOK = 1; if ((t < tmin) || (t > tmax)) tOK = 0; for (int z = 0; z < nim->nz; z++) { int zOK = 1; if ((z < zmin) || (z > zmax)) zOK = 0; for (int y = 0; y < nim->ny; y++) { int yOK = 1; if ((y < ymin) || (y > ymax)) yOK = 0; for (int x = 0; x < nim->nx; x++) { int xOK = 1; if ((x < xmin) || (x > xmax)) xOK = 0; if ((xOK == 0) || (yOK == 0) || (zOK == 0) || (tOK == 0)) f32[i] = 0.0; i++; } //x } //y } //z } //t return 0; } staticx int nifti_sobel(nifti_image *nim, int offc, int isBinary) { //sobel is simply one kernel pass per dimension. // this could be achieved with successive passes of "-kernel" // here it is done in a single pass for cache efficiency // https://en.wikipedia.org/wiki/Sobel_operator int vox3D = nim->nx * nim->ny * MAX(nim->nz, 1); if (nim->datatype != DT_CALC) return 1; int nvol = nim->nvox / vox3D; int numk = 6; //center voxel and all its neighbors int *kx = (int *)_mm_malloc((numk * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight int *ky = (int *)_mm_malloc((numk * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight int *kz = (int *)_mm_malloc((numk * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight int i = 0; for (int x = 0; x <= 1; x++) for (int y = -1; y <= 1; y++) { int sgn = (2 * x) - 1; //-1 or +1 int weight = sgn * (2 - abs(y)); //kx compare left and right kx[i + numk] = (2 * x) - 1; //left/right wrap kx[i + numk + numk] = y; //anterior/posterior wrap kx[i] = kx[i + numk] + (kx[i + numk + numk] * (nim->nx)); //voxel offset kx[i + numk + numk + numk] = weight; //weight //ky compare anterior and posterior ky[i + numk] = y; //left/right wrap ky[i + numk + numk] = (2 * x) - 1; //anterior/posterior wrap ky[i] = ky[i + numk] + (ky[i + numk + numk] * (nim->nx)); //voxel offset ky[i + numk + numk + numk] = weight; //weight //kz superior/inferior kz[i + numk] = y; //left/right wrap kz[i + numk + numk] = 0; //anterior/posterior wrap kz[i] = y + (((2 * x) - 1) * nim->nx * nim->ny); //voxel offset kz[i + numk + numk + numk] = weight; //weight //printf("x%d y%d wt%d\n", kx[i+numk], kx[i+numk+numk], kx[i+numk+numk+numk]); //printf("x%d y%d wt%d\n", ky[i+numk], ky[i+numk+numk], ky[i+numk+numk+numk]); i++; } //for y flt *i32 = (flt *)nim->data; //input volumes #pragma omp parallel for for (int v = 0; v < nvol; v++) { flt *iv32 = i32 + (v * vox3D); flt *imgin = _mm_malloc(vox3D * sizeof(flt), 64); //input values prior to blur //edge information: flt mx = 0.0; uint8_t *imgdir = _mm_malloc(vox3D * sizeof(uint8_t), 64); //image direction if (isBinary) memset(imgdir, 0, vox3D * sizeof(uint8_t)); xmemcpy(imgin, iv32, vox3D * sizeof(flt)); int i = 0; for (int z = 0; z < nim->nz; z++) for (int y = 0; y < nim->ny; y++) for (size_t x = 0; x < nim->nx; x++) { //compute z gradient flt gx = 0.0f; for (size_t k = 0; k < numk; k++) { size_t vx = i + kx[k]; if ((vx < 0) || (vx >= vox3D)) continue; //next handle edge cases int dx = x + kx[k + numk]; if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right int dy = y + kx[k + numk + numk]; if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior gx += imgin[vx] * kx[k + numk + numk + numk]; } //for k //compute y gradient flt gy = 0.0f; for (size_t k = 0; k < numk; k++) { size_t vx = i + ky[k]; if ((vx < 0) || (vx >= vox3D)) continue; //next handle edge cases int dx = x + ky[k + numk]; if ((dx < 
0) || (dx >= nim->nx)) continue; //wrapped left-right int dy = y + ky[k + numk + numk]; if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior gy += imgin[vx] * ky[k + numk + numk + numk]; } //for k //compute z gradient flt gz = 0.0f; //always 0 for 2D, we could add conditional to skip but optimize for 3D for (size_t k = 0; k < numk; k++) { size_t vx = i + kz[k]; if ((vx < 0) || (vx >= vox3D)) continue; //next handle edge cases int dx = x + kz[k + numk]; if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right int dy = y + kz[k + numk + numk]; if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior gz += imgin[vx] * kz[k + numk + numk + numk]; } //for k gx = sqr(gx); gy = sqr(gy); gz = sqr(gz); iv32[i] = sqrt(gx + gy + gz); if (isBinary) { mx = MAX(mx, iv32[i]); if ((gx > gy) && (gx > gz)) imgdir[i] = 1; //left/right gradient is strongest else if (gy > gz) imgdir[i] = 2; //anterior/posterior gradient is strongest else imgdir[i] = 3; //superior/inferior gradient is strongest (or tie) } i++; } //for x if (isBinary) { //magnitude in range 0..1, zero voxels below threshold float scale = 1.0; if (mx > 0.0) scale = 1.0 / mx; float thresh = 0.1; for (int vx = 0; vx < vox3D; vx++) { imgin[vx] = iv32[vx] * scale; if (imgin[vx] < thresh) { imgin[vx] = 0.0; continue; } } //zero output: we will not set border voxels memset(iv32, 0, vox3D * sizeof(flt)); // int nx = nim->nx; int nxy = nx * nim->ny; for (int z = 1; z < (nim->nz -1); z++) for (int y = 1; y < (nim->ny - 1); y++) for (size_t x = 1; x < (nim->nx - 1); x++) { int vx = x + (y * nx) + (z * nxy); float val = imgin[vx]; if (val == 0.0) continue; float mxX = MAX(imgin[vx-1],imgin[vx+1]); float mxY = MAX(imgin[vx-nx],imgin[vx+nx]); float mxZ = MAX(imgin[vx-nxy],imgin[vx+nxy]); if ((imgdir[vx] == 1) && (val > mxX) && ((mxY > 0.0) || (mxZ > 0.0)) ) //left/right gradient iv32[vx] = 1.0; else if ((imgdir[vx] == 2) && (val > mxY) && ((mxX > 0.0) || (mxZ > 0.0)) ) //anterior/posterior gradient iv32[vx] = 1.0; else if ((val > mxZ) && ((mxX > 0.0) || (mxY > 0.0)))//head/foot gradient iv32[vx] = 1.0; } nim->scl_inter = 0.0; nim->scl_slope = 1.0; nim->cal_min = 0.0; nim->cal_max = 1.0; } //if isBinary _mm_free(imgdir); _mm_free(imgin); } //for each volume _mm_free(kx); _mm_free(ky); _mm_free(kz); return 0; } //nifti_sobel() #ifndef USING_WASM //WASM does not support changing sform/qform staticx int nifti_subsamp2(nifti_image *nim, int offc) { //naive downsampling: this is provided purely to mimic the behavior of fslmaths // see https://nbviewer.jupyter.org/urls/dl.dropbox.com/s/s0nw827nc4kcnaa/Aliasing.ipynb // no anti-aliasing filter https://en.wikipedia.org/wiki/Image_scaling int invox3D = nim->nx * nim->ny * MAX(nim->nz, 1); //int indim[5]; //for (int i = 1; i < 5; i++) // indim[i] = MAX(nim->dim[i], 1); int nvol = nim->nvox / invox3D; int x_odd = nim->nx % 2; if ((nim->nvox < 1) || (nvol < 1)) return 1; if (nim->datatype != DT_CALC) return 1; int nx = ceil(nim->nx * 0.5); int ny = ceil(nim->ny * 0.5); int nz = ceil(nim->nz * 0.5); if ((nx == nim->nx) && (ny == nim->ny) && (nz == nim->nz)) return 0; int nvox3D = nx * ny * nz; flt *i32 = (flt *)nim->data; void *dat = (void *)calloc(1, nvox3D * nvol * sizeof(flt)); flt *o32 = (flt *)dat; int x_flip = 0; if (!neg_determ(nim)) x_flip = 1; if (offc) { int *wt = _mm_malloc(nvox3D * nvol * sizeof(int), 64); //weight, just for edges for (int i = 0; i < (nvox3D * nvol); i++) { wt[i] = 0; o32[i] = 0.0; } int boost = 0; if ((x_odd) && (x_flip)) boost = 1; size_t i = 0; for 
(int v = 0; v < nvol; v++) { size_t vo = v * nvox3D; //volumes do not get reduced for (int z = 0; z < nim->nz; z++) { size_t zo = vo + ((z / 2) * ny * nx); for (int y = 0; y < nim->ny; y++) { size_t yo = zo + ((y / 2) * nx); for (int x = 0; x < nim->nx; x++) { size_t xo = yo + ((x + boost) / 2); wt[xo]++; o32[xo] += i32[i]; i++; } //x } //y } //z } //vol for (int i = 0; i < (nvox3D * nvol); i++) if (wt[i] > 0) o32[i] /= wt[i]; _mm_free(wt); } else { //if subsamp2offc else subsamp2 int numk = 27; //center voxel and all its neighbors int *kernel = (int *)_mm_malloc((numk * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight int i = 0; for (int z = -1; z <= 1; z++) for (int y = -1; y <= 1; y++) for (int x = -1; x <= 1; x++) { kernel[i] = x + (y * nim->nx) + (z * nim->nx * nim->ny); kernel[i + numk] = x; //left-right wrap detection kernel[i + numk + numk] = y; //anterior-posterior wrap detection kernel[i + numk + numk + numk] = 8 / (pow(2, sqr(x) + sqr(y) + sqr(z))); //kernel weight i++; } int boost = 0; //if ((xflip == 1) && (odd == 0)) boost = 1; if ((x_flip == 1) && (x_odd == 0)) boost = 1; //printf("boost %d\n", boost); size_t nvox3Din = nim->nx * nim->ny * nim->nz; size_t o = 0; for (int v = 0; v < nvol; v++) { size_t vi = v * nvox3Din; for (int z = 0; z < nz; z++) { int zi = (2 * z * nim->nx * nim->ny); //printf("%zu \n", zi); for (int y = 0; y < ny; y++) { int yy = y + y; //y*2 input y int yi = zi + (yy * nim->nx); for (int x = 0; x < nx; x++) { //int xx = x+x+xflip; //x*2 input x int xx = x + x + boost; //x*2 input x int xi = yi + xx; //flt sum = 0.0; //flt wt = 0.0; double sum = 0.0; double wt = 0.0; for (int k = 0; k < numk; k++) { if ((xi + kernel[k]) < 0) continue; //position would be less than 0 - outside volume, avoid negative values in size_t size_t pos = xi + kernel[k]; //offset if (pos >= nvox3Din) continue; //position outside volume, e.g. 
slice above top of volume int xin = xx + kernel[k + numk]; if ((xin < 0) || (xin >= nim->nx)) continue; //wrap left or right int yin = yy + kernel[k + numk + numk]; if ((yin < 0) || (yin >= nim->ny)) continue; //wrap anterior or posterior flt w = kernel[k + numk + numk + numk]; wt += w; sum += i32[vi + pos] * w; } //if (wt > 0.0) //no need to check: every voxel has at least one contributor (itself) o32[o] = sum / wt; //else { // o32[o] = 666.6; o++; } //x } //y } //z } //vol _mm_free(kernel); } //if subsamp2offc else subsamp2 nim->nvox = nvox3D * nvol; nim->nx = nx; nim->ny = ny; nim->nz = nz; //nim->dim[1] = nx; //nim->dim[2] = ny; //nim->dim[3] = nz; nim->dx *= 2; nim->dy *= 2; nim->dz *= 2; //nim->pixdim[1] *= 2; //nim->pixdim[2] *= 2; //nim->pixdim[3] *= 2; //adjust origin mat44 m = xform(nim); vec4 vx = setVec4(0, 0, 0); vec4 pos = nifti_vect44mat44_mul(vx, m); //vx = setVec4(0.5,0.5,0.5); //vx = setVec4(1.0,0.0,0.0); if (offc) { //printf("%d flip odd %d\n", x_flip, x_odd); if ((x_odd) && (x_flip)) vx = setVec4(-0.5, -0.5, -0.5); //subsamp2offc else vx = setVec4(0.5, 0.5, 0.5); //subsamp2offc //if (!xflip) { // vx = setVec4(0.5,0.5,0.5); // printf("y\n"); //} } else { if (x_odd) vx = setVec4(0, 0, 0); //subsamp2 else vx = setVec4(1, 0, 0); //subsamp2 if (!x_flip) vx = setVec4(0, 0, 0); } vec4 pos1 = nifti_vect44mat44_mul(vx, m); vx = setVec4(pos1.v[0] - pos.v[0], pos1.v[1] - pos.v[1], pos1.v[2] - pos.v[2]); m.m[0][3] += vx.v[0]; m.m[1][3] += vx.v[1]; m.m[2][3] += vx.v[2]; //scale spatial transform for (int i = 0; i < 3; i++) for (int j = 0; j < 3; j++) m.m[i][j] *= 2; //apply to both sform and qform in case VTK user for (int i = 0; i < 4; i++) for (int j = 0; j < 4; j++) { nim->sto_xyz.m[i][j] = m.m[i][j]; nim->qto_xyz.m[i][j] = m.m[i][j]; } free(nim->data); nim->data = dat; return 0; } staticx int nifti_resize(nifti_image *nim, flt zx, flt zy, flt zz, int interp_method) { //see AFNI's 3dresample //better than fslmaths: fslmaths can not resample 4D data // time 3dresample -dxyz 4.8 4.8 4.8 -rmode Linear -prefix afni.nii -input rest.nii // time ./sm rest.nii -subsamp2 out.nii //However, aliasing artifacts // time 3dresample -dxyz 4.8 4.8 4.8 -rmode Linear -prefix afni2.nii -input zoneplate3d_129.nii int invox3D = nim->nx * nim->ny * nim->nz; int nvol = nim->nvox / invox3D; if ((nim->nvox < 1) || (nvol < 1)) return 1; if (nim->datatype != DT_CALC) return 1; int nx = ceil(nim->nx * zx); int ny = ceil(nim->ny * zy); int nz = ceil(nim->nz * zz); if ((nx == nim->nx) && (ny == nim->ny) && (nz == nim->nz)) return 0; int nvox3D = nx * ny * nz; flt *i32 = (flt *)nim->data; void *dat = (void *)calloc(1, nvox3D * nvol * sizeof(flt)); flt *o32 = (flt *)dat; #pragma omp parallel for for (int v = 0; v < nvol; v++) { flt *iv32 = i32 + (v * invox3D); //reduce in X: half the width: 1/2 input file size flt *imgx = _mm_malloc(nx * nim->ny * nim->nz * sizeof(flt), 64); //input values prior to blur if (nx == nim->nx) //no change in x dimension xmemcpy(imgx, iv32, nx * nim->ny * nim->nz * sizeof(flt)); else { CLIST *contrib = createFilter(nim->nx, nx, interp_method); size_t i = 0; for (size_t y = 0; y < (nim->ny * nim->nz); y++) { for (int x = 0; x < nx; x++) { flt weight = 0.0; for (int j = 0; j < contrib[x].n; j++) weight += iv32[contrib[x].p[j].pixel] * contrib[x].p[j].weight; imgx[i++] = weight; } iv32 += nim->nx; } //for y for (i = 0; i < nx; i++) free(contrib[i].p); free(contrib); } //reduce in Y: half the height: 1/4 input size flt *imgy = _mm_malloc(nx * ny * nim->nz * sizeof(flt), 64); //input 
values prior to blur if (ny == nim->ny) //no change in y dimension xmemcpy(imgy, imgx, nx * ny * nim->nz * sizeof(flt)); else { CLIST *contrib = createFilter(nim->ny, ny, interp_method); flt *iny = _mm_malloc(nim->ny * sizeof(flt), 64); //input values prior to resize for (int z = 0; z < nim->nz; z++) { for (int x = 0; x < nx; x++) { int yo = (z * nx * ny) + x; //output int yi = (z * nx * nim->ny) + x; //input for (int j = 0; j < nim->ny; j++) { //iny[j] = imgx[yi+(j*nx)]; iny[j] = imgx[yi]; yi += nx; } for (int y = 0; y < ny; y++) { flt weight = 0.0; for (int j = 0; j < contrib[y].n; j++) weight += iny[contrib[y].p[j].pixel] * contrib[y].p[j].weight; //weight = y; imgy[yo] = weight; yo += nx; } //y } //x } //z _mm_free(iny); for (int i = 0; i < ny; i++) free(contrib[i].p); free(contrib); } _mm_free(imgx); //reduce in Z flt *ov32 = o32 + (v * nvox3D); if (nz == nim->nz) //no change in x dimension xmemcpy(ov32, imgy, nx * ny * nz * sizeof(flt)); else { CLIST *contrib = createFilter(nim->nz, nz, interp_method); flt *inz = _mm_malloc(nim->nz * sizeof(flt), 64); //input values prior to resize int nxy = nx * ny; for (int y = 0; y < ny; y++) { for (int x = 0; x < nx; x++) { int zo = x + (y * nx); //output offset int zi = x + (y * nx); //input offset for (int j = 0; j < nim->nz; j++) { inz[j] = imgy[zi]; zi += nxy; } for (int z = 0; z < nz; z++) { //for (int j = 0; j < nim->nz; j++) // inz[j] = imgy[zi+(j*nx*ny)]; flt weight = 0.0; for (int j = 0; j < contrib[z].n; j++) weight += inz[contrib[z].p[j].pixel] * contrib[z].p[j].weight; //weight = y; ov32[zo] = weight; zo += nx * ny; } //for z } //for x } //for y _mm_free(inz); for (int i = 0; i < nz; i++) free(contrib[i].p); free(contrib); } _mm_free(imgy); } //for v nim->nvox = nvox3D * nvol; nim->nx = nx; nim->ny = ny; nim->nz = nz; //nim->dim[1] = nx; //nim->dim[2] = ny; //nim->dim[3] = nz; nim->dx /= zx; nim->dy /= zy; nim->dz /= zz; //nim->pixdim[1] /= zx; //nim->pixdim[2] /= zy; //nim->pixdim[3] /= zz; //adjust origin - again, just like fslmaths mat44 m = xform(nim); m.m[0][0] /= zx; m.m[1][0] /= zx; m.m[2][0] /= zx; m.m[0][1] /= zy; m.m[1][1] /= zy; m.m[2][1] /= zy; m.m[0][2] /= zz; m.m[1][2] /= zz; m.m[2][2] /= zz; for (int i = 0; i < 4; i++) //transform BOTH sform and qform (e.g. ANTs/ITK user) for (int j = 0; j < 4; j++) { nim->sto_xyz.m[i][j] = m.m[i][j]; nim->qto_xyz.m[i][j] = m.m[i][j]; } free(nim->data); nim->data = dat; return 0; } #endif //WASM does not support changing sform/qform staticx int essentiallyEqual(float a, float b) { if (isnan(a) && isnan(b)) return 1; //surprisingly, with C nan != nan return fabs(a - b) <= ((fabs(a) > fabs(b) ? 
fabs(b) : fabs(a)) * epsilon); } staticx int nifti_binary_power(nifti_image *nim, double v) { //clone operations from ANTS ImageMath: power //https://manpages.debian.org/jessie/ants/ImageMath.1.en.html if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) return 1; //flt fv = v; flt *f32 = (flt *)nim->data; for (size_t i = 0; i < nim->nvox; i++) f32[i] = pow(f32[i], v); return 0; } struct sortIdx { flt val; int idx; }; staticx int nifti_fillh(nifti_image *nim, int is26) { if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) return 1; int nvox3D = nim->nx * nim->ny * nim->nz; int nvol = nim->nvox / nvox3D; //size_t nxy = nim->nx * nim->ny; //slice increment uint8_t *vx = (uint8_t *)_mm_malloc(nim->nvox * sizeof(uint8_t), 64); memset(vx, 0, nim->nvox * sizeof(uint8_t)); size_t n1 = 0; flt *f32 = (flt *)nim->data; for (size_t i = 0; i < nim->nvox; i++) if (f32[i] > 0.0) { n1++; vx[i] = 1; } if ((n1 < 1) || (nim->nx < 3) || (nim->ny < 3) || (nim->nz < 3)) { //if fewer than 3 rows, columns or slices all voxels touch edge. //only a binary threshold, not a flood fill for (size_t i = 0; i < nim->nvox; i++) f32[i] = vx[i]; _mm_free(vx); return 1; } //set up kernel to search for neighbors. Since we already included sides, we do not worry about A<->P and L<->R wrap int numk = 6; if (is26) numk = 26; int32_t *k = (int32_t *)_mm_malloc(numk * sizeof(int32_t), 64); //queue with untested seed if (is26) { int j = 0; for (int z = -1; z <= 1; z++) for (int y = -1; y <= 1; y++) for (int x = -1; x <= 1; x++) { k[j] = x + (y * nim->nx) + (z * nim->nx * nim->ny); j++; } //for x } else { //if 26 neighbors else 6.. k[0] = nim->nx * nim->ny; //up k[1] = -k[0]; //down k[2] = nim->nx; //anterior k[3] = -k[2]; //posterior k[4] = 1; //left k[5] = -1; } //https://en.wikipedia.org/wiki/Flood_fill #pragma omp parallel for for (int v = 0; v < nvol; v++) { uint8_t *vxv = vx; vxv += (v * nvox3D); uint8_t *vxs = (uint8_t *)_mm_malloc(nim->nvox * sizeof(uint8_t), 64); xmemcpy(vxs, vxv, nvox3D * sizeof(uint8_t)); //dst, src int32_t *q = (int32_t *)_mm_malloc(nvox3D * sizeof(int32_t), 64); //queue with untested seed int qlo = 0; int qhi = -1; //ints always signed in C! 
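		/*
		 * The fill below is a plain first-in, first-out breadth-first search:
		 * q[qlo..qhi] holds background (zero) voxels already marked reached,
		 * qlo is the read cursor and qhi the write cursor. Equivalently:
		 *
		 *   push every zero voxel on the volume border;            //seeds
		 *   while (qhi >= qlo) {                                   //queue not empty
		 *       i = q[qlo++];                                      //pop
		 *       for each neighbor j of i (6 or 26 connectivity)
		 *           if (vxs[j] == 0) { vxs[j] = 1; q[++qhi] = j; } //push once
		 *   }
		 *   //any voxel still 0 was never reached from the border: an interior hole
		 */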
//load edges size_t i = 0; for (int z = 0; z < nim->nz; z++) { int zedge = 0; if ((z == 0) || (z == (nim->nz - 1))) zedge = 1; for (int y = 0; y < nim->ny; y++) { int yedge = 0; if ((y == 0) || (y == (nim->ny - 1))) yedge = 1; for (int x = 0; x < nim->nx; x++) { if ((vxs[i] == 0) && (zedge || yedge || (x == 0) || (x == (nim->nx - 1)))) { //found new seed vxs[i] = 1; //do not find again qhi++; q[qhi] = i; } // new seed i++; } //for x } //y } //z //printf("seeds %d kernel %d\n", qhi+1, numk); //run a 'first in, first out' queue while (qhi >= qlo) { //retire one seed, add 0..6 new ones (fillh) or 0..26 new ones (fillh26) for (int j = 0; j < numk; j++) { int jj = q[qlo] + k[j]; if ((jj < 0) || (jj >= nvox3D)) continue; if (vxs[jj] != 0) continue; //add new seed; vxs[jj] = 1; qhi++; q[qhi] = jj; } qlo++; } //while qhi >= qlo: continue until all seeds tested for (size_t i = 0; i < nvox3D; i++) if (vxs[i] == 0) vxv[i] = 1; //hidden internal voxel not found from the fill _mm_free(vxs); _mm_free(q); } //for each volume for (size_t i = 0; i < nim->nvox; i++) f32[i] = vx[i]; _mm_free(vx); _mm_free(k); return 0; } staticx void rand_test() { //https://www.phoronix.com/scan.php?page=news_item&px=Linux-RdRand-Sanity-Check int r0 = rand(); for (int i = 0; i < 7; i++) if (rand() != r0) return; printfx("RDRAND gives funky output: update firmware\n"); } staticx int nifti_unary(nifti_image *nim, enum eOp op) { if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) { printfx("nifti_unary: Unsupported datatype %d\n", nim->datatype); return 1; } flt *f32 = (flt *)nim->data; if (op == exp1) { for (size_t i = 0; i < nim->nvox; i++) f32[i] = exp(f32[i]); } else if (op == log1) { for (size_t i = 0; i < nim->nvox; i++) { if (f32[i] <= 0.0) f32[i] = 0.0; else f32[i] = log(f32[i]); } } else if (op == floor1) { for (size_t i = 0; i < nim->nvox; i++) f32[i] = floor(f32[i]); } else if (op == round1) { for (size_t i = 0; i < nim->nvox; i++) f32[i] = round(f32[i]); } else if (op == ceil1) { for (size_t i = 0; i < nim->nvox; i++) f32[i] = ceil(f32[i]); } else if (op == trunc1) { for (size_t i = 0; i < nim->nvox; i++) f32[i] = trunc(f32[i]); } else if (op == sin1) { for (size_t i = 0; i < nim->nvox; i++) f32[i] = sin(f32[i]); } else if (op == cos1) { for (size_t i = 0; i < nim->nvox; i++) f32[i] = cos(f32[i]); } else if (op == tan1) { for (size_t i = 0; i < nim->nvox; i++) f32[i] = tan(f32[i]); } else if (op == asin1) { for (size_t i = 0; i < nim->nvox; i++) f32[i] = asin(f32[i]); } else if (op == acos1) { for (size_t i = 0; i < nim->nvox; i++) f32[i] = acos(f32[i]); } else if (op == atan1) { for (size_t i = 0; i < nim->nvox; i++) f32[i] = atan(f32[i]); } else if (op == sqr1) { for (size_t i = 0; i < nim->nvox; i++) f32[i] = f32[i] * f32[i]; //<- pow(a,x) uses flt for x } else if (op == sqrt1) { nifti_sqrt(f32, nim->nvox); } else if (op == recip1) { //https://stackoverflow.com/questions/10606483/sse-reciprocal-if-not-zero for (size_t i = 0; i < nim->nvox; i++) { if (f32[i] == 0.0f) continue; f32[i] = 1.0 / f32[i]; } } else if (op == abs1) { for (size_t i = 0; i < nim->nvox; i++) f32[i] = fabs(f32[i]); } else if (op == bin1) { for (size_t i = 0; i < nim->nvox; i++) { if (f32[i] > 0) f32[i] = 1.0f; else f32[i] = 0.0f; } } else if (op == binv1) { for (size_t i = 0; i < nim->nvox; i++) { if (f32[i] > 0) f32[i] = 0.0f; else f32[i] = 1.0f; } } else if (op == edge1) { if ((nim->dx == 0.0) || (nim->dy == 0.0) || (nim->dz == 0.0)) { printfx("edge requires non-zero pixdim1/pixdim2/pixdim3\n"); return 1; } flt xscl = 1.0 / 
(sqr(nim->dx)); flt yscl = 1.0 / (sqr(nim->dy)); flt zscl = 1.0 / (sqr(nim->dz)); flt xyzscl = 1.0 / (2.0 * sqrt(xscl + yscl + zscl)); if (nim->nz < 2) { //no slices 'above' or 'below' for 2D size_t nxy = nim->nx * nim->ny; //slice increment int nvol = nim->nvox / nxy; if ((nvol * nxy) != nim->nvox) return 1; #pragma omp parallel for for (int v = 0; v < nvol; v++) { //find maximum for each entire volume (excepted observed volume 0) flt *inp = (flt *)_mm_malloc(nxy * sizeof(flt), 64); flt *o32 = (flt *)f32; o32 += v * nxy; xmemcpy(inp, o32, nxy * sizeof(flt)); //dst, src for (int y = 1; (y < (nim->ny - 1)); y++) { size_t yo = y * nim->nx; for (int x = 1; (x < (nim->nx - 1)); x++) { size_t vx = yo + x; flt xv = sqr(inp[vx + 1] - inp[vx - 1]) * xscl; flt yv = sqr(inp[vx + nim->nx] - inp[vx - nim->nx]) * yscl; o32[vx] = sqrt(xv + yv) * xyzscl; } //x } //y _mm_free(inp); } //for v return 1; } //edge for 2D volume(s) int nvox3D = nim->nx * nim->ny * nim->nz; int nvol = nim->nvox / nvox3D; if ((nvox3D * nvol) != nim->nvox) return 1; size_t nxy = nim->nx * nim->ny; //slice increment #pragma omp parallel for for (int v = 0; v < nvol; v++) { //find maximum for each entire volume (excepted observed volume 0) flt *inp = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); flt *o32 = (flt *)f32; o32 += v * nvox3D; xmemcpy(inp, o32, nvox3D * sizeof(flt)); //dst, src for (int z = 1; (z < (nim->nz - 1)); z++) { size_t zo = z * nxy; for (int y = 1; (y < (nim->ny - 1)); y++) { size_t yo = y * nim->nx; for (int x = 1; (x < (nim->nx - 1)); x++) { size_t vx = zo + yo + x; flt xv = sqr(inp[vx + 1] - inp[vx - 1]) * xscl; flt yv = sqr(inp[vx + nim->nx] - inp[vx - nim->nx]) * yscl; flt zv = sqr(inp[vx + nxy] - inp[vx - nxy]) * zscl; o32[vx] = sqrt(xv + yv + zv) * xyzscl; } //x } //y } //z _mm_free(inp); } //for v return 1; //edge for 3D volume(s) } else if (op == index1) { //nb FSLmaths flips dim[1] depending on determinant #ifndef USING_WASM size_t idx = 0; if (!neg_determ(nim)) { //flip x size_t nyzt = nim->nvox / nim->nx; if ((nyzt * nim->nx) != nim->nvox) return 1; for (size_t i = 0; i < nyzt; i++) { size_t row = i * nim->nx; ; int x = nim->nx; while (x > 0) { x--; if (f32[row + x] != 0) f32[row + x] = idx++; } //for each column (x) } //for each row (yzt) } else //don't flip x for (size_t i = 0; i < nim->nvox; i++) if (f32[i] != 0) f32[i] = idx++; #endif } else if (op == nan1) { for (size_t i = 0; i < nim->nvox; i++) if (isnan(f32[i])) f32[i] = 0.0; } else if (op == nanm1) { for (size_t i = 0; i < nim->nvox; i++) if (isnan(f32[i])) f32[i] = 1.0; else f32[i] = 0.0; } else if (op == rand1) { rand_test(); flt scl = (1.0 / RAND_MAX); for (size_t i = 0; i < nim->nvox; i++) f32[i] += rand() * scl; } else if (op == randn1) { rand_test(); //https://en.wikipedia.org/wiki/Box–Muller_transform //for SIMD see https://github.com/miloyip/normaldist-benchmark static const flt sigma = 1.0f; static const flt mu = 0.0; //static const flt epsilon = FLT_EPSILON; static const flt two_pi = 2.0 * 3.14159265358979323846; static const flt scl = (1.0 / RAND_MAX); //fill pairs for (size_t i = 0; i < (nim->nvox - 1); i += 2) { flt u1, u2; do { u1 = rand() * scl; u2 = rand() * scl; } while (u1 <= epsilon); flt su1 = sqrt(-2.0 * log(u1)); flt z0 = su1 * cos(two_pi * u2); flt z1 = su1 * sin(two_pi * u2); f32[i] += z0 * sigma + mu; f32[i + 1] += z1 * sigma + mu; } //if odd, fill final voxel if (nim->nvox % 2 != 0) { flt u1, u2; do { u1 = rand() * scl; u2 = rand() * scl; } while (u1 <= epsilon); flt z0 = sqrt(-2.0 * log(u1)) * cos(two_pi * u2); 
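		/*
		 * Box-Muller recap: two independent uniforms u1 in (0,1], u2 in [0,1]
		 * yield two independent standard normals, which is why the main loop
		 * above fills voxels in pairs and this tail only handles an odd final
		 * voxel (the epsilon guard keeps log(u1) finite). A self-contained
		 * sketch of the same transform:
		 *
		 *   #include <math.h>
		 *   #include <stdlib.h>
		 *   //returns one N(0,1) sample, writes a second independent one to *z1
		 *   static double randn_pair(double *z1) {
		 *       double u1, u2;
		 *       do { u1 = rand() / (double)RAND_MAX; } while (u1 <= 0.0);
		 *       u2 = rand() / (double)RAND_MAX;
		 *       double r = sqrt(-2.0 * log(u1));
		 *       *z1 = r * sin(2.0 * 3.14159265358979323846 * u2);
		 *       return r * cos(2.0 * 3.14159265358979323846 * u2);
		 *   }
		 */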
f32[nim->nvox - 1] += z0 * sigma + mu; } } else if (op == range1) { flt mn = f32[0]; flt mx = mn; for (size_t i = 0; i < nim->nvox; i++) { mn = fmin(f32[i], mn); mx = fmax(f32[i], mx); } nim->cal_min = mn; nim->cal_max = mx; } else if (op == rank1) { #ifndef USING_WASM //WASM does not like qsort int nvox3D = nim->nx * nim->ny * nim->nz; int nvol = nim->nvox / nvox3D; if ((nvox3D * nvol) != nim->nvox) return 1; if (nvol <= 1) { //you are always first if you are the only one to show up... for (size_t i = 0; i < nim->nvox; i++) f32[i] = 1; } else { #pragma omp parallel for for (int i = 0; i < nvox3D; i++) { //how do we handle ties? struct sortIdx *k = (struct sortIdx *)_mm_malloc(nvol * sizeof(struct sortIdx), 64); size_t j = i; for (int v = 0; v < nvol; v++) { k[v].val = f32[j]; k[v].idx = j; j += nvox3D; } int varies = 0; for (int v = 0; v < nvol; v++) { if (k[v].val != k[0].val) { varies = 1; break; } } if (varies) { qsort(k, nvol, sizeof(struct sortIdx), compare); for (int v = 0; v < nvol; v++) f32[k[v].idx] = v + 1; } else { j = i; for (int v = 0; v < nvol; v++) { f32[j] = v + 1; j += nvox3D; } } _mm_free(k); } //for i } //nvol > 1 #endif //WASM does not like qsort } else if ((op == rank1) || (op == ranknorm1)) { #ifndef USING_WASM //WASM does not like qsort int nvox3D = nim->nx * nim->ny * nim->nz; int nvol = nim->nvox / nvox3D; if ((nvox3D * nvol) != nim->nvox) return 1; if (nvol <= 1) { //you are always first if you are the only one to show up... for (int i = 0; i < nim->nvox; i++) f32[i] = 0; } else { #pragma omp parallel for for (int i = 0; i < nvox3D; i++) { struct sortIdx *k = (struct sortIdx *)_mm_malloc(nvol * sizeof(struct sortIdx), 64); size_t j = i; double sum = 0.0; for (int v = 0; v < nvol; v++) { k[v].val = f32[j]; sum += k[v].val; k[v].idx = j; j += nvox3D; } double mean = sum / nvol; double sumSqr = 0.0; for (int v = 0; v < nvol; v++) sumSqr += sqr(k[v].val - mean); double stdev = sqrt(sumSqr / (nvol - 1)); qsort(k, nvol, sizeof(struct sortIdx), compare); //strange formula, but replicates fslmaths, consider nvol=3 rank[2,0,1] will be pval [2.5/3, 1.5/3, 0.5/3] for (int v = 0; v < nvol; v++) f32[k[v].idx] = (stdev * -qginv((double)(v + 0.5) / (double)nvol)) + mean; _mm_free(k); } //for i } //nvol > 1 #endif //WASM does not like qsort } else if (op == ztop1) { for (size_t i = 0; i < nim->nvox; i++) f32[i] = qg(f32[i]); } else if (op == ptoz1) { //given p, return x such that Q(x)=p, for 0 < p < 1 // #ifdef DT32 const flt kNaN = NAN; //const flt kNaN = 0.0 / 0.0; for (size_t i = 0; i < nim->nvox; i++) { if ((f32[i] < 0.0) || (f32[i] > 1.0)) f32[i] = kNaN; else f32[i] = qginv(f32[i]); } } else if ((op == pval1) || (op == pval01)) { int nvox3D = nim->nx * nim->ny * nim->nz; int nvol = nim->nvox / nvox3D; if ((nvox3D * nvol) != nim->nvox) return 1; if (nvol <= 1) { printfx("permutation tests require 4D datasets.\n"); return 1; } //void *dat = (void *)calloc(1, nvox3D * sizeof(flt)); //flt *o32 = (flt *)dat; flt *o32= (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); memset(o32, 0, nvox3D * sizeof(flt)); //zero array #pragma omp parallel for for (int i = 0; i < nvox3D; i++) { size_t vi = i; flt obs = f32[vi]; //observed value - see if it is extreme relative to permutations int nNotZero = 0; int nGreater = 0; int nEqual = 0; //observation in first volume flt f32v0 = f32[vi]; for (int v = 0; v < nvol; v++) { if (f32[vi] != 0) nNotZero++; if (f32[vi] == f32v0) nEqual++; if (f32[vi] >= obs) nGreater++; vi += nvox3D; } if (op == pval1) { //if (nEqual == nvol) // o32[i] = 0.0; //else 
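				/*
				 * For op == pval1 the statement below computes an uncorrected
				 * permutation p-value per voxel: the fraction of volumes
				 * (permutations, with the observation itself in volume 0) whose
				 * value meets or exceeds the observed value,
				 *   p = #(v : f32[v] >= f32[0]) / nvol
				 * so p is never exactly 0. The pval01 branch that follows instead
				 * divides by the count of non-zero permutations and special-cases
				 * all-equal series and zero observations.
				 */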
o32[i] = (double)nGreater / (double)nvol; } else { if (nEqual == nvol) o32[i] = 0.0; else if (obs == 0) o32[i] = 1.0; else //nZero must be at least 1: the observed data is not zero o32[i] = (double)nGreater / (double)(nNotZero); } } //for i nim->nvox = nvox3D; nim->ndim = 3; nim->nt = 1; //nim->dim[0] = 3; //nim->dim[4] = 1; //free(nim->data); _mm_free(nim->data); nim->data = (void *)o32; } else if (op == cpval1) { int nvox3D = nim->nx * nim->ny * nim->nz; int nvol = nim->nvox / nvox3D; if ((nvox3D * nvol) != nim->nvox) return 1; if (nvol <= 1) { printfx("permutation tests require 4D datasets.\n"); return 1; } //void *dat = (void *)calloc(1, nvox3D * sizeof(flt)); //flt *o32 = (flt *)dat; flt *o32= (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); memset(o32, 0, nvox3D * sizeof(flt)); //zero array flt *vmax = (flt *)_mm_malloc(nvol * sizeof(flt), 64); #pragma omp parallel for for (int v = 1; v < nvol; v++) { //find maximum for each entire volume (excepted observed volume 0) size_t vo = v * nvox3D; flt mx = f32[vo]; for (int i = 0; i < nvox3D; i++) mx = MAX(mx, f32[vo + i]); vmax[v] = mx; //printf("%d %g\n", v, mx); } #pragma omp parallel for for (int i = 0; i < nvox3D; i++) { flt obs = f32[i]; //observed value - see if it is extreme relative to permutations int nGreater = 1; //count observation for (int v = 1; v < nvol; v++) if (vmax[v] >= obs) nGreater++; o32[i] = (double)nGreater / (double)nvol; } //for i _mm_free(vmax); nim->nvox = nvox3D; nim->ndim = 3; nim->nt = 1; //nim->dim[0] = 3; //nim->dim[4] = 1; //free(nim->data); //nim->data = dat; _mm_free(nim->data); nim->data = (void *)o32; } else { printfx("nifti_unary: Unsupported operation\n"); return 1; } return 0; } //nifti_unary() staticx int nifti_thrp(nifti_image *nim, double v, enum eOp op) { // -thrp: use following percentage (0-100) of ROBUST RANGE to threshold current image (zero anything below the number) // -thrP: use following percentage (0-100) of ROBUST RANGE of non-zero voxels and threshold below // -uthrp : use following percentage (0-100) of ROBUST RANGE to upper-threshold current image (zero anything above the number) // -uthrP : use following percentage (0-100) of ROBUST RANGE of non-zero voxels and threshold above if ((v < 0.0) || (v > 100.0)) { printfx("nifti_thrp: threshold should be between 0..100\n"); return 1; } flt pct2, pct98; int ignoreZeroVoxels = 0; if ((op == thrP) || (op == uthrP)) ignoreZeroVoxels = 1; if (nifti_robust_range(nim, &pct2, &pct98, ignoreZeroVoxels) != 0) return 1; flt thresh = pct2 + ((v / 100.0) * (pct98 - pct2)); int modifyBrightVoxels = 0; flt newIntensity = 0.0; if ((op == clamp) || (op == uclamp)) newIntensity = thresh; if ((op == uthrp) || (op == uthrP) || (op == uclamp)) modifyBrightVoxels = 1; nifti_thr(nim, thresh, modifyBrightVoxels, newIntensity); return 0; } //nifti_thrp() #ifndef USING_WASM staticx int nifti_roc(nifti_image *nim, double fpThresh, const char *foutfile, const char *fnoise, const char *ftruth) { if (nim->datatype != DT_CALC) return 1; //(nim, thresh, argv[outfile], fnoise, argv[truth]); //fslmaths appears to ignore voxels on edge of image, and will crash with small images: // error: sort(): given object has non-finite elements //therefore, there is a margin ("border") around the volume int border = 5; //in voxels int mindim = border + border + 1; //e.g. 
minimum size has one voxel surrounded by border on each side if ((nim->nx < mindim) || (nim->ny < mindim) || (nim->nz < mindim)) { printfx("volume too small for ROC analyses\n"); return 1; } if (nim->nvox > (nim->nx * nim->ny * nim->nz)) { printfx("ROC input should be 3D image (not 4D)\n"); //fslmaths seg faults return 1; } if ((fpThresh <= 0.0) || (fpThresh >= 1.0)) { printfx("ROC false-positive threshold should be between 0 and 1, not '%g'\n", fpThresh); return 1; } nifti_image *nimTrue = nifti_image_read2(ftruth, 1); if (!nimTrue) { printfx("** failed to read NIfTI image from '%s'\n", ftruth); exit(2); } if ((nim->nx != nimTrue->nx) || (nim->ny != nimTrue->ny) || (nim->nz != nimTrue->nz)) { printfx("** Truth image is the wrong size %" PRId64 "x%" PRId64 "x%" PRId64 " vs %" PRId64 "x%" PRId64 "x%" PRId64 "\n", nim->nx, nim->ny, nim->nz, nimTrue->nx, nimTrue->ny, nimTrue->nz); nifti_image_free(nimTrue); exit(1); } if (nimTrue->nvox > (nimTrue->nx * nimTrue->ny * nimTrue->nz)) { printfx("ROC truth should be 3D image (not 4D)\n"); //fslmaths seg faults return 1; } nifti_image *nimNoise = NULL; //count number of tests //If the truth image contains negative voxels these get excluded from all calculations int nTest = 0; int nTrue = 0; size_t i = 0; flt *imgTrue = (flt *)nimTrue->data; flt *imgObs = (flt *)nim->data; for (int z = 0; z < nim->nz; z++) for (int y = 0; y < nim->ny; y++) for (int x = 0; x < nim->nx; x++) { if ((imgTrue[i] >= 0) && (x >= border) && (y >= border) && (z >= border) && (x < (nim->nx - border)) && (y < (nim->ny - border)) && (z < (nim->nz - border))) { nTest++; if (imgTrue[i] > 0) nTrue++; } i++; } if (nTest < 1) { printfx("** All truth voxels inside border are negative\n"); exit(1); } //printf("%d %d = %d\n", nTrue, nFalse, nTest); if (nTest == nTrue) printfx("Warning: All truth voxels inside border are the same (all true or all false)\n"); struct sortIdx *k = (struct sortIdx *)_mm_malloc(nTest * sizeof(struct sortIdx), 64); //load the data nTest = 0; i = 0; for (int z = 0; z < nim->nz; z++) for (int y = 0; y < nim->ny; y++) for (int x = 0; x < nim->nx; x++) { if ((imgTrue[i] >= 0) && (x >= border) && (y >= border) && (z >= border) && (x < (nim->nx - border)) && (y < (nim->ny - border)) && (z < (nim->nz - border))) { k[nTest].val = imgObs[i]; k[nTest].idx = imgTrue[i] > 0; nTest++; } i++; } qsort(k, nTest, sizeof(struct sortIdx), compare); //for (int v = 0; v < nvol; v++ ) // f32[ k[v].idx ] = v + 1; //printf("%d tests, intensity range %g..%g\n", nTest, k[0].val, k[nTest-1].val); FILE *txt = fopen(foutfile, "w+"); flt threshold = k[nTest - 1].val; //maximum observed intensity int bins = 1000; //step size: how often are results reported flt step = (threshold - k[0].val) / bins; //[max-min]/bins int fp = 0; int tp = 0; if (fnoise != NULL) { nimNoise = nifti_image_read2(fnoise, 1); if ((nim->nx != nimNoise->nx) || (nim->ny != nimNoise->ny) || (nim->nz != nimNoise->nz)) { printfx("** Noise image is the wrong size %" PRId64 "x%" PRId64 "x%" PRId64 " vs %" PRId64 "x%" PRId64 "x%" PRId64 "\n", nim->nx, nim->ny, nim->nz, nimNoise->nx, nimNoise->ny, nimNoise->nz); nifti_image_free(nimTrue); nifti_image_free(nimNoise); exit(1); } //Matlab script roc.m generates samples you can process with fslmaths.\ // The fslmaths text file includes two additional columns of output not described by the help documentation // Appears to find maximum signal in each noise volume, regardless of whether it is a hit or false alarm. 
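	/*
	 * Aside: the noise branch below is a max-statistic familywise-error
	 * calibration -- each noise volume contributes only its single largest
	 * value inside the border, and the sorted maxima serve as the null
	 * distribution. Sketch of the corrected p-value for one observation
	 * (mx holds the per-volume noise maxima, any order):
	 *
	 *   static double p_fwe(const flt *mx, int nvol, flt obs) {
	 *       int n = 0;
	 *       for (int v = 0; v < nvol; v++)
	 *           if (mx[v] >= obs) n++; //noise met or beat the observation
	 *       return (double)n / (double)nvol;
	 *   }
	 */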
int nvox3D = nim->nx * nim->ny * nim->nz; int nvol = nimNoise->nvox / nvox3D; if (nvol < 10) printfx("Warning: Noise images should include many volumes for estimating familywise error/\n"); flt *imgNoise = (flt *)nimNoise->data; flt *mxVox = (flt *)_mm_malloc(nvol * sizeof(flt), 64); for (int v = 0; v < nvol; v++) { //for each volume mxVox[v] = -INFINITY; size_t vo = v * nvox3D; size_t vi = 0; for (int z = 0; z < nim->nz; z++) for (int y = 0; y < nim->ny; y++) for (int x = 0; x < nim->nx; x++) { if ((imgTrue[vi] >= 0) && (x >= border) && (y >= border) && (z >= border) && (x < (nim->nx - border)) && (y < (nim->ny - border)) && (z < (nim->nz - border))) mxVox[v] = MAX(mxVox[v], imgNoise[vo + vi]); vi++; } } //for each volume nifti_image_free(nimNoise); qsort(mxVox, nvol, sizeof(flt), compare); int idx = nTest - 1; flt mxNoise = mxVox[nvol - 1]; while ((idx >= 1) && (k[idx].val > mxNoise)) { tp++; idx--; if ((k[idx].val != k[idx - 1].val) && (k[idx].val <= threshold)) { fprintf(txt, "%g %g %g\n", (double)fp / (double)nvol, (double)tp / (double)nTrue, threshold); threshold = threshold - step; //delay next report } } //more significant than any noise... int fpThreshInt = round(fpThresh * nvol); //stop when number of false positives exceed this for (int i = nvol - 1; i >= 1; i--) { fp++; //false alarm while ((idx >= 1) && (k[idx].val >= mxVox[i])) { tp++; idx--; if ((k[idx].val != k[idx - 1].val) && (k[idx].val <= threshold)) { fprintf(txt, "%g %g %g\n", (double)fp / (double)nvol, (double)tp / (double)nTrue, threshold); threshold = threshold - step; //delay next report } } //at least as significant as current noise if ((fp > fpThreshInt) || ((k[i].val != k[i - 1].val) && (k[i].val <= threshold))) { //printf("%g %g %g\n", (double)fp/(double)nFalse, (double)tp/(double)nTrue, threshold); fprintf(txt, "%g %g %g\n", (double)fp / (double)nvol, (double)tp / (double)nTrue, threshold); threshold = threshold - step; //delay next report } if (fp > fpThreshInt) break; } //inspect all tests... _mm_free(mxVox); exit(1); } else { //if noise image else infer FP/TP from input image int nFalse = nTest - nTrue; int fpThreshInt = ceil(fpThresh * nFalse); //stop when number of false positives exceed this for (int i = nTest - 1; i >= 1; i--) { if (k[i].idx == 0) fp++; //false alarm else tp++; //hit if ((fp > fpThreshInt) || ((k[i].val != k[i - 1].val) && (k[i].val <= threshold))) { //printf("%g %g %g\n", (double)fp/(double)nFalse, (double)tp/(double)nTrue, threshold); fprintf(txt, "%g %g %g\n", (double)fp / (double)nFalse, (double)tp / (double)nTrue, threshold); threshold = threshold - step; //delay next report } if (fp > fpThreshInt) break; } //inspect all tests... } //if noise else... 
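	/*
	 * Both branches above emit the same three-column text format:
	 *   column 1: false-positive rate at the current threshold
	 *   column 2: true-positive rate at the current threshold
	 *   column 3: the threshold itself
	 * Rows are throttled to roughly `bins` (1000) threshold steps across the
	 * intensity range, and the scan stops once the false-positive count
	 * exceeds fpThresh's share of the null observations.
	 */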
fclose(txt); _mm_free(k); nifti_image_free(nimTrue); return 0; } staticx int nifti_binary(nifti_image *nim, char *fin, enum eOp op) { if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) { printfx("nifti_binary: Unsupported datatype %d\n", nim->datatype); return 1; } nifti_image *nim2 = nifti_image_read2(fin, 1); if (!nim2) { printfx("** failed to read NIfTI image from '%s'\n", fin); return 2; } if ((nim->nx != nim2->nx) || (nim->ny != nim2->ny) || (nim->nz != nim2->nz)) { printfx("** Attempted to process images of different sizes %" PRId64 "x%" PRId64 "x%" PRId64 " vs %" PRId64 "x%" PRId64 "x%" PRId64 "\n", nim->nx, nim->ny, nim->nz, nim2->nx, nim2->ny, nim2->nz); nifti_image_free(nim2); return 1; } if (max_displacement_mm(nim, nim2) > 0.5) { //fslmaths appears to use mm not voxel difference to determine alignment, threshold ~0.5mm printfx("WARNING:: Inconsistent orientations for individual images in pipeline! (%gmm)\n", max_displacement_mm(nim, nim2)); printfx(" Will use voxel-based orientation which is probably incorrect - *PLEASE CHECK*!\n"); } in_hdr ihdr = set_input_hdr(nim2); if (nifti_image_change_datatype(nim2, nim->datatype, &ihdr) != 0) { nifti_image_free(nim2); return 1; } flt *imga = (flt *)nim->data; flt *imgb = (flt *)nim2->data; int nvox3D = nim->nx * nim->ny * nim->nz; int nvola = nim->nvox / nvox3D; int nvolb = nim2->nvox / nvox3D; int rem0 = 0; int swap4D = 0; //if 1: input nim was 3D, but nim2 is 4D: output will be 4D if ((nvolb > 1) && (nim->nvox != nim2->nvox) && ((op == uthr) || (op == thr))) { //"niimath 3D -uthr 4D out" only uses 1st volume of 4D, only one volume out nvolb = 1; //fslmaths printfx("threshold operation expects 3D mask\n"); //fslmaths makes not modification to image if (op == uthr) //strictly for fslmaths compatibility - makes no sense for (size_t i = 0; i < nim->nvox; i++) imga[i] = 0; nifti_image_free(nim2); return 0; } else if (nim->nvox != nim2->nvox) { //situation where one input is 3D and the other is 4D if ((nvola != 1) && ((nvolb != 1))) { printfx("nifti_binary: both images must have the same number of volumes, or one must have a single volume (%d and %d)\n", nvola, nvolb); nifti_image_free(nim2); return 1; } if (nvola == 1) { imgb = (flt *)nim->data; imga = (flt *)nim2->data; swap4D = 1; nvolb = nim->nvox / nvox3D; nvola = nim2->nvox / nvox3D; } } //make it so imga/novla >= imgb/nvolb for (int v = 0; v < nvola; v++) { // int va = v * nvox3D; //start of volume for image A int vb = (v % nvolb) * nvox3D; //start of volume for image B if (op == add) { for (int i = 0; i < nvox3D; i++) imga[va + i] += imgb[vb + i]; } else if (op == sub) { if (swap4D) { for (int i = 0; i < nvox3D; i++) { imga[va + i] = imgb[vb + i] - imga[va + i]; //printf(">>[%d]/[%d] %g/%g = %g\n",vb+i, va+i, imgb[vb+i], x, imga[va+i]); } } else { for (int i = 0; i < nvox3D; i++) { //printf("[%d]/[%d] %g/%g\n", va+i, vb+i, imga[va+i], imga[vb+i]); imga[va + i] = imga[va + i] - imgb[vb + i]; } } } else if (op == mul) { for (int i = 0; i < nvox3D; i++) imga[va + i] *= imgb[vb + i]; } else if (op == max) { for (int i = 0; i < nvox3D; i++) imga[va + i] = MAX(imga[va + i], imgb[vb + i]); } else if (op == min) { for (int i = 0; i < nvox3D; i++) imga[va + i] = MIN(imga[va + i], imgb[vb + i]); } else if (op == thr) { //thr : use following number to threshold current image (zero anything below the number) for (int i = 0; i < nvox3D; i++) if (imga[va + i] < imgb[vb + i]) imga[va + i] = 0; } else if (op == uthr) { //uthr : use following number to upper-threshold current image (zero 
anything above the number) for (int i = 0; i < nvox3D; i++) if (imga[va + i] > imgb[vb + i]) imga[va + i] = 0; } else if (op == mas) { if (swap4D) { for (int i = 0; i < nvox3D; i++) { if (imga[va + i] > 0) imga[va + i] = imgb[vb + i]; else imga[va + i] = 0; } } else { for (int i = 0; i < nvox3D; i++) if (imgb[vb + i] <= 0) imga[va + i] = 0; } } else if (op == divX) { if (swap4D) { for (int i = 0; i < nvox3D; i++) { //flt x = imga[va+i]; if (imga[va + i] != 0.0f) imga[va + i] = imgb[vb + i] / imga[va + i]; //printf(">>[%d]/[%d] %g/%g = %g\n",vb+i, va+i, imgb[vb+i], x, imga[va+i]); } } else { for (int i = 0; i < nvox3D; i++) { //printf("[%d]/[%d] %g/%g\n", va+i, vb+i, imga[va+i], imga[vb+i]); if (imgb[vb + i] == 0.0f) imga[va + i] = 0.0f; else imga[va + i] = imga[va + i] / imgb[vb + i]; } } } else if (op == mod) { //afni mod function, divide by zero yields 0 (unlike Matlab, see remtest.m) //fractional remainder: if (swap4D) { for (int i = 0; i < nvox3D; i++) { //printf("!>[%d]/[%d] %g/%g = %g\n",vb+i, va+i, imgb[vb+i], imga[va+i], fmod(trunc(imgb[vb+i]), trunc(imga[va+i])) ); if (imga[va + i] != 0.0f) imga[va + i] = fmod(imgb[vb + i], imga[va + i]); else { rem0 = 1; imga[va + i] = 0; //imgb[vb+i]; } } } else { for (int i = 0; i < nvox3D; i++) { //printf("?>[%d]/[%d] %g/%g = %g : %g\n", va+i, vb+i, imga[va+i], imgb[vb+i], fmod(imga[va+i], imgb[vb+i]), fmod(trunc(imga[va+i]), trunc(imgb[vb+i])) ); if (imgb[vb + i] != 0.0f) //imga[va+i] = round(fmod(imga[va+i], imgb[vb+i])); imga[va + i] = fmod(imga[va + i], imgb[vb + i]); else { rem0 = 1; imga[va + i] = 0; } } } } else if (op == rem) { //fmod _rem //fractional remainder: if (swap4D) { for (int i = 0; i < nvox3D; i++) { //printf("!>[%d]/[%d] %g/%g = %g\n",vb+i, va+i, imgb[vb+i], imga[va+i], fmod(trunc(imgb[vb+i]), trunc(imga[va+i])) ); if (trunc(imga[va + i]) != 0.0f) imga[va + i] = fmod(trunc(imgb[vb + i]), trunc(imga[va + i])); else { rem0 = 1; imga[va + i] = imgb[vb + i]; } } } else { for (int i = 0; i < nvox3D; i++) { //printf("?>[%d]/[%d] %g/%g = %g : %g\n", va+i, vb+i, imga[va+i], imgb[vb+i], fmod(imga[va+i], imgb[vb+i]), fmod(trunc(imga[va+i]), trunc(imgb[vb+i])) ); if (trunc(imgb[vb + i]) != 0.0f) //imga[va+i] = round(fmod(imga[va+i], imgb[vb+i])); imga[va + i] = fmod(trunc(imga[va + i]), trunc(imgb[vb + i])); else rem0 = 1; } } } else { printfx("nifti_binary: unsupported operation %d\n", op); nifti_image_free(nim2); return 1; } } if (swap4D) { //if 1: input nim was 3D, but nim2 is 4D: output will be 4D nim->nvox = nim2->nvox; nim->ndim = nim2->ndim; nim->nt = nim2->nt; nim->nu = nim2->nu; nim->nv = nim2->nv; nim->nw = nim2->nw; //for (int i = 4; i < 8; i++) { //nim->dim[i] = nim2->dim[i]; //nim->pixdim[i] = nim2->pixdim[i]; //} nim->dt = nim2->dt; nim->du = nim2->du; nim->dv = nim2->dv; nim->dw = nim2->dw; free(nim->data); nim->data = nim2->data; nim2->data = NULL; } nifti_image_free(nim2); if (rem0) { printfx("Warning -rem image included zeros (fslmaths exception)\n"); return 0; } return 0; } // nifti_binary() staticx void nifti_compare(nifti_image *nim, char *fin) { if (nim->nvox < 1) exit(1); if (nim->datatype != DT_CALC) { printfx("nifti_compare: Unsupported datatype %d\n", nim->datatype); exit(1); } nifti_image *nim2 = nifti_image_read2(fin, 1); if (!nim2) { printfx("** failed to read NIfTI image from '%s'\n", fin); exit(2); } if ((nim->nx != nim2->nx) || (nim->ny != nim2->ny) || (nim->nz != nim2->nz)) { printfx("** Attempted to process images of different sizes %" PRId64 "x%" PRId64 "x%" PRId64 "vs %" PRId64 "x%" PRId64 "x%" 
PRId64 "\n", nim->nx, nim->ny, nim->nz, nim2->nx, nim2->ny, nim2->nz); nifti_image_free(nim2); exit(1); } if (nim->nvox != nim2->nvox) { printfx(" Number of volumes differ\n"); nifti_image_free(nim2); exit(1); } if (max_displacement_mm(nim, nim2) > 0.5) { //fslmaths appears to use mm not voxel difference to determine alignment, threshold ~0.5mm printfx("WARNING:: Inconsistent orientations for individual images in pipeline! (%gmm)\n", max_displacement_mm(nim, nim2)); printfx(" Will use voxel-based orientation which is probably incorrect - *PLEASE CHECK*!\n"); } in_hdr ihdr = set_input_hdr(nim2); if (nifti_image_change_datatype(nim2, nim->datatype, &ihdr) != 0) { nifti_image_free(nim2); exit(1); } flt *img = (flt *)nim->data; flt *img2 = (flt *)nim2->data; size_t differentVox = nim->nvox; double sum = 0.0; double sum2 = 0.0; double maxDiff = 0.0; size_t nNotNan = 0; size_t nDifferent = 0; for (size_t i = 0; i < nim->nvox; i++) { if (!essentiallyEqual(img[i], img2[i])) { if (fabs(img[i] - img2[i]) > maxDiff) { differentVox = i; maxDiff = fabs(img[i] - img2[i]); } nDifferent++; } if (isnan(img[i]) || isnan(img[i])) continue; nNotNan++; sum += img[i]; sum2 += img2[i]; } if (differentVox >= nim->nvox) { //printfx("Images essentially equal\n"); */ nifti_image_free(nim2); exit(0); } //second pass - one pass correlation is inaccurate or slow nNotNan = MAX(1, nNotNan); flt mn = INFINITY; //do not set to item 1, in case it is nan flt mx = -INFINITY; flt sd = 0.0; flt ave = sum / nNotNan; flt mn2 = INFINITY; flt mx2 = -INFINITY; flt sd2 = 0.0; flt ave2 = sum2 / nNotNan; //for i := 0 to (n - 1) do // sd := sd + sqr(y[i] - mn); //sd := sqrt(sd / (n - 1)); double sumDx = 0.0; for (size_t i = 0; i < nim->nvox; i++) { if (isnan(img[i]) || isnan(img[i])) continue; mn = MIN(mn, img[i]); mx = MAX(mx, img[i]); sd += sqr(img[i] - ave); mn2 = MIN(mn2, img2[i]); mx2 = MAX(mx2, img2[i]); sd2 += sqr(img2[i] - ave2); sumDx += (img[i] - ave) * (img2[i] - ave2); } double r = 0.0; nNotNan = MAX(2, nNotNan); if (nim->nvox < 2) { sd = 0.0; sd2 = 0.0; } else { sd = sqrt(sd / (nNotNan - 1)); //if (sd != 0.0) sd = 1.0/sd; sd2 = sqrt(sd2 / (nNotNan - 1)); //if (sd2 != 0.0) sd2 = 1.0/sd2; if ((sd * sd2) != 0.0) r = sumDx / (sd * sd2 * (nNotNan - 1)); //r = r / (nim->nvox - 1); } r = MIN(r, 1.0); r = MAX(r, -1.0); printfx("Images Differ: Correlation r = %g, identical voxels %d%%\n", r, (int)floor(100.0 * (1.0 - (double)nDifferent / (double)nim->nvox))); if (nNotNan < nim->nvox) { printfx(" %" PRId64 " voxels have a NaN in at least one image.\n", nim->nvox - nNotNan); printfx(" Descriptives consider voxels that are numeric in both images.\n"); } printfx(" Most different voxel %g vs %g (difference %g)\n", img[differentVox], img2[differentVox], maxDiff); int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1); int nVol = nim->nvox / nvox3D; size_t vx[4]; vx[3] = differentVox / nvox3D; vx[2] = (differentVox / (nim->nx * nim->ny)) % nim->nz; vx[1] = (differentVox / nim->nx) % nim->ny; vx[0] = differentVox % nim->nx; printfx(" Most different voxel location %zux%zux%zu volume %zu\n", vx[0], vx[1], vx[2], vx[3]); printfx("Image 1 Descriptives\n"); printfx(" Range: %g..%g Mean %g StDev %g\n", mn, mx, ave, sd); printfx("Image 2 Descriptives\n"); printfx(" Range: %g..%g Mean %g StDev %g\n", mn2, mx2, ave2, sd2); //V1 comparison - EXIT_SUCCESS if all vectors are parallel (for DWI up vector [1 0 0] has same direction as down [-1 0 0]) if (nVol != 3) { nifti_image_free(nim2); exit(1); } int allParallel = 1; //niimath ft_V1 -compare nt_V1 for 
(size_t i = 0; i < nvox3D; i++) { //check angle of two vectors... assume unit vectors flt v[3]; //vector, image 1 v[0] = img[i]; v[1] = img[i + nvox3D]; v[2] = img[i + nvox3D + nvox3D]; flt v2[3]; //vector, image 2 v2[0] = img2[i]; v2[1] = img2[i + nvox3D]; v2[2] = img2[i + nvox3D + nvox3D]; flt x[3]; //cross product x[0] = (v[1] * v2[2]) - (v[2] * v2[1]); x[1] = (v[2] * v2[0]) - (v[0] * v2[2]); x[2] = (v[0] * v2[1]) - (v[1] * v2[0]); flt len = sqrt((x[0] * x[0]) + (x[1] * x[1]) + (x[2] * x[2])); if (len > 0.01) { allParallel = 0; //printfx("[%g %g %g] vs [%g %g %g]\n", v[0],v[1], v[2], v2[0], v2[1], v2[2]); break; } } if (allParallel) { printfx("Despite polarity differences, all vectors are parallel.\n"); nifti_image_free(nim2); exit(0); } nifti_image_free(nim2); exit(1); } //nifti_compare() #ifdef DT32 int main32X(int argc, char *argv[]) { #else int main64X(int argc, char *argv[]) { #endif char *fin = NULL, *fout = NULL; //fslmaths in.nii out.nii changes datatype to flt, here we retain (similar to earlier versions of fslmaths) //fslmsths in.nii -rem 10 out.nii uses integer modulus not fmod //fslmaths robust range not fully described, this emulation is close //fslmaths ing/inm are listed as "unary" but should be listed as binary if (argc < 3) { printfx("Fatal: show_help shown by wrapper function\n"); exit(1); } int dtCalc = DT_FLOAT32; //data type for calculation int dtOut = DT_FLOAT32; //data type for calculation int ac = 1; // '-dt' sets datatype for calculations if (!strcmp(argv[ac], "-dt")) { if (!strcmp(argv[ac + 1], "double")) { dtCalc = DT_FLOAT64; } else if (strcmp(argv[ac + 1], "float")) { printfx("'-dt' error: only float or double calculations supported\n"); return 1; } ac += 2; if (argc < (ac + 2)) return 1; //insufficient arguments remain } //special case: pass through // no calculation, simple pass through copy, e.g. "niimaths in.nii out.nii.gz" // note fslmaths would save as flt type... but lossless conversion in native format is faster // note here we use nifti_image_read not nifti_image_read2 to preserve cal_min, cal_max if (ac + 2 == argc) { fin = argv[ac]; // no string copy, just pointer assignment ac++; nifti_image *nim = nifti_image_read(fin, 1); fout = argv[ac]; // no string copy, just pointer assignment ac++; if (nifti_set_filenames(nim, fout, 0, 1)) return 1; nifti_save(nim, ""); //nifti_image_write( nim ); nifti_image_free(nim); return 0; } //end pass through // next argument is input file fin = argv[ac]; // no string copy, just pointer assignment ac++; //clock_t startTime = clock(); nifti_image *nim = nifti_image_read2(fin, 1); if (!nim) { printfx("** failed to read NIfTI image from '%s'\n", fin); return 2; } //printf("read time: %ld ms\n", timediff(startTime, clock())); in_hdr ihdr = set_input_hdr(nim); //check for "-odt" must be last couplet if (!strcmp(argv[argc - 2], "-odt")) { if (!strcmp(argv[argc - 1], "double")) { dtOut = DT_FLOAT64; } else if (!strcmp(argv[argc - 1], "flt")) { dtOut = DT_FLOAT32; } else if (!strcmp(argv[argc - 1], "int")) { dtOut = DT_INT32; } else if (!strcmp(argv[argc - 1], "short")) { dtOut = DT_INT16; } else if (!strcmp(argv[argc - 1], "ushort")) { dtOut = DT_UINT16; } else if (!strcmp(argv[argc - 1], "char")) { dtOut = DT_UINT8; } else if (!strcmp(argv[argc - 1], "input")) { dtOut = nim->datatype; //ihdr.datatype; //! 
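// usage sketch (hypothetical filenames): "niimath in.nii -dt double -sqrt -odt input out.nii"
// computes in float64 ('-dt double' above) yet saves using the input's on-disk datatype ('-odt input' here)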
} else { printfx("Error: Unknown datatype '%s' - Possible datatypes are: char short ushort int flt double input\n", argv[argc - 1]); return 2; } argc = argc - 2; } //odt //convert data to calculation type (-dt) if (nifti_image_change_datatype(nim, dtCalc, &ihdr) != 0) return 1; //check output filename, e.g does file exist fout = argv[argc - 1]; // no string copy, just pointer assignment if (nifti_set_filenames(nim, fout, 0, 1)) return 1; argc = argc - 1; #if defined(_OPENMP) const int maxNumThreads = omp_get_max_threads(); const char *key = "AFNI_COMPRESSOR"; char *value; value = getenv(key); //export AFNI_COMPRESSOR=PIGZ char pigzKey[5] = "PIGZ"; if ((value != NULL) && (strstr(value, pigzKey))) { omp_set_num_threads(maxNumThreads); printfx("Using %d threads\n", maxNumThreads); } else { omp_set_num_threads(1); printfx("Single threaded\n"); } #endif //read operations int nkernel = 0; //number of voxels in kernel int *kernel = make_kernel(nim, &nkernel, 3, 3, 3); char *end; int ok = 0; while (ac < argc) { enum eOp op = unknown; if (!strcmp(argv[ac], "-add")) op = add; if (!strcmp(argv[ac], "-sub")) op = sub; if (!strcmp(argv[ac], "-mul")) op = mul; if (!strcmp(argv[ac], "-div")) op = divX; if (!strcmp(argv[ac], "-rem")) op = rem; if (!strcmp(argv[ac], "-mod")) op = mod; if (!strcmp(argv[ac], "-mas")) op = mas; if (!strcmp(argv[ac], "-thr")) op = thr; if (!strcmp(argv[ac], "-thrp")) op = thrp; if (!strcmp(argv[ac], "-thrP")) op = thrP; if (!strcmp(argv[ac], "-uthr")) op = uthr; if (!strcmp(argv[ac], "-uthrp")) op = uthrp; if (!strcmp(argv[ac], "-uthrP")) op = uthrP; if (!strcmp(argv[ac], "-clamp")) op = clamp; if (!strcmp(argv[ac], "-uclamp")) op = uclamp; if (!strcmp(argv[ac], "-max")) op = max; if (!strcmp(argv[ac], "-min")) op = min; if (!strcmp(argv[ac], "-max")) op = max; //if ( ! strcmp(argv[ac], "-addtozero") ) op = addtozero; //variation of mas //if ( ! strcmp(argv[ac], "-overadd") ) op = overadd; //variation of mas if (!strcmp(argv[ac], "-power")) op = power; if (!strcmp(argv[ac], "-seed")) op = seed; //if ( ! strcmp(argv[ac], "-restart") ) op = restart; //if ( ! 
strcmp(argv[ac], "-save") ) op = save; if (!strcmp(argv[ac], "-inm")) op = inm; if (!strcmp(argv[ac], "-ing")) op = ing; if (!strcmp(argv[ac], "-s")) op = smth; if (!strcmp(argv[ac], "-exp")) op = exp1; if (!strcmp(argv[ac], "-ceil")) op = ceil1; if (!strcmp(argv[ac], "-round")) op = round1; if (!strcmp(argv[ac], "-floor")) op = floor1; if (!strcmp(argv[ac], "-trunc")) op = trunc1; if (!strcmp(argv[ac], "-log")) op = log1; if (!strcmp(argv[ac], "-sin")) op = sin1; if (!strcmp(argv[ac], "-cos")) op = cos1; if (!strcmp(argv[ac], "-tan")) op = tan1; if (!strcmp(argv[ac], "-asin")) op = asin1; if (!strcmp(argv[ac], "-acos")) op = acos1; if (!strcmp(argv[ac], "-atan")) op = atan1; if (!strcmp(argv[ac], "-sqr")) op = sqr1; if (!strcmp(argv[ac], "-sqrt")) op = sqrt1; if (!strcmp(argv[ac], "-recip")) op = recip1; if (!strcmp(argv[ac], "-abs")) op = abs1; if (!strcmp(argv[ac], "-bin")) op = bin1; if (!strcmp(argv[ac], "-binv")) op = binv1; if (!strcmp(argv[ac], "-edge")) op = edge1; if (!strcmp(argv[ac], "-index")) op = index1; if (!strcmp(argv[ac], "-nan")) op = nan1; if (!strcmp(argv[ac], "-nanm")) op = nanm1; if (!strcmp(argv[ac], "-rand")) op = rand1; if (!strcmp(argv[ac], "-randn")) op = randn1; if (!strcmp(argv[ac], "-range")) op = range1; if (!strcmp(argv[ac], "-rank")) op = rank1; if (!strcmp(argv[ac], "-ranknorm")) op = ranknorm1; if (!strcmp(argv[ac], "-ztop")) op = ztop1; if (!strcmp(argv[ac], "-ptoz")) op = ptoz1; if (!strcmp(argv[ac], "-pval")) op = pval1; if (!strcmp(argv[ac], "-pval0")) op = pval01; if (!strcmp(argv[ac], "-cpval")) op = cpval1; //kernel operations if (!strcmp(argv[ac], "-dilM")) op = dilMk; if (!strcmp(argv[ac], "-dilD")) op = dilDk; if (!strcmp(argv[ac], "-dilF")) op = dilFk; if (!strcmp(argv[ac], "-dilall")) op = dilallk; if (!strcmp(argv[ac], "-ero")) op = erok; if (!strcmp(argv[ac], "-eroF")) op = eroFk; if (!strcmp(argv[ac], "-fmedian")) op = fmediank; if (!strcmp(argv[ac], "-fmean")) op = fmeank; if (!strcmp(argv[ac], "-fmeanu")) op = fmeanuk; if (!strcmp(argv[ac], "-p")) { ac++; #if defined(_OPENMP) int nProcessors = atoi(argv[ac]); if (nProcessors < 1) { omp_set_num_threads(maxNumThreads); printfx("Using %d threads\n", maxNumThreads); } else { omp_set_num_threads(nProcessors); printfx("Using %d threads\n", nProcessors); } #else printfx("Warning: not compiled for OpenMP: '-p' ignored\n"); #endif } else //All Dimensionality reduction operations names begin with Capital letter, no other commands do! 
if ((strlen(argv[ac]) > 4) && (argv[ac][0] == '-') && (isupper(argv[ac][1]))) { //isupper int dim = 0; switch (argv[ac][1]) { case 'X': // dim = 1; break; case 'Y': // code to be executed if n = 2; dim = 2; break; case 'Z': // dim = 3; break; case 'T': // code to be executed if n = 2; dim = 4; break; } if (dim == 0) { printfx("Error: unknown dimensionality reduction operation: %s\n", argv[ac]); goto fail; } if (strstr(argv[ac], "mean")) ok = nifti_dim_reduce(nim, Tmean, dim, 0); else if (strstr(argv[ac], "std")) ok = nifti_dim_reduce(nim, Tstd, dim, 0); else if (strstr(argv[ac], "maxn")) ok = nifti_dim_reduce(nim, Tmaxn, dim, 0); //test maxn BEFORE max else if (strstr(argv[ac], "max")) ok = nifti_dim_reduce(nim, Tmax, dim, 0); else if (strstr(argv[ac], "min")) ok = nifti_dim_reduce(nim, Tmin, dim, 0); else if (strstr(argv[ac], "median")) ok = nifti_dim_reduce(nim, Tmedian, dim, 0); else if (strstr(argv[ac], "perc")) { ac++; int pct = atoi(argv[ac]); ok = nifti_dim_reduce(nim, Tperc, dim, pct); } else if (strstr(argv[ac], "ar1")) ok = nifti_dim_reduce(nim, Tar1, dim, 0); else { printfx("Error unknown dimensionality reduction operation: %s\n", argv[ac]); ok = 1; } } else if (!strcmp(argv[ac], "-roi")) { //int , int , int , int , int , int , int , int ) if ((argc - ac) < 8) { printfx("not enough arguments for '-roi'\n"); //start.size for 4 dimensions: user might forget volumes goto fail; } ac++; int xmin = atoi(argv[ac]); ac++; int xsize = atoi(argv[ac]); ac++; int ymin = atoi(argv[ac]); ac++; int ysize = atoi(argv[ac]); ac++; int zmin = atoi(argv[ac]); ac++; int zsize = atoi(argv[ac]); ac++; int tmin = atoi(argv[ac]); ac++; int tsize = atoi(argv[ac]); nifti_roi(nim, xmin, xsize, ymin, ysize, zmin, zsize, tmin, tsize); } else if (!strcmp(argv[ac], "-bptfm")) { ac++; double hp_sigma = strtod(argv[ac], &end); ac++; double lp_sigma = strtod(argv[ac], &end); ok = nifti_bptf(nim, hp_sigma, lp_sigma, 0); } else if (!strcmp(argv[ac], "-bptf")) { ac++; double hp_sigma = strtod(argv[ac], &end); ac++; double lp_sigma = strtod(argv[ac], &end); //ok = nifti_bptf(nim, hp_sigma, lp_sigma); ok = nifti_bptf(nim, hp_sigma, lp_sigma, 1); #ifdef bandpass } else if (!strcmp(argv[ac], "-bandpass")) { // niimath test4D -bandpass 0.08 0.008 0 c ac++; double lp_hz = strtod(argv[ac], &end); ac++; double hp_hz = strtod(argv[ac], &end); ac++; double TRsec = strtod(argv[ac], &end); ok = nifti_bandpass(nim, lp_hz, hp_hz, TRsec); #endif } else if (!strcmp(argv[ac], "-roc")) { //-roc <AROC-thresh> <outfile> [4Dnoiseonly] <truth> //-roc <AROC-thresh> <outfile> [4Dnoiseonly] <truth> ac++; double thresh = strtod(argv[ac], &end); ac++; int outfile = ac; char *fnoise = NULL; if (thresh > 0.0) { ac++; fnoise = argv[ac]; } ac++; int truth = ac; //ok = nifti_bptf(nim, hp_sigma, lp_sigma); ok = nifti_roc(nim, fabs(thresh), argv[outfile], fnoise, argv[truth]); if (ac >= argc) { printfx("Error: no output filename specified!\n"); //e.g. 
volume size might differ goto fail; } } else if (!strcmp(argv[ac], "-unsharp")) { ac++; double sigma = strtod(argv[ac], &end); ac++; double amount = strtod(argv[ac], &end); nifti_unsharp(nim, sigma, sigma, sigma, amount); } else if (strstr(argv[ac], "-otsu")) { ac ++; int mode = atoi(argv[ac]); ok = nifti_otsu(nim, mode, 1); } else if (strstr(argv[ac], "-dehaze")) { ac ++; int mode = atoi(argv[ac]); int zeroFill = 0; if (mode < 0) zeroFill = -1; mode = abs(mode); ok = nifti_otsu(nim, mode, zeroFill); #ifdef bwlabelx } else if (strstr(argv[ac], "-bwlabel")) { ac ++; int conn = atoi(argv[ac]); ok = bwlabel(nim, conn); #endif } else if (!strcmp(argv[ac], "-h2c")) ok = nifti_h2c(nim); else if (!strcmp(argv[ac], "-c2h")) ok = nifti_c2h(nim); else if (!strcmp(argv[ac], "-subsamp2")) ok = nifti_subsamp2(nim, 0); else if (!strcmp(argv[ac], "-subsamp2offc")) ok = nifti_subsamp2(nim, 1); else if (!strcmp(argv[ac], "-sobel_binary")) ok = nifti_sobel(nim, 1, 1); else if (!strcmp(argv[ac], "-sobel")) ok = nifti_sobel(nim, 1, 0); else if (!strcmp(argv[ac], "-demean")) ok = nifti_demean(nim); else if (!strcmp(argv[ac], "-detrend")) ok = nifti_detrend_linear(nim); else if (!strcmp(argv[ac], "-resize")) { ac++; double X = strtod(argv[ac], &end); ac++; double Y = strtod(argv[ac], &end); ac++; double Z = strtod(argv[ac], &end); ac++; int interp_method = atoi(argv[ac]); ok = nifti_resize(nim, X, Y, Z, interp_method); } else if (!strcmp(argv[ac], "-crop")) { ac++; int tmin = atoi(argv[ac]); ac++; int tsize = atoi(argv[ac]); ok = nifti_crop(nim, tmin, tsize); } else if (!strcmp(argv[ac], "--compare")) { //--function terminates without saving image ac++; nifti_compare(nim, argv[ac]); //always terminates } else if (!strcmp(argv[ac], "-edt")) ok = nifti_edt(nim); else if (!strcmp(argv[ac], "-fillh")) ok = nifti_fillh(nim, 0); else if (!strcmp(argv[ac], "-fillh26")) ok = nifti_fillh(nim, 1); else if (!strcmp(argv[ac], "-kernel")) { ac++; if (kernel != NULL) _mm_free(kernel); kernel = NULL; if (!strcmp(argv[ac], "3D")) kernel = make_kernel(nim, &nkernel, 3, 3, 3); if (!strcmp(argv[ac], "2D")) kernel = make_kernel(nim, &nkernel, 3, 3, 1); if (!strcmp(argv[ac], "boxv")) { ac++; int vx = atoi(argv[ac]); kernel = make_kernel(nim, &nkernel, vx, vx, vx); } if (!strcmp(argv[ac], "sphere")) { ac++; double mm = strtod(argv[ac], &end); kernel = make_kernel_sphere(nim, &nkernel, mm); } if (!strcmp(argv[ac], "file")) { ac++; kernel = make_kernel_file(nim, &nkernel, argv[ac]); } if (!strcmp(argv[ac], "gauss")) { ac++; double mm = strtod(argv[ac], &end); kernel = make_kernel_gauss(nim, &nkernel, mm); } if (!strcmp(argv[ac], "box")) { //all voxels in a cube of width <size> mm centered on target voxel"); ac++; double mm = strtod(argv[ac], &end); int vx = (2 * floor(mm / nim->dx)) + 1; int vy = (2 * floor(mm / nim->dy)) + 1; int vz = (2 * floor(mm / nim->dz)) + 1; kernel = make_kernel(nim, &nkernel, vx, vy, vz); } if (!strcmp(argv[ac], "boxv3")) { ac++; int vx = atoi(argv[ac]); ac++; int vy = atoi(argv[ac]); ac++; int vz = atoi(argv[ac]); kernel = make_kernel(nim, &nkernel, vx, vy, vz); } if (kernel == NULL) { printfx("Error: '-kernel' option failed.\n"); //e.g. 
volume size might differ ok = 1; } } else if (!strcmp(argv[ac], "-tensor_2lower")) { ok = nifti_tensor_2(nim, 0); } else if (!strcmp(argv[ac], "-tensor_2upper")) { ok = nifti_tensor_2(nim, 1); } else if (!strcmp(argv[ac], "-tensor_decomp")) { ok = nifti_tensor_decomp(nim, 1); } else if (!strcmp(argv[ac], "-tensor_decomp_lower")) { ok = nifti_tensor_decomp(nim, 0); } else if (!strcmp(argv[ac], "-save")) { ac++; char *fout2 = argv[ac]; if (nifti_set_filenames(nim, fout2, 1, 1)) ok = 1; else { nifti_save(nim, ""); //nifti_image_write( nim ); nifti_set_filenames(nim, fout, 1, 1); } } else if (!strcmp(argv[ac], "-restart")) { if (kernel != NULL) printfx("Warning: 'restart' resets the kernel\n"); //e.g. volume size might differ nifti_image_free(nim); if (kernel != NULL) _mm_free(kernel); kernel = NULL; ac++; nim = nifti_image_read(argv[ac], 1); if (!nim) ok = 1; else kernel = make_kernel(nim, &nkernel, 3, 3, 3); //rebuild the kernel only after the new image is read: the old image was freed above } else if (!strcmp(argv[ac], "-grid")) { ac++; double v = strtod(argv[ac], &end); ac++; int s = atoi(argv[ac]); ok = nifti_grid(nim, v, s); //} else if (!strcmp(argv[ac], "-dog")) { } else if (strstr(argv[ac], "-dog")) { int orient = 0; if (strstr(argv[ac], "-dogx")) orient = 1; if (strstr(argv[ac], "-dogy")) orient = 2; if (strstr(argv[ac], "-dogz")) orient = 3; if (strstr(argv[ac], "-dogr")) orient = -1; ac++; double pos = strtod(argv[ac], &end); ac++; double neg = strtod(argv[ac], &end); ok = nifti_dog(nim, pos, neg, orient); } else if (!strcmp(argv[ac], "-tfce")) { ac++; double H = strtod(argv[ac], &end); ac++; double E = strtod(argv[ac], &end); ac++; int c = atoi(argv[ac]); ok = nifti_tfce(nim, H, E, c); } else if (!strcmp(argv[ac], "-tfceS")) { ac++; double H = strtod(argv[ac], &end); ac++; double E = strtod(argv[ac], &end); ac++; int c = atoi(argv[ac]); ac++; int x = atoi(argv[ac]); ac++; int y = atoi(argv[ac]); ac++; int z = atoi(argv[ac]); ac++; double tfce_thresh = strtod(argv[ac], &end); ok = nifti_tfceS(nim, H, E, c, x, y, z, tfce_thresh); } else if (op == unknown) { printfx("!!Error: unsupported operation '%s'\n", argv[ac]); goto fail; } if ((op >= dilMk) && (op <= fmeanuk)) ok = nifti_kernel(nim, op, kernel, nkernel); if ((op >= exp1) && (op <= ptoz1)) nifti_unary(nim, op); if ((op >= add) && (op < exp1)) { //binary operations ac++; double v = strtod(argv[ac], &end); //if (end == argv[ac]) { if (strlen(argv[ac]) != (end - argv[ac])) { // "4d" will return numeric "4" if ((op == power) || (op == clamp) || (op == uclamp) || (op == thrp) || (op == thrP) || (op == uthrp) || (op == uthrP) || (op == seed)) { printfx("Error: '%s' expects numeric value\n", argv[ac - 1]); goto fail; } else ok = nifti_binary(nim, argv[ac], op); } else { if (op == add) ok = nifti_rescale(nim, 1.0, v); if (op == sub) ok = nifti_rescale(nim, 1.0, -v); if (op == mul) ok = nifti_rescale(nim, v, 0.0); if (op == divX) ok = nifti_rescale(nim, 1.0 / v, 0.0); if (op == mod) ok = nifti_rem(nim, v, 1); if (op == rem) ok = nifti_rem(nim, v, 0); if (op == mas) { printfx("Error: -mas expects image not number\n"); goto fail; } if (op == power) ok = nifti_binary_power(nim, v); if (op == thr) ok = nifti_thr(nim, v, 0, 0.0); if ((op == clamp) || (op == uclamp) || (op == thrp) || (op == thrP) || (op == uthrp) || (op == uthrP)) ok = nifti_thrp(nim, v, op); if (op == uthr) ok = nifti_thr(nim, v, 1, 0.0); if (op == max) ok = nifti_max(nim, v, 0); if (op == min) ok = nifti_max(nim, v, 1); if (op == inm) ok = nifti_inm(nim, v); if (op == ing) ok = nifti_ing(nim, v); if (op == smth) ok = nifti_smooth_gauss(nim, v, v, v, -6.0); if (op ==
seed) { if ((v > 0) && (v < 1)) v *= RAND_MAX; srand((unsigned)fabs(v)); } } } //binary operations if (ok != 0) goto fail; ac++; } //convert data to output type (-odt) if (nifti_image_change_datatype(nim, dtOut, &ihdr) != 0) return 1; // if we get here, write the output dataset nifti_save(nim, ""); //nifti_image_write( nim ); // and clean up memory nifti_image_free(nim); if (kernel != NULL) _mm_free(kernel); return 0; fail: nifti_image_free(nim); if (kernel != NULL) _mm_free(kernel); return 1; } //main() #endif // #ifndef USING_WASM #ifndef USING_WASM #ifdef DT32 int main32(int argc, char *argv[]) { #else int main64(int argc, char *argv[]) { #endif char *fin = NULL, *fout = NULL; //fslmaths in.nii out.nii changes datatype to flt, here we retain (similar to earlier versions of fslmaths) //fslmsths in.nii -rem 10 out.nii uses integer modulus not fmod //fslmaths robust range not fully described, this emulation is close //fslmaths ing/inm are listed as "unary" but should be listed as binary if (argc < 3) { printfx("Fatal: show_help shown by wrapper function\n"); exit(1); } int dtCalc = DT_FLOAT32; //data type for calculation int dtOut = DT_FLOAT32; //data type for calculation int ac = 1; // '-dt' sets datatype for calculations if (!strcmp(argv[ac], "-dt")) { if (!strcmp(argv[ac + 1], "double")) { dtCalc = DT_FLOAT64; } else if (strcmp(argv[ac + 1], "float")) { printfx("'-dt' error: only float or double calculations supported\n"); return 1; } ac += 2; if (argc < (ac + 2)) return 1; //insufficient arguments remain } //special case: pass through // no calculation, simple pass through copy, e.g. "niimaths in.nii out.nii.gz" // note fslmaths would save as flt type... but lossless conversion in native format is faster // note here we use nifti_image_read not nifti_image_read2 to preserve cal_min, cal_max if (ac + 2 == argc) { fin = argv[ac]; // no string copy, just pointer assignment ac++; nifti_image *nim = nifti_image_read(fin, 1); fout = argv[ac]; // no string copy, just pointer assignment ac++; if (nifti_set_filenames(nim, fout, 0, 1)) return 1; nifti_save(nim, ""); //nifti_image_write( nim ); nifti_image_free(nim); return 0; } //end pass through // next argument is input file fin = argv[ac]; // no string copy, just pointer assignment ac++; //clock_t startTime = clock(); nifti_image *nim = nifti_image_read2(fin, 1); if (!nim) { printfx("** failed to read NIfTI image from '%s'\n", fin); return 2; } //printf("read time: %ld ms\n", timediff(startTime, clock())); in_hdr ihdr = set_input_hdr(nim); //check for "-odt" must be last couplet if (!strcmp(argv[argc - 2], "-odt")) { if (!strcmp(argv[argc - 1], "double")) { dtOut = DT_FLOAT64; } else if (!strcmp(argv[argc - 1], "flt")) { dtOut = DT_FLOAT32; } else if (!strcmp(argv[argc - 1], "int")) { dtOut = DT_INT32; } else if (!strcmp(argv[argc - 1], "short")) { dtOut = DT_INT16; } else if (!strcmp(argv[argc - 1], "ushort")) { dtOut = DT_UINT16; } else if (!strcmp(argv[argc - 1], "char")) { dtOut = DT_UINT8; } else if (!strcmp(argv[argc - 1], "input")) { dtOut = nim->datatype; //ihdr.datatype; //! 
} else { printfx("Error: Unknown datatype '%s' - Possible datatypes are: char short ushort int flt double input\n", argv[argc - 1]); return 2; } argc = argc - 2; } //odt //convert data to calculation type (-dt) if (nifti_image_change_datatype(nim, dtCalc, &ihdr) != 0) return 1; //check output filename, e.g does file exist fout = argv[argc - 1]; // no string copy, just pointer assignment if (nifti_set_filenames(nim, fout, 0, 1)) return 1; argc = argc - 1; #if defined(_OPENMP) const int maxNumThreads = omp_get_max_threads(); const char *key = "AFNI_COMPRESSOR"; char *value; value = getenv(key); //export AFNI_COMPRESSOR=PIGZ char pigzKey[5] = "PIGZ"; if ((value != NULL) && (strstr(value, pigzKey))) { omp_set_num_threads(maxNumThreads); printfx("Using %d threads\n", maxNumThreads); } else { omp_set_num_threads(1); printfx("Single threaded\n"); } #endif #else int mainWASM(nifti_image *nim, int argc, char *argv[]) { int ac = 0; #endif //read operations int nkernel = 0; //number of voxels in kernel int *kernel = make_kernel(nim, &nkernel, 3, 3, 3); char *end = NULL; int ok = 0; while (ac < argc) { enum eOp op = unknown; if (!strcmp(argv[ac], "-add")) op = add; if (!strcmp(argv[ac], "-sub")) op = sub; if (!strcmp(argv[ac], "-mul")) op = mul; if (!strcmp(argv[ac], "-div")) op = divX; if (!strcmp(argv[ac], "-rem")) op = rem; if (!strcmp(argv[ac], "-mod")) op = mod; if (!strcmp(argv[ac], "-mas")) op = mas; if (!strcmp(argv[ac], "-thr")) op = thr; if (!strcmp(argv[ac], "-thrp")) op = thrp; if (!strcmp(argv[ac], "-thrP")) op = thrP; if (!strcmp(argv[ac], "-uthr")) op = uthr; if (!strcmp(argv[ac], "-uthrp")) op = uthrp; if (!strcmp(argv[ac], "-uthrP")) op = uthrP; if (!strcmp(argv[ac], "-clamp")) op = clamp; if (!strcmp(argv[ac], "-uclamp")) op = uclamp; if (!strcmp(argv[ac], "-max")) op = max; if (!strcmp(argv[ac], "-min")) op = min; if (!strcmp(argv[ac], "-max")) op = max; //if ( ! strcmp(argv[ac], "-addtozero") ) op = addtozero; //variation of mas //if ( ! strcmp(argv[ac], "-overadd") ) op = overadd; //variation of mas if (!strcmp(argv[ac], "-power")) op = power; if (!strcmp(argv[ac], "-seed")) op = seed; //if ( ! strcmp(argv[ac], "-restart") ) op = restart; //if ( ! 
strcmp(argv[ac], "-save") ) op = save; if (!strcmp(argv[ac], "-inm")) op = inm; if (!strcmp(argv[ac], "-ing")) op = ing; if (!strcmp(argv[ac], "-s")) op = smth; if (!strcmp(argv[ac], "-exp")) op = exp1; if (!strcmp(argv[ac], "-ceil")) op = ceil1; if (!strcmp(argv[ac], "-round")) op = ceil1; if (!strcmp(argv[ac], "-floor")) op = floor1; if (!strcmp(argv[ac], "-trunc")) op = trunc1; if (!strcmp(argv[ac], "-log")) op = log1; if (!strcmp(argv[ac], "-sin")) op = sin1; if (!strcmp(argv[ac], "-cos")) op = cos1; if (!strcmp(argv[ac], "-tan")) op = tan1; if (!strcmp(argv[ac], "-asin")) op = asin1; if (!strcmp(argv[ac], "-acos")) op = acos1; if (!strcmp(argv[ac], "-atan")) op = atan1; if (!strcmp(argv[ac], "-sqr")) op = sqr1; if (!strcmp(argv[ac], "-sqrt")) op = sqrt1; if (!strcmp(argv[ac], "-recip")) op = recip1; if (!strcmp(argv[ac], "-abs")) op = abs1; if (!strcmp(argv[ac], "-bin")) op = bin1; if (!strcmp(argv[ac], "-binv")) op = binv1; if (!strcmp(argv[ac], "-edge")) op = edge1; if (!strcmp(argv[ac], "-index")) op = index1; if (!strcmp(argv[ac], "-nan")) op = nan1; if (!strcmp(argv[ac], "-nanm")) op = nanm1; if (!strcmp(argv[ac], "-rand")) op = rand1; if (!strcmp(argv[ac], "-randn")) op = randn1; if (!strcmp(argv[ac], "-range")) op = range1; if (!strcmp(argv[ac], "-rank")) op = rank1; if (!strcmp(argv[ac], "-ranknorm")) op = ranknorm1; if (!strcmp(argv[ac], "-ztop")) op = ztop1; if (!strcmp(argv[ac], "-ptoz")) op = ptoz1; if (!strcmp(argv[ac], "-pval")) op = pval1; if (!strcmp(argv[ac], "-pval0")) op = pval01; if (!strcmp(argv[ac], "-cpval")) op = cpval1; //kernel operations if (!strcmp(argv[ac], "-dilM")) op = dilMk; if (!strcmp(argv[ac], "-dilD")) op = dilDk; if (!strcmp(argv[ac], "-dilF")) op = dilFk; if (!strcmp(argv[ac], "-dilall")) op = dilallk; if (!strcmp(argv[ac], "-ero")) op = erok; if (!strcmp(argv[ac], "-eroF")) op = eroFk; if (!strcmp(argv[ac], "-fmedian")) op = fmediank; if (!strcmp(argv[ac], "-fmean")) op = fmeank; if (!strcmp(argv[ac], "-fmeanu")) op = fmeanuk; if ((op >= exp1) && (op <= ptoz1)) nifti_unary(nim, op); if (!strcmp(argv[ac], "-p")) { ac++; #if defined(_OPENMP) int nProcessors = atoi(argv[ac]); if (nProcessors < 1) { omp_set_num_threads(maxNumThreads); printfx("Using %d threads\n", maxNumThreads); } else { omp_set_num_threads(nProcessors); printfx("Using %d threads\n", nProcessors); } #else printfx("Warning: not compiled for OpenMP: '-p' ignored\n"); #endif } else if ((strlen(argv[ac]) > 4) && (argv[ac][0] == '-') && (isupper(argv[ac][1]))) { //isupper #ifndef USING_WASM //WASM does not (yet) adjust image size //All Dimensionality reduction operations names begin with Capital letter, no other commands do! 
int dim = 0; switch (argv[ac][1]) { case 'X': // dim = 1; break; case 'Y': // code to be executed if n = 2; dim = 2; break; case 'Z': // dim = 3; break; case 'T': // code to be executed if n = 2; dim = 4; break; } if (dim == 0) { printfx("Error: unknown dimensionality reduction operation: %s\n", argv[ac]); goto fail; } if (strstr(argv[ac], "mean")) ok = nifti_dim_reduce(nim, Tmean, dim, 0); else if (strstr(argv[ac], "std")) ok = nifti_dim_reduce(nim, Tstd, dim, 0); else if (strstr(argv[ac], "maxn")) ok = nifti_dim_reduce(nim, Tmaxn, dim, 0); //test maxn BEFORE max else if (strstr(argv[ac], "max")) ok = nifti_dim_reduce(nim, Tmax, dim, 0); else if (strstr(argv[ac], "min")) ok = nifti_dim_reduce(nim, Tmin, dim, 0); else if (strstr(argv[ac], "median")) ok = nifti_dim_reduce(nim, Tmedian, dim, 0); else if (strstr(argv[ac], "perc")) { ac++; int pct = atoi(argv[ac]); ok = nifti_dim_reduce(nim, Tperc, dim, pct); } else if (strstr(argv[ac], "ar1")) ok = nifti_dim_reduce(nim, Tar1, dim, 0); else { printfx("Error unknown dimensionality reduction operation: %s\n", argv[ac]); ok = 1; } #endif //WASM does not (yet) adjust image size } else if (!strcmp(argv[ac], "-roi")) { //int , int , int , int , int , int , int , int ) if ((argc - ac) < 8) { printfx("not enough arguments for '-roi'\n"); //start.size for 4 dimensions: user might forget volumes goto fail; } ac++; int xmin = atoi(argv[ac]); ac++; int xsize = atoi(argv[ac]); ac++; int ymin = atoi(argv[ac]); ac++; int ysize = atoi(argv[ac]); ac++; int zmin = atoi(argv[ac]); ac++; int zsize = atoi(argv[ac]); ac++; int tmin = atoi(argv[ac]); ac++; int tsize = atoi(argv[ac]); nifti_roi(nim, xmin, xsize, ymin, ysize, zmin, zsize, tmin, tsize); } else if (!strcmp(argv[ac], "-bptfm")) { ac++; double hp_sigma = strtod(argv[ac], &end); ac++; double lp_sigma = strtod(argv[ac], &end); ok = nifti_bptf(nim, hp_sigma, lp_sigma, 0); } else if (!strcmp(argv[ac], "-bptf")) { ac++; double hp_sigma = strtod(argv[ac], &end); ac++; double lp_sigma = strtod(argv[ac], &end); //ok = nifti_bptf(nim, hp_sigma, lp_sigma); ok = nifti_bptf(nim, hp_sigma, lp_sigma, 1); #ifdef bandpass } else if (!strcmp(argv[ac], "-bandpass")) { // niimath test4D -bandpass 0.08 0.008 0 c ac++; double lp_hz = strtod(argv[ac], &end); ac++; double hp_hz = strtod(argv[ac], &end); ac++; double TRsec = strtod(argv[ac], &end); ok = nifti_bandpass(nim, lp_hz, hp_hz, TRsec); #endif } else if (!strcmp(argv[ac], "-roc")) { #ifndef USING_WASM //WASM does not (yet) support Area-under-ROC //-roc <AROC-thresh> <outfile> [4Dnoiseonly] <truth> //-roc <AROC-thresh> <outfile> [4Dnoiseonly] <truth> ac++; double thresh = strtod(argv[ac], &end); ac++; int outfile = ac; char *fnoise = NULL; if (thresh > 0.0) { ac++; fnoise = argv[ac]; } ac++; int truth = ac; //ok = nifti_bptf(nim, hp_sigma, lp_sigma); ok = nifti_roc(nim, fabs(thresh), argv[outfile], fnoise, argv[truth]); if (ac >= argc) { printfx("Error: no output filename specified!\n"); //e.g. 
volume size might differ goto fail; } #endif //WASM does not (yet) support Area-under-ROC } else if (!strcmp(argv[ac], "-unsharp")) { ac++; double sigma = strtod(argv[ac], &end); ac++; double amount = strtod(argv[ac], &end); nifti_unsharp(nim, sigma, sigma, sigma, amount); } else if (strstr(argv[ac], "-otsu")) { ac ++; int mode = atoi(argv[ac]); ok = nifti_otsu(nim, mode, 1); } else if (strstr(argv[ac], "-dehaze")) { ac ++; int mode = atoi(argv[ac]); int zeroFill = 0; if (mode < 0) zeroFill = -1; mode = abs(mode); ok = nifti_otsu(nim, mode, zeroFill); #ifdef bwlabelx } else if (strstr(argv[ac], "-bwlabel")) { ac ++; int conn = atoi(argv[ac]); ok = bwlabel(nim, conn); #endif } else if (!strcmp(argv[ac], "-h2c")) ok = nifti_h2c(nim); else if (!strcmp(argv[ac], "-c2h")) ok = nifti_c2h(nim); else if (!strcmp(argv[ac], "-sobel_binary")) ok = nifti_sobel(nim, 1, 1); else if (!strcmp(argv[ac], "-sobel")) ok = nifti_sobel(nim, 1, 0); else if (!strcmp(argv[ac], "-demean")) ok = nifti_demean(nim); else if (!strcmp(argv[ac], "-detrend")) ok = nifti_detrend_linear(nim); #ifndef USING_WASM //WASM does not (yet) resize images else if (!strcmp(argv[ac], "-subsamp2")) ok = nifti_subsamp2(nim, 0); else if (!strcmp(argv[ac], "-subsamp2offc")) ok = nifti_subsamp2(nim, 1); else if (!strcmp(argv[ac], "-resize")) { ac++; double X = strtod(argv[ac], &end); ac++; double Y = strtod(argv[ac], &end); ac++; double Z = strtod(argv[ac], &end); ac++; int interp_method = atoi(argv[ac]); ok = nifti_resize(nim, X, Y, Z, interp_method); } else if (!strcmp(argv[ac], "-crop")) { ac++; int tmin = atoi(argv[ac]); ac++; int tsize = atoi(argv[ac]); ok = nifti_crop(nim, tmin, tsize); } else if (!strcmp(argv[ac], "--compare")) { //--function terminates without saving image ac++; nifti_compare(nim, argv[ac]); //always terminates } #endif //WASM does not (yet) resize images else if (!strcmp(argv[ac], "-edt")) ok = nifti_edt(nim); else if (!strcmp(argv[ac], "-fillh")) ok = nifti_fillh(nim, 0); else if (!strcmp(argv[ac], "-fillh26")) ok = nifti_fillh(nim, 1); else if (!strcmp(argv[ac], "-kernel")) { ac++; if (kernel != NULL) _mm_free(kernel); kernel = NULL; if (!strcmp(argv[ac], "3D")) kernel = make_kernel(nim, &nkernel, 3, 3, 3); if (!strcmp(argv[ac], "2D")) kernel = make_kernel(nim, &nkernel, 3, 3, 1); if (!strcmp(argv[ac], "boxv")) { ac++; int vx = atoi(argv[ac]); kernel = make_kernel(nim, &nkernel, vx, vx, vx); } if (!strcmp(argv[ac], "sphere")) { ac++; double mm = strtod(argv[ac], &end); kernel = make_kernel_sphere(nim, &nkernel, mm); } #ifndef USING_WASM //WASM does not read files if (!strcmp(argv[ac], "file")) { ac++; kernel = make_kernel_file(nim, &nkernel, argv[ac]); } #endif //WASM does not read files if (!strcmp(argv[ac], "gauss")) { ac++; double mm = strtod(argv[ac], &end); kernel = make_kernel_gauss(nim, &nkernel, mm); } if (!strcmp(argv[ac], "box")) { //all voxels in a cube of width <size> mm centered on target voxel"); ac++; double mm = strtod(argv[ac], &end); int vx = (2 * floor(mm / nim->dx)) + 1; int vy = (2 * floor(mm / nim->dy)) + 1; int vz = (2 * floor(mm / nim->dz)) + 1; kernel = make_kernel(nim, &nkernel, vx, vy, vz); } if (!strcmp(argv[ac], "boxv3")) { ac++; int vx = atoi(argv[ac]); ac++; int vy = atoi(argv[ac]); ac++; int vz = atoi(argv[ac]); kernel = make_kernel(nim, &nkernel, vx, vy, vz); } if (kernel == NULL) { printfx("Error: '-kernel' option failed.\n"); //e.g. 
volume size might differ ok = 1; } } #ifndef USING_WASM //WASM does not handle tensors or file reads else if (!strcmp(argv[ac], "-tensor_2lower")) { ok = nifti_tensor_2(nim, 0); } else if (!strcmp(argv[ac], "-tensor_2upper")) { ok = nifti_tensor_2(nim, 1); } else if (!strcmp(argv[ac], "-tensor_decomp")) { ok = nifti_tensor_decomp(nim, 1); } else if (!strcmp(argv[ac], "-tensor_decomp_lower")) { ok = nifti_tensor_decomp(nim, 0); } else if (!strcmp(argv[ac], "-save")) { ac++; char *fout2 = argv[ac]; if (nifti_set_filenames(nim, fout2, 1, 1)) ok = 1; else { nifti_save(nim, ""); //nifti_image_write( nim ); nifti_set_filenames(nim, fout, 1, 1); } } else if (!strcmp(argv[ac], "-restart")) { if (kernel != NULL) printfx("Warning: 'restart' resets the kernel\n"); //e.g. volume size might differ nifti_image_free(nim); if (kernel != NULL) _mm_free(kernel); kernel = NULL; ac++; nim = nifti_image_read(argv[ac], 1); if (!nim) ok = 1; else kernel = make_kernel(nim, &nkernel, 3, 3, 3); //rebuild the kernel only after the new image is read: the old image was freed above } #endif //WASM does not handle tensors or file reads else if (!strcmp(argv[ac], "-grid")) { ac++; double v = strtod(argv[ac], &end); ac++; int s = atoi(argv[ac]); ok = nifti_grid(nim, v, s); } else if (strstr(argv[ac], "-dog")) { int orient = 0; if (strstr(argv[ac], "-dogx")) orient = 1; if (strstr(argv[ac], "-dogy")) orient = 2; if (strstr(argv[ac], "-dogz")) orient = 3; if (strstr(argv[ac], "-dogr")) orient = -1; ac++; double pos = strtod(argv[ac], &end); ac++; double neg = strtod(argv[ac], &end); ok = nifti_dog(nim, pos, neg, orient); } else if (!strcmp(argv[ac], "-tfce")) { ac++; double H = strtod(argv[ac], &end); ac++; double E = strtod(argv[ac], &end); ac++; int c = atoi(argv[ac]); ok = nifti_tfce(nim, H, E, c); } #ifndef USING_WASM //WASM does not support tfceS else if (!strcmp(argv[ac], "-tfceS")) { ac++; double H = strtod(argv[ac], &end); ac++; double E = strtod(argv[ac], &end); ac++; int c = atoi(argv[ac]); ac++; int x = atoi(argv[ac]); ac++; int y = atoi(argv[ac]); ac++; int z = atoi(argv[ac]); ac++; double tfce_thresh = strtod(argv[ac], &end); ok = nifti_tfceS(nim, H, E, c, x, y, z, tfce_thresh); } #endif //WASM does not support tfceS else if (op == unknown) { printfx("!!Error: unsupported operation '%s'\n", argv[ac]); goto fail; } if ((op >= dilMk) && (op <= fmeanuk)) ok = nifti_kernel(nim, op, kernel, nkernel); if ((op >= exp1) && (op <= ptoz1)) nifti_unary(nim, op); if ((op >= add) && (op < exp1)) { //binary operations ac++; double v = strtod(argv[ac], &end); //if (end == argv[ac]) { if (strlen(argv[ac]) != (end - argv[ac])) { // "4d" will return numeric "4" if ((op == power) || (op == clamp) || (op == uclamp) || (op == thrp) || (op == thrP) || (op == uthrp) || (op == uthrP) || (op == seed)) { printfx("Error: '%s' expects numeric value\n", argv[ac - 1]); goto fail; } else #ifdef USING_WASM ok = 123; //WASM does not read files #else ok = nifti_binary(nim, argv[ac], op); #endif } else { if (op == add) ok = nifti_rescale(nim, 1.0, v); if (op == sub) ok = nifti_rescale(nim, 1.0, -v); if (op == mul) ok = nifti_rescale(nim, v, 0.0); if (op == divX) ok = nifti_rescale(nim, 1.0 / v, 0.0); if (op == mod) ok = nifti_rem(nim, v, 1); if (op == rem) ok = nifti_rem(nim, v, 0); if (op == mas) { printfx("Error: -mas expects image not number\n"); goto fail; } if (op == power) ok = nifti_binary_power(nim, v); if (op == thr) ok = nifti_thr(nim, v, 0, 0.0); if ((op == clamp) || (op == uclamp) || (op == thrp) || (op == thrP) || (op == uthrp) || (op == uthrP)) ok = nifti_thrp(nim, v, op); if (op == uthr) ok = nifti_thr(nim, v, 1, 0.0); if (op
== max) ok = nifti_max(nim, v, 0); if (op == min) ok = nifti_max(nim, v, 1); if (op == inm) ok = nifti_inm(nim, v); if (op == ing) ok = nifti_ing(nim, v); if (op == smth) ok = nifti_smooth_gauss(nim, v, v, v, -6.0); if (op == seed) { if ((v > 0) && (v < 1)) v *= RAND_MAX; srand((unsigned)fabs(v)); } } } //binary operations if (ok != 0) return ok; ac++; } #ifndef USING_WASM //convert data to output type (-odt) if (nifti_image_change_datatype(nim, dtOut, &ihdr) != 0) return 1; // if we get here, write the output dataset nifti_save(nim, ""); //nifti_image_write( nim ); // and clean up memory nifti_image_free(nim); #endif if (kernel != NULL) _mm_free(kernel); return 0; fail: #ifndef USING_WASM nifti_image_free(nim); #endif if (kernel != NULL) _mm_free(kernel); return 1; } //All code below for WASM reading #ifdef USING_WASM static char* splitArgv(char **str, char **word){ const char QUOTE = '\''; bool inquotes = false; // optimization if( **str == 0 ) return NULL; // Skip leading spaces. while (**str && isspace(**str)) (*str)++; if( **str == '\0') return NULL; // Phrase in quotes is one arg if( **str == QUOTE ){ (*str)++; inquotes = true; } // Set phrase begining *word = *str; // Skip all chars if in quotes if( inquotes ){ while( **str && **str!=QUOTE ) (*str)++; //if( **str!= QUOTE ) }else{ // Skip non-space characters. while( **str && !isspace(**str) ) (*str)++; } // Null terminate the phrase and set `str` pointer to next symbol if(**str) *(*str)++ = '\0'; return *str; } char* parseStrToArgcArgvInsitu( char *str, const int argc_MAX, int *argc, char* argv[] ) { *argc = 0; while( *argc<argc_MAX-1 && splitArgv(&str, &argv[*argc]) ){ ++(*argc); if( *str == '\0' ) break; } argv[*argc] = NULL; return str; }; float clampf(float d, float min, float max) { float t = d < min ? min : d; return t > max ? max : t; } __attribute__((used)) int niimath (void *img, int datatype, int nx, int ny, int nz, int nt, float dx, float dy, float dz, float dt, char * cmdstr){ int nvox = nx * ny * nz * MAX(nt, 1); if (nvox < 1) return 101; #define argc_MAX 128 char* argv[argc_MAX] = {0}; int argc=0; char* rest = parseStrToArgcArgvInsitu(cmdstr,argc_MAX,&argc,argv); if( *rest!='\0' ) return 1; nifti_image nim; nim.data = img; nim.nvox = nvox; nim.nx = nx; nim.ny = ny; nim.nz = nz; nim.nt = nt; nim.dx = dx; nim.dy = dy; nim.dz = dz; nim.dt = dt; nim.scl_slope = 1.0; nim.scl_inter = 0.0; nim.cal_max = 1.0; nim.cal_min = 0.0; nim.datatype = DT_FLOAT; if (datatype == DT_FLOAT) return mainWASM(&nim, argc, argv);//niimath_core(&nim, cmdstr); if (datatype == DT_SIGNED_SHORT) { int16_t *img16 = (int16_t *)img; float *img32 = (float *) _mm_malloc(nvox * sizeof(float), 64); for (int i = 0; i < nvox; ++i) img32[i] = img16[i]; nim.data = img32; int ret = mainWASM(&nim, argc, argv); for (int i = 0; i < nvox; ++i) img16[i] = clampf(img32[i], -32768.0, 32767.0);//img32[i]; _mm_free(img32); return ret; } if (datatype == DT_UNSIGNED_CHAR) { uint8_t *img8 = (uint8_t *)img; float *img32 = (float *) _mm_malloc(nvox * sizeof(float), 64); for (int i = 0; i < nvox; ++i) img32[i] = img8[i]; nim.data = img32; int ret = mainWASM(&nim, argc, argv); for (int i = 0; i < nvox; ++i) img8[i] = clampf(img32[i], 0.0, 255.0); //img32[i]; _mm_free(img32); return ret; } return 88; } #endif //WASM code
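/* A minimal usage sketch for the WASM entry point above; the demo function
   and its values are hypothetical, not part of niimath. Note that cmdstr is
   tokenized in place by parseStrToArgcArgvInsitu(), so it must be a writable
   buffer, never a string literal. */
#ifdef USING_WASM
__attribute__((unused)) static int niimath_demo(void) {
	enum { NX = 4, NY = 4, NZ = 4 };
	static float img[NX * NY * NZ]; //DT_FLOAT volume, zero initialized
	char cmd[] = "-add 1 -mul 2"; //writable buffer: parsed in situ
	//1mm isotropic voxels, a single volume (nt = 1); returns 0 on success
	return niimath(img, DT_FLOAT, NX, NY, NZ, 1, 1.0f, 1.0f, 1.0f, 0.0f, cmd);
}
#endif //niimath_demo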
utils.c
#define _GNU_SOURCE #include "utils.h" #include <signal.h> #include <stdlib.h> #ifdef HAVE_GETTIMEOFDAY #include <sys/time.h> #else #include <time.h> #endif #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #ifdef HAVE_SYS_MMAN_H #include <sys/mman.h> #endif #ifdef HAVE_FENV_H #include <fenv.h> #endif #ifdef HAVE_LIBPNG #include <png.h> #endif /* Random number seed */ uint32_t lcg_seed; /*----------------------------------------------------------------------------*\ * CRC-32 version 2.0.0 by Craig Bruce, 2006-04-29. * * This program generates the CRC-32 values for the files named in the * command-line arguments. These are the same CRC-32 values used by GZIP, * PKZIP, and ZMODEM. The Crc32_ComputeBuf () can also be detached and * used independently. * * THIS PROGRAM IS PUBLIC-DOMAIN SOFTWARE. * * Based on the byte-oriented implementation "File Verification Using CRC" * by Mark R. Nelson in Dr. Dobb's Journal, May 1992, pp. 64-67. * * v1.0.0: original release. * v1.0.1: fixed printf formats. * v1.0.2: fixed something else. * v1.0.3: replaced CRC constant table by generator function. * v1.0.4: reformatted code, made ANSI C. 1994-12-05. * v2.0.0: rewrote to use memory buffer & static table, 2006-04-29. \*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*\ * NAME: * Crc32_ComputeBuf () - computes the CRC-32 value of a memory buffer * DESCRIPTION: * Computes or accumulates the CRC-32 value for a memory buffer. * The 'inCrc32' gives a previously accumulated CRC-32 value to allow * a CRC to be generated for multiple sequential buffer-fuls of data. * The 'inCrc32' for the first buffer must be zero. * ARGUMENTS: * inCrc32 - accumulated CRC-32 value, must be 0 on first call * buf - buffer to compute CRC-32 value for * bufLen - number of bytes in buffer * RETURNS: * crc32 - computed CRC-32 value * ERRORS: * (no errors are possible) \*----------------------------------------------------------------------------*/ uint32_t compute_crc32 (uint32_t in_crc32, const void *buf, size_t buf_len) { static const uint32_t crc_table[256] = { 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 
0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D }; uint32_t crc32; unsigned char * byte_buf; size_t i; /* accumulate crc32 for buffer */ crc32 = in_crc32 ^ 0xFFFFFFFF; byte_buf = (unsigned char*) buf; for (i = 0; i < buf_len; i++) crc32 = (crc32 >> 8) ^ crc_table[(crc32 ^ byte_buf[i]) & 0xFF]; return (crc32 ^ 0xFFFFFFFF); } pixman_bool_t is_little_endian (void) { volatile uint16_t endian_check_var = 0x1234; return (*(volatile uint8_t *)&endian_check_var == 0x34); } /* perform endian conversion of pixel data */ void image_endian_swap (pixman_image_t *img) { int stride = pixman_image_get_stride (img); uint32_t *data = pixman_image_get_data (img); int height = pixman_image_get_height (img); int bpp = PIXMAN_FORMAT_BPP (pixman_image_get_format (img)); int i, j; /* swap bytes only on big endian systems */ if (is_little_endian()) return; if (bpp == 8) return; for (i = 0; i < height; i++) { uint8_t *line_data = (uint8_t *)data + stride * i; switch (bpp) { case 1: for (j = 0; j < stride; j++) { line_data[j] = ((line_data[j] & 0x80) >> 7) | ((line_data[j] & 0x40) >> 5) | ((line_data[j] & 0x20) >> 3) | ((line_data[j] & 0x10) >> 1) | ((line_data[j] & 0x08) << 1) | ((line_data[j] & 0x04) << 3) | ((line_data[j] & 0x02) << 5) | ((line_data[j] & 0x01) << 7); } break; case 4: for (j = 0; j < stride; j++) { line_data[j] = (line_data[j] >> 4) | (line_data[j] << 4); } break; case 16: for (j = 0; j + 2 <= stride; j += 2) { char t1 = line_data[j + 0]; char t2 = line_data[j + 1]; line_data[j + 1] = t1; line_data[j + 0] = t2; } break; case 24: for (j = 0; j + 3 <= stride; j += 3) { char t1 = line_data[j + 0]; char t2 = line_data[j + 1]; char t3 = line_data[j + 2]; line_data[j + 2] = t1; line_data[j + 1] = t2; line_data[j + 0] = t3; } break; case 32: for (j = 0; j + 4 <= stride; j += 4) { char t1 = line_data[j + 0]; char t2 = line_data[j + 1]; char t3 = line_data[j + 2]; char t4 = line_data[j + 3]; line_data[j + 
3] = t1; line_data[j + 2] = t2; line_data[j + 1] = t3; line_data[j + 0] = t4; } break; default: assert (FALSE); break; } } } #define N_LEADING_PROTECTED 10 #define N_TRAILING_PROTECTED 10 typedef struct { void *addr; uint32_t len; uint8_t *trailing; int n_bytes; } info_t; #if defined(HAVE_MPROTECT) && defined(HAVE_GETPAGESIZE) && defined(HAVE_SYS_MMAN_H) && defined(HAVE_MMAP) /* This is apparently necessary on at least OS X */ #ifndef MAP_ANONYMOUS #define MAP_ANONYMOUS MAP_ANON #endif void * fence_malloc (int64_t len) { unsigned long page_size = getpagesize(); unsigned long page_mask = page_size - 1; uint32_t n_payload_bytes = (len + page_mask) & ~page_mask; uint32_t n_bytes = (page_size * (N_LEADING_PROTECTED + N_TRAILING_PROTECTED + 2) + n_payload_bytes) & ~page_mask; uint8_t *initial_page; uint8_t *leading_protected; uint8_t *trailing_protected; uint8_t *payload; uint8_t *addr; if (len < 0) abort(); addr = mmap (NULL, n_bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (addr == MAP_FAILED) { printf ("mmap failed on %lld %u\n", (long long int)len, n_bytes); return NULL; } initial_page = (uint8_t *)(((unsigned long)addr + page_mask) & ~page_mask); leading_protected = initial_page + page_size; payload = leading_protected + N_LEADING_PROTECTED * page_size; trailing_protected = payload + n_payload_bytes; ((info_t *)initial_page)->addr = addr; ((info_t *)initial_page)->len = len; ((info_t *)initial_page)->trailing = trailing_protected; ((info_t *)initial_page)->n_bytes = n_bytes; if ((mprotect (leading_protected, N_LEADING_PROTECTED * page_size, PROT_NONE) == -1) || (mprotect (trailing_protected, N_TRAILING_PROTECTED * page_size, PROT_NONE) == -1)) { munmap (addr, n_bytes); return NULL; } return payload; } void fence_free (void *data) { uint32_t page_size = getpagesize(); uint8_t *payload = data; uint8_t *leading_protected = payload - N_LEADING_PROTECTED * page_size; uint8_t *initial_page = leading_protected - page_size; info_t *info = (info_t *)initial_page; munmap (info->addr, info->n_bytes); } #else void * fence_malloc (int64_t len) { return malloc (len); } void fence_free (void *data) { free (data); } #endif uint8_t * make_random_bytes (int n_bytes) { uint8_t *bytes = fence_malloc (n_bytes); int i; if (!bytes) return NULL; for (i = 0; i < n_bytes; ++i) bytes[i] = lcg_rand () & 0xff; return bytes; } void a8r8g8b8_to_rgba_np (uint32_t *dst, uint32_t *src, int n_pixels) { uint8_t *dst8 = (uint8_t *)dst; int i; for (i = 0; i < n_pixels; ++i) { uint32_t p = src[i]; uint8_t a, r, g, b; a = (p & 0xff000000) >> 24; r = (p & 0x00ff0000) >> 16; g = (p & 0x0000ff00) >> 8; b = (p & 0x000000ff) >> 0; if (a != 0) { #define DIVIDE(c, a) \ do \ { \ int t = ((c) * 255) / a; \ (c) = t < 0? 0 : t > 255? 
255 : t; \ } while (0) DIVIDE (r, a); DIVIDE (g, a); DIVIDE (b, a); } *dst8++ = r; *dst8++ = g; *dst8++ = b; *dst8++ = a; } } #ifdef HAVE_LIBPNG pixman_bool_t write_png (pixman_image_t *image, const char *filename) { int width = pixman_image_get_width (image); int height = pixman_image_get_height (image); int stride = width * 4; uint32_t *data = malloc (height * stride); pixman_image_t *copy; png_struct *write_struct; png_info *info_struct; pixman_bool_t result = FALSE; FILE *f = fopen (filename, "wb"); png_bytep *row_pointers; int i; if (!f) return FALSE; row_pointers = malloc (height * sizeof (png_bytep)); copy = pixman_image_create_bits ( PIXMAN_a8r8g8b8, width, height, data, stride); pixman_image_composite32 ( PIXMAN_OP_SRC, image, NULL, copy, 0, 0, 0, 0, 0, 0, width, height); a8r8g8b8_to_rgba_np (data, data, height * width); for (i = 0; i < height; ++i) row_pointers[i] = (png_bytep)(data + i * width); if (!(write_struct = png_create_write_struct ( PNG_LIBPNG_VER_STRING, NULL, NULL, NULL))) goto out1; if (!(info_struct = png_create_info_struct (write_struct))) goto out2; png_init_io (write_struct, f); png_set_IHDR (write_struct, info_struct, width, height, 8, PNG_COLOR_TYPE_RGB_ALPHA, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE); png_write_info (write_struct, info_struct); png_write_image (write_struct, row_pointers); png_write_end (write_struct, NULL); result = TRUE; out2: png_destroy_write_struct (&write_struct, &info_struct); out1: if (fclose (f) != 0) result = FALSE; pixman_image_unref (copy); free (row_pointers); free (data); return result; } #else /* no libpng */ pixman_bool_t write_png (pixman_image_t *image, const char *filename) { return FALSE; } #endif /* * A function, which can be used as a core part of the test programs, * intended to detect various problems with the help of fuzzing input * to pixman API (according to some templates, aka "smart" fuzzing). * Some general information about such testing can be found here: * http://en.wikipedia.org/wiki/Fuzz_testing * * It may help detecting: * - crashes on bad handling of valid or reasonably invalid input to * pixman API. * - deviations from the behavior of older pixman releases. * - deviations from the behavior of the same pixman release, but * configured in a different way (for example with SIMD optimizations * disabled), or running on a different OS or hardware. * * The test is performed by calling a callback function a huge number * of times. The callback function is expected to run some snippet of * pixman code with pseudorandom variations to the data feeded to * pixman API. A result of running each callback function should be * some deterministic value which depends on test number (test number * can be used as a seed for PRNG). When 'verbose' argument is nonzero, * callback function is expected to print to stdout some information * about what it does. * * Return values from many small tests are accumulated together and * used as final checksum, which can be compared to some expected * value. Running the tests not individually, but in a batch helps * to reduce process start overhead and also allows to parallelize * testing and utilize multiple CPU cores. * * The resulting executable can be run without any arguments. In * this case it runs a batch of tests starting from 1 and up to * 'default_number_of_iterations'. The resulting checksum is * compared with 'expected_checksum' and FAIL or PASS verdict * depends on the result of this comparison. 
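 * (Concrete example, binary name hypothetical: "./blitters-test" checks the
 * full batch, "./blitters-test 1 100" checks tests 1..100, and
 * "./blitters-test 42" runs only test 42 verbosely.)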
* * If the executable is run with 2 numbers provided as command line * arguments, they specify the starting and ending numbers for a test * batch. * * If the executable is run with only one number provided as a command * line argument, then this number is used to call the callback function * once, and also with verbose flag set. */ int fuzzer_test_main (const char *test_name, int default_number_of_iterations, uint32_t expected_checksum, uint32_t (*test_function)(int testnum, int verbose), int argc, const char *argv[]) { int i, n1 = 1, n2 = 0; uint32_t checksum = 0; int verbose = getenv ("VERBOSE") != NULL; if (argc >= 3) { n1 = atoi (argv[1]); n2 = atoi (argv[2]); if (n2 < n1) { printf ("invalid test range\n"); return 1; } } else if (argc >= 2) { n2 = atoi (argv[1]); checksum = test_function (n2, 1); printf ("%d: checksum=%08X\n", n2, checksum); return 0; } else { n1 = 1; n2 = default_number_of_iterations; } #ifdef USE_OPENMP #pragma omp parallel for reduction(+:checksum) default(none) \ shared(n1, n2, test_function, verbose) #endif for (i = n1; i <= n2; i++) { uint32_t crc = test_function (i, 0); if (verbose) printf ("%d: %08X\n", i, crc); checksum += crc; } if (n1 == 1 && n2 == default_number_of_iterations) { if (checksum == expected_checksum) { printf ("%s test passed (checksum=%08X)\n", test_name, checksum); } else { printf ("%s test failed! (checksum=%08X, expected %08X)\n", test_name, checksum, expected_checksum); return 1; } } else { printf ("%d-%d: checksum=%08X\n", n1, n2, checksum); } return 0; } /* Try to obtain current time in seconds */ double gettime (void) { #ifdef HAVE_GETTIMEOFDAY struct timeval tv; gettimeofday (&tv, NULL); return (double)((int64_t)tv.tv_sec * 1000000 + tv.tv_usec) / 1000000.; #else return (double)clock() / (double)CLOCKS_PER_SEC; #endif } uint32_t get_random_seed (void) { double d = gettime(); lcg_srand (*(uint32_t *)&d); return lcg_rand_u32 (); } static const char *global_msg; static void on_alarm (int signo) { printf ("%s\n", global_msg); exit (1); } void fail_after (int seconds, const char *msg) { #ifdef HAVE_SIGACTION #ifdef HAVE_ALARM struct sigaction action; global_msg = msg; memset (&action, 0, sizeof (action)); action.sa_handler = on_alarm; alarm (seconds); sigaction (SIGALRM, &action, NULL); #endif #endif } void enable_fp_exceptions (void) { #ifdef HAVE_FENV_H #ifdef HAVE_FEENABLEEXCEPT /* Note: we don't enable the FE_INEXACT trap because * that happens quite commonly. It is possible that * over- and underflow should similarly be considered * okay, but for now the test suite passes with them * enabled, and it's useful to know if they start * occuring. */ feenableexcept (FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW); #endif #endif } void * aligned_malloc (size_t align, size_t size) { void *result; #ifdef HAVE_POSIX_MEMALIGN if (posix_memalign (&result, align, size) != 0) result = NULL; #else result = malloc (size); #endif return result; } #define CONVERT_15(c, is_rgb) \ (is_rgb? 
\ ((((c) >> 3) & 0x001f) | \ (((c) >> 6) & 0x03e0) | \ (((c) >> 9) & 0x7c00)) : \ (((((c) >> 16) & 0xff) * 153 + \ (((c) >> 8) & 0xff) * 301 + \ (((c) ) & 0xff) * 58) >> 2)) void initialize_palette (pixman_indexed_t *palette, uint32_t depth, int is_rgb) { int i; uint32_t mask = (1 << depth) - 1; for (i = 0; i < 32768; ++i) palette->ent[i] = lcg_rand() & mask; memset (palette->rgba, 0, sizeof (palette->rgba)); for (i = 0; i < mask + 1; ++i) { uint32_t rgba24; pixman_bool_t retry; uint32_t i15; /* We filled the rgb->index map with random numbers, but we * do need the ability to round trip, that is if some indexed * color expands to an argb24, then the 15 bit version of that * color must map back to the index. Anything else, we don't * care about too much. */ do { uint32_t old_idx; rgba24 = lcg_rand(); i15 = CONVERT_15 (rgba24, is_rgb); old_idx = palette->ent[i15]; if (CONVERT_15 (palette->rgba[old_idx], is_rgb) == i15) retry = 1; else retry = 0; } while (retry); palette->rgba[i] = rgba24; palette->ent[i15] = i; } for (i = 0; i < mask + 1; ++i) { assert (palette->ent[CONVERT_15 (palette->rgba[i], is_rgb)] == i); } } static double round_channel (double p, int m) { int t; double r; t = p * ((1 << m)); t -= t >> m; r = t / (double)((1 << m) - 1); return r; } void round_color (pixman_format_code_t format, color_t *color) { if (PIXMAN_FORMAT_R (format) == 0) { color->r = 0.0; color->g = 0.0; color->b = 0.0; } else { color->r = round_channel (color->r, PIXMAN_FORMAT_R (format)); color->g = round_channel (color->g, PIXMAN_FORMAT_G (format)); color->b = round_channel (color->b, PIXMAN_FORMAT_B (format)); } if (PIXMAN_FORMAT_A (format) == 0) color->a = 1; else color->a = round_channel (color->a, PIXMAN_FORMAT_A (format)); } /* Check whether @pixel is a valid quantization of the a, r, g, b * parameters. Some slack is permitted. 
*/
void
pixel_checker_init (pixel_checker_t *checker, pixman_format_code_t format)
{
    assert (PIXMAN_FORMAT_VIS (format));

    checker->format = format;

    switch (PIXMAN_FORMAT_TYPE (format))
    {
    case PIXMAN_TYPE_A:
        checker->bs = 0;
        checker->gs = 0;
        checker->rs = 0;
        checker->as = 0;
        break;

    case PIXMAN_TYPE_ARGB:
        checker->bs = 0;
        checker->gs = checker->bs + PIXMAN_FORMAT_B (format);
        checker->rs = checker->gs + PIXMAN_FORMAT_G (format);
        checker->as = checker->rs + PIXMAN_FORMAT_R (format);
        break;

    case PIXMAN_TYPE_ABGR:
        checker->rs = 0;
        checker->gs = checker->rs + PIXMAN_FORMAT_R (format);
        checker->bs = checker->gs + PIXMAN_FORMAT_G (format);
        checker->as = checker->bs + PIXMAN_FORMAT_B (format);
        break;

    case PIXMAN_TYPE_BGRA:
        /* With BGRA formats we start counting at the high end of the pixel;
         * each shift is the one above it minus the width of the channel
         * being placed. */
        checker->bs = PIXMAN_FORMAT_BPP (format) - PIXMAN_FORMAT_B (format);
        checker->gs = checker->bs - PIXMAN_FORMAT_G (format);
        checker->rs = checker->gs - PIXMAN_FORMAT_R (format);
        checker->as = checker->rs - PIXMAN_FORMAT_A (format);
        break;

    case PIXMAN_TYPE_RGBA:
        /* With RGBA formats we start counting at the high end of the pixel */
        checker->rs = PIXMAN_FORMAT_BPP (format) - PIXMAN_FORMAT_R (format);
        checker->gs = checker->rs - PIXMAN_FORMAT_G (format);
        checker->bs = checker->gs - PIXMAN_FORMAT_B (format);
        checker->as = checker->bs - PIXMAN_FORMAT_A (format);
        break;

    default:
        assert (0);
        break;
    }

    checker->am = ((1 << PIXMAN_FORMAT_A (format)) - 1) << checker->as;
    checker->rm = ((1 << PIXMAN_FORMAT_R (format)) - 1) << checker->rs;
    checker->gm = ((1 << PIXMAN_FORMAT_G (format)) - 1) << checker->gs;
    checker->bm = ((1 << PIXMAN_FORMAT_B (format)) - 1) << checker->bs;

    checker->aw = PIXMAN_FORMAT_A (format);
    checker->rw = PIXMAN_FORMAT_R (format);
    checker->gw = PIXMAN_FORMAT_G (format);
    checker->bw = PIXMAN_FORMAT_B (format);
}

void
pixel_checker_split_pixel (const pixel_checker_t *checker, uint32_t pixel,
                           int *a, int *r, int *g, int *b)
{
    *a = (pixel & checker->am) >> checker->as;
    *r = (pixel & checker->rm) >> checker->rs;
    *g = (pixel & checker->gm) >> checker->gs;
    *b = (pixel & checker->bm) >> checker->bs;
}

static int32_t
convert (double v, uint32_t width, uint32_t mask, uint32_t shift, double def)
{
    int32_t r;

    if (!mask)
        v = def;

    r = (v * ((mask >> shift) + 1));
    r -= r >> width;

    return r;
}

static void
get_limits (const pixel_checker_t *checker, double limit,
            color_t *color,
            int *ao, int *ro, int *go, int *bo)
{
    *ao = convert (color->a + limit, checker->aw, checker->am, checker->as, 1.0);
    *ro = convert (color->r + limit, checker->rw, checker->rm, checker->rs, 0.0);
    *go = convert (color->g + limit, checker->gw, checker->gm, checker->gs, 0.0);
    *bo = convert (color->b + limit, checker->bw, checker->bm, checker->bs, 0.0);
}

/* The acceptable deviation in units of [0.0, 1.0] */
#define DEVIATION (0.004)

void
pixel_checker_get_max (const pixel_checker_t *checker, color_t *color,
                       int *am, int *rm, int *gm, int *bm)
{
    get_limits (checker, DEVIATION, color, am, rm, gm, bm);
}

void
pixel_checker_get_min (const pixel_checker_t *checker, color_t *color,
                       int *am, int *rm, int *gm, int *bm)
{
    get_limits (checker, - DEVIATION, color, am, rm, gm, bm);
}

pixman_bool_t
pixel_checker_check (const pixel_checker_t *checker, uint32_t pixel,
                     color_t *color)
{
    int32_t a_lo, a_hi, r_lo, r_hi, g_lo, g_hi, b_lo, b_hi;
    int32_t ai, ri, gi, bi;
    pixman_bool_t result;

    pixel_checker_get_min (checker, color, &a_lo, &r_lo, &g_lo, &b_lo);
    pixel_checker_get_max (checker, color, &a_hi, &r_hi, &g_hi, &b_hi);
    pixel_checker_split_pixel (checker, pixel, &ai, &ri, &gi, &bi);

    result =
        a_lo <= ai && ai <= a_hi &&
        r_lo <= ri && ri <= r_hi &&
        g_lo <= gi && gi <= g_hi &&
        b_lo <= bi && bi <= b_hi;

    return result;
}
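/*
 * Illustrative sketch (not part of pixman): minimal usage of
 * fuzzer_test_main() from above.  The callback, the iteration count and
 * the expected checksum are hypothetical placeholders; a real test would
 * seed its PRNG from 'testnum' and exercise some pixman entry point.
 * Relies on the headers this file already includes (stdio.h etc.).
 */
static uint32_t
example_test (int testnum, int verbose)
{
    /* derive a deterministic value from the test number; a real test
     * would run pixman code here and hash the resulting pixels */
    uint32_t crc = (uint32_t)testnum * 2654435761u;

    if (verbose)
        printf ("example_test %d -> %08X\n", testnum, crc);

    return crc;
}

int
main (int argc, const char *argv[])
{
    /* 1000 iterations and the zero checksum are made-up placeholders */
    return fuzzer_test_main ("example", 1000, 0x00000000,
                             example_test, argc, argv);
}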
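/*
 * Worked example for the CONVERT_15 macro defined above: in RGB mode it
 * packs the top 5 bits of each 8-bit channel into x1r5g5b5; otherwise it
 * produces a 15-bit luminance using weights 153/301/58 (which sum to 512).
 * The expected values in the asserts were computed by hand for this sketch.
 */
static void
convert_15_examples (void)
{
    uint32_t red   = 0x00FF0000;  /* r = 255, g = 0, b = 0 */
    uint32_t white = 0x00FFFFFF;  /* r = g = b = 255 */

    /* RGB mode: red keeps only the top 5 bits of its channel -> 0x7c00 */
    assert (CONVERT_15 (red, 1) == 0x7c00);

    /* luminance mode: (255*153 + 255*301 + 255*58) >> 2 = (255*512) >> 2 = 32640 */
    assert (CONVERT_15 (white, 0) == 32640);
}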
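/*
 * Illustrative use of the pixel_checker machinery above, assuming the
 * pixel_checker_t and color_t types declared in utils.h (channels are
 * doubles in [0.0, 1.0]).  The pixel value is an example: opaque red in
 * a8r8g8b8, which must lie within DEVIATION of the given color.
 */
static void
pixel_checker_example (void)
{
    pixel_checker_t checker;
    color_t color;

    color.a = 1.0;
    color.r = 1.0;
    color.g = 0.0;
    color.b = 0.0;

    pixel_checker_init (&checker, PIXMAN_a8r8g8b8);

    assert (pixel_checker_check (&checker, 0xffff0000, &color));
}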
copysplit.c
int main ()
{
    int X = 9;
    int diff = 10;
    int diff1 = 20;
    #pragma omp parallel
    {
        while (1) {
            if (X < 10) {           /* X is shared: every thread takes this branch */
                #pragma omp single
                {
                    diff = diff1;   /* one thread updates the shared value */
                }
                #pragma omp barrier
                break;
            }
            #pragma omp barrier     /* keeps non-breaking iterations aligned */
        }
    }
    X = diff;                       /* diff is 20 after the region */
}
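/* Sketch of a related idiom (not part of the test above): OpenMP's
 * copyprivate clause broadcasts the value produced inside a single
 * region to every thread's private copy, avoiding the shared variable
 * and explicit barrier used in copysplit.c.  The value 42 is arbitrary. */
#include <omp.h>

void broadcast_example (void)
{
    int x = 0;
    #pragma omp parallel private(x)
    {
        #pragma omp single copyprivate(x)
        x = 42;   /* one thread assigns; all threads leave the single with x == 42 */
    }
}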
benchmark.h
#ifndef BENCHMARK_H #define BENCHMARK_H #include "datasets.h" #include "benchmark_utils.h" #include "algorithms/binary_search.h" #include "algorithms/linear_search.h" #include "algorithms/interpolation_search.h" #include "algorithms/tip.h" #include "algorithms/sip.h" #include "algorithms/bin_eyt.h" #include "omp.h" #include "util.h" #include <algorithm> #include <chrono> #include <cmath> #include <fstream> #include <iostream> #include <iterator> #include <map> #include <numeric> #include <random> #include <set> #include <sstream> #include <string> #include <unordered_map> #include <vector> #include <memory> using Fn = std::vector<double>(Run &, const DatasetBase &); using fn_tuple = std::tuple<const char *, Fn *>; using std::make_tuple; struct Run { DatasetParam dataset_param; std::string name; int n_thds; bool ok; Run(DatasetParam dataset_param, std::string name, int n_thds) : dataset_param(dataset_param), name(name), n_thds(n_thds), ok(true) {} template<typename SearchAlgorithm, int record_bytes> static std::vector<double> searchAndMeasure(Run &run, const DatasetBase &dataset) { const auto &inputDataset = static_cast<const Dataset<record_bytes> &>(dataset); #ifdef INFINITE_REPEAT constexpr bool infinite_repeat = true; #else constexpr bool infinite_repeat = false; #endif constexpr int sample_size = 1000; const int n_samples = inputDataset.keys.size() / sample_size; auto &keys_to_search_for = inputDataset.permuted_keys; // TODO this can't be a template of a template have to specialize earlier // have to specialize in the class itself. Maybe template macros? SearchAlgorithm searchAlgorithm(inputDataset.keys); //Stores the times to search each subset std::vector<double> ns(n_samples * run.n_thds); // Stores the start of each subset in keys_to_search_for std::vector<int> subset_indexes(n_samples * run.n_thds); auto rng = std::mt19937(42); for (auto it = subset_indexes.begin(); it != subset_indexes.end(); it += n_samples) { std::iota(it, it + n_samples, 0); if (it != subset_indexes.begin()) std::shuffle(it, it + n_samples, rng); } // make copy to pass it easier in the parallel region as // private copy (firstprivate) const auto inputsum = inputDataset.sum; #pragma omp parallel default(none) \ num_threads(run.n_thds) firstprivate(n_samples, inputsum) \ shared(keys_to_search_for, run, searchAlgorithm, ns, subset_indexes) { const int tid = omp_get_thread_num(); const auto &thread_ns = &ns[tid * n_samples]; thread_ns[0] = 0.0; auto valSum = 0UL; for (int sample_index = 0;; sample_index++) { if (sample_index == n_samples) { if (!infinite_repeat || valSum != inputsum) break; valSum = sample_index = 0; } int query_index = subset_indexes[tid * n_samples + sample_index] * sample_size; auto t0 = std::chrono::steady_clock::now(); for (int i = query_index; i < query_index + sample_size; i++) { auto val = searchAlgorithm.search(keys_to_search_for[i]); valSum += val; assert(val == keys_to_search_for[i]); } auto t1 = std::chrono::steady_clock::now(); double ns_elapsed = std::chrono::nanoseconds(t1 - t0).count(); // thread_ns[0] += ns_elapsed; if (!infinite_repeat) thread_ns[sample_index] = ns_elapsed / sample_size; } #pragma omp critical run.ok = run.ok && valSum == inputsum; // Verify correct results. 
} return ns; } template<typename SearchAlgorithm, int record_bytes> static std::vector<double> searchAndMetadata(Run &run, const DatasetBase &dataset) { const auto &inputDataset = static_cast<const Dataset<record_bytes> &>(dataset); sip<record_bytes> searchAlgorithm(inputDataset.keys); std::vector<std::pair<int, int>> res; long iterations = 0; long steps = 0; for (auto k : inputDataset.permuted_keys) { auto val = searchAlgorithm.search_metadata(k); iterations += val.first; steps += val.second; } std::cout<< (double) iterations / (double)inputDataset.permuted_keys.size() << " " << (double)steps / (double)inputDataset.permuted_keys.size() << std::endl; return {}; } template<int record_bytes> static std::vector<double> findAlgorithmAndSearch(Run &run, const DatasetBase &dataset) { constexpr auto algorithm_mapper = std::array < fn_tuple, 8 > { // Interpolation Search make_tuple("is", searchAndMeasure<InterpolationSearch<record_bytes>, record_bytes>), make_tuple("isseq", searchAndMeasure<sip<record_bytes, false>, record_bytes>), // SIP and TIP make_tuple("sip", searchAndMeasure<sip<record_bytes>, record_bytes>), make_tuple("tip", searchAndMeasure<tip<record_bytes, 64>, record_bytes>), // Binary Search make_tuple("bs", searchAndMeasure<Binary<record_bytes>, record_bytes>), // Search Eytzinger make_tuple("b-eyt", searchAndMeasure<b_eyt<record_bytes, false>, record_bytes>), // Search Eytzinger with prefetch make_tuple("b-eyt-p", searchAndMeasure<b_eyt<record_bytes, true>, record_bytes>), // Collects number of interpolation and sequential steps of SIP make_tuple("sip_metadata", searchAndMetadata<sip<record_bytes>, record_bytes>), }; // Find the correct search algorithm to use as specified in the run. auto it = std::find_if( algorithm_mapper.begin(), algorithm_mapper.end(), [run](const auto &x) { return std::string(std::get<const char *>(x)) == run.name; }); if (it == algorithm_mapper.end()) { std::cerr << "algorithm " << run.name << " not found."; assert(!"Algorithm not found"); return std::vector<double>(); } // Run the search algorithm and return the results return std::get<Fn *>(*it)(run, dataset); } auto search(const DatasetBase &dataset) { auto n = dataset_param.n; auto distribution = dataset_param.distribution; auto param = dataset_param.param; auto record_bytes = dataset_param.record_bytes; // Stores the times to search each 1000 record subset std::vector<double> ns; // Find the correct algorithm and run it switch (dataset_param.record_bytes) { case 8:ns = findAlgorithmAndSearch<8>(*this, dataset); break; case 32:ns = findAlgorithmAndSearch<32>(*this, dataset); break; case 128:ns = findAlgorithmAndSearch<128>(*this, dataset); break; default:assert(!"record not supported"); } // If not ok then execution failed, due to wrong results of // the search algorithm. if (!this->ok) std::cerr << "Execution failed" << param << ' ' << this->name << '\n'; return ns; } }; #endif
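// Minimal sketch of the timing pattern used by searchAndMeasure above:
// wrap a batch of queries in std::chrono::steady_clock calls and report
// average nanoseconds per lookup.  The container, query set and the use
// of std::binary_search are stand-ins for the benchmarked algorithms.
#include <algorithm>
#include <chrono>
#include <vector>

inline double avg_ns_per_query(const std::vector<int> &sorted_keys,
                               const std::vector<int> &queries) {
  if (queries.empty()) return 0.0;
  long hits = 0;  // accumulate a side effect so the loop is not optimized away
  auto t0 = std::chrono::steady_clock::now();
  for (int q : queries)
    hits += std::binary_search(sorted_keys.begin(), sorted_keys.end(), q);
  auto t1 = std::chrono::steady_clock::now();
  double ns = std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count();
  volatile long sink = hits;  // keep 'hits' observable to the compiler
  (void)sink;
  return ns / queries.size();
}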
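// Sketch of the name -> function dispatch used by findAlgorithmAndSearch
// above: a constexpr array of (name, function pointer) tuples searched
// with std::find_if.  The two placeholder benchmarks are stand-ins.
#include <algorithm>
#include <array>
#include <string>
#include <tuple>

using BenchFn = double (*)();
inline double bench_a() { return 1.0; }  // placeholder benchmark
inline double bench_b() { return 2.0; }  // placeholder benchmark

inline double dispatch(const std::string &name) {
  constexpr std::array<std::tuple<const char *, BenchFn>, 2> table{
      std::make_tuple("a", bench_a), std::make_tuple("b", bench_b)};
  auto it = std::find_if(table.begin(), table.end(), [&name](const auto &e) {
    return name == std::get<const char *>(e);
  });
  return it == table.end() ? -1.0 : std::get<BenchFn>(*it)();  // -1.0: not found
}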
linalg.h
/** * Copyright (c) 2015, Jozef Stefan Institute, Quintelligence d.o.o. and contributors * All rights reserved. * * This source code is licensed under the FreeBSD license found in the * LICENSE file in the root directory of this source tree. */ #ifndef LINALG_H #define LINALG_H /////////////////////////////////////////////////////////////////////// // Blas Support #ifdef BLAS #define MKL_Complex8 std::complex<float> #define MKL_Complex16 std::complex<double> #define lapack_complex_float std::complex<float> #define lapack_complex_double std::complex<double> #define LAPACK_COMPLEX_CPP #ifdef AMD #include "acml.h" #elif INTEL #undef small #include "mkl.h" //#include "mkl_scalapack.h" #else #include "cblas.h" #ifdef LAPACKE #include "lapacke.h" #endif #endif #endif #include "base.h" namespace TypeCheck { template<typename T1> struct is_float { static const bool value = false; }; template<> struct is_float<float> { static const bool value = true; }; template<> struct is_float<TNum<float> > { static const bool value = true; }; template<typename T1> struct is_double { static const bool value = false; }; template<> struct is_double<double> { static const bool value = true; }; template<> struct is_double<TNum<double> > { static const bool value = true; }; template<typename T1> struct is_complex_float { static const bool value = false; }; template<> struct is_complex_float< std::complex<float> > { static const bool value = true; }; template<> struct is_complex_float< TNum<std::complex<float> > > { static const bool value = true; }; template<typename T1> struct is_complex_double { static const bool value = false; }; template<> struct is_complex_double< std::complex<double> > { static const bool value = true; }; template<> struct is_complex_double< TNum<std::complex<double> > > { static const bool value = true; }; } // the matrix dimension classificator for the (Dim parameter) enum TMatDim { mdCols = 1, mdRows = 2 }; /////////////////////////////////////////////////////////////////////// // forward declarations class TLinAlg; ////////////////////////////////////////////////////////////////////// // Miscellaneous linear algebra functions class TLAMisc { public: //Sort double array #ifdef SCALAPACK template<class TSizeTy> static void Sort(TVec<TFlt, TSizeTy> & Vec, TVec<TSizeTy, TSizeTy>& Index, const TBool& DecreseP); #endif // Dumps vector to file so Excel can read it static void SaveCsvTFltV(const TFltV& Vec, TSOut& SOut); // Dumps sparse vector to file so Matlab can read it static void SaveMatlabTFltIntKdV(const TIntFltKdV& SpV, const int& ColN, TSOut& SOut); /// Dumps sparse matrix to file so Matlab can read it static void SaveMatlabSpMat(const TVec<TIntFltKdV>& SpMat, TSOut& SOut); /// Dumps sparse matrix to file so Matlab can read it static void SaveMatlabSpMat(const TTriple<TIntV, TIntV, TFltV>& SpMat, TSOut& SOut); // Dumps vector to file so Matlab can read it static void SaveMatlabTFltV(const TFltV& m, const TStr& FName); // Dumps vector to file so Matlab can read it static void SaveMatlabTIntV(const TIntV& m, const TStr& FName); // Dumps column ColId from m to file so Matlab can read it static void SaveMatlabTFltVVCol(const TFltVV& m, int ColId, const TStr& FName); // Dumps matrix to file so Matlab can read it static void SaveMatlabTFltVV(const TFltVV& m, const TStr& FName); // Dumps matrix to the output stream so Matlab can read it static void SaveMatlabTFltVV(const TFltVV& m, TSOut& SOut); // Dumps main minor rowN x colN to file so Matlab can read it static void 
SaveMatlabTFltVVMjrSubMtrx(const TFltVV& m, int rowN, int colN, const TStr& FName); // loads matlab full matrix static void LoadMatlabTFltVV(const TStr& FNm, TVec<TFltV>& ColV); // loads matlab full matrix static void LoadMatlabTFltVV(const TStr& FNm, TFltVV& MatrixVV); // loads matlab full matrix static void LoadMatlabTFltVV(TVec<TFltV>& ColV, TSIn& SIn); // loads matlab full matrix static void LoadMatlabTFltVV(TFltVV& MatrixVV, TSIn& SIn); // prints vector to screen static void PrintTFltV(const TFltV& Vec, const TStr& VecNm); // print matrix to string static void PrintTFltVVToStr(const TFltVV& A, TStr& Out); // print matrixt to screen static void PrintTFltVV(const TFltVV& A, const TStr& MatrixNm); // print sparse matrix to screen static void PrintSpMat(const TTriple<TIntV, TIntV, TFltV>& A, const TStr& MatrixNm); // print sparse matrix to screen static void PrintSpMat(const TVec<TIntFltKdV>& A, const TStr& MatrixNm); // prints vector to screen static void PrintTIntV(const TIntV& Vec, const TStr& VecNm); // fills vector with random numbers static void FillRnd(TFltV& Vec) { TRnd Rnd(0); FillRnd(Vec.Len(), Vec, Rnd); } static void FillRnd(TFltV& Vec, TRnd& Rnd) { FillRnd(Vec.Len(), Vec, Rnd); } static void FillRnd(TFltVV& Mat) { TRnd Rnd(0); FillRnd(Mat, Rnd); } static void FillRnd(TFltVV& Mat, TRnd& Rnd) { FillRnd(Mat.Get1DVec(), Rnd); } static void FillRnd(const int& Len, TFltV& Vec, TRnd& Rnd); // set all components static void Fill(TFltVV& M, const double& Val); static void Fill(TFltV& M, const double& Val); // sets all compnents to zero static void FillZero(TFltV& Vec) { Vec.PutAll(0.0); } static void FillZero(TFltVV& M) { Fill(M, 0.0); } // set matrix to identity static void FillIdentity(TFltVV& M); static void FillIdentity(TFltVV& M, const double& Elt); // set vector to range static void FillRange(const int& Vals, TFltV& Vec); static void FillRange(const int& Vals, TIntV& Vec); template <class TVal, class TTSizeTyTy = int> static void FillRangeS(const TTSizeTyTy& Vals, TVec<TVal, TTSizeTyTy>& Vec); // sums elements in vector static int SumVec(const TIntV& Vec); static double SumVec(const TFltV& Vec); // converts full vector to sparse static void ToSpVec(const TFltV& Vec, TIntFltKdV& SpVec, const double& CutWordWgtSumPrc = 0.0); // converts sparse vector to full static void ToVec(const TIntFltKdV& SpVec, TFltV& Vec, const int& VecLen); // creates a diagonal matrix static void Diag(const TFltV& Vec, TFltVV& Mat); // creates a diagonal matrix static void Diag(const TFltV& Vec, TVec<TIntFltKdV>& Mat); // gets the maximal index of a sparse vector static int GetMaxDimIdx(const TIntFltKdV& SpVec); // gets the maximal row index of a sparse column matrix static int GetMaxDimIdx(const TVec<TIntFltKdV>& SpMat); // returns the index of the minimum element static int GetMinIdx(const TFltV& Vec); // returns a vector with a sequence starting at Min and ending at Max static void RangeV(const int& Min, const int& Max, TIntV& Res); // returns the mean value of Vec. static double Mean(const TFltV& Vec); // returns the mean value along the dimension (Dim) of Mat. See Matlab documentation - mean(). static void Mean(const TFltVV& Mat, TFltV& Vec, const TMatDim& Dim = TMatDim::mdCols); // returns standard deviation. See Matlab documentation - std(). static void Std(const TFltVV& Mat, TFltV& Vec, const int& Flag = 0, const TMatDim& Dim = TMatDim::mdCols); // returns the z-score for each element of X such that columns of X are centered to have mean 0 and scaled to have standard deviation 1. 
static void ZScore(const TFltVV& Mat, TFltVV& Vec, const int& Flag = 0, const TMatDim& Dim = TMatDim::mdCols); }; #ifdef SCALAPACK template<class TSizeTy> void TLAMisc::Sort(TVec<TFlt, TSizeTy> & Vec, TVec<TSizeTy, TSizeTy>& Index, const TBool& DecreseP) { if (Index.Empty()) { TLAMisc::FillRange(Vec.Len(), Index); } const char* id = DecreseP ? "D" : "I"; TSizeTy n = Vec.Len(); TSizeTy info; dlasrt2(id, &n, &Vec[0].Val, &Index[0], &info); } #endif template <class TVal, class TTSizeTyTy> void TLAMisc::FillRangeS(const TTSizeTyTy& Vals, TVec<TVal, TTSizeTyTy>& Vec) { //Added by Andrej if (Vec.Len() != Vals){ Vec.Gen(Vals); } for (int i = 0; i < Vals; i++){ Vec[i] = i; } }; ////////////////////////////////////////////////////////////////////// // Linear Algebra Utilities class TLAUtil { public: // generates a vector of ones with dimension dim template <class TVal, class TSizeTy> static void Ones(const int& Dim, TVec<TVal, TSizeTy>& OnesV) { if (OnesV.Len() != Dim) { OnesV.Gen(Dim); } for (int i = 0; i < Dim; i++) { OnesV[i] = 1; } } // generates a vector with i on index i template <class TVal, class TSizeTy> static void Range(const int& Dim, TVec<TVal, TSizeTy>& RangeV) { if (RangeV.Len() != Dim) { RangeV.Gen(Dim); } for (TSizeTy i = 0; i < Dim; i++) { RangeV[i] = TVal(i); } } template <class TType, class TSizeTy, bool ColMajor> static void Identity(const TSizeTy& Dim, TVVec<TType, TSizeTy, ColMajor>& X) { if (X.Empty()) { X.Gen(Dim, Dim); } EAssert(X.GetRows() == Dim && X.GetCols() == Dim); for (TSizeTy i = 0; i < Dim; i++) { X(i,i) = 1; } } // returns a sub matrix of the input matrix in range [StartRow, EndRow) x [StartCol, EndCol) template <class TType, class TSizeTy, bool ColMajor> static void SubMat(const TVVec<TType, TSizeTy, ColMajor>& Mat, const TSizeTy& StartRow, const TSizeTy& EndRow, const TSizeTy& StartCol, const TSizeTy& EndCol, TVVec<TType, TSizeTy, ColMajor>& SubMat) { EAssert(StartRow >= 0 && StartCol >= 0); EAssert(EndRow < Mat.GetRows() && EndCol < Mat.GetCols()); if (SubMat.GetRows() != EndRow - StartRow || SubMat.GetCols() != EndCol - StartCol) { SubMat.Gen(EndRow - StartRow, EndCol - StartCol); } for (TSizeTy i = StartRow; i < EndRow; i++) { for (TSizeTy j = StartCol; j < EndCol; j++) { SubMat.PutXY(i - StartRow, j - StartCol, Mat(i,j)); } } } template <class TType, class TVecVal, class TSizeTy, bool ColMajor> static void SubMat(const TVVec<TType, TSizeTy, ColMajor>& Mat, const TVec<TVecVal, TSizeTy>& ColIdxV, TVVec<TType, TSizeTy, ColMajor>& SubMat) { if (SubMat.Empty()) { SubMat.Gen(Mat.GetRows(), ColIdxV.Len()); } EAssert(SubMat.GetRows() == Mat.GetRows() && SubMat.GetCols() == ColIdxV.Len()); TVec<TType, TSizeTy> ColV; for (TSizeTy i = 0; i < ColIdxV.Len(); i++) { const TSizeTy& ColN = ColIdxV[i]; EAssert(0 <= ColN && ColN < Mat.GetCols()); Mat.GetCol(ColN, ColV); SubMat.SetCol(i, ColV); } } template <class TType, class TSizeTy, bool ColMajor> static void GetRow(const TVVec<TType, TSizeTy, ColMajor>& Mat, const TSizeTy& RowN, TVec<TType, TSizeTy>& RowV) { EAssert(0 <= RowN && RowN < Mat.GetRows()); const TSizeTy Cols = Mat.GetCols(); if (RowV.Len() != Mat.GetCols()) { RowV.Gen(Cols); } for (TSizeTy ColN = 0; ColN < Cols; ColN++) { RowV[ColN] = Mat(RowN, ColN); } } template <class TVal, class TSizeTy> static TSizeTy GetMaxIdx(const TVec<TVal, TSizeTy>& Vec) { if (Vec.Empty()) { return -1; } TSizeTy MxIdx = 0; TVal MxVal = Vec[0]; for (TSizeTy i = 1; i < Vec.Len(); i++ ) { if (Vec[i] > MxVal) { MxVal = Vec[i]; MxIdx = i; } } return MxIdx; } template <class TType, class
TSizeTy, bool ColMajor> static void CenterRows(TVVec<TType, TSizeTy, ColMajor>& X) { const TSizeTy Rows = X.GetRows(); const TSizeTy Cols = X.GetCols(); #pragma omp parallel for for (TSizeTy RowIdx = 0; RowIdx < Rows; RowIdx++) { TType RowMean = 0; for (TSizeTy ColIdx = 0; ColIdx < Cols; ColIdx++) { RowMean += X(RowIdx, ColIdx); } RowMean /= Cols; for (int ColIdx = 0; ColIdx < Cols; ColIdx++) { X(RowIdx, ColIdx) -= RowMean; } } } }; ////////////////////////////////////////////////////////////////////// // Template-ised Sparse Operations template <class TKey, class TDat> class TSparseOps { public: /// Transform sparse matrix from (row,col,val) triplets to a vector of sparse columns static void CoordinateCreateSparseColMatrix(const TVec<TKey>& RowIdxV, const TVec<TKey>& ColIdxV, const TVec<TDat>& ValV, TVec<TVec<TKeyDat<TKey, TDat> > >& ColMatrix, const TKey& Cols); /// Merge given sparse vectors using +operator on KeyDat elements with same Key value static void SparseMerge(const TVec<TKeyDat<TKey, TDat> >& SrcV1, const TVec<TKeyDat<TKey, TDat> >& SrcV2, TVec<TKeyDat<TKey, TDat> >& DstV); /// Construct sparse linear combination (DstV = p*SrcV1 + q*SrcV2) static void SparseLinComb(const double& p, const TVec<TKeyDat<TKey, TDat> >& SrcV1, const double& q, const TVec<TKeyDat<TKey, TDat> >& SrcV2, TVec<TKeyDat<TKey, TDat> >& DstV); }; typedef TSparseOps<TInt, TFlt> TSparseOpsIntFlt; template <class TKey, class TDat> void TSparseOps<TKey, TDat>::CoordinateCreateSparseColMatrix(const TVec<TKey>& RowIdxV, const TVec<TKey>& ColIdxV, const TVec<TDat>& ValV, TVec<TVec<TKeyDat<TKey, TDat> > >& ColMatrix, const TKey& Cols) { ColMatrix.Gen(Cols); EAssert(RowIdxV.Len() == ColIdxV.Len() && RowIdxV.Len() == ValV.Len()); TKey Els = RowIdxV.Len(); for (TKey ElN = 0; ElN < Els; ElN++) { ColMatrix[ColIdxV[ElN]].Add(TKeyDat<TKey, TDat>(RowIdxV[ElN], ValV[ElN])); } for (TKey ColN = 0; ColN < Cols; ColN++) { ColMatrix[ColN].Sort(); } } template <class TKey, class TDat> void TSparseOps<TKey, TDat>::SparseMerge(const TVec<TKeyDat<TKey, TDat> >& SrcV1, const TVec<TKeyDat<TKey, TDat> >& SrcV2, TVec<TKeyDat<TKey, TDat> >& DstV) { DstV.Clr(); const int Src1Len = SrcV1.Len(); const int Src2Len = SrcV2.Len(); int Src1N = 0, Src2N = 0; while (Src1N < Src1Len && Src2N < Src2Len) { if (SrcV1[Src1N].Key < SrcV2[Src2N].Key) { DstV.Add(SrcV1[Src1N]); Src1N++; } else if (SrcV1[Src1N].Key > SrcV2[Src2N].Key) { DstV.Add(SrcV2[Src2N]); Src2N++; } else { DstV.Add(TKeyDat<TKey, TDat>(SrcV1[Src1N].Key, SrcV1[Src1N].Dat + SrcV2[Src2N].Dat)); Src1N++; Src2N++; } } while (Src1N < Src1Len) { DstV.Add(SrcV1[Src1N]); Src1N++; } while (Src2N < Src2Len) { DstV.Add(SrcV2[Src2N]); Src2N++; } } template <class TKey, class TDat> void TSparseOps<TKey, TDat>::SparseLinComb(const double& p, const TVec<TKeyDat<TKey, TDat> >& SrcV1, const double& q, const TVec<TKeyDat<TKey, TDat> >& SrcV2, TVec<TKeyDat<TKey, TDat> >& DstV) { DstV.Clr(); const int Src1Len = SrcV1.Len(); const int Src2Len = SrcV2.Len(); int Src1N = 0, Src2N = 0; while (Src1N < Src1Len && Src2N < Src2Len) { if (SrcV1[Src1N].Key < SrcV2[Src2N].Key) { DstV.Add(TKeyDat<TKey, TDat>(SrcV1[Src1N].Key, p * SrcV1[Src1N].Dat)); Src1N++; } else if (SrcV1[Src1N].Key > SrcV2[Src2N].Key) { DstV.Add(TKeyDat<TKey, TDat>(SrcV2[Src2N].Key, q * SrcV2[Src2N].Dat)); Src2N++; } else { DstV.Add(TKeyDat<TKey, TDat>(SrcV1[Src1N].Key, p * SrcV1[Src1N].Dat + q * SrcV2[Src2N].Dat)); Src1N++; Src2N++; } } while (Src1N < Src1Len) { DstV.Add(TKeyDat<TKey, TDat>(SrcV1[Src1N].Key, p * SrcV1[Src1N].Dat)); Src1N++; } 
while (Src2N < Src2Len) { DstV.Add(TKeyDat<TKey, TDat>(SrcV2[Src2N].Key, q * SrcV2[Src2N].Dat)); Src2N++; } } /////////////////////////////////////////////////////////////////////// /// Matrix. Class for matrix-vector and matrix-matrix operations class TMatrix { private: bool Transposed; protected: virtual void PMultiply(const TFltVV& B, int ColId, TFltV& Result) const = 0; virtual void PMultiplyT(const TFltVV& B, int ColId, TFltV& Result) const = 0; virtual void PMultiply(const TFltV& Vec, TFltV& Result) const = 0; virtual void PMultiplyT(const TFltV& Vec, TFltV& Result) const = 0; virtual void PMultiply(const TFltVV& B, TFltVV& Result) const { FailR("TMatrix PMultiply(const TFltVV& B, TFltVV& Result) not implemented"); } virtual void PMultiplyT(const TFltVV& B, TFltVV& Result) const { FailR("TMatrix PMultiplyT(const TFltVV& B, TFltVV& Result) not implemented"); } virtual int PGetRows() const = 0; virtual int PGetCols() const = 0; public: TMatrix() : Transposed(false) {} virtual ~TMatrix() { } // Result = A * B(:,ColId) void Multiply(const TFltVV& B, int ColId, TFltV& Result) const { if (Transposed) { PMultiplyT(B, ColId, Result); } else { PMultiply(B, ColId, Result); } } // Result = A' * B(:,ColId) void MultiplyT(const TFltVV& B, int ColId, TFltV& Result) const { if (Transposed) { PMultiply(B, ColId, Result); } else { PMultiplyT(B, ColId, Result); } } // Result = A * Vec void Multiply(const TFltV& Vec, TFltV& Result) const { if (Transposed) { PMultiplyT(Vec, Result); } else { PMultiply(Vec, Result); } } // Result = A' * Vec void MultiplyT(const TFltV& Vec, TFltV& Result) const{ if (Transposed) { PMultiply(Vec, Result); } else { PMultiplyT(Vec, Result); } } // Result = A * B void Multiply(const TFltVV& B, TFltVV& Result) const { if (Transposed) { PMultiplyT(B, Result); } else { PMultiply(B, Result); } } // Result = A' * B void MultiplyT(const TFltVV& B, TFltVV& Result) const { if (Transposed) { PMultiply(B, Result); } else { PMultiplyT(B, Result); } } // number of rows int GetRows() const { return Transposed ? PGetCols() : PGetRows(); } // number of columns int GetCols() const { return Transposed ? 
PGetRows() : PGetCols(); } virtual void Transpose() { Transposed = !Transposed; } void Save(TSOut& SOut) const { TBool(Transposed).Save(SOut); } void Load(TSIn& SIn) { Transposed = TBool(SIn); } }; /////////////////////////////////////////////////////////////////////// // Sparse-Column-Matrix // matrix is given with columns as sparse vectors class TSparseColMatrix : public TMatrix { public: // number of rows and columns of matrix TInt RowN, ColN; // vector of sparse columns TVec<TIntFltKdV> ColSpVV; protected: // Result = A * B(:,ColId) virtual void PMultiply(const TFltVV& B, int ColId, TFltV& Result) const; // Result = A * Vec virtual void PMultiply(const TFltV& Vec, TFltV& Result) const; // Result = A' * B(:,ColId) virtual void PMultiplyT(const TFltVV& B, int ColId, TFltV& Result) const; // Result = A' * Vec virtual void PMultiplyT(const TFltV& Vec, TFltV& Result) const; // Result = A * B virtual void PMultiply(const TFltVV& B, TFltVV& Result) const; // Result = A' * B virtual void PMultiplyT(const TFltVV& B, TFltVV& Result) const; void Init(); int PGetRows() const { return RowN; } int PGetCols() const { return ColN; } public: TSparseColMatrix(): TMatrix() {} TSparseColMatrix(const int& _RowN, const int& _ColN): RowN(_RowN), ColN(_ColN), ColSpVV() {} TSparseColMatrix(const TVec<TIntFltKdV>& _ColSpVV): TMatrix(), ColSpVV(_ColSpVV) { Init(); } TSparseColMatrix(const TVec<TIntFltKdV>& _ColSpVV, const int& _RowN, const int& _ColN) : TMatrix(), RowN(_RowN), ColN(_ColN), ColSpVV(_ColSpVV) {} void Save(TSOut& SOut) { RowN.Save(SOut); ColN.Save(SOut); ColSpVV.Save(SOut); } void Load(TSIn& SIn) { RowN.Load(SIn); ColN.Load(SIn); ColSpVV = TVec<TIntFltKdV>(SIn); } }; /////////////////////////////////////////////////////////////////////// // Sparse-Row-Matrix // matrix is given with rows as sparse vectors class TSparseRowMatrix : public TMatrix { public: // number of rows and columns of matrix TInt RowN, ColN; // vector of sparse rows TVec<TIntFltKdV> RowSpVV; protected: // Result = A * B(:,ColId) virtual void PMultiply(const TFltVV& B, int ColId, TFltV& Result) const; // Result = A * Vec virtual void PMultiply(const TFltV& Vec, TFltV& Result) const; // Result = A' * B(:,ColId) virtual void PMultiplyT(const TFltVV& B, int ColId, TFltV& Result) const; // Result = A' * Vec virtual void PMultiplyT(const TFltV& Vec, TFltV& Result) const; // Result = A * B virtual void PMultiply(const TFltVV& B, TFltVV& Result) const { FailR("Not implemented yet"); } // TODO // Result = A' * B virtual void PMultiplyT(const TFltVV& B, TFltVV& Result) const { FailR("Not implemented yet"); } // TODO void Init(); int PGetRows() const { return RowN; } int PGetCols() const { return ColN; } public: TSparseRowMatrix(): TMatrix() {} TSparseRowMatrix(const TVec<TIntFltKdV>& _RowSpVV): TMatrix(), RowSpVV(_RowSpVV) { Init(); } TSparseRowMatrix(const TVec<TIntFltKdV>& _RowSpVV, const int& _RowN, const int& _ColN): TMatrix(), RowN(_RowN), ColN(_ColN), RowSpVV(_RowSpVV) {} // loads Matlab sparse matrix format: row, column, value. // Indexes start with 1. 
TSparseRowMatrix(const TStr& MatlabMatrixFNm); void Save(TSOut& SOut) { RowN.Save(SOut); ColN.Save(SOut); RowSpVV.Save(SOut); } void Load(TSIn& SIn) { RowN.Load(SIn); ColN.Load(SIn); RowSpVV = TVec<TIntFltKdV>(SIn); } }; /////////////////////////////////////////////////////////////////////// // Full-Col-Matrix // matrix is given with columns of full vectors class TFullColMatrix : public TMatrix { public: // number of rows and columns of matrix TInt RowN, ColN; // vector of sparse columns TVec<TFltV> ColV; protected: // Result = A * B(:,ColId) virtual void PMultiply(const TFltVV& B, int ColId, TFltV& Result) const; // Result = A * Vec virtual void PMultiply(const TFltV& Vec, TFltV& Result) const; // Result = A' * B(:,ColId) virtual void PMultiplyT(const TFltVV& B, int ColId, TFltV& Result) const; // Result = A' * Vec virtual void PMultiplyT(const TFltV& Vec, TFltV& Result) const; // Result = A * B virtual void PMultiply(const TFltVV& B, TFltVV& Result) const { FailR("Not implemented yet"); } // TODO // Result = A' * B virtual void PMultiplyT(const TFltVV& B, TFltVV& Result) const { FailR("Not implemented yet"); } // TODO int PGetRows() const { return RowN; } int PGetCols() const { return ColN; } public: TFullColMatrix(): TMatrix() {} // loads matrix saved in matlab with command: // save -ascii Matrix.dat M TFullColMatrix(const TStr& MatlabMatrixFNm); TFullColMatrix(TVec<TFltV>& RowVV); void Save(TSOut& SOut) { RowN.Save(SOut); ColN.Save(SOut); ColV.Save(SOut); } void Load(TSIn& SIn) { RowN.Load(SIn); ColN.Load(SIn); ColV.Load(SIn); } }; /////////////////////////////////////////////////////////////////////// // Structured-Covariance-Matrix // matrix is a product of two sparse matrices X Y' (column examples, row features), // which are centered implicitly by using two dense mean vectors class TStructuredCovarianceMatrix : public TMatrix { private: // number of rows and columns of matrix int XRows, YRows; int Samples; // mean vectors TFltV MeanX; TFltV MeanY; TTriple<TIntV, TIntV, TFltV> X; TTriple<TIntV, TIntV, TFltV> Y; protected: // Result = A * B(:,ColId) virtual void PMultiply(const TFltVV& B, int ColId, TFltV& Result) const; // Result = A * B virtual void PMultiply(const TFltVV& B, TFltVV& Result) const; // Result = A * Vec virtual void PMultiply(const TFltV& Vec, TFltV& Result) const; // Result = A' * B(:,ColId) virtual void PMultiplyT(const TFltVV& B, int ColId, TFltV& Result) const; // Result = A' * B virtual void PMultiplyT(const TFltVV& B, TFltVV& Result) const; // Result = A' * Vec virtual void PMultiplyT(const TFltV& Vec, TFltV& Result) const; int PGetRows() const { return XRows; } int PGetCols() const { return YRows; } public: TStructuredCovarianceMatrix() : TMatrix() {} TStructuredCovarianceMatrix(const int XRowN_, const int YRowN_, const int SampleN_, const TFltV& MeanX_, const TFltV& MeanY_, const TTriple<TIntV, TIntV, TFltV>& X_, const TTriple<TIntV, TIntV, TFltV>& Y_) : TMatrix(), XRows(XRowN_), YRows(YRowN_), Samples(SampleN_), MeanX(MeanX_), MeanY(MeanY_), X(X_), Y(Y_) {}; void Save(TSOut& SOut) { SOut.Save(XRows); SOut.Save(YRows); SOut.Save(Samples); MeanX.Save(SOut); MeanY.Save(SOut); X.Save(SOut); Y.Save(SOut); } void Load(TSIn& SIn) { SIn.Load(XRows); SIn.Load(YRows); SIn.Load(Samples); MeanX.Load(SIn); MeanY.Load(SIn); X.Load(SIn); Y.Load(SIn); } }; /////////////////////////////////////////////////////////////////////// // Basic Linear Algebra operations class TLinAlg { public: /// Result = <x, y> template <class TType, class TSizeTy = int, bool ColMajor = false> 
inline static double DotProduct(const TVec<TType, TSizeTy>& x, const TVec<TType, TSizeTy>& y); /// Result = <X(:,ColId), y> inline static double DotProduct(const TVec<TFltV>& X, int ColId, const TFltV& y); /// Result = <X[ColId], y> inline static double DotProduct(const TVec<TIntFltKdV>& X, int ColId, const TFltV& y); /// Result = <X(:,ColId), Y(:,ColId)> template <class TType, class TSizeTy = int, bool ColMajor = false> inline static double DotProduct(const TVVec<TType, TSizeTy, ColMajor>& X, int ColIdX, const TVVec<TType, TSizeTy, ColMajor>& Y, int ColIdY); /// Result = <X(:,ColId), y> template <class TType, class TSizeTy = int, bool ColMajor = false> inline static double DotProduct(const TVVec<TType, TSizeTy, ColMajor>& X, int ColId, const TVec<TType, TSizeTy>& y); /// Result = <x, y> inline static double DotProduct(const TIntFltKdV& x, const TIntFltKdV& y); /// Result = <x, y> template <class TType, class TSizeTy = int, bool ColMajor = false> inline static double DotProduct(const TVec<TType, TSizeTy>& x, const TVec<TIntFltKd>& y); /// Result = <X(:,ColId), y> template <class TType, class TSizeTy = int, bool ColMajor = false> inline static double DotProduct(const TVVec<TType, TSizeTy, ColMajor>& X, int ColId, const TIntFltKdV& y); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void OuterProduct(const TVec<TType, TSizeTy>& x, const TVec<TType, TSizeTy>& y, TVVec<TType, TSizeTy, ColMajor>& Z); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void LinComb(const double& p, const TVec<TType, TSizeTy>& x, const double& q, const TVec<TType, TSizeTy>& y, TVec<TType, TSizeTy>& z); //TODO this will work only for glib type TFlt template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void LinCombInPlace(const TType& alpha, const TVec<TNum<TType>, TSizeTy>& x, const TType& beta, TVec<TNum<TType>, TSizeTy>& y); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void LinComb(const double& p, const TVVec<TType, TSizeTy, ColMajor>& X, const double& q, const TVVec<TType, TSizeTy, ColMajor>& Y, TVVec<TType, TSizeTy, ColMajor>& Z); // z = p * x + q * y inline static void LinComb(const double& p, const TIntFltKdV& x, const double& q, const TIntFltKdV& y, TIntFltKdV& z); inline static void LinComb(const double& p, const TFltVV& X, int ColId, const double& q, const TFltV& y, TFltV& z); inline static void LinComb(const double& p, const TFltVV& X, int DimId, const double& q, const TFltV& y, TFltV& z, int Dim); inline static void LinComb(const double& p, const TFltVV& X, const double& q, const TFltVV& Y, TFltVV& Z); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void ConvexComb(const double& p, const TVec<TType, TSizeTy>& x, const TVec<TType, TSizeTy>& y, TVec<TType, TSizeTy>& z); //this will fail if TType != TFlt, Specialization should be used #ifdef BLAS template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void AddVec(const TType& k, const TVec<TNum<TType>, TSizeTy>& x, TVec<TNum<TType>, TSizeTy>& y); #endif template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void AddVec(const double& k, const TVec<TType, TSizeTy>& x, const TVec<TType, TSizeTy>& y, TVec<TType, TSizeTy>& z); inline static void AddVec(const double& k, const TVec<TFltV>& X, int ColId, const TFltV& y, TFltV& z); inline static void AddVec(const double& k, const TFltVV& X, int ColId, const TFltV& y, TFltV& z); // z := x + y template <class 
TType, class TSizeTy = int, bool ColMajor = false> inline static void AddVec(const TVec<TType, TSizeTy>& x, const TVec<TType, TSizeTy>& y, TVec<TType, TSizeTy>& z); inline static void AddVec(const double& k, const TIntFltKdV& x, const TFltV& y, TFltV& z); // z := k * X[ColId] + y inline static void AddVec(const double& k, const TVec<TIntFltKdV>& X, int ColId, const TFltV& y, TFltV& z); inline static void AddVec(const double& k, const TIntFltKdV& x, TFltV& y); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void AddVec(double k, const TVVec<TType, TSizeTy, ColMajor>& X, TSizeTy ColIdX, TVVec<TType, TSizeTy, ColMajor>& Y, TSizeTy ColIdY); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void AddVec(const double& k, const TVec<TType, TSizeTy>& x, TVVec<TType, TSizeTy, ColMajor>& Y, const TSizeTy& ColIdY); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void AddVec(double k, const TVVec<TType, TSizeTy, ColMajor>& X, int ColId, TVec<TType, TSizeTy>& Result); // z = x + y template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void AddVec(const TIntFltKdV& x, const TIntFltKdV& y, TIntFltKdV& z); template <class TType, class TSizeTy = int> inline static double SumVec(const TVec<TType, TSizeTy>& x); inline static double SumVec(const TIntFltKdV& x); template <class TType, class TSizeTy = int> inline static double SumVec(double k, const TVec<TType, TSizeTy>& x, const TVec<TType, TSizeTy>& y); // Result = ||x-y||^2 (Euclidian); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static double EuclDist2(const TVec<TType, TSizeTy>& x, const TVec<TType, TSizeTy>& y); // Result = ||x-y||^2 (Euclidian); inline static double EuclDist2(const TFltPr& x, const TFltPr& y); template <class TType, class TSizeTy = int> inline static double EuclDist(const TVec<TType, TSizeTy>& x, const TVec<TType, TSizeTy>& y); inline static double EuclDist(const TFltPr& x, const TFltPr& y); // Result = ||A||_F (Frobenious); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static TType Frob(const TVVec<TNum<TType>, TSizeTy, ColMajor> &A); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static double FrobDist2(const TVVec<TType, TSizeTy, ColMajor>& A, const TVVec<TType, TSizeTy, ColMajor>& B); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static double FrobDist2(const TVec<TType, TSizeTy>& A, const TVec<TType, TSizeTy>& B); template <class TType, class TSizeTy = int, bool ColMajor = false, class IndexType = TInt> inline static void Sparse(const TVVec<TType, TSizeTy, ColMajor>& A, TTriple<TVec<IndexType, TSizeTy>, TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& B); template <class TType, class TSizeTy = int, bool ColMajor = false, class IndexType = TInt> inline static void Sparse(const TVVec<TType, TSizeTy, ColMajor>& A, TVec<TIntFltKdV>& B); template <class TType, class TSizeTy = int, bool ColMajor = false, class IndexType = TInt> inline static void Full(const TTriple<TVec<IndexType, TSizeTy>, TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& A, TVVec<TType, TSizeTy, ColMajor>& B, const int Rows, const int Cols); // Sparse to dense transform template <class TType, class TSizeTy = int, bool ColMajor = false, class IndexType = TInt> inline static void Full(const TVec<TIntFltKdV, TSizeTy>& A, TVVec<TType, TSizeTy, ColMajor>& B, TSizeTy Rows); template <class TType, class TSizeTy = int, bool ColMajor = false, class 
IndexType = TInt> inline static void Transpose(const TTriple<TVec<IndexType, TSizeTy>, TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& A, TTriple<TVec<IndexType, TSizeTy>, TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& At); inline static void Transpose(const TVec<TIntFltKdV>& A, TVec<TIntFltKdV>& At, int Rows = -1); // Sign inline static void Sign(const TVec<TIntFltKdV>& Mat, TVec<TIntFltKdV>& Mat2); inline static void Convert(const TVec<TPair<TIntV, TFltV>>& A, TTriple<TIntV, TIntV, TFltV>& B); inline static void Convert(const TVec<TIntFltKdV>& A, TTriple<TIntV, TIntV, TFltV>&B); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void Sum(const TVVec<TType, TSizeTy, ColMajor>& X, TVec<TType, TSizeTy>& y, const int Dimension = 1); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static double SumRow(const TVVec<TType, TSizeTy, ColMajor>& X, const int& RowN); template <class TType, class TSizeTy = int, bool ColMajor = false, class IndexType = TInt> inline static void Sum(const TTriple<TVec<IndexType, TSizeTy>, TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& X, TVec<TType, TSizeTy>& y, const int Dimension = 1); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static double Norm2(const TVec<TType, TSizeTy>& x); // ||x|| (Euclidian); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static double Norm(const TVec<TType, TSizeTy>& x); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static double Normalize(TVec<TType, TSizeTy>& x); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void NormalizeColumn(TVVec<TType, TSizeTy, ColMajor>& X, const TSizeTy& ColId); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void NormalizeColumns(TVVec<TType, TSizeTy, ColMajor>& X); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void NormalizeRows(TVVec<TType, TSizeTy, ColMajor>& X); #ifdef INTEL // TEST template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void NormalizeColumns(TVVec<TType, TSizeTy, ColMajor>& X, TBool ColumnMajor); #endif template <class TType, class TSizeTy = int, bool ColMajor = false, class IndexType = TInt> inline static void NormalizeColumns(TTriple<TVec<IndexType, TSizeTy>, TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& X); // Normalize the columns of X template<class TSizeTy = int> inline static void NormalizeColumns(TVec<TIntFltKdV, TSizeTy>& X); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static double FrobNorm2(const TVVec<TType, TSizeTy, ColMajor>& X); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static double FrobNorm(const TVVec<TType, TSizeTy, ColMajor>& X); // ||x||^2 (Euclidian), x is sparse template<class TSizeTy = int> inline static double Norm2(const TVec<TIntFltKdV, TSizeTy>& x); // ||x|| (Euclidian), x is sparse template<class TSizeTy = int> inline static double Norm(const TVec<TIntFltKdV, TSizeTy>& x); // ||X(:, ColId)|| (Euclidian), x is sparse template<class TSizeTy = int> inline static double Norm(const TVec<TIntFltKdV, TSizeTy>& x, const int& ColId); // x := x / ||x||, x is sparse template<class TSizeTy = int> inline static void Normalize(TVec<TIntFltKdV>& x); // ||X(:,ColId)||^2 (Euclidian); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static double Norm2(const TVVec<TType, TSizeTy, ColMajor>& X, int ColId); template
<class TType, class TSizeTy = int, bool ColMajor = false> inline static double Norm(const TVVec<TType, TSizeTy, ColMajor>& X, int ColId); // L1 norm of x (Sum[|xi|, i = 1..n]); template <class TType, class TSizeTy = int> inline static double NormL1(const TVec<TType, TSizeTy>& x); template <class TType, class TSizeTy = int> inline static double NormL1(double k, const TVec<TType, TSizeTy>& x, const TVec<TType, TSizeTy>& y); // L1 norm of x (Sum[|xi|, i = 1..n]); inline static double NormL1(const TIntFltKdV& x); template <class TType, class TSizeTy = int> inline static void NormalizeL1(TVec<TType, TSizeTy>& x); // x := x / ||x||_1 inline static void NormalizeL1(TIntFltKdV& x); template <class TType, class TSizeTy = int> inline static double NormLinf(const TVec<TType, TSizeTy>& x); // Linf norm of x (Max{|xi|, i = 1..n}); inline static double NormLinf(const TIntFltKdV& x); template <class TType, class TSizeTy = int> inline static void NormalizeLinf(TVec<TType, TSizeTy>& x); // x := x / ||x||_inf, , x is sparse inline static void NormalizeLinf(TIntFltKdV& x); inline static void GetColNormV(const TFltVV& X, TFltV& ColNormV); // stores the norm of all the columns into the output vector inline static void GetColNorm2V(const TFltVV& X, TFltV& ColNormV); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static int GetRowMaxIdx(const TVVec<TType, TSizeTy, ColMajor>& X, const TSizeTy& RowN); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static int GetColMaxIdx(const TVVec<TType, TSizeTy, ColMajor>& X, const int& ColN); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void GetRowMaxIdxV(const TVVec<TType, TSizeTy, ColMajor>& X, TVec<TInt, TSizeTy>& IdxV); // find the index of maximum elements for each col of X template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void GetColMaxIdxV(const TVVec<TType, TSizeTy, ColMajor>& X, TVec<TInt, TSizeTy>& IdxV); template <class TType, class TSizeTy = int> inline static void MultiplyScalar(const double& k, TVec<TType, TSizeTy>& x); // find the index of maximum elements for a given each col of X inline static int GetColMinIdx(const TFltVV& X, const int& ColN); // find the index of maximum elements for each col of X inline static void GetColMinIdxV(const TFltVV& X, TIntV& IdxV); template <class TVal> inline static TVal GetColMin(const TVVec<TVal>& X, const int& ColN); template <class TVal> inline static void GetColMinV(const TVVec<TVal>& X, TVec<TVal>& ValV); template <class TType, class TSizeTy = int> inline static void MultiplyScalar(const double& k, const TVec<TType, TSizeTy>& x, TVec<TType, TSizeTy>& y); // y := k * x inline static void MultiplyScalar(const double& k, const TIntFltKdV& x, TIntFltKdV& y); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void MultiplyScalar(const double& k, const TVVec<TType, TSizeTy, ColMajor>& X, TVVec<TType, TSizeTy, ColMajor>& Y); // Y := k * X template <class TSizeTy = int> inline static void MultiplyScalar(const double& k, const TVec<TIntFltKdV, TSizeTy>& X, TVec<TIntFltKdV, TSizeTy>& Y); // y := A * x template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void Multiply(const TVVec<TType, TSizeTy, ColMajor>& A, const TVec<TType, TSizeTy>& x, TVec<TType, TSizeTy>& y); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void Multiply(const TVVec<TType, TSizeTy, ColMajor>& A, const TVec<TType, TSizeTy>& x, TVVec<TType, TSizeTy, 
ColMajor>& C, TSizeTy ColId); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void Multiply(const TVVec<TType, TSizeTy, ColMajor>& A, const TVVec<TType, TSizeTy, ColMajor>& B, int ColId, TVec<TType, TSizeTy>& y); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void Multiply(const TVVec<TType, TSizeTy, ColMajor>& A, const TVVec<TType, TSizeTy, ColMajor>& B, int ColIdB, TVVec<TType, TSizeTy, ColMajor>& C, int ColIdC); //LAPACKE stuff #ifdef LAPACKE // Tested in other function //A is rewritten in place with orthogonal matrix Q template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void QRbasis(TVVec<TType, TSizeTy, ColMajor>& A); template <class TType, class TSizeTy, bool ColMajor> inline static void QRbasis(const TVVec<TType, TSizeTy, ColMajor>& A, TVVec<TType, TSizeTy, ColMajor>& Q); // Tested in other function //A is rewritten in place with orthogonal matrix Q (column pivoting to improve stability); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void QRcolpbasis(TVVec<TType, TSizeTy, ColMajor>& A); // TEST template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void QRcolpbasis(const TVVec<TType, TSizeTy, ColMajor>& A, TVVec<TType, TSizeTy, ColMajor>& Q); // TEST //S S option ensures that A is not modified template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void thinSVD(const TVVec<TType, TSizeTy, ColMajor>& A, TVVec<TType, TSizeTy, ColMajor>& U, TVec<TType, TSizeTy>& S, TVVec<TType, TSizeTy, ColMajor>& VT); static void SVDFactorization(const TFltVV& A, TFltVV& U, TFltV& Sing, TFltVV& VT) { // data used for factorization int NumOfRows_Matrix = A.GetRows(); int NumOfCols_Matrix = A.GetCols(); // handle edge cases where the factorization is trivial. Double and float only! if (NumOfRows_Matrix == 1) { U.Gen(1, 1); U(0, 0) = 1; VT = A; Sing.Gen(1); // normalize VT and set Sing[0] = oldnorm(VT) TFltV& RawV = VT.Get1DVec(); Sing[0] = TLinAlg::Normalize(RawV); return; } else if (NumOfCols_Matrix == 1) { VT.Gen(1, 1); VT(0, 0) = 1; U = A; Sing.Gen(1); // normalize U and set Sing[0] = oldnorm(U) TFltV& RawV = U.Get1DVec(); Sing[0] = TLinAlg::Normalize(RawV); return; } int LeadingDimension_Matrix = NumOfCols_Matrix; int Matrix_Layout = LAPACK_ROW_MAJOR; // preperation for factorization Sing.Gen(MIN(NumOfRows_Matrix, NumOfCols_Matrix)); TFltV UpDiag, TauQ, TauP; UpDiag.Gen(MIN(NumOfRows_Matrix, NumOfCols_Matrix) - 1); TauQ.Gen(MIN(NumOfRows_Matrix, NumOfCols_Matrix)); TauP.Gen(MIN(NumOfRows_Matrix, NumOfCols_Matrix)); // bidiagonalization of Matrix TFltVV M = A; LAPACKE_dgebrd(Matrix_Layout, NumOfRows_Matrix, NumOfCols_Matrix, &M(0, 0).Val, LeadingDimension_Matrix, &Sing[0].Val, &UpDiag[0].Val, &TauQ[0].Val, &TauP[0].Val); // matrix U used in the SVD factorization U = M; LAPACKE_dorgbr(Matrix_Layout, 'Q', NumOfRows_Matrix, MIN(NumOfRows_Matrix, NumOfCols_Matrix), NumOfCols_Matrix, &U(0, 0).Val, LeadingDimension_Matrix, &TauQ[0].Val); // matrix VT used in the SVD factorization VT = M; LAPACKE_dorgbr(Matrix_Layout, 'P', MIN(NumOfRows_Matrix, NumOfCols_Matrix), NumOfCols_Matrix, NumOfRows_Matrix, &VT(0, 0).Val, LeadingDimension_Matrix, &TauP[0].Val); // factorization TFltVV C(U.GetCols(), 1); char UpperLower = NumOfRows_Matrix >= NumOfCols_Matrix ? 
'U' : 'L'; int LeadingDimension_VT = VT.GetCols(); int LeadingDimension_U = U.GetCols(); LAPACKE_dbdsqr(Matrix_Layout, UpperLower, Sing.Len(), VT.GetCols(), U.GetRows(), 0, &Sing[0].Val, &UpDiag[0].Val, &VT(0, 0).Val, LeadingDimension_VT, &U(0, 0).Val, LeadingDimension_U, &C(0, 0).Val, 1); } static void SVDSolve(const TFltVV& A, TFltV& x, const TFltV& b, const double& EpsSing) { Assert(A.GetRows() == b.Len()); // data used for solution int NumOfRows_Matrix = A.GetRows(); int NumOfCols_Matrix = A.GetCols(); // generating the SVD factorization TFltVV U, VT, M = A; TFltV Sing; SVDFactorization(M, U, Sing, VT); // generating temporary solution x.Gen(NumOfCols_Matrix); TLAMisc::FillZero(x); TFltV ui; ui.Gen(U.GetRows()); TFltV vi; vi.Gen(VT.GetCols()); double Scalar; int i = 0; while (i < MIN(NumOfRows_Matrix, NumOfCols_Matrix) && Sing[i].Val > EpsSing*Sing[0]) { U.GetCol(i, ui); VT.GetRow(i, vi); Scalar = TLinAlg::DotProduct(ui, b) / Sing[i].Val; TLinAlg::AddVec(Scalar, vi, x); i++; } } #endif static int ComputeThinSVD(const TMatrix& X, const int& k, TFltVV& U, TFltV& s, TFltVV& V, const int Iters = 2, const double Tol = 1e-6); #ifdef INTEL template <class TType, class TSizeTy, bool ColMajor = false> inline static void MultiplySF(const TTriple<TVec<TNum<TSizeTy>, TSizeTy>, TVec<TNum<TSizeTy>, TSizeTy>, TVec<TType, TSizeTy>>& A, const TVVec<TType, TSizeTy, false>& B, TVVec<TType, TSizeTy, ColMajor>& C, const TStr& transa = TStr("N"), const int& format = 0); template <class IndexType = TInt, class TType, class TSizeTy = int, bool ColMajor = false> inline static void MultiplyFS(TVVec<TType, TSizeTy, ColMajor>& B, const TTriple<TVec<IndexType, TSizeTy>, TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& A, TVVec<TType, TSizeTy, ColMajor>& C); #endif // y := A * x template <class IndexType = TInt, class TType, class TSizeTy = int, bool ColMajor = false> inline static void Multiply(const TVVec<TType, TSizeTy, ColMajor>& A, const TPair<TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& x, TVec<TType, TSizeTy>& y); //y := x' * A ... row data!! 
template <class IndexType = TInt, class TType, class TSizeTy = int, bool ColMajor = false> inline static void MultiplyT(const TPair<TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& x, const TVVec<TType, TSizeTy, ColMajor>& A, TVec<TType, TSizeTy>& y); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void MultiplyT(const TVVec<TNum<TType>, TSizeTy, ColMajor>& A, const TVec<TNum<TType>, TSizeTy>& x, TVec<TNum<TType>, TSizeTy>& y); #ifdef BLAS typedef enum { NOTRANS = 0, TRANS = 1 } TLinAlgBlasTranspose; template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void Multiply(const TVVec<TNum<TType>, TSizeTy, ColMajor>& A, const TVVec<TNum<TType>, TSizeTy, ColMajor>& B, TVVec<TNum<TType>, TSizeTy, ColMajor>& C, const int& BlasTransposeFlagA, const int& BlasTransposeFlagB); #endif #ifdef BLAS template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void Multiply(const TVVec<TNum<TType>, TSizeTy, ColMajor>& A, const TVec<TNum<TType>, TSizeTy>& x, TVec<TNum<TType>, TSizeTy>& y, const int& BlasTransposeFlagA, TType alpha = 1.0, TType beta = 0.0); #endif template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void Multiply(const TVVec<TType, TSizeTy, ColMajor>& A, const TVVec<TType, TSizeTy, ColMajor>& B, TVVec<TType, TSizeTy, ColMajor>& C); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void MultiplyT(const TVVec<TType, TSizeTy, ColMajor>& A, const TVVec<TType, TSizeTy, ColMajor>& B, TVVec<TType, TSizeTy, ColMajor>& C); template <class IndexType = TInt, class TType, class TSizeTy = int, bool ColMajor = false> inline static void Multiply(const TVVec<TType, TSizeTy, ColMajor>& A, const TTriple<TVec<IndexType, TSizeTy>, TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& B, TVVec<TType, TSizeTy, ColMajor>& C); template <class IndexType = TInt, class TType, class TSizeTy = int, bool ColMajor = false> inline static void MultiplyT(const TVVec<TType, TSizeTy, ColMajor>& A, const TTriple<TVec<IndexType, TSizeTy>, TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& B, TVVec<TType, TSizeTy, ColMajor>& C); //#if !defined(INTEL) || defined(INDEX_64); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void Multiply(const TTriple<TVec<TNum<TSizeTy>, TSizeTy>, TVec<TNum<TSizeTy>, TSizeTy>, TVec<TType, TSizeTy>>& A, const TVVec<TType, TSizeTy, ColMajor>& B, TVVec<TType, TSizeTy, ColMajor>& C); template <class TType, class TSizeTy = int, bool ColMajor = false> inline static void MultiplyT(const TTriple<TVec<TNum<TSizeTy>, TSizeTy>, TVec<TNum<TSizeTy>, TSizeTy>, TVec<TType, TSizeTy>>& A, const TVVec<TType, TSizeTy, ColMajor>& B, TVVec<TType, TSizeTy, ColMajor>& C); inline static void Multiply(const TFltVV& A, const TVec<TIntFltKdV>& B, TFltVV& C); // C:= A' * B template <class IndexType = TInt, class TType, class TSizeTy = int, bool ColMajor = false> inline static void MultiplyT(const TVVec<TType, TSizeTy, ColMajor>& A, const TVec<TVec<TKeyDat<IndexType, TType>, TSizeTy>, TSizeTy>& B, TVVec<TType, TSizeTy, ColMajor>& C); inline static void Multiply(const TVec<TIntFltKdV>& A, const TFltVV& B, TFltVV& C, const int RowsA = -1); inline static void MultiplyT(const TVec<TIntFltKdV>& A, const TFltVV& B, TFltVV& C); inline static void Multiply(const TVec<TIntFltKdV>& A, const TVec<TIntFltKdV>& B, TFltVV& C, const int RowsA = -1); inline static void MultiplyT(const TVec<TIntFltKdV>& A, const TVec<TIntFltKdV>& B, TFltVV& C); typedef enum { GEMM_NO_T = 0, GEMM_A_T = 
1, GEMM_B_T = 2, GEMM_C_T = 4 } TLinAlgGemmTranspose;
	template <class TType, class TSizeTy = int, bool ColMajor = false>
	inline static void Gemm(const double& Alpha, const TVVec<TType, TSizeTy, ColMajor>& A, const TVVec<TType, TSizeTy, ColMajor>& B, const double& Beta, const TVVec<TType, TSizeTy, ColMajor>& C, TVVec<TType, TSizeTy, ColMajor>& D, const int& TransposeFlags);
	typedef enum { DECOMP_SVD } TLinAlgInverseType;
	template <class TType, class TSizeTy = int, bool ColMajor = false>
	inline static void Inverse(const TVVec<TType, TSizeTy, ColMajor>& A, TVVec<TType, TSizeTy, ColMajor>& B, const TLinAlgInverseType& DecompType);
	// subtypes of finding an inverse (works only for TFltVV, because of TSvd)
	template <class TType, class TSizeTy = int, bool ColMajor = false>
	inline static void InverseSVD(const TVVec<TType, TSizeTy, ColMajor>& A, TVVec<TType, TSizeTy, ColMajor>& B, const double& tol);
	// subtypes of finding an inverse (works only for TFltVV, because of TSvd)
	template <class TType, class TSizeTy = int, bool ColMajor = false>
	inline static void InverseSVD(const TVVec<TType, TSizeTy, ColMajor>& A, TVVec<TType, TSizeTy, ColMajor>& B);
	// transpose matrix - B = A'
	template <class TType, class TSizeTy = int, bool ColMajor = false>
	inline static void Transpose(const TVVec<TType, TSizeTy, ColMajor>& A, TVVec<TType, TSizeTy, ColMajor>& B);
	// performs Gram-Schmidt orthogonalization on the elements of Q
	template <class TSizeTy = int>
	inline static void GS(TVec<TVec<TFlt, TSizeTy>, TSizeTy>& Q);
	template <class TType, class TSizeTy = int, bool ColMajor = false>
	inline static void GS(TVVec<TType, TSizeTy, ColMajor>& Q);
	// Modified Gram-Schmidt on columns of matrix Q
	inline static void MGS(TFltVV& Q);
	// QR based on Modified Gram-Schmidt decomposition.
	inline static void QR(const TFltVV& X, TFltVV& Q, TFltVV& R, const TFlt& Tol);
	// rotates vector (OldX,OldY) by angle Angle (in radians)
	inline static void Rotate(const double& OldX, const double& OldY, const double& Angle, double& NewX, double& NewY);
	// checks if a set of vectors is orthogonal
	template <class TSizeTy = int>
	inline static void AssertOrtogonality(const TVec<TVec<TFlt, TSizeTy>, TSizeTy>& Vecs, const double& Threshold);
	// ColMajor-oriented data for optimal results
	template <class TType, class TSizeTy = int, bool ColMajor = false>
	inline static void AssertOrtogonality(const TVVec<TType, TSizeTy, ColMajor>& Vecs, const double& Threshold);
	inline static bool IsOrthonormal(const TFltVV& Vecs, const double& Threshold);
	inline static bool IsZero(const TFltV& Vec);
	// returns the k-th power of the given matrix
	// negative values of k are allowed
	template <class TType, class TSizeTy = int, bool ColMajor = false>
	inline static void Pow(const TVVec<TType, TSizeTy, ColMajor>& Mat, const int& k, TVVec<TType, TSizeTy, ColMajor>& PowVV);
};

//////////////////////////////////////////////////////////////////////
// Basic Linear Algebra Operations
//class TLinAlg {
//public:

// <x,y>
// TEST
template <class TType, class TSizeTy, bool ColMajor>
double TLinAlg::DotProduct(const TVec<TType, TSizeTy>& x, const TVec<TType, TSizeTy>& y) {
	EAssertR(x.Len() == y.Len(), TStr::Fmt("%d != %d", x.Len(), y.Len()));
	TType result = 0.0; const TSizeTy Len = x.Len();
	for (TSizeTy i = 0; i < Len; i++) result += x[i] * y[i];
	return result;
}
double TLinAlg::DotProduct(const TVec<TFltV>& X, int ColId, const TFltV& y) {
	EAssert(0 <= ColId && ColId < X.Len());
	return DotProduct(X[ColId], y);
}
double TLinAlg::DotProduct(const TVec<TIntFltKdV>& X, int ColId, const TFltV& y) {
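	// Column ColId of the sparse column matrix X is itself a sparse vector, so this
	// reduces to the dense-sparse DotProduct overload below (note the swapped
	// argument order: the dense vector comes first).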
EAssert(0 <= ColId && ColId < X.Len()); return DotProduct(y, X[ColId]); } // TEST // <X(:,ColIdX), Y(:,ColIdY)> template <class TType, class TSizeTy, bool ColMajor> double TLinAlg::DotProduct(const TVVec<TType, TSizeTy, ColMajor>& X, int ColIdX, const TVVec<TType, TSizeTy, ColMajor>& Y, int ColIdY) { EAssert(X.GetRows() == Y.GetRows()); TType result = 0.0; const TSizeTy len = X.GetRows(); for (TSizeTy i = 0; i < len; i++) result = result + X(i, ColIdX) * Y(i, ColIdY); return result; } // TEST // <X(:,ColId), Vec> template <class TType, class TSizeTy, bool ColMajor> double TLinAlg::DotProduct(const TVVec<TType, TSizeTy, ColMajor>& X, int ColId, const TVec<TType, TSizeTy>& Vec) { EAssert(X.GetRows() == Vec.Len()); TType result = 0.0; const TSizeTy len = X.GetRows(); for (TSizeTy i = 0; i < len; i++) result += X(i, ColId) * Vec[i]; return result; } // sparse dot products: // <x,y> where x AND y are sparse //TODO TIntFltKdV indexing and is TInt enough? double TLinAlg::DotProduct(const TIntFltKdV& x, const TIntFltKdV& y) { const int xLen = x.Len(), yLen = y.Len(); double Res = 0.0; int i1 = 0, i2 = 0; while (i1 < xLen && i2 < yLen) { if (x[i1].Key < y[i2].Key) i1++; else if (x[i1].Key > y[i2].Key) i2++; else { Res += x[i1].Dat * y[i2].Dat; i1++; i2++; } } return Res; } // <x,y> where only y is sparse //TODO TIntFltKdV indexing and is TInt enough? template <class TType, class TSizeTy, bool ColMajor> double TLinAlg::DotProduct(const TVec<TType, TSizeTy>& x, const TVec<TIntFltKd>& y) { double Res = 0.0; const int xLen = x.Len(), yLen = y.Len(); for (TSizeTy i = 0; i < yLen; i++) { const TSizeTy key = y[i].Key; if (key < xLen) Res += y[i].Dat * x[key]; } return Res; } // <X(:,ColId),y> where only y is sparse template <class TType, class TSizeTy, bool ColMajor> double TLinAlg::DotProduct(const TVVec<TType, TSizeTy, ColMajor>& X, int ColId, const TIntFltKdV& y) { TType Res = 0.0; const TSizeTy n = X.GetRows(), yLen = y.Len(); for (TSizeTy i = 0; i < yLen; i++) { const TSizeTy key = y[i].Key; if (key < n) Res += y[i].Dat * X(key, ColId); } return Res; } // TEST // z = x * y' template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::OuterProduct(const TVec<TType, TSizeTy>& x, const TVec<TType, TSizeTy>& y, TVVec<TType, TSizeTy, ColMajor>& Z) { EAssert(Z.GetRows() == x.Len() && Z.GetCols() == y.Len()); const TSizeTy XLen = x.Len(); const TSizeTy YLen = y.Len(); for (TSizeTy i = 0; i < XLen; i++) { for (TSizeTy j = 0; j < YLen; j++) { Z(i, j) = x[i] * y[j]; } } } // z := p * x + q * y //TODO should double be TType? 
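// Example (illustrative only, not part of the library): the dense and dense-sparse
// DotProduct overloads above, with hypothetical values.
//   TFltV x, y;
//   x.Add(1.0); x.Add(2.0); x.Add(3.0);
//   y.Add(4.0); y.Add(5.0); y.Add(6.0);
//   double Dense = TLinAlg::DotProduct(x, y);     // 1*4 + 2*5 + 3*6 = 32
//   TIntFltKdV SpV;                               // sparse vector {0 -> 2.0, 2 -> 1.0}
//   SpV.Add(TIntFltKd(0, 2.0)); SpV.Add(TIntFltKd(2, 1.0));
//   double Mixed = TLinAlg::DotProduct(x, SpV);   // 2.0*1 + 1.0*3 = 5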
template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::LinComb(const double& p, const TVec<TType, TSizeTy>& x, const double& q, const TVec<TType, TSizeTy>& y, TVec<TType, TSizeTy>& z) { EAssert(x.Len() == y.Len() && y.Len() == z.Len()); const TSizeTy Len = x.Len(); for (TSizeTy i = 0; i < Len; i++) { z[i] = p * x[i] + q * y[i]; } } //TODO this will work only for glib type TFlt template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::LinCombInPlace(const TType& alpha, const TVec<TNum<TType>, TSizeTy>& x, const TType& beta, TVec<TNum<TType>, TSizeTy>& y) { #ifdef BLAS if (TypeCheck::is_double<TType>::value == true){ typedef double Loc; cblas_daxpby(x.Len(), *((Loc *)&alpha), (Loc *)&x[0].Val, 1, *((Loc *)&beta), (Loc *)&y[0].Val, 1); } else if (TypeCheck::is_float<TType>::value == true){ typedef float Loc; cblas_saxpby(x.Len(), *((Loc *)&alpha), (Loc *)&x[0].Val, 1, *((Loc *)&beta), (Loc *)&y[0].Val, 1); } else if (TypeCheck::is_complex_double<TType>::value == true){ typedef double Loc; //std::complex<double> alpha_(alpha); std::complex<double> beta_(beta); cblas_zaxpby(x.Len(), (const Loc*)&alpha, (const Loc*)&x[0].Val, 1, (const Loc*)&beta, (Loc*)&y[0].Val, 1); } else if (TypeCheck::is_complex_float<TType>::value == true){ typedef float Loc; //std::complex<float> alpha_((float)alpha); std::complex<float> beta_((float)beta); cblas_caxpby(x.Len(), (const Loc*)&alpha, (const Loc*)&x[0].Val, 1, (const Loc*)&beta, (Loc*)&y[0].Val, 1); } #else LinComb(alpha, x, beta, y, y); #endif } // TEST // Z := p * X + q * Y //TODO double or type? template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::LinComb(const double& p, const TVVec<TType, TSizeTy, ColMajor>& X, const double& q, const TVVec<TType, TSizeTy, ColMajor>& Y, TVVec<TType, TSizeTy, ColMajor>& Z) { EAssert(X.GetRows() == Y.GetRows() && X.GetCols() == Y.GetCols() && X.GetRows() == Z.GetRows() && X.GetCols() == Z.GetCols()); TSizeTy Rows = X.GetRows(); TSizeTy Cols = X.GetCols(); for (TSizeTy RowN = 0; RowN < Rows; RowN++) { for (TSizeTy ColN = 0; ColN < Cols; ColN++) { Z.At(RowN, ColN) = p*X.At(RowN, ColN) + q*Y.At(RowN, ColN); } } } // z = p * x + q * y void TLinAlg::LinComb(const double& p, const TIntFltKdV& x, const double& q, const TIntFltKdV& y, TIntFltKdV& z) { TSparseOpsIntFlt::SparseLinComb(p, x, q, y, z); } void TLinAlg::LinComb(const double& p, const TFltVV& X, int ColId, const double& q, const TFltV& y, TFltV& z) { if (z.Empty()) z.Gen(X.GetRows()); EAssert(X.GetRows() == y.Len() && y.Len() == z.Len()); const int len = z.Len(); for (int i = 0; i < len; i++) { z[i] = p * X(i, ColId) + q * y[i]; } } void TLinAlg::LinComb(const double& p, const TFltVV& X, int DimId, const double& q, const TFltV& y, TFltV& z, int Dim) { EAssertR(Dim == 1 || Dim == 2, "TLinAlg::LinComb: Invalid value of argument Dim."); if (Dim == 1) { if (z.Empty()) z.Gen(X.GetRows()); EAssert(X.GetRows() == y.Len() && y.Len() == z.Len()); const int len = z.Len(); for (int i = 0; i < len; i++) { z[i] = p * X(i, DimId) + q * y[i]; } } else if (Dim == 2) { if (z.Empty()) z.Gen(X.GetCols()); EAssert(X.GetCols() == y.Len() && y.Len() == z.Len()); const int len = z.Len(); for (int i = 0; i < len; i++) { z[i] = p * X(DimId, i) + q * y[i]; } } } void TLinAlg::LinComb(const double& p, const TFltVV& X, const double& q, const TFltVV& Y, TFltVV& Z) { if (Z.Empty()) Z.Gen(X.GetRows(), X.GetCols()); EAssert(X.GetRows() == Y.GetRows() && X.GetCols() == Y.GetCols() && X.GetRows() == Z.GetRows() && X.GetCols() == Z.GetCols()); int Rows = X.GetRows(); int 
Cols = X.GetCols(); for (int RowN = 0; RowN < Rows; RowN++) { for (int ColN = 0; ColN < Cols; ColN++) { Z.At(RowN, ColN) = p*X.At(RowN, ColN) + q*Y.At(RowN, ColN); } } } // TEST // z := p * x + (1 - p) * y template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::ConvexComb(const double& p, const TVec<TType, TSizeTy>& x, const TVec<TType, TSizeTy>& y, TVec<TType, TSizeTy>& z) { AssertR(0.0 <= p && p <= 1.0, TFlt::GetStr(p)); TLinAlg::LinComb(p, x, 1.0 - p, y, z); } //this will fail if TType != TFlt, Specialization should be used #ifdef BLAS // TEST //y = k * x + y template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::AddVec(const TType& k, const TVec<TNum<TType>, TSizeTy>& x, TVec<TNum<TType>, TSizeTy>& y) { if (TypeCheck::is_double<TType>::value == true){ typedef double Loc; cblas_daxpy(x.Len(), *((Loc *)&k), (Loc *)&x[0].Val, 1, (Loc *)&y[0].Val, 1); } else if (TypeCheck::is_float<TType>::value == true){ typedef float Loc; cblas_saxpy(x.Len(), *((Loc *)&k), (Loc *)&x[0].Val, 1, (Loc *)&y[0].Val, 1); } else if (TypeCheck::is_complex_double<TType>::value == true){ typedef double Loc; cblas_zaxpy(x.Len(), (const Loc *)&k, (const Loc*)&x[0].Val, 1, (Loc *)&y[0].Val, 1); } else if (TypeCheck::is_complex_float<TType>::value == true){ typedef float Loc; cblas_caxpy(x.Len(), (const Loc *)&k, (const Loc *)&x[0].Val, 1, (Loc *)&y[0].Val, 1); } //cblas_daxpy(x.Len(), k, &x[0].Val, 1, &y[0].Val, 1); } #endif // TEST // z := k * x + y template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::AddVec(const double& k, const TVec<TType, TSizeTy>& x, const TVec<TType, TSizeTy>& y, TVec<TType, TSizeTy>& z) { TLinAlg::LinComb(k, x, 1.0, y, z); } // z := k * X[ColId] + y //Andrej template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::AddVec(const double& k, const TVec<TFltV>& X, int ColId, const TFltV& y, TFltV& z) { EAssert(0 <= ColId && ColId < X.Len()); AddVec(k, X[ColId], y, z); } // z := k * X(:,ColId) + y //Andrej template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::AddVec(const double& k, const TFltVV& X, int ColId, const TFltV& y, TFltV& z) { EAssert(X.GetRows() == y.Len()); EAssert(y.Len() == z.Len()); const int len = z.Len(); for (int i = 0; i < len; i++) { z[i] = y[i] + k * X(i, ColId); } } // z := x + y template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::AddVec(const TVec<TType, TSizeTy>& x, const TVec<TType, TSizeTy>& y, TVec<TType, TSizeTy>& z) { TLinAlg::LinComb(1.0, x, 1.0, y, z); } // z := k * x + y //template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::AddVec(const double& k, const TIntFltKdV& x, const TFltV& y, TFltV& z) { EAssert(y.Len() == z.Len()); z = y; // first we set z to be y // and than we add x to z (==y) const int xLen = x.Len(), yLen = y.Len(); for (int i = 0; i < xLen; i++) { const int ii = x[i].Key; if (ii < yLen) { z[ii] = k * x[i].Dat + y[ii]; } } } // z := k * X[ColId] + y void TLinAlg::AddVec(const double& k, const TVec<TIntFltKdV>& X, int ColId, const TFltV& y, TFltV& z) { EAssert(0 <= ColId && ColId < X.Len()); AddVec(k, X[ColId], y, z); } // y := k * x + y //template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::AddVec(const double& k, const TIntFltKdV& x, TFltV& y) { const int xLen = x.Len(), yLen = y.Len(); for (int i = 0; i < xLen; i++) { const int ii = x[i].Key; if (ii < yLen) { y[ii] += k * x[i].Dat; } } } // TEST // Y(:,Col) += k * X(:,Col) template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::AddVec(double k, const TVVec<TType, TSizeTy, ColMajor>& X, 
TSizeTy ColIdX, TVVec<TType, TSizeTy, ColMajor>& Y, TSizeTy ColIdY) {
	EAssert(X.GetRows() == Y.GetRows());
	const TSizeTy len = Y.GetRows();
	for (TSizeTy i = 0; i < len; i++) {
		Y(i, ColIdY) = Y(i, ColIdY) + k * X(i, ColIdX);
	}
}
// TEST
// Y(:,ColIdY) += k * x
template <class TType, class TSizeTy, bool ColMajor>
void TLinAlg::AddVec(const double& k, const TVec<TType, TSizeTy>& x, TVVec<TType, TSizeTy, ColMajor>& Y, const TSizeTy& ColIdY) {
	EAssert(x.Len() == Y.GetRows());
	EAssert(ColIdY >= 0 && ColIdY < Y.GetCols());
	for (TSizeTy RowN = 0; RowN < Y.GetRows(); RowN++) {
		Y.At(RowN, ColIdY) += k*x[RowN];
	}
}
// TEST
// Result += k * X(:,Col)
template <class TType, class TSizeTy, bool ColMajor>
void TLinAlg::AddVec(double k, const TVVec<TType, TSizeTy, ColMajor>& X, int ColId, TVec<TType, TSizeTy>& Result) {
	EAssert(X.GetRows() == Result.Len());
	const TSizeTy len = Result.Len();
	for (TSizeTy i = 0; i < len; i++) {
		Result[i] = Result[i] + k * X(i, ColId);
	}
}
// z = x + y
template <class TType, class TSizeTy, bool ColMajor>
void TLinAlg::AddVec(const TIntFltKdV& x, const TIntFltKdV& y, TIntFltKdV& z) {
	TSparseOpsIntFlt::SparseMerge(x, y, z);
}
// TEST
// Result = SUM(x)
template <class TType, class TSizeTy>
double TLinAlg::SumVec(const TVec<TType, TSizeTy>& x) {
	const TSizeTy len = x.Len();
	double Res = 0.0;
	for (TSizeTy i = 0; i < len; i++) { Res += x[i]; }
	return Res;
}
// Result = SUM(x)
//template <class TType, class TSizeTy, bool ColMajor>
double TLinAlg::SumVec(const TIntFltKdV& x) {
	const int len = x.Len();
	double Res = 0.0;
	for (int i = 0; i < len; i++) { Res += x[i].Dat; }
	return Res;
}
// TEST
// Result = SUM(k*x + y)
template <class TType, class TSizeTy>
double TLinAlg::SumVec(double k, const TVec<TType, TSizeTy>& x, const TVec<TType, TSizeTy>& y) {
	EAssert(x.Len() == y.Len());
	const TSizeTy len = x.Len();
	double Res = 0.0;
	for (TSizeTy i = 0; i < len; i++) { Res += k * x[i] + y[i]; }
	return Res;
}
// Result = ||x-y||^2 (Euclidean)
template <class TType, class TSizeTy, bool ColMajor>
double TLinAlg::EuclDist2(const TVec<TType, TSizeTy>& x, const TVec<TType, TSizeTy>& y) {
	EAssert(x.Len() == y.Len());
	const TSizeTy len = x.Len();
	double Res = 0.0;
	for (TSizeTy i = 0; i < len; i++) { Res += TMath::Sqr(x[i] - y[i]); }
	return Res;
}
// Result = ||x-y||^2 (Euclidean)
double TLinAlg::EuclDist2(const TFltPr& x, const TFltPr& y) {
	return TMath::Sqr(x.Val1 - y.Val1) + TMath::Sqr(x.Val2 - y.Val2);
}
// TEST
// Result = ||x-y|| (Euclidean)
template <class TType, class TSizeTy>
double TLinAlg::EuclDist(const TVec<TType, TSizeTy>& x, const TVec<TType, TSizeTy>& y) {
	return sqrt(TLinAlg::EuclDist2(x, y));
}
// Result = ||x-y|| (Euclidean)
//template <class TType, class TSizeTy, bool ColMajor>
double TLinAlg::EuclDist(const TFltPr& x, const TFltPr& y) {
	return sqrt(TLinAlg::EuclDist2(x, y));
}
// Result = ||A||_F (Frobenius)
template <class TType, class TSizeTy, bool ColMajor>
TType TLinAlg::Frob(const TVVec<TNum<TType>, TSizeTy, ColMajor> &A) {
	TType frob = 0;
	for (TSizeTy RowN = 0; RowN < A.GetRows(); RowN++) {
		for (TSizeTy ColN = 0; ColN < A.GetCols(); ColN++) {
			frob += A.At(RowN, ColN)*A.At(RowN, ColN);
		}
	}
	return sqrt(frob);
}
// TEST
// Result = ||A - B||_F (Frobenius)
template <class TType, class TSizeTy, bool ColMajor>
double TLinAlg::FrobDist2(const TVVec<TType, TSizeTy, ColMajor>& A, const TVVec<TType, TSizeTy, ColMajor>& B) {
	double frob = 0;
	TVec<TType, TSizeTy> Apom = (const_cast<TVVec<TType, TSizeTy, ColMajor> &>(A)).Get1DVec();
	TVec<TType, TSizeTy> Bpom = (const_cast<TVVec<TType, TSizeTy, ColMajor>
&>(B)).Get1DVec(); frob = TLinAlg::EuclDist2(Apom, Bpom); /*for (int RowN = 0; RowN < A.GetRows(); RowN++) { for (int ColN = 0; ColN < A.GetCols(); ColN++) { frob += (A.At(RowN, ColN) - B.At(RowN, ColN))*(A.At(RowN, ColN) - B.At(RowN, ColN)); } }*/ return frob; } // TEST // Result = ||A - B||_F (Frobenious) template <class TType, class TSizeTy, bool ColMajor> double TLinAlg::FrobDist2(const TVec<TType, TSizeTy>& A, const TVec<TType, TSizeTy>& B) { double frob = 0; frob = TLinAlg::EuclDist2(A, B); /*for (int RowN = 0; RowN < A.Len(); RowN++) { frob += (A[RowN] - B[RowN])*(A[RowN] - B[RowN]); }*/ return frob; } // Dense to sparse transform // TEST // Dense to sparse transform template <class TType, class TSizeTy, bool ColMajor, class IndexType> void TLinAlg::Sparse(const TVVec<TType, TSizeTy, ColMajor>& A, TTriple<TVec<IndexType, TSizeTy>, TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& B){ B.Val1.Gen(0); B.Val2.Gen(0); B.Val3.Gen(0); for (TSizeTy RowN = 0; RowN < A.GetRows(); RowN++) { for (TSizeTy ColN = 0; ColN < A.GetCols(); ColN++) { if (A.At(RowN, ColN) != 0.0) { B.Val1.Add(RowN); B.Val2.Add(ColN); B.Val3.Add(A.At(RowN, ColN)); } } } } // Dense to sparse transform //TODO fix TVec<TIntFltKdV> indexing and type template <class TType, class TSizeTy, bool ColMajor, class IndexType> void TLinAlg::Sparse(const TVVec<TType, TSizeTy, ColMajor>& A, TVec<TIntFltKdV>& B){ TSizeTy Cols = A.GetCols(); TSizeTy Rows = A.GetRows(); B.Gen(Cols); for (TSizeTy ColN = 0; ColN < Cols; ColN++) { B[ColN].Gen(0); for (TSizeTy RowN = 0; RowN < Rows; RowN++) { if (A.At(RowN, ColN) != 0.0) { B[ColN].Add(TIntFltKd(RowN, A.At(RowN, ColN))); } } } } // TEST // Sparse to dense transform template <class TType, class TSizeTy, bool ColMajor, class IndexType> void TLinAlg::Full(const TTriple<TVec<IndexType, TSizeTy>, TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& A, TVVec<TType, TSizeTy, ColMajor>& B, const int Rows, const int Cols) { B.Gen(Rows, Cols); B.PutAll(0.0); TSizeTy nnz = A.Val1.Len(); for (TSizeTy ElN = 0; ElN < nnz; ElN++) { B.At(A.Val1[ElN], A.Val2[ElN]) = A.Val3[ElN]; } } // Sparse to dense transform template <class TType, class TSizeTy, bool ColMajor, class IndexType> void TLinAlg::Full(const TVec<TIntFltKdV, TSizeTy>& A, TVVec<TType, TSizeTy, ColMajor>& B, TSizeTy Rows){ TSizeTy Cols = A.Len(); B.Gen(Rows, Cols); B.PutAll(0.0); for (TSizeTy ColN = 0; ColN < Cols; ColN++) { TSizeTy Els = A[ColN].Len(); for (TSizeTy ElN = 0; ElN < Els; ElN++) { B.At(A[ColN][ElN].Key, ColN) = A[ColN][ElN].Dat; } } } // TEST // Transpose template <class TType, class TSizeTy, bool ColMajor, class IndexType> void TLinAlg::Transpose(const TTriple<TVec<IndexType, TSizeTy>, TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& A, TTriple<TVec<IndexType, TSizeTy>, TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& At) { TSizeTy nnz = A.Val1.Len(); At.Val1.Gen(nnz, 0); At.Val2.Gen(nnz, 0); At.Val3.Gen(nnz, 0); TVec<TSizeTy, TSizeTy> index; TIntV::SortGetPerm(A.Val2, At.Val1, index); for (TSizeTy ElN = 0; ElN < nnz; ElN++) { //At.Val1.Add(A.Val2[ElN]); At.Val2.Add(A.Val1[index[ElN]]); At.Val3.Add(A.Val3[index[ElN]]); } } // Transpose //TODO Index template void TLinAlg::Transpose(const TVec<TIntFltKdV>& A, TVec<TIntFltKdV>& At, int Rows){ // A is a sparse col matrix: int Cols = A.Len(); // find number of rows if (Rows == -1) { for (int ColN = 0; ColN < Cols; ColN++) { int Els = A[ColN].Len(); for (int ElN = 0; ElN < Els; ElN++) { Rows = MAX(Rows, A[ColN][ElN].Key.Val); } } Rows = Rows + 1; } At.Gen(Rows); // transpose for (int ColN = 0; 
ColN < Cols; ColN++) {
	int Els = A[ColN].Len();
	for (int ElN = 0; ElN < Els; ElN++) {
		At[A[ColN][ElN].Key].Add(TIntFltKd(ColN, A[ColN][ElN].Dat));
	}
}
// sort
for (int ColN = 0; ColN < Rows; ColN++) { At[ColN].Sort(); }
}
// Sign
void TLinAlg::Sign(const TVec<TIntFltKdV>& Mat, TVec<TIntFltKdV>& Mat2) {
	Mat2 = Mat;
	int Cols = Mat2.Len();
	for (int ColN = 0; ColN < Cols; ColN++) {
		int Els = Mat2[ColN].Len();
		for (int ElN = 0; ElN < Els; ElN++) {
			Mat2[ColN][ElN].Dat = TMath::Sign(Mat2[ColN][ElN].Dat);
		}
	}
}
// Vector of sparse vectors to sparse matrix (coordinate representation)
//TODO Index template
void TLinAlg::Convert(const TVec<TPair<TIntV, TFltV>>& A, TTriple<TIntV, TIntV, TFltV>& B) {
	B.Val1.Clr(); B.Val2.Clr(); B.Val3.Clr();
	int Cols = A.Len();
	for (int ColN = 0; ColN < Cols; ColN++) {
		int Nnz = A[ColN].Val1.Len();
		for (int ElN = 0; ElN < Nnz; ElN++) {
			B.Val1.Add(A[ColN].Val1[ElN]);
			B.Val2.Add(ColN);
			B.Val3.Add(A[ColN].Val2[ElN]);
		}
	}
}
// Vector of sparse vectors to sparse matrix (coordinate representation)
//TODO Index template
void TLinAlg::Convert(const TVec<TIntFltKdV>& A, TTriple<TIntV, TIntV, TFltV>& B) {
	int Cols = A.Len();
	int TotalNnz = 0;
	for (int ColN = 0; ColN < Cols; ColN++) { TotalNnz += A[ColN].Len(); }
	B.Val1.Gen(TotalNnz, 0); B.Val2.Gen(TotalNnz, 0); B.Val3.Gen(TotalNnz, 0);
	for (int ColN = 0; ColN < Cols; ColN++) {
		int Nnz = A[ColN].Len();
		for (int ElN = 0; ElN < Nnz; ElN++) {
			B.Val1.Add(A[ColN][ElN].Key);
			B.Val2.Add(ColN);
			B.Val3.Add(A[ColN][ElN].Dat);
		}
	}
}
// TEST
// sum columns (Dimension = 1) or rows (Dimension = 2) and store them in vector y
template <class TType, class TSizeTy, bool ColMajor>
void TLinAlg::Sum(const TVVec<TType, TSizeTy, ColMajor>& X, TVec<TType, TSizeTy>& y, const int Dimension) {
	TSizeTy Cols = X.GetCols();
	TSizeTy Rows = X.GetRows();
	if (Dimension == 1) {
		y.Gen(Cols);
		for (TSizeTy ColN = 0; ColN < Cols; ColN++) {
			for (TSizeTy RowN = 0; RowN < Rows; RowN++) {
				y[ColN] += X.At(RowN, ColN);
			}
		}
	} else if (Dimension == 2) {
		y.Gen(Rows);
		for (TSizeTy ColN = 0; ColN < Cols; ColN++) {
			for (TSizeTy RowN = 0; RowN < Rows; RowN++) {
				y[RowN] += X.At(RowN, ColN);
			}
		}
	} else FailR("Dimension should be 1 or 2");
}
template <class TType, class TSizeTy, bool ColMajor>
double TLinAlg::SumRow(const TVVec<TType, TSizeTy, ColMajor>& X, const int& RowN) {
	EAssertR(RowN < X.GetRows(), "Row index exceeds the number of rows!");
	const TSizeTy Cols = X.GetCols();
	double Sum = 0;
	for (TSizeTy ColN = 0; ColN < Cols; ColN++) { Sum += X(RowN, ColN); }
	return Sum;
}
// TEST
// sum columns (Dimension = 1) or rows (Dimension = 2) and store them in vector y
template <class TType, class TSizeTy, bool ColMajor, class IndexType>
void TLinAlg::Sum(const TTriple<TVec<IndexType, TSizeTy>, TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& X, TVec<TType, TSizeTy>& y, const int Dimension) {
	TSizeTy Cols = X.Val2.GetMxVal() + 1;
	TSizeTy Rows = X.Val1.GetMxVal() + 1;
	TSizeTy Els = X.Val1.Len();
	if (Dimension == 1) {
		y.Gen(Cols);
		for (TSizeTy ElN = 0; ElN < Els; ElN++) {
			//int RowN = X.Val1[ElN];
			TSizeTy ColN = X.Val2[ElN];
			y[ColN] += X.Val3[ElN];
		}
	} else if (Dimension == 2) {
		y.Gen(Rows);
		for (TSizeTy ElN = 0; ElN < Els; ElN++) {
			TSizeTy RowN = X.Val1[ElN];
			//int ColN = X.Val2[ElN];
			y[RowN] += X.Val3[ElN];
		}
	} else FailR("Dimension should be 1 or 2");
}
// TEST
// ||x||^2 (Euclidean)
template <class TType, class TSizeTy, bool ColMajor>
double TLinAlg::Norm2(const TVec<TType, TSizeTy>& x) {
	return TLinAlg::DotProduct(x, x);
}
// ||x|| (Euclidean)
template <class TType, class TSizeTy, bool ColMajor>
double TLinAlg::Norm(const TVec<TType, TSizeTy>& x) {
	return sqrt(TLinAlg::Norm2(x));
}
//Andrej switch this to TNum<TType>
// TEST
// x := x / ||x||
template <class TType, class TSizeTy, bool ColMajor>
double TLinAlg::Normalize(TVec<TType, TSizeTy>& x) {
	const double xNorm = TLinAlg::Norm(x);
	if (xNorm > 0.0) { TLinAlg::MultiplyScalar(1 / xNorm, x, x); }
	return xNorm;
}
// TEST
// Normalize X(:,ColId)
template <class TType, class TSizeTy, bool ColMajor>
void TLinAlg::NormalizeColumn(TVVec<TType, TSizeTy, ColMajor>& X, const TSizeTy& ColId) {
	double nX = TLinAlg::Norm(X, ColId);
	if (nX > 0.0) {
		for (TSizeTy RowN = 0; RowN < X.GetRows(); RowN++) {
			X.At(RowN, ColId) /= nX;
		}
	}
}
// TEST
// Normalize the columns of X
template <class TType, class TSizeTy, bool ColMajor>
void TLinAlg::NormalizeColumns(TVVec<TType, TSizeTy, ColMajor>& X) {
	for (TSizeTy ColN = 0; ColN < X.GetCols(); ColN++) {
		TLinAlg::NormalizeColumn(X, ColN);
	}
}
template <class TType, class TSizeTy, bool ColMajor>
void TLinAlg::NormalizeRows(TVVec<TType, TSizeTy, ColMajor>& X) {
	for (TSizeTy RowN = 0; RowN < X.GetRows(); RowN++) {
		TVec<TType> Row;
		X.GetRowPtr(RowN, Row);
		Normalize(Row);
	}
}
#ifdef INTEL
// TEST
template <class TType, class TSizeTy, bool ColMajor>
void TLinAlg::NormalizeColumns(TVVec<TType, TSizeTy, ColMajor>& X, TBool ColumnMajor) {
	const TSizeTy m = X.GetXDim();
	const TSizeTy n = X.GetYDim();
	TVVec<TType, TSizeTy, ColMajor> sqrX(m, n);
	vdSqr(m*n, &X(0, 0).Val, &sqrX(0, 0).Val);
	printf("Squaring of elements done!\n");
	TVec<TType, TSizeTy> enke(m);
	TVec<TType, TSizeTy> sumsqr(n);
	TVec<TType, TSizeTy> norme(n);
	TLAMisc::Fill(enke, 1.0);
	TLinAlg::MultiplyT(sqrX, enke, sumsqr);
	printf("Summing elements done!\n");
	vdInvSqrt(n, &sumsqr[0].Val, &norme[0].Val);
	printf("Summing and inverting elements done!\n");
	// added code
	if (ColMajor) {
		TVVec<TType, TSizeTy, ColMajor> B;
		B.Gen(n, m);
		TLinAlg::Transpose(X, B);
		for (TSizeTy i = 0; i < m; i++) {
			vdMul(n, &norme[0].Val, &B(0, i).Val, &B(0, i).Val);
		}
		TLinAlg::Transpose(B, X);
	} else {
		for (TSizeTy i = 0; i < m; i++) {
			vdMul(n, &norme[0].Val, &X(i, 0).Val, &X(i, 0).Val);
		}
	}
	//TLAMisc::PrintTFltVV(X, "Normalized");
}
#endif
// Normalize the columns of X
//TODO what to do when number
//MARK
template <class TType, class TSizeTy, bool ColMajor, class IndexType>
void TLinAlg::NormalizeColumns(TTriple<TVec<IndexType, TSizeTy>, TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& X) {
	if (X.Val2.Len() == 0) return;
	EAssert(X.Val2.IsSorted(true)); //int?
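	// The code below normalizes a coordinate-format matrix in three passes: one over
	// the nonzeros to accumulate squared column norms, one over the columns to invert
	// the norms (empty columns are left untouched), and one more over the nonzeros to
	// rescale each stored value by the inverse norm of its column.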
	int Cols = X.Val2.GetMxVal() + 1;
	TVec<TType, TSizeTy> InvColNorms(Cols);
	// X.Val2 is sorted, so its largest entry determines the number of columns
	TSizeTy Els = X.Val1.Len();
	for (TSizeTy ElN = 0; ElN < Els; ElN++) {
		InvColNorms[X.Val2[ElN]] += X.Val3[ElN] * X.Val3[ElN];
	}
	for (TSizeTy ColN = 0; ColN < Cols; ColN++) {
		if (InvColNorms[ColN] > 0.0) {
			InvColNorms[ColN] = 1.0 / TMath::Sqrt(InvColNorms[ColN]);
		}
	}
	for (TSizeTy ElN = 0; ElN < Els; ElN++) {
		X.Val3[ElN] *= InvColNorms[X.Val2[ElN]];
	}
}
// Normalize the columns of X
template<class TSizeTy>
void TLinAlg::NormalizeColumns(TVec<TIntFltKdV, TSizeTy>& X) {
	TSizeTy Cols = X.Len();
	for (TSizeTy ElN = 0; ElN < Cols; ElN++) {
		TLinAlg::Normalize(X[ElN]);
	}
}
// Frobenius norm of matrix A
// TEST
template <class TType, class TSizeTy, bool ColMajor>
double TLinAlg::FrobNorm2(const TVVec<TType, TSizeTy, ColMajor>& X) {
	return TLinAlg::Norm2((const_cast<TVVec<TType, TSizeTy, ColMajor> &>(X)).Get1DVec());
}
template <class TType, class TSizeTy, bool ColMajor>
double TLinAlg::FrobNorm(const TVVec<TType, TSizeTy, ColMajor>& X) {
	return sqrt(TLinAlg::FrobNorm2(X));
}
// ||x||^2 (Euclidean), x is sparse
template<class TSizeTy>
double TLinAlg::Norm2(const TVec<TIntFltKdV, TSizeTy>& x) {
	double Result = 0;
	for (TSizeTy i = 0; i < x.Len(); i++) {
		Result += TMath::Sqr(x[i].Dat);
	}
	return Result;
}
// ||x|| (Euclidean), x is sparse
template<class TSizeTy>
double TLinAlg::Norm(const TVec<TIntFltKdV, TSizeTy>& x) {
	return sqrt(Norm2(x));
}
template<class TSizeTy>
double TLinAlg::Norm(const TVec<TIntFltKdV, TSizeTy>& x, const int& ColId) {
	return Norm(x[ColId]);
}
// x := x / ||x||, x is sparse
template<class TSizeTy>
void TLinAlg::Normalize(TVec<TIntFltKdV, TSizeTy>& x) {
	double Normx = TLinAlg::Norm(x);
	if (Normx > 0) {
		TLinAlg::MultiplyScalar(1 / Normx, x, x);
	}
}
// ||X(:,ColId)||^2 (Euclidean)
template <class TType, class TSizeTy, bool ColMajor>
double TLinAlg::Norm2(const TVVec<TType, TSizeTy, ColMajor>& X, int ColId) {
	return TLinAlg::DotProduct(X, ColId, X, ColId);
}
// TEST
// ||X(:,ColId)|| (Euclidean)
template <class TType, class TSizeTy, bool ColMajor>
double TLinAlg::Norm(const TVVec<TType, TSizeTy, ColMajor>& X, int ColId) {
	return sqrt(TLinAlg::Norm2(X, ColId));
}
// L1 norm of x (Sum[|xi|, i = 1..n])
template <class TType, class TSizeTy>
double TLinAlg::NormL1(const TVec<TType, TSizeTy>& x) {
	double norm = 0.0;
	const TSizeTy Len = x.Len();
	for (TSizeTy i = 0; i < Len; i++) norm += TFlt::Abs(x[i]);
	return norm;
}
// TEST
// L1 norm of k*x+y (Sum[|k*xi+yi|, i = 1..n])
template <class TType, class TSizeTy>
double TLinAlg::NormL1(double k, const TVec<TType, TSizeTy>& x, const TVec<TType, TSizeTy>& y) {
	EAssert(x.Len() == y.Len());
	double norm = 0.0;
	const TSizeTy len = x.Len();
	for (TSizeTy i = 0; i < len; i++) {
		norm += TFlt::Abs(k * x[i] + y[i]);
	}
	return norm;
}
// L1 norm of x (Sum[|xi|, i = 1..n])
double TLinAlg::NormL1(const TIntFltKdV& x) {
	double norm = 0.0;
	const int Len = x.Len();
	for (int i = 0; i < Len; i++) norm += TFlt::Abs(x[i].Dat);
	return norm;
}
// TEST
// x := x / ||x||_1
template <class TType, class TSizeTy>
void TLinAlg::NormalizeL1(TVec<TType, TSizeTy>& x) {
	const double xNorm = TLinAlg::NormL1(x);
	if (xNorm > 0.0) { TLinAlg::MultiplyScalar(1 / xNorm, x, x); }
}
// x := x / ||x||_1
void TLinAlg::NormalizeL1(TIntFltKdV& x) {
	const double xNorm = TLinAlg::NormL1(x);
	if (xNorm > 0.0) { TLinAlg::MultiplyScalar(1 / xNorm, x, x); }
}
// TEST
// Linf norm of x (Max{|xi|, i = 1..n})
template <class TType, class TSizeTy> double
TLinAlg::NormLinf(const TVec<TType, TSizeTy>& x) { double norm = 0.0; const TSizeTy Len = x.Len(); for (TSizeTy i = 0; i < Len; i++) norm = TFlt::GetMx(TFlt::Abs(x[i]), norm); return norm; } // Linf norm of x (Max{|xi|, i = 1..n}) double TLinAlg::NormLinf(const TIntFltKdV& x) { double norm = 0.0; const int Len = x.Len(); for (int i = 0; i < Len; i++) norm = TFlt::GetMx(TFlt::Abs(x[i].Dat), norm); return norm; } // TEST // x := x / ||x||_inf template <class TType, class TSizeTy> void TLinAlg::NormalizeLinf(TVec<TType, TSizeTy>& x) { const double xNormLinf = TLinAlg::NormLinf(x); if (xNormLinf > 0.0) { TLinAlg::MultiplyScalar(1.0 / xNormLinf, x, x); } } // x := x / ||x||_inf, , x is sparse void TLinAlg::NormalizeLinf(TIntFltKdV& x) { const double xNormLInf = TLinAlg::NormLinf(x); if (xNormLInf > 0.0) { TLinAlg::MultiplyScalar(1.0 / xNormLInf, x, x); } } // stores the squared norm of all the columns into the output vector void TLinAlg::GetColNormV(const TFltVV& X, TFltV& ColNormV) { const int Cols = X.GetCols(); GetColNorm2V(X, ColNormV); for (int i = 0; i < Cols; i++) { ColNormV[i] = sqrt(ColNormV[i]); } } // stores the norm of all the columns into the output vector void TLinAlg::GetColNorm2V(const TFltVV& X, TFltV& ColNormV) { const int Cols = X.GetCols(); ColNormV.Gen(Cols); #pragma omp parallel for for (int ColN = 0; ColN < Cols; ColN++) { ColNormV[ColN] = Norm2(X, ColN); } } // TEST // find the index of maximum elements for a given row of X template <class TType, class TSizeTy, bool ColMajor> int TLinAlg::GetRowMaxIdx(const TVVec<TType, TSizeTy, ColMajor>& X, const TSizeTy& RowN) { TSizeTy Idx = -1; TSizeTy Cols = X.GetCols(); double MaxVal = TFlt::Mn; for (TSizeTy ColN = 0; ColN < Cols; ColN++) { double Val = X.At(RowN, ColN); if (MaxVal < Val) { MaxVal = Val; Idx = ColN; } } return Idx; } // TEST // find the index of maximum elements for a given each col of X template <class TType, class TSizeTy, bool ColMajor> int TLinAlg::GetColMaxIdx(const TVVec<TType, TSizeTy, ColMajor>& X, const int& ColN) { TSizeTy Idx = -1; TSizeTy Rows = X.GetRows(); double MaxVal = TFlt::Mn; for (TSizeTy RowN = 0; RowN < Rows; RowN++) { double Val = X.At(RowN, ColN); if (MaxVal < Val) { MaxVal = Val; Idx = RowN; } } return Idx; } // TEST // find the index of maximum elements for each row of X template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::GetRowMaxIdxV(const TVVec<TType, TSizeTy, ColMajor>& X, TVec<TInt, TSizeTy>& IdxV) { IdxV.Gen(X.GetRows()); TSizeTy Rows = X.GetRows(); for (TSizeTy RowN = 0; RowN < Rows; RowN++) { IdxV[RowN] = TLinAlg::GetRowMaxIdx(X, RowN); } } // find the index of maximum elements for each col of X template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::GetColMaxIdxV(const TVVec<TType, TSizeTy, ColMajor>& X, TVec<TInt, TSizeTy>& IdxV) { IdxV.Gen(X.GetCols()); TSizeTy Cols = X.GetCols(); for (TSizeTy ColN = 0; ColN < Cols; ColN++) { IdxV[ColN] = TLinAlg::GetColMaxIdx(X, ColN); } } //x := k * x // TEST //x := k * x template <class TType, class TSizeTy> void TLinAlg::MultiplyScalar(const double& k, TVec<TType, TSizeTy>& x) { TSizeTy Len = x.Len(); for (TSizeTy i = 0; i < Len; i++) x[i] = k * x[i]; } // find the index of maximum elements for a given each col of X int TLinAlg::GetColMinIdx(const TFltVV& X, const int& ColN) { const int Rows = X.GetRows(); double MinVal = TFlt::Mx; int MinIdx = -1; for (int RowN = 0; RowN < Rows; RowN++) { double Val = X(RowN, ColN); if (Val < MinVal) { MinVal = Val; MinIdx = RowN; } } return MinIdx; } // find the index of maximum 
elements for each col of X void TLinAlg::GetColMinIdxV(const TFltVV& X, TIntV& IdxV) { int Cols = X.GetCols(); if (IdxV.Empty()) { IdxV.Gen(Cols); } EAssert(IdxV.Len() == Cols); #pragma omp parallel for for (int ColN = 0; ColN < Cols; ColN++) { IdxV[ColN] = GetColMinIdx(X, ColN); } } //template <class TVal> TVal TLinAlg::GetColMin(const TVVec<TVal>& X, const int& ColN); //template <class TVal> void TLinAlg::GetColMinV(const TVVec<TVal>& X, TVec<TVal>& ValV); // TEST // y := k * x template <class TType, class TSizeTy> void TLinAlg::MultiplyScalar(const double& k, const TVec<TType, TSizeTy>& x, TVec<TType, TSizeTy>& y) { EAssert(x.Len() == y.Len()); TSizeTy Len = x.Len(); for (TSizeTy i = 0; i < Len; i++) y[i] = k * x[i]; } // y := k * x void TLinAlg::MultiplyScalar(const double& k, const TIntFltKdV& x, TIntFltKdV& y) { EAssert(x.Len() == y.Len()); int Len = x.Len(); for (int i = 0; i < Len; i++) { y[i].Key = x[i].Key; y[i].Dat = k * x[i].Dat; } } // TEST // Y := k * X template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::MultiplyScalar(const double& k, const TVVec<TType, TSizeTy, ColMajor>& X, TVVec<TType, TSizeTy, ColMajor>& Y) { EAssert(X.GetRows() == Y.GetRows() && X.GetCols() == Y.GetCols()); const TSizeTy Rows = X.GetRows(); const TSizeTy Cols = X.GetCols(); for (TSizeTy i = 0; i < Rows; i++) { for (TSizeTy j = 0; j < Cols; j++) { Y(i, j) = k*X(i, j); } } } // Y := k * X template <class TSizeTy> void TLinAlg::MultiplyScalar(const double& k, const TVec<TIntFltKdV, TSizeTy>& X, TVec<TIntFltKdV, TSizeTy>& Y) { // sparse column matrix Y = X; TSizeTy Cols = X.Len(); for (TSizeTy ColN = 0; ColN < Cols; ColN++) { TSizeTy Els = X[ColN].Len(); for (int ElN = 0; ElN < Els; ElN++) { Y[ColN][ElN].Dat = k * X[ColN][ElN].Dat; } } } // y := A * x template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::Multiply(const TVVec<TType, TSizeTy, ColMajor>& A, const TVec<TType, TSizeTy>& x, TVec<TType, TSizeTy>& y) { if (y.Empty()) { y.Gen(A.GetRows()); } EAssert(A.GetCols() == x.Len() && A.GetRows() == y.Len()); #ifdef BLAS TLinAlg::Multiply(A, x, y, TLinAlgBlasTranspose::NOTRANS, 1.0, 0.0); #else int n = A.GetRows(), m = A.GetCols(); for (int i = 0; i < n; i++) { y[i] = 0.0; for (int j = 0; j < m; j++) { y[i] += A(i, j) * x[j]; } } #endif } // TEST // C(:, ColId) := A * x template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::Multiply(const TVVec<TType, TSizeTy, ColMajor>& A, const TVec<TType, TSizeTy>& x, TVVec<TType, TSizeTy, ColMajor>& C, TSizeTy ColId) { EAssert(A.GetCols() == x.Len() && A.GetRows() == C.GetRows()); TSizeTy n = A.GetRows(), m = A.GetCols(); for (TSizeTy i = 0; i < n; i++) { C(i, ColId) = 0.0; for (TSizeTy j = 0; j < m; j++) C(i, ColId) += A(i, j) * x[j]; } } // TEST // y := A * B(:, ColId) template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::Multiply(const TVVec<TType, TSizeTy, ColMajor>& A, const TVVec<TType, TSizeTy, ColMajor>& B, int ColId, TVec<TType, TSizeTy>& y) { EAssert(A.GetCols() == B.GetRows() && A.GetRows() == y.Len()); TSizeTy n = A.GetRows(), m = A.GetCols(); for (TSizeTy i = 0; i < n; i++) { y[i] = 0.0; for (TSizeTy j = 0; j < m; j++) y[i] += A(i, j) * B(j, ColId); } } // TEST // C(:, ColIdC) := A * B(:, ColIdB) template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::Multiply(const TVVec<TType, TSizeTy, ColMajor>& A, const TVVec<TType, TSizeTy, ColMajor>& B, int ColIdB, TVVec<TType, TSizeTy, ColMajor>& C, int ColIdC) { EAssert(A.GetCols() == B.GetRows() && A.GetRows() == C.GetRows()); TSizeTy n = A.GetRows(), m 
= A.GetCols(); for (TSizeTy i = 0; i < n; i++) { C(i, ColIdC) = 0.0; for (TSizeTy j = 0; j < m; j++) C(i, ColIdC) += A(i, j) * B(j, ColIdB); } } //LAPACKE stuff #ifdef LAPACKE // Tested in other function //A is rewritten in place with orthogonal matrix Q template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::QRbasis(TVVec<TType, TSizeTy, ColMajor>& A) { TSizeTy m = A.GetRows(); TSizeTy n = A.GetCols(); TSizeTy k = A.GetCols(); TSizeTy lda = ColMajor ? m : n; int Matrix_Layout = ColMajor ? LAPACK_COL_MAJOR : LAPACK_ROW_MAJOR; TVec<TType, TSizeTy> tau; tau.Gen(MAX(1, MIN(m, n))); LAPACKE_dgeqrf(Matrix_Layout, m, n, &A(0, 0).Val, lda, &tau[0].Val); LAPACKE_dorgqr(Matrix_Layout, m, n, k, &A(0, 0).Val, lda, &tau[0].Val); } // TEST template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::QRbasis(const TVVec<TType, TSizeTy, ColMajor>& A, TVVec<TType, TSizeTy, ColMajor>& Q) { Q = A; TLinAlg::QRbasis(Q); } // Tested in other function //A is rewritten in place with orthogonal matrix Q (column pivoting to improve stability) template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::QRcolpbasis(TVVec<TType, TSizeTy, ColMajor>& A) { TSizeTy m = A.GetRows(); TSizeTy n = A.GetCols(); TSizeTy k = A.GetCols(); TSizeTy lda = ColMajor ? m : n; TSizeTy Matrix_Layout = ColMajor ? LAPACK_COL_MAJOR : LAPACK_ROW_MAJOR; TVec<TType, TSizeTy> tau(MAX(1, MIN(m, n))); TVec<TInt, TSizeTy> jvpt(MAX(1, n)); LAPACKE_dgeqp3(Matrix_Layout, m, n, &A(0, 0).Val, lda, &jvpt[0].Val, &tau[0].Val); LAPACKE_dorgqr(Matrix_Layout, m, n, k, &A(0, 0).Val, lda, &tau[0].Val); } // TEST template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::QRcolpbasis(const TVVec<TType, TSizeTy, ColMajor>& A, TVVec<TType, TSizeTy, ColMajor>& Q) { Q = A; TLinAlg::QRcolpbasis(Q); } // TEST //S S option ensures that A is not modified template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::thinSVD(const TVVec<TType, TSizeTy, ColMajor>& A, TVVec<TType, TSizeTy, ColMajor>& U, TVec<TType, TSizeTy>& S, TVVec<TType, TSizeTy, ColMajor>& VT) { TSizeTy m = A.GetRows(); TSizeTy n = A.GetCols(); TSizeTy thin_dim = MIN(m, n); S.Gen(thin_dim); U.Gen(m, thin_dim); VT.Gen(thin_dim, n); int lda = ColMajor ? m : n; int ldu = ColMajor ? m : thin_dim; int ldvt = ColMajor ? thin_dim : n; TVec<TType, TSizeTy> superb(MAX(1, MIN(m, n))); int opt = ColMajor ? 
LAPACK_COL_MAJOR : LAPACK_ROW_MAJOR; /*int lda, ldu, ldvt; if (opt == LAPACK_ROW_MAJOR){ lda = n; ldu = thin_dim; ldvt = n; } else{ lda = m; ldu = m; ldvt = thin_dim; }*/ LAPACKE_dgesvd(opt, 'S', 'S', m, n, const_cast<double *>(&A(0, 0).Val), lda, &S[0].Val, &U(0, 0).Val, ldu, &VT(0, 0).Val, ldvt, &superb[0].Val); } #endif //int TLinAlg::ComputeThinSVD(const TMatrix& X, const int& k, TFltVV& U, TFltV& s, TFltVV& V, const int Iters = 2, const double Tol = 1e-6); //Full matrix times sparse vector //No need to reserve anything outside, functions currently take care of memory managment for safety /*template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::Multiply(TFltVV& ProjMat, TPair<TIntV, TFltV> &, TFltVV& result) { }; template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::Multiply(const TFltVV& ProjMat, const TPair<TIntV, TFltV> &, TFltVV& result) { };*/ //////////////////////////////////////////////////////////////////// // Andrej says: // http://software.intel.com/en-us/node/468598#86F42CD2-6A3C-4E1F-B686-8690FCC03C75 //call mkl_dcoomm(transa, m, n, k, alpha, matdescra, val, rowind, colind, nnz, b, ldb, beta, c, ldc) //if transa=='T' op(A) = A' op(m, k) = m else transa == 'N' op(m, k) = k //A is sparse, B and C are full //m Number of rows of the matrix A. n Number of columns of the matrix C. k Number of columns of the matrix A. //A m x k, B op(m, k) x n, C op(m, k) x n //C := alpha*op(A)*B + beta*C // matdescra[6] ={'G', 'G', 'N', 'C', 'Q', 'Q'}; //General, General, Nonunit diagonal, Zero Based indexing #ifdef INTEL // INTEL //Be careful C should be of the proper size! if not populated (works only for rowmajor!) template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::MultiplySF(const TTriple<TVec<TNum<TSizeTy>, TSizeTy>, TVec<TNum<TSizeTy>, TSizeTy>, TVec<TType, TSizeTy>>& A, const TVVec<TType, TSizeTy, false>& B, TVVec<TType, TSizeTy, ColMajor>& C, const TStr& transa, const int& format){ //B is row_major TSizeTy m, n, k, ldb, ldc; //ldb = ColMajor ? B.GetRows() : B.GetCols(); //ldc = ColMajor ? C.GetRows() : C.GetCols(); ldb = B.GetCols(); ldc = C.GetCols(); n = C.GetCols(); if (transa == "N"){ m = C.GetRows(); k = B.GetRows(); } else{ k = C.GetRows(); m = B.GetRows(); } double alpha = 1; double beta = 0; char matdescra[6] = { 'G', 'G', 'N', 'C', 'Q', 'Q' }; TSizeTy nnz = A.Val3.Len(); if (format == 0){ MKL_DCOOMM(const_cast<char *>(transa.CStr()), &m, &n, &k, &alpha, matdescra, const_cast<double *>(&A.Val3[0].Val), const_cast<TSizeTy *>(&A.Val1[0].Val), const_cast<TSizeTy *>(&A.Val2[0].Val), &nnz, const_cast<double *>(&B(0, 0).Val), &ldb, &beta, const_cast<double *>(&C(0, 0).Val), &ldc); } else{ //call mkl_dcsrmm(transa, m, n, k, alpha, matdescra, val, indx, pntrb, pntre, b, ldb, beta, c, ldc) printf("Max row %d, max column %d\n", A.Val1.Len() - 1, A.Val2.Len()); mkl_dcsrmm(const_cast<char *>(transa.CStr()), &m, &n, &k, &alpha, matdescra, const_cast<double *>(&A.Val3[0].Val), const_cast<TSizeTy *>(&A.Val2[0].Val), const_cast<TSizeTy *>(&A.Val1[0].Val), const_cast<TSizeTy *>(&A.Val1[1].Val), const_cast<double *>(&B(0, 0).Val), &ldb, &beta, const_cast<double *>(&C(0, 0).Val), &ldc); } } // TEST //B will not be needed anymore (works only for rowmajor!) 
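// Example (illustrative only, INTEL builds): multiplying a coordinate-format sparse
// matrix with a dense row-major matrix through MultiplySF above. The triple holds
// (row indices, column indices, values); sizes and values here are hypothetical.
//   TTriple<TIntV, TIntV, TFltV> A;               // 2x2 sparse: A(0,0)=1, A(1,1)=2
//   A.Val1.Add(0); A.Val2.Add(0); A.Val3.Add(1.0);
//   A.Val1.Add(1); A.Val2.Add(1); A.Val3.Add(2.0);
//   TFltVV B(2, 3); B.PutAll(1.0);                // dense 2x3, row-major
//   TFltVV C(2, 3);                               // must be pre-sized by the caller
//   TLinAlg::MultiplySF(A, B, C, TStr("N"), 0);   // COO path: C = [[1,1,1],[2,2,2]]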
//TODO too much hacking
template <class IndexType, class TType, class TSizeTy, bool ColMajor>
void TLinAlg::MultiplyFS(TVVec<TType, TSizeTy, ColMajor>& B, const TTriple<TVec<IndexType, TSizeTy>, TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& A, TVVec<TType, TSizeTy, ColMajor>& C) {
	C.SwitchDim();
	TTmStopWatch time;
	time.Start();
	B.Transpose();
	time.Stop("In place transpose of B costs: ");
	time.Start();
	MultiplySF(A, B, C, TStr("T")); //Heavy hacking
	time.Stop("Full times sparse multi costs: ");
	time.Start();
	B.Transpose();
	time.Stop("In place transpose of B costs: ");
	time.Start();
	C.Transpose();
	time.Stop("In place transpose of C costs: ");
}
#endif
// y := A * x
template <class IndexType, class TType, class TSizeTy, bool ColMajor>
void TLinAlg::Multiply(const TVVec<TType, TSizeTy, ColMajor>& A, const TPair<TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& x, TVec<TType, TSizeTy>& y) {
	// Assumptions on x
	EAssert(x.Val1.Len() == x.Val2.Len());
	// Dimensions must match: x indexes the columns of A, y spans the rows of A
	EAssert(A.GetCols() >= (x.Val1.Len() == 0 ? 0 : x.Val1[x.Val1.GetMxValN()] + 1) && A.GetRows() == y.Len());
	for (TSizeTy RowN = 0; RowN < A.GetRows(); RowN++) {
		y[RowN] = 0.0;
		for (TSizeTy ElN = 0; ElN < x.Val1.Len(); ElN++) {
			y[RowN] += A.At(RowN, x.Val1[ElN]) * x.Val2[ElN];
		}
	}
}
// y := x' * A (x is sparse, A holds row data)
template <class IndexType, class TType, class TSizeTy, bool ColMajor>
void TLinAlg::MultiplyT(const TPair<TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& x, const TVVec<TType, TSizeTy, ColMajor>& A, TVec<TType, TSizeTy>& y) {
	// Assumptions on x
	EAssert(x.Val1.Len() == x.Val2.Len());
	// Dimensions must match: x indexes the rows of A, y spans the columns of A
	EAssert(A.GetRows() >= (x.Val1.Len() == 0 ? 0 : x.Val1[x.Val1.GetMxValN()] + 1) && A.GetCols() == y.Len());
	TLAMisc::FillZero(y);
	TSizeTy nnz = x.Val1.Len();
	for (TSizeTy i = 0; i < nnz; i++) {
		TVec<TType, TSizeTy> row;
		(const_cast<TVVec<TType, TSizeTy, ColMajor> &>(A)).GetRowPtr(x.Val1[i], row);
		//printf("%d rows, %d columns\n", A.GetRows(), A.GetCols());
		//printf("Row len %d\n", row.Len());
		//printf("i is %d, row is %d\n", i, x.Val1[i]);
		//TLinAlg::LinCombInPlace(x.Val2[i], row, 0.0, y);
#ifdef BLAS
		//y = k * x + y
		//cblas_daxpy(row.Len(), x.Val2[i].Val, &row[0].Val, 1, &y[0].Val, 1);
		//cblas_daxpy(x.Len(), k_, (Loc *)&x[0].Val, 1, (Loc *) &y[0].Val, 1);
		AddVec(x.Val2[i].Val, row, y);
#else
		TLinAlg::LinCombInPlace(x.Val2[i].Val, row, 1.0, y);
#endif
		//printf("Lincomb does not fail\n");
	}
}
// TEST Move to BLAS
// y := A' * x
template <class TType, class TSizeTy, bool ColMajor>
void TLinAlg::MultiplyT(const TVVec<TNum<TType>, TSizeTy, ColMajor>& A, const TVec<TNum<TType>, TSizeTy>& x, TVec<TNum<TType>, TSizeTy>& y) {
	if (y.Empty()) y.Gen(A.GetCols());
	EAssert(A.GetRows() == x.Len() && A.GetCols() == y.Len());
	TSizeTy n = A.GetCols(), m = A.GetRows();
	for (TSizeTy i = 0; i < n; i++) {
		y[i] = 0.0;
		for (TSizeTy j = 0; j < m; j++) y[i] += A(j, i) * x[j];
	}
}
#ifdef BLAS
typedef enum { NOTRANS = 0, TRANS = 1 } TLinAlgBlasTranspose;
// TEST
// C = op(A) * op(B)
template <class TType, class TSizeTy, bool ColMajor>
inline void TLinAlg::Multiply(const TVVec<TNum<TType>, TSizeTy, ColMajor>& A, const TVVec<TNum<TType>, TSizeTy, ColMajor>& B, TVVec<TNum<TType>, TSizeTy, ColMajor>& C, const int& BlasTransposeFlagA, const int& BlasTransposeFlagB) {
	//C := alpha*op(A)*op(B) + beta*C,
	//where:
	//op(X) is one of op(X) = X, op(X) = X^T, or op(X) = X^H,
	//alpha and beta are scalars,
	//A, B and C are matrices:
	//op(A) is an m-by-k matrix,
	//op(B) is a k-by-n matrix,
	//C is an m-by-n matrix.
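	// Worked example of the dimension bookkeeping below: for A of size 5x3 with
	// BlasTransposeFlagA == TRANS, op(A) = A' is 3x5, so m = 3 and k = 5; if B is
	// 5x4 and not transposed, op(B) is 5x4, so n = 4 and C must be 3x4.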
TSizeTy m, n, k, lda, ldb, ldc; if (BlasTransposeFlagA == TLinAlg::TLinAlgBlasTranspose::TRANS) { m = A.GetCols(); k = A.GetRows(); lda = ColMajor ? k : m; } else { m = A.GetRows(); k = A.GetCols(); lda = ColMajor ? m : k; } if (BlasTransposeFlagB == TLinAlg::TLinAlgBlasTranspose::TRANS) { EAssert(k == B.GetCols()); n = B.GetRows(); ldb = ColMajor ? n : k; } else { EAssert(k == B.GetRows()); n = B.GetCols(); ldb = ColMajor ? k : n; } EAssert(m == C.GetRows() && n == C.GetCols()); // simplified interface ldc = ColMajor ? m : n; #ifdef BLAS //Standard CBLAS interface CBLAS_TRANSPOSE BlasTransA = (BlasTransposeFlagA == TLinAlgBlasTranspose::TRANS) ? CblasTrans : CblasNoTrans; CBLAS_TRANSPOSE BlasTransB = (BlasTransposeFlagB == TLinAlgBlasTranspose::TRANS) ? CblasTrans : CblasNoTrans; CBLAS_ORDER Matrix_Layout = ColMajor ? CblasColMajor : CblasRowMajor; if (TypeCheck::is_double<TType>::value == true){ typedef double Loc; double alpha = 1.0, beta = 0.0; cblas_dgemm(Matrix_Layout, BlasTransA, BlasTransB, m, n, k, alpha, (Loc *)&A(0, 0).Val, lda, (Loc *)&B(0, 0).Val, ldb, beta, (Loc *)&C(0, 0).Val, ldc); } else if (TypeCheck::is_float<TType>::value == true){ typedef float Loc; float alpha = 1.0f, beta = 0.0f; cblas_sgemm(Matrix_Layout, BlasTransA, BlasTransB, m, n, k, alpha, (Loc *)&A(0, 0).Val, lda, (Loc *)&B(0, 0).Val, ldb, beta, (Loc *)&C(0, 0).Val, ldc); } else if (TypeCheck::is_complex_double<TType>::value == true){ typedef double Loc; std::complex<double> alpha(1.0); std::complex<double> beta(0.0); cblas_zgemm(Matrix_Layout, BlasTransA, BlasTransB, m, n, k, (const Loc *)&alpha, (const Loc *)&A(0, 0).Val, lda, (const Loc *)&B(0, 0).Val, ldb, (const Loc *)&beta, (Loc *)&C(0, 0).Val, ldc); } else if (TypeCheck::is_complex_float<TType>::value == true){ typedef float Loc; std::complex<float> alpha(1.0f); std::complex<float> beta(0.0f); cblas_cgemm(Matrix_Layout, BlasTransA, BlasTransB, m, n, k, (const Loc *)&alpha, (const Loc *)&A(0, 0).Val, lda, (const Loc *)&B(0, 0).Val, ldb, (const Loc *)&beta, (Loc *)&C(0, 0).Val, ldc); } #else //Fortran 77 style interface, all values must be passed by reference! TStr TransposeFlagA = "N"; TStr TransposeFlagB = "N"; if (BlasTransposeFlagA){ TransposeFlagA = "T"; /*lda = k;*/ } if (BlasTransposeFlagB){ TransposeFlagB = "T"; /*ldb = n;*/ } #ifdef AMD DGEMM(TransposeFlagA.CStr(), TransposeFlagB.CStr(), &m, &n, &k, &alpha, &A(0, 0).Val, &lda, &B(0, 0).Val, &ldb, &beta, &C(0, 0).Val, &ldc, TransposeFlagA.Len(), TransposeFlagB.Len()); #else dgemm(TransposeFlagA.CStr(), TransposeFlagB.CStr(), &m, &n, &k, &alpha, &A(0, 0).Val, &lda, &B(0, 0).Val, &ldb, &beta, &C(0, 0).Val, &ldc); #endif #endif } #endif #ifdef BLAS // TEST // y := alpha*op(A)*x + beta*y, where op(A) = A -- N, op(A) = A' -- T, op(A) = conj(A') -- C (only for complex) //Andrej ToDo In the future replace TType with TNum<type> and change double to type template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::Multiply(const TVVec<TNum<TType>, TSizeTy, ColMajor>& A, const TVec<TNum<TType>, TSizeTy>& x, TVec<TNum<TType>, TSizeTy>& y, const int& BlasTransposeFlagA, TType alpha, TType beta) { TSizeTy m = A.GetRows(); TSizeTy n = A.GetCols(); //Can we multiply and store in y? if (BlasTransposeFlagA) {//A'*x n*m x m -> n EAssertR(x.Len() == m, "TLinAlg::Multiply: Invalid dimension of input vector!"); if (y.Reserved() != n) { // TODO should I do this here?? Meybe if the length is > n it would also be OK?? 
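			// Gen(n, n) sets both the capacity and the length of y to exactly n entries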
y.Gen(n, n);
		}
	} else { // A*x: (m x n) * n -> m
		EAssertR(x.Len() == n, "TLinAlg::Multiply: Invalid dimension of input vector!");
		if (y.Reserved() != m) { // TODO should I do this here?? Maybe if the length is > m it would also be OK??
			y.Gen(m, m);
		}
	}
	TSizeTy lda = ColMajor ? m : n;
	TSizeTy incx = /*ColMajor ? x.Len() :*/ 1;
	TSizeTy incy = /*ColMajor ? y.Len() :*/ 1;
	CBLAS_ORDER Matrix_Layout = ColMajor ? CblasColMajor : CblasRowMajor;
#ifdef BLAS //Standard CBLAS interface
	CBLAS_TRANSPOSE BlasTransA = BlasTransposeFlagA ? CblasTrans : CblasNoTrans;
	/*if (BlasTransposeFlagA){ BlasTransA = CblasTrans; }*/
	if (TypeCheck::is_double<TType>::value == true) {
		typedef double Loc;
		double alpha_ = alpha; double beta_ = beta;
		cblas_dgemv(Matrix_Layout, BlasTransA, m, n, alpha_, (Loc *)&A(0, 0).Val, lda, (Loc *)&x[0].Val, incx, beta_, (Loc *)&y[0].Val, incy);
	}
	else if (TypeCheck::is_float<TType>::value == true) {
		typedef float Loc;
		float alpha_ = (float)alpha; float beta_ = (float)beta;
		cblas_sgemv(Matrix_Layout, BlasTransA, m, n, alpha_, (Loc *)&A(0, 0).Val, lda, (Loc *)&x[0].Val, incx, beta_, (Loc *)&y[0].Val, incy);
	}
	else if (TypeCheck::is_complex_double<TType>::value == true) {
		typedef double Loc;
		std::complex<double> alpha_(alpha); std::complex<double> beta_(beta);
		cblas_zgemv(Matrix_Layout, BlasTransA, m, n, (const Loc *)&alpha_, (const Loc *)&A(0, 0).Val, lda, (const Loc *)&x[0].Val, incx, (const Loc *)&beta_, (Loc *)&y[0].Val, incy);
	}
	else if (TypeCheck::is_complex_float<TType>::value == true) {
		typedef float Loc;
		std::complex<float> alpha_((float)alpha); std::complex<float> beta_((float)beta);
		cblas_cgemv(Matrix_Layout, BlasTransA, m, n, (const Loc *)&alpha_, (const Loc *)&A(0, 0).Val, lda, (const Loc *)&x[0].Val, incx, (const Loc *)&beta_, (Loc *)&y[0].Val, incy);
	}
#else //Fortran 77 style interface, all values must be passed by reference!
TStr TransposeFlag = "N"; if (BlasTransposeFlagA){ TransposeFlag = 'T'; } #ifdef AMD DGEMV(TransposeFlag.CStr(), &m, &n, &alpha, &A(0, 0).Val, &lda, &x[0].Val, &incx, &beta, &y[0].Val, &incy, TransposeFlag.Len()); //DGEMV(char *trans, int *m, int *n, double *alpha, double *a, int *lda, double *x, int *incx, double *beta, double *y, int *incy, int trans_len); #else dgemv(TransposeFlag.CStr(), &m, &n, &alpha, &A(0, 0).Val, &lda, &x[0].Val, &incx, &beta, &y[0].Val, &incy); #endif #endif } #endif // TEST // C = A * B template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::Multiply(const TVVec<TType, TSizeTy, ColMajor>& A, const TVVec<TType, TSizeTy, ColMajor>& B, TVVec<TType, TSizeTy, ColMajor>& C) { EAssert(A.GetRows() == C.GetRows() && B.GetCols() == C.GetCols() && A.GetCols() == B.GetRows()); #ifdef BLAS TLinAlg::Multiply(A, B, C, TLinAlgBlasTranspose::NOTRANS, TLinAlgBlasTranspose::NOTRANS); #else TSizeTy RowsA = A.GetRows(); TSizeTy ColsA = A.GetCols(); TSizeTy ColsB = B.GetCols(); C.PutAll(0.0); for (TSizeTy RowN = 0; RowN < RowsA; RowN++) { for (TSizeTy ColAN = 0; ColAN < ColsA; ColAN++) { double Weight = A(RowN, ColAN); for (TSizeTy ColBN = 0; ColBN < ColsB; ColBN++) { C(RowN, ColBN) += Weight * B(ColAN, ColBN); } } } #endif } // TEST // C = A' * B template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::MultiplyT(const TVVec<TType, TSizeTy, ColMajor>& A, const TVVec<TType, TSizeTy, ColMajor>& B, TVVec<TType, TSizeTy, ColMajor>& C) { if (C.Empty()) { C.Gen(A.GetCols(), B.GetCols()); } EAssert(A.GetCols() == C.GetRows() && B.GetCols() == C.GetCols() && A.GetRows() == B.GetRows()); #ifdef BLAS TLinAlg::Multiply(A, B, C, TLinAlgBlasTranspose::TRANS, TLinAlgBlasTranspose::NOTRANS); #else TSizeTy n = C.GetRows(), m = C.GetCols(), l = A.GetRows(); double sum; for (TSizeTy i = 0; i < n; i++) { for (TSizeTy j = 0; j < m; j++) { sum = 0.0; for (TSizeTy k = 0; k < l; k++) sum += A(k, i)*B(k, j); C(i, j) = sum; } } #endif } ////////////////// // DENSE-SPARSE, SPARSE-DENSE // TEST // C := A * B template <class IndexType, class TType, class TSizeTy, bool ColMajor> void TLinAlg::Multiply(const TVVec<TType, TSizeTy, ColMajor>& A, const TTriple<TVec<IndexType, TSizeTy>, TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& B, TVVec<TType, TSizeTy, ColMajor>& C){ // B well defined EAssert(B.Val1.Len() == B.Val2.Len() && B.Val2.Len() == B.Val3.Len()); // Dimensions must match C.PutAll(0.0); if (B.Val1.Len() == 0) { return; } #ifdef INTELS TLinAlg::MultiplyFS(const_cast<TVVec<TType, TSizeTy, ColMajor> &>(A), B, C); #else TSizeTy Nonzeros = B.Val1.Len(); IndexType MaxRowN = B.Val1[B.Val1.GetMxValN()]; IndexType MaxColN = B.Val2[B.Val2.GetMxValN()]; EAssert(A.GetRows() == C.GetRows() && (MaxColN + 1) <= C.GetCols() && (MaxRowN + 1) <= A.GetCols()); for (TSizeTy RowN = 0; RowN < A.GetRows(); RowN++) { for (TSizeTy ElN = 0; ElN < Nonzeros; ElN++) { C.At(RowN, B.Val2[ElN]) += A.At(RowN, B.Val1[ElN]) * B.Val3[ElN]; } } #endif } // TEST // C:= A' * B template <class IndexType, class TType, class TSizeTy, bool ColMajor> void TLinAlg::MultiplyT(const TVVec<TType, TSizeTy, ColMajor>& A, const TTriple<TVec<IndexType, TSizeTy>, TVec<IndexType, TSizeTy>, TVec<TType, TSizeTy>>& B, TVVec<TType, TSizeTy, ColMajor>& C) { // B well defined EAssert(B.Val1.Len() == B.Val2.Len() && B.Val2.Len() == B.Val3.Len()); // Dimensions must match C.PutAll(0.0); if (B.Val1.Len() == 0) { return; } TSizeTy Nonzeros = B.Val1.Len(); IndexType MaxRowN = B.Val1[B.Val1.GetMxValN()]; IndexType MaxColN = 
B.Val2[B.Val2.GetMxValN()]; EAssert(A.GetCols() == C.GetRows() && (MaxColN + 1) <= C.GetCols() && (MaxRowN + 1) <= A.GetRows()); for (TSizeTy RowN = 0; RowN < A.GetCols(); RowN++) { for (TSizeTy ElN = 0; ElN < Nonzeros; ElN++) { C.At(RowN, B.Val2[ElN]) += A.At(B.Val1[ElN], RowN) * B.Val3[ElN]; } } } // TEST // C := A * B //#if !defined(INTEL) || defined(INDEX_64) template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::Multiply(const TTriple<TVec<TNum<TSizeTy>, TSizeTy>, TVec<TNum<TSizeTy>, TSizeTy>, TVec<TType, TSizeTy>>& A, const TVVec<TType, TSizeTy, ColMajor>& B, TVVec<TType, TSizeTy, ColMajor>& C) { // A well defined EAssert(A.Val1.Len() == A.Val2.Len() && A.Val2.Len() == A.Val3.Len()); // Dimensions must match C.PutAll(0.0); if (A.Val1.Len() == 0) { return; } #if !defined(INTEL) || defined(INDEX_64) TSizeTy Nonzeros = A.Val1.Len(); TSizeTy MaxRowN = A.Val1[A.Val1.GetMxValN()]; TSizeTy MaxColN = A.Val2[A.Val2.GetMxValN()]; EAssert(B.GetCols() == C.GetCols() && (MaxRowN + 1) <= C.GetRows() && (MaxColN + 1) <= B.GetRows()); for (TSizeTy ColN = 0; ColN < B.GetCols(); ColN++) { for (TSizeTy ElN = 0; ElN < Nonzeros; ElN++) { C.At(A.Val1[ElN], ColN) += A.Val3[ElN] * B.At(A.Val2[ElN], ColN); } } #else TLinAlg::MultiplySF(A, B, C); #endif } // TEST // C:= A' * B template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::MultiplyT(const TTriple<TVec<TNum<TSizeTy>, TSizeTy>, TVec<TNum<TSizeTy>, TSizeTy>, TVec<TType, TSizeTy>>& A, const TVVec<TType, TSizeTy, ColMajor>& B, TVVec<TType, TSizeTy, ColMajor>& C) { // B well defined EAssert(A.Val1.Len() == A.Val2.Len() && A.Val2.Len() == A.Val3.Len()); // Dimensions must match C.PutAll(0.0); if (A.Val1.Len() == 0) { return; } #if !defined(INTEL) || defined(INDEX_64) TSizeTy Nonzeros = A.Val1.Len(); TSizeTy MaxRowN = A.Val1[A.Val1.GetMxValN()]; TSizeTy MaxColN = A.Val2[A.Val2.GetMxValN()]; EAssert(B.GetCols() == C.GetCols() && (MaxColN + 1) <= C.GetRows() && (MaxRowN + 1) <= B.GetRows()); for (TSizeTy ColN = 0; ColN < B.GetCols(); ColN++) { for (TSizeTy ElN = 0; ElN < Nonzeros; ElN++) { C.At(A.Val2[ElN], ColN) += A.Val3[ElN] * B.At(A.Val1[ElN], ColN); } } #else TLinAlg::MultiplySF(A, B, C, "T"); #endif } // DENSE-SPARSECOLMAT, SPARSECOLMAT-DENSE // C := A * B // DENSE-SPARSECOLMAT, SPARSECOLMAT-DENSE // C := A * B //Andrej Urgent //TODO template --- indextype TIntFltKdV ... 
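// Example (illustrative only): dense times sparse column matrix, as implemented below.
// Each entry of B is one column, stored as a sorted (row index, value) list.
//   TFltVV A(2, 3); A.PutAll(1.0);                // dense 2x3
//   TVec<TIntFltKdV> B; B.Gen(2);                 // sparse 3x2 with two columns
//   B[0].Add(TIntFltKd(0, 2.0));                  // B(0,0) = 2
//   B[1].Add(TIntFltKd(2, 5.0));                  // B(2,1) = 5
//   TFltVV C;                                     // generated as 2x2 inside
//   TLinAlg::Multiply(A, B, C);                   // C = [[2, 5], [2, 5]]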
TInt64 void TLinAlg::Multiply(const TFltVV& A, const TVec<TIntFltKdV>& B, TFltVV& C) { // B = sparse column matrix if (C.Empty()) { C.Gen(A.GetRows(), B.Len()); } else { EAssert(A.GetRows() == C.GetRows() && B.Len() == C.GetCols()); } EAssert(TLAMisc::GetMaxDimIdx(B) < A.GetCols()); int Cols = B.Len(); int Rows = A.GetRows(); C.PutAll(0.0); for (int RowN = 0; RowN < Rows; RowN++) { for (int ColN = 0; ColN < Cols; ColN++) { int Els = B[ColN].Len(); for (int ElN = 0; ElN < Els; ElN++) { C.At(RowN, ColN) += A.At(RowN, B[ColN][ElN].Key) * B[ColN][ElN].Dat; } } } } // C:= A' * B template <class IndexType, class TType, class TSizeTy, bool ColMajor> void TLinAlg::MultiplyT(const TVVec<TType, TSizeTy, ColMajor>& A, const TVec<TVec<TKeyDat<IndexType, TType>, TSizeTy>, TSizeTy>& B, TVVec<TType, TSizeTy, ColMajor>& C) { // C = A' B = (B' A)' #ifdef INTELBETA TTriple<TVec<IndexType, TSizeTy>, TVec<TInt, TSizeTy>, TVec<TType, TSizeTy>> BB; TLinAlg::Convert(B, BB); // convert the matrix to a coordinate form TVVec<TType, TSizeTy, ColMajor> CC(B.Len(), A.GetCols()); TLinAlg::MultiplyT(BB, A, CC); if (C.Empty()) { C.Gen(A.GetCols(), B.Len()); } else { EAssert(C.GetRows() == A.GetCols() && C.GetCols() == B.Len()); } TLinAlg::Transpose(CC, C); #else // B = sparse column matrix if (C.Empty()) { C.Gen(A.GetCols(), B.Len()); } else { EAssert(A.GetCols() == C.GetRows() && B.Len() == C.GetCols()); } EAssert(TLAMisc::GetMaxDimIdx(B) < A.GetRows()); int Cols = B.Len(); int Rows = A.GetCols(); C.PutAll(0.0); for (int RowN = 0; RowN < Rows; RowN++) { for (int ColN = 0; ColN < Cols; ColN++) { int Els = B[ColN].Len(); for (int ElN = 0; ElN < Els; ElN++) { C.At(RowN, ColN) += A.At(B[ColN][ElN].Key, RowN) * B[ColN][ElN].Dat; } } } #endif } // C := A * B //Andrej Urgent //TODO template --- indextype TIntFltKdV ... TInt64 void TLinAlg::Multiply(const TVec<TIntFltKdV>& A, const TFltVV& B, TFltVV& C, const int RowsA) { // A = sparse column matrix EAssert(A.Len() == B.GetRows()); int Rows = RowsA; int ColsB = B.GetCols(); if (RowsA == -1) { Rows = TLAMisc::GetMaxDimIdx(A) + 1; } else { EAssert(TLAMisc::GetMaxDimIdx(A) + 1 <= RowsA); } if (C.Empty()) { C.Gen(Rows, ColsB); } int RowsB = B.GetRows(); C.PutAll(0.0); for (int ColN = 0; ColN < ColsB; ColN++) { for (int RowN = 0; RowN < RowsB; RowN++) { int Els = A[RowN].Len(); for (int ElN = 0; ElN < Els; ElN++) { C.At(A[RowN][ElN].Key, ColN) += A[RowN][ElN].Dat * B.At(RowN, ColN); } } } } // C:= A' * B //Andrej Urgent //TODO template --- indextype TIntFltKdV ... TInt64 TFlt void TLinAlg::MultiplyT(const TVec<TIntFltKdV>& A, const TFltVV& B, TFltVV& C) { // A = sparse column matrix EAssert(TLAMisc::GetMaxDimIdx(A) + 1 <= B.GetRows()); int ColsB = B.GetCols(); //int RowsB = B.GetRows(); int ColsA = A.Len(); if (C.Empty()) { C.Gen(ColsA, ColsB); } else { EAssert(C.GetRows() == ColsA && C.GetCols() == ColsB); } C.PutAll(0.0); for (int RowN = 0; RowN < ColsA; RowN++) { for (int ColN = 0; ColN < ColsB; ColN++) { int Els = A[RowN].Len(); for (int ElN = 0; ElN < Els; ElN++) { C.At(RowN, ColN) += A[RowN][ElN].Dat * B.At(A[RowN][ElN].Key, ColN); } } } } // SPARSECOLMAT-SPARSECOLMAT // C := A * B //Andrej Urgent //TODO template --- indextype TIntFltKdV ... 
TInt64 //TLAMisc //GetMaxDimIdx void TLinAlg::Multiply(const TVec<TIntFltKdV>& A, const TVec<TIntFltKdV>& B, TFltVV& C, const int RowsA) { //// A,B = sparse column matrix //EAssert(A.Len() == B.GetRows()); int Rows = RowsA; int ColsB = B.Len(); if (RowsA == -1) { Rows = TLAMisc::GetMaxDimIdx(A) + 1; } else { EAssert(TLAMisc::GetMaxDimIdx(A) + 1 <= RowsA); } if (C.Empty()) { C.Gen(Rows, ColsB); } EAssert(TLAMisc::GetMaxDimIdx(B) + 1 <= A.Len()); C.PutAll(0.0); for (int ColN = 0; ColN < ColsB; ColN++) { int ElsB = B[ColN].Len(); for (int ElBN = 0; ElBN < ElsB; ElBN++) { int IdxB = B[ColN][ElBN].Key; double ValB = B[ColN][ElBN].Dat; int ElsA = A[IdxB].Len(); for (int ElAN = 0; ElAN < ElsA; ElAN++) { int IdxA = A[IdxB][ElAN].Key; double ValA = A[IdxB][ElAN].Dat; C.At(IdxA, ColN) += ValA * ValB; } } } } // C:= A' * B //Andrej Urgent //TODO template --- indextype TIntFltKdV ... TInt64 void TLinAlg::MultiplyT(const TVec<TIntFltKdV>& A, const TVec<TIntFltKdV>& B, TFltVV& C) { //// A, B = sparse column matrix int ColsA = A.Len(); int ColsB = B.Len(); if (C.Empty()) { C.Gen(ColsA, ColsB); } else { EAssert(ColsA == C.GetRows() && ColsB == C.GetCols()); } for (int RowN = 0; RowN < ColsA; RowN++) { for (int ColN = 0; ColN < ColsB; ColN++) { C.At(RowN, ColN) = TLinAlg::DotProduct(A[RowN], B[ColN]); } } } //#ifdef INTEL // void TLinAlg::Multiply(const TFltVV & ProjMat, const TPair<TIntV, TFltV> & Doc, TFltV & Result); //#endif // TEST // D = alpha * A(') * B(') + beta * C(') typedef enum { GEMM_NO_T = 0, GEMM_A_T = 1, GEMM_B_T = 2, GEMM_C_T = 4 } TLinAlgGemmTranspose; template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::Gemm(const double& Alpha, const TVVec<TType, TSizeTy, ColMajor>& A, const TVVec<TType, TSizeTy, ColMajor>& B, const double& Beta, const TVVec<TType, TSizeTy, ColMajor>& C, TVVec<TType, TSizeTy, ColMajor>& D, const int& TransposeFlags) { bool tA = (TransposeFlags & GEMM_A_T) == GEMM_A_T; bool tB = (TransposeFlags & GEMM_B_T) == GEMM_B_T; bool tC = (TransposeFlags & GEMM_C_T) == GEMM_C_T; // setting dimensions TSizeTy a_i = tA ? A.GetRows() : A.GetCols(); TSizeTy a_j = tA ? A.GetCols() : A.GetRows(); TSizeTy b_i = tB ? B.GetRows() : B.GetCols(); TSizeTy b_j = tB ? B.GetCols() : B.GetRows(); TSizeTy c_i = tC ? C.GetRows() : C.GetCols(); TSizeTy c_j = tC ? C.GetCols() : C.GetRows(); TSizeTy d_i = D.GetCols(); TSizeTy d_j = D.GetRows(); // assertions for dimensions EAssert(a_j == c_j && b_i == c_i && a_i == b_j && c_i == d_i && c_j == d_j); double Aij, Bij, Cij; // rows of D for (TSizeTy j = 0; j < a_j; j++) { // cols of D for (TSizeTy i = 0; i < b_i; i++) { // not optimized for speed - naive algorithm double sum = 0.0; // cols of A for (TSizeTy k = 0; k < a_i; k++) { Aij = tA ? A.At(k, j) : A.At(j, k); Bij = tB ? B.At(i, k) : B.At(k, i); sum += Alpha * Aij * Bij; } Cij = tC ? 
C.At(i, j) : C.At(j, i); sum += Beta * Cij; D.At(j, i) = sum; } } } // TEST (works only for RowMajor, TSvd uses only TFltVV matrices) // B = A^(-1) typedef enum { DECOMP_SVD } TLinAlgInverseType; template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::Inverse(const TVVec<TType, TSizeTy, ColMajor>& A, TVVec<TType, TSizeTy, ColMajor >& B, const TLinAlgInverseType& DecompType) { switch (DecompType) { case DECOMP_SVD: TLinAlg::InverseSVD(A, B); } } // subtypes of finding an inverse (works only for TFltVV, cuz of TSvd) template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::InverseSVD(const TVVec<TType, TSizeTy, ColMajor>& A, TVVec<TType, TSizeTy, ColMajor>& B, const double& tol) { // create temp matrices TVVec<TType, TSizeTy, ColMajor> U, V; TVec<TType, TSizeTy> E; TSvd SVD; //U.Gen(M.GetRows(), M.GetRows()); //V.Gen(M.GetCols(), M.GetCols()); U.Gen(A.GetRows(), A.GetRows()); V.Gen(A.GetCols(), A.GetCols()); // do the SVD decompostion SVD.Svd(A, U, E, V); // calculate reciprocal values for diagonal matrix = inverse diagonal for (TSizeTy i = 0; i < E.Len(); i++) { if (E[i] > tol) { E[i] = 1 / E[i]; } else { E[i] = 0.0; } } // calculate pseudoinverse: M^(-1) = V * E^(-1) * U' for (TSizeTy i = 0; i < U.GetCols(); i++) { for (TSizeTy j = 0; j < V.GetRows(); j++) { double sum = 0.0; for (TSizeTy k = 0; k < U.GetCols(); k++) { if (E[k] == 0.0) continue; sum += E[k] * V.At(i, k) * U.At(j, k); } B.At(i, j) = sum; } } } // subtypes of finding an inverse (works only for TFltVV, cuz of TSvd) template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::InverseSVD(const TVVec<TType, TSizeTy, ColMajor>& A, TVVec<TType, TSizeTy, ColMajor>& B) { // create temp matrices TVVec<TType, TSizeTy, ColMajor> U, V; TVec<TType, TSizeTy> E; TSvd SVD; //U.Gen(M.GetRows(), M.GetRows()); //V.Gen(M.GetCols(), M.GetCols()); U.Gen(A.GetRows(), A.GetRows()); V.Gen(A.GetCols(), A.GetCols()); // do the SVD decompostion SVD.Svd(A, U, E, V); // http://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_pseudoinverse#Singular_value_decomposition_.28SVD.29 double tol = TFlt::Eps * MAX(A.GetRows(), A.GetCols()) * E[E.GetMxValN()]; // calculate reciprocal values for diagonal matrix = inverse diagonal for (TSizeTy i = 0; i < E.Len(); i++) { if (E[i] > tol) { E[i] = 1 / E[i]; } else { E[i] = 0.0; } } // calculate pseudoinverse: M^(-1) = V * E^(-1) * U' for (TSizeTy i = 0; i < U.GetCols(); i++) { for (TSizeTy j = 0; j < V.GetRows(); j++) { double sum = 0; for (TSizeTy k = 0; k < U.GetCols(); k++) { if (E[k] == 0.0) continue; sum += E[k] * V.At(i, k) * U.At(j, k); } B.At(i, j) = sum; } } } // transpose matrix - B = A' template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::Transpose(const TVVec<TType, TSizeTy, ColMajor>& A, TVVec<TType, TSizeTy, ColMajor>& B) { if (B.Empty()) { B.Gen(A.GetCols(), A.GetRows()); } EAssert(B.GetRows() == A.GetCols() && B.GetCols() == A.GetRows()); for (TSizeTy i = 0; i < A.GetCols(); i++) { for (TSizeTy j = 0; j < A.GetRows(); j++) { B.At(i, j) = A.At(j, i); } } } // performes Gram-Schmidt ortogonalization on elements of Q template <class TSizeTy> void TLinAlg::GS(TVec<TVec<TFlt, TSizeTy>, TSizeTy>& Q) { EAssert(Q.Len() > 0); TSizeTy m = Q.Len(); // int n = Q[0].Len(); for (TSizeTy i = 0; i < m; i++) { printf("%d\r", i); for (TSizeTy j = 0; j < i; j++) { double r = TLinAlg::DotProduct(Q[i], Q[j]); TLinAlg::AddVec(-r, Q[j], Q[i], Q[i]); } TLinAlg::Normalize(Q[i]); } printf("\n"); } // TEST // Gram-Schmidt on columns of matrix Q template <class TType, class TSizeTy, bool 
ColMajor> void TLinAlg::GS(TVVec<TType, TSizeTy, ColMajor>& Q) { TSizeTy m = Q.GetCols(), n = Q.GetRows(); for (TSizeTy i = 0; i < m; i++) { printf("%d\r", i); for (TSizeTy j = 0; j < i; j++) { double r = TLinAlg::DotProduct(Q, i, Q, j); TLinAlg::AddVec(-r, Q, j, Q, i); } double nr = TLinAlg::Norm(Q, i); for (TSizeTy k = 0; k < n; k++) Q(k, i) = Q(k, i) / nr; } printf("\n"); } // Modified Gram-Schmidt on columns of matrix Q void TLinAlg::MGS(TFltVV& Q) { int Cols = Q.GetCols(), Rows = Q.GetRows(); EAssertR(Rows >= Cols, "TLinAlg::MGS: number of rows should be greater or equal to the number of cols"); for (int ColN = 0; ColN < Cols; ColN++) { TLinAlg::NormalizeColumns(Q); for (int ColN2 = ColN + 1; ColN2 < Cols; ColN2++) { double r = TLinAlg::DotProduct(Q, ColN, Q, ColN2); TLinAlg::AddVec(-r, Q, ColN, Q, ColN2); } } } // QR based on Modified Gram-Schmidt decomposition. void TLinAlg::QR(const TFltVV& X, TFltVV& Q, TFltVV& R, const TFlt& Tol) { int Rows = X.GetRows(); int Cols = X.GetCols(); int d = MIN(Rows, Cols); // make a copy of X TFltVV A(X); if (Q.GetRows() != Rows || Q.GetCols() != d) { Q.Gen(Rows, d); } if (R.GetRows() != d || R.GetCols() != Cols) { R.Gen(d, Cols); } TRnd Random; for (int k = 0; k < d; k++) { R(k, k) = TLinAlg::Norm(A, k); // if the remainders norm is too small we construct a random vector (handles rank deficient) if (R(k, k) < Tol) { // random Q(:,k) for (int RowN = 0; RowN < Rows; RowN++) { Q(RowN, k) = Random.GetNrmDev(); } // make it orthonormal on others for (int j = 0; j < k; j++) { TLinAlg::AddVec(-TLinAlg::DotProduct(Q, j, Q, k), Q, j, Q, k); } TLinAlg::NormalizeColumn(Q, k); R(k, k) = 0; } else { // normalize for (int RowN = 0; RowN < Rows; RowN++) { Q(RowN, k) = A(RowN, k) / R(k, k); } } // make the rest of the columns of A orthogonal to the current basis Q for (int j = k + 1; j < Cols; j++) { R(k, j) = TLinAlg::DotProduct(Q, k, A, j); TLinAlg::AddVec(-R(k, j), Q, k, A, j); } } } // rotates vector (OldX,OldY) for angle Angle (in radians!) 
void TLinAlg::Rotate(const double& OldX, const double& OldY, const double& Angle, double& NewX, double& NewY) { NewX = OldX*cos(Angle) - OldY*sin(Angle); NewY = OldX*sin(Angle) + OldY*cos(Angle); } // checks if set of vectors is ortogonal template <class TSizeTy> void TLinAlg::AssertOrtogonality(const TVec<TVec<TFlt, TSizeTy>, TSizeTy>& Vecs, const double& Threshold) { TSizeTy m = Vecs.Len(); for (TSizeTy i = 0; i < m; i++) { for (TSizeTy j = 0; j < i; j++) { double res = TLinAlg::DotProduct(Vecs[i], Vecs[j]); if (TFlt::Abs(res) > Threshold) printf("<%d,%d> = %.5f", i, j, res); } double norm = TLinAlg::Norm2(Vecs[i]); if (TFlt::Abs(norm - 1) > Threshold) printf("||%d|| = %.5f", i, norm); } } //ColMajor oriented data for optimal result template <class TType, class TSizeTy, bool ColMajor> void TLinAlg::AssertOrtogonality(const TVVec<TType, TSizeTy, ColMajor>& Vecs, const double& Threshold) { TSizeTy m = Vecs.GetCols(); for (TSizeTy i = 0; i < m; i++) { for (TSizeTy j = 0; j < i; j++) { double res = TLinAlg::DotProduct(Vecs, i, Vecs, j); if (TFlt::Abs(res) > Threshold) printf("<%d,%d> = %.5f", i, j, res); } double norm = TLinAlg::Norm2(Vecs, i); if (TFlt::Abs(norm - 1) > Threshold) printf("||%d|| = %.5f", i, norm); } printf("\n"); } bool TLinAlg::IsOrthonormal(const TFltVV& Vecs, const double& Threshold) { int m = Vecs.GetCols(); TFltVV R(m, m); TLinAlg::MultiplyT(Vecs, Vecs, R); for (int i = 0; i < m; i++) { R(i, i) -= 1; } return TLinAlg::Frob(R) < Threshold; } bool TLinAlg::IsZero(const TFltV& Vec) { int Len = Vec.Len(); for (int i = 0; i < Len; i++) { if (Vec[i] != 0.0) { return false; } } return true; } template <class TType, class TSizeTy, bool ColMajor> inline void TLinAlg::Pow(const TVVec<TType, TSizeTy, ColMajor>& Mat, const int& k, TVVec<TType, TSizeTy, ColMajor>& PowVV) { EAssertR(Mat.GetRows() == Mat.GetCols(), "TLinAlg::Pow: Can only compute powers of square matrices!"); const TSizeTy Dim = Mat.GetRows(); if (k == 0) { TLAUtil::Identity(Dim, PowVV); } else if (k < 0) { TVVec<TType, TSizeTy, ColMajor> InverseVV; TLinAlg::Inverse(Mat, InverseVV, TLinAlgInverseType::DECOMP_SVD); Pow(InverseVV, -k, PowVV); } else { PowVV.Gen(Dim, Dim); // we will compute the power using the binary algorithm // we will always hold the newest values in X, so when // finishing the algorithm, the result will be in X // X <- A TVVec<TType, TSizeTy, ColMajor> TempMat(Mat); // temporary matrix // pointers, so swapping is faster TVVec<TType, TSizeTy, ColMajor>* X = &TempMat; TVVec<TType, TSizeTy, ColMajor>* X1 = &PowVV; // use the space already available // temporary variables TVVec<TType, TSizeTy, ColMajor>* Temp; // do the work uint k1 = (uint) k; uint n = (uint) TMath::Log2(k); uint b; for (uint i = 1; i <= n; i++) { b = (k1 >> (n-i)) & 1; // X <- X*X TLinAlg::Multiply(*X, *X, *X1); // swap X and X1 so that X holds the content Temp = X1; X1 = X; X = Temp; if (b == 1) { // X <- X*A TLinAlg::Multiply(*X, Mat, *X1); // swap X and X1 so that X holds the content Temp = X1; X1 = X; X = Temp; } } if (&PowVV != X) { // the values are in X, but we are returning X1 // copy X to PowVV PowVV = *X; } } } //}; template <class TVal> TVal TLinAlg::GetColMin(const TVVec<TVal>& X, const int& ColN) { const int Rows = X.GetRows(); EAssertR(Rows > 0, "Input matrix should have at least one row!"); TVal MinVal = X(0, ColN); for (int RowN = 1; RowN < Rows; RowN++) { TVal Val = X(RowN, ColN); if (Val < MinVal) { MinVal = Val; } } return MinVal; } template <class TVal> void TLinAlg::GetColMinV(const TVVec<TVal>& X, TVec<TVal>& 
ValV) {
    const int Cols = X.GetCols();
    ValV.Gen(Cols);
    for (int ColN = 0; ColN < Cols; ColN++) {
        ValV[ColN] = GetColMin(X, ColN);
    }
}

//////////////////////////////////////////////////////////////////////
// Numerical-Recipes-Exception
class TNSException : public TExcept {
public:
    TStr Message;
public:
    TNSException(const TStr& Msg) : TExcept(Msg) {}
    TNSException(const TStr& MsgStr, const TStr& LocStr) : TExcept(MsgStr, LocStr) { }
    /// Create new numerical exception
    static PExcept New(const TStr& MsgStr, const TStr& LocStr = TStr()) {
        return PExcept(new TNSException(MsgStr, LocStr));
    }
};

//////////////////////////////////////////////////////////////////////
// Numerical-Linear-Algebra (copied from Numerical Recipes)
class TNumericalStuff {
private:
    static double sqr(double a);
    static double sign(double a, double b);
    // Computes (a^2 + b^2)^(1/2) without
    // destructive underflow or overflow.
    static double pythag(double a, double b);
    // displays error message to screen
    static void nrerror(const TStr& error_text);
public:
    // Householder reduction of a real, symmetric matrix a[1..n][1..n].
    // On output, a is replaced by the orthogonal matrix Q effecting the
    // transformation. d[1..n] returns the diagonal elements of the
    // tridiagonal matrix, and e[1..n] the off-diagonal elements, with
    // e[1]=0. Several statements, as noted in comments, can be omitted
    // if only eigenvalues are to be found, in which case a contains no
    // useful information on output. Otherwise they are to be included.
    static void SymetricToTridiag(TFltVV& a, int n, TFltV& d, TFltV& e);

    // QL algorithm with implicit shifts, to determine the eigenvalues
    // and eigenvectors of a real, symmetric, tridiagonal matrix, or of
    // a real, symmetric matrix previously reduced by tred2 (section 11.2). On
    // input, d[1..n] contains the diagonal elements of the tridiagonal
    // matrix. On output, it returns the eigenvalues. The vector e[1..n]
    // inputs the subdiagonal elements of the tridiagonal matrix, with
    // e[1] arbitrary. On output e is destroyed. When finding only the
    // eigenvalues, several lines may be omitted, as noted in the comments.
    // If the eigenvectors of a tridiagonal matrix are desired, the matrix
    // z[1..n][1..n] is input as the identity matrix. If the eigenvectors
    // of a matrix that has been reduced by tred2 are required, then z is
    // input as the matrix output by tred2. In either case, the kth column
    // of z returns the normalized eigenvector corresponding to d[k].
    static void EigSymmetricTridiag(TFltV& d, TFltV& e, int n, TFltVV& z);

    // Given a positive-definite symmetric matrix A(n,n), this routine
    // constructs its Cholesky decomposition, A = L * L^T. On input, only
    // the upper triangle of A need be given; it is not modified. The
    // Cholesky factor L is returned in the lower triangle of A, except for
    // its diagonal elements which are returned in p(n).
    static void CholeskyDecomposition(TFltVV& A, TFltV& p);

    // Solves the set of n linear equations A * x = b, where A is a
    // positive-definite symmetric matrix. A(n,n) and p[1..n] are input
    // as the output of the routine choldc. Only the lower triangle of A
    // is accessed. b(n) is input as the right-hand side vector. The
    // solution vector is returned in x(n). A and p are not modified and
    // can be left in place for successive calls with different right-hand
    // sides b. b is not modified unless you identify b and x in the calling
    // sequence, which is allowed.
    static void CholeskySolve(const TFltVV& A, const TFltV& p, const TFltV& b, TFltV& x);

    // Solves the system of linear equations A * x = b, where A is a symmetric
    // positive-definite matrix. A is first decomposed using
    // CholeskyDecomposition and then solved using CholeskySolve. Only the
    // upper triangle of A need be given and it is not modified. However,
    // the lower triangle is modified!
    static void SolveSymetricSystem(TFltVV& A, const TFltV& b, TFltV& x);

    // solve system A x_i = e_i for i = 1..n, where A and p are output
    // from CholeskyDecomposition. Result is stored to the upper triangle
    // (possible since the inverse of a symmetric matrix is also symmetric! Sigh...)
    static void InverseSubstitute(TFltVV& A, const TFltV& p);

    // Calculates the inverse of a symmetric positive-definite matrix.
    // Matrix is given as the upper triangle of A; the result is stored
    // in the upper triangle of A. The lower triangle is random (actually
    // it holds part of the Cholesky decomposition of A)
    static void InverseSymetric(TFltVV& A);

    // calculates the inverse of an upper triangular matrix A
    // lower triangle is messed up...
    static void InverseTriagonal(TFltVV& A);

    // Given a matrix a[1..n][1..n], this routine replaces it by the LU
    // decomposition of a rowwise permutation of itself. a and n are input.
    // a is output, arranged as in equation (2.3.14) above; indx[1..n] is
    // an output vector that records the row permutation effected by the partial
    // pivoting; d is output as +-1 depending on whether the number of row
    // interchanges was even or odd, respectively. This routine is used in
    // combination with lubksb to solve linear equations or invert a matrix.
    static void LUDecomposition(TFltVV& A, TIntV& indx, double& d);

    // Solves the set of n linear equations A*X = B. Here a[1..n][1..n] is input,
    // not as the matrix A but rather as its LU decomposition, determined by the
    // routine ludcmp. indx[1..n] is input as the permutation vector returned by
    // ludcmp. b[1..n] is input as the right-hand side vector B, and returns with
    // the solution vector X. a, n, and indx are not modified by this routine and
    // can be left in place for successive calls with different right-hand sides b.
    // This routine takes into account the possibility that b will begin with many
    // zero elements, so it is efficient for use in matrix inversion.
    static void LUSolve(const TFltVV& A, const TIntV& indx, TFltV& b);

    // Finds x[1...f] that minimizes ||A' x - y||^2 + ||Gamma x||^2, where A[1...f][1...n]
    // is a matrix with column training examples (rows = features) and y[1...n] is a
    // vector of targets.
    // Solves the primal problem if the number of features is lower than the number of examples,
    // or the dual problem in the other case.
    // Parameter Gamma controls overfitting (large values force models to be simpler)
    // See http://en.wikipedia.org/wiki/Tikhonov_regularization, where the regularization matrix = Gamma*I
    static void LeastSquares(const TFltVV& A, const TFltV& b, const double& kappa, TFltV& x);

    // Finds x[1...f] that minimizes ||A' x - y||^2 + ||Gamma x||^2, where A[1...f][1...n]
    // is a matrix with column training examples (rows = features) and y[1...n] is a
    // vector of targets.
    // Parameter Gamma controls overfitting (large values force models to be simpler)
    // See http://en.wikipedia.org/wiki/Tikhonov_regularization, where the regularization matrix = Gamma*I
    static void PrimalLeastSquares(const TFltVV& A, const TFltV& b, const double& kappa, TFltV& x);

    // Finds x[1...f] that minimizes ||A' x - y||^2 + ||Gamma x||^2, where A[1...f][1...n]
    // is a matrix with column training examples (rows = features) and y[1...n] is a
    // vector of targets. Solves the dual version of the problem and expresses it in the
    // original coordinates in the end - suitable for cases where the number of examples
    // is larger than the number of features.
    // Parameter Gamma controls overfitting (large values force models to be simpler)
    // See http://en.wikipedia.org/wiki/Tikhonov_regularization, where the regularization matrix = Gamma*I
    static void DualLeastSquares(const TFltVV& A, const TFltV& b, const double& kappa, TFltV& x);

    // Solves the system of linear equations A * x = b. A is first decomposed using
    // LUDecomposition and then solved using LUSolve. A is modified!
    static void SolveLinearSystem(TFltVV& A, const TFltV& b, TFltV& x);

    // Computes the eigenvector of A belonging to the specified eigenvalue;
    // uses the inverse iteration algorithm.
    // The algorithm does modify A due to its use of LU decomposition.
    static void GetEigenVec(const TFltVV& A, const double& EigenVal, TFltV& EigenV, const double& ConvergEps=1e-7);
};

///////////////////////////////////////////////////////////////////////
// Sparse-SVD
// Calculates the singular value decomposition for sparse matrices.
// If A is a matrix then A is decomposed to A = U S V'
// where S is diagonal with singular values on the diagonal and U
// and V are orthogonal (U'*U = V'*V = I).
typedef enum { ssotNoOrto, ssotSelective, ssotFull } TSpSVDReOrtoType;

class TSparseSVD {
private:
    // Result = Matrix' * Matrix * Vec(:,ColId)
    static void MultiplyATA(const TMatrix& Matrix, const TFltVV& Vec, int ColId, TFltV& Result);
    // Result = Matrix' * Matrix * Vec
    static void MultiplyATA(const TMatrix& Matrix, const TFltV& Vec, TFltV& Result);
public:
    // calculates NumEig eigenvalues of a symmetric matrix;
    // if SvdMatrixProductP then the matrix Matrix'*Matrix is used
    static void SimpleLanczos(const TMatrix& Matrix, const int& NumEig,
        TFltV& EigValV, const bool& DoLocalReortoP = false, const bool& SvdMatrixProductP = false);
    // fast, calculates the NumEig largest eigenvalues and vectors;
    // kk should be something like 4*NumEig;
    // if SvdMatrixProductP then the matrix Matrix'*Matrix is used
    static void Lanczos(const TMatrix& Matrix, int NumEig, int Iters,
        const TSpSVDReOrtoType& ReOrtoType, TFltV& EigValV, TFltVV& EigVecVV, const bool& SvdMatrixProductP = false);
    static void Lanczos2(const TMatrix& Matrix, int MaxNumEig, int MaxSecs,
        const TSpSVDReOrtoType& ReOrtoType, TFltV& EigValV, TFltVV& EigVecVV, const bool& SvdMatrixProductP = false);
    // calculates only singular values (based on SimpleLanczos)
    static void SimpleLanczosSVD(const TMatrix& Matrix, const int& CalcSV,
        TFltV& SngValV, const bool& DoLocalReortoP = false);
    // fast, calculates the NumSV largest singular values (based on Lanczos)
    static void LanczosSVD(const TMatrix& Matrix, int NumSV, int Iters,
        const TSpSVDReOrtoType& ReOrtoType, TFltV& SgnValV, TFltVV& LeftSgnVecVV, TFltVV& RightSgnVecVV);
    // slow - orthogonal iteration
    static void OrtoIterSVD(const TMatrix& Matrix, int NumSV, int IterN, TFltV& SgnValV);
    // slow - orthogonal iteration
    static void OrtoIterSVD(const TMatrix& Matrix, const int k, TFltV& S, TFltVV& U, TFltVV& V, const int Iters
= 100, const double Tol = 1e-6); // projects sparse vector to space spanned by columns of matrix U static void Project(const TIntFltKdV& Vec, const TFltVV& U, TFltV& ProjVec); }; ////////////////////////////////////////////////////////////////////// // Sigmoid -- made by Janez(TM) // (y = 1/[1 + exp[-Ax+B]]) class TSigmoid { private: TFlt A; TFlt B; private: // Evaluates how well the sigmoid function fits the data. // J(A, B) = - ln prod_i P(Y = y_i | Z = z_i). The 'data' parameter // should contain (z_i, y_i) pairs. Smaller J means a better fit. static double EvaluateFit(const TFltIntKdV& data, const double A, const double B); // Computes not only J but also its partial derivatives. static void EvaluateFit(const TFltIntKdV& data, const double A, const double B, double& J, double& JA, double& JB); // Let J(lambda) = J(A + lambda U, B + lambda V). // This function computes J and its first and second derivatives. // They can be used to choose a good lambda (using Newton's method) // when minimizing J. -- This method has not been tested yet. static void EvaluateFit(const TFltIntKdV& data, const double A, const double B, const double U, const double V, const double lambda, double& J, double& JJ, double& JJJ); public: TSigmoid() { }; TSigmoid(const double& A_, const double& B_) : A(A_), B(B_) { }; // Tries to find a pair (A, B) that minimizes J(A, B). // Uses gradient descent. TSigmoid(const TFltIntKdV& data); TSigmoid(TSIn& SIn) { A.Load(SIn); B.Load(SIn); } void Load(TSIn& SIn) { A.Load(SIn); B.Load(SIn); } void Save(TSOut& SOut) const { A.Save(SOut); B.Save(SOut); } double GetVal(const double& x) const { return 1.0 / (1.0 + exp(-A * x + B)); } double operator()(const double& x) const { return GetVal(x); } void GetSigmoidAB(double& A_, double& B_) { A_ = A; B_ = B; } }; class TFullMatrix; ///////////////////////////////////////////////////////////////////////// //// Full-Vector class TVector { friend class TFullMatrix; public: bool IsColVector; TFltV Vec; public: TVector(const bool& IsColVector = true); TVector(const int& Dim, const bool IsColVector = true); TVector(const TFltV& Vect, const bool IsColVector = true); TVector(const TIntV& Vect, const bool IsColVector = true); TVector(const TFullMatrix& Mat); // copy constructor TVector(const TVector& Vector); #ifdef GLib_CPP11 // Move constructor TVector(const TVector&& Vector); #endif // Move assignment TVector& operator=(TVector Vector); // returns a new zero vector static TVector Init(const int& Dim, const bool _IsColVect); // returns a vector of ones static TVector Ones(const int& Dim, const bool IsColVect = true); // returns a vector of zeros static TVector Zeros(const int& Dim, const bool IsColVec = true); // returns a vector with a sequence starting with Start (inclusive) and ending // with End (exclusive) static TVector Range(const int& Start, const int& End, const bool IsColVect = true); // returns a vector with a sequence starting with 0 (inclusive) and ending // with End (exclusive) static TVector Range(const int& End, const bool IsColVect = true); void Add(const double& Val) { Vec.Add(Val); } void DelLast() { Vec.DelLast(); } // returns true if the vectors have the same orientation and the elements are the same bool operator ==(const TVector& Vect) const; // returns the element at index Idx TFlt& operator [](const int& Idx) { return Vec[Idx]; } const TFlt& operator [](const int& Idx) const { return Vec[Idx]; } TVector GetT() const; TVector& Transpose(); double DotProduct(const TFltV& y) const; double DotProduct(const TVector& y) 
const; // multiplication TFullMatrix operator *(const TVector& y) const; TVector operator *(const TFullMatrix& Mat) const; TVector operator *(const double& k) const; // multiplies all elements by Lambda TVector& operator *=(const double& Lambda); // division // divides all elements by Lambda TVector operator /(const double& Lambda) const; // divides all elements by Lambda TVector& operator /=(const double& Lambda); // multiply the transpose of this vector with B (e.g. x'*B) TVector MulT(const TFullMatrix& B) const; // addition TVector operator +(const TVector& y) const; TVector& operator +=(const TVector& y); // subtraction TVector operator -(const TVector& y) const; public: int Len() const { return Vec.Len(); } bool IsColVec() const { return IsColVector; } bool IsRowVec() const { return !IsColVec(); } bool Empty() const { return Vec.Empty(); } template<typename TFunc> TVector& Map(const TFunc& Func); // applies sqrt on all elements of this matrix TVector& Sqrt() { return Map([](TFlt Val) { return sqrt(Val); }); } // returns a vector containing indexes of all the elements satisfying a condition template<typename TFunc> TVector Find(const TFunc& Func) const; template<typename TFunc, typename TRes> void Find(const TFunc& Func, TRes& Res) const; // returns the 'euclidian' L2 norm double Norm() const; // returns the squared 'euclidian' L2 norm double Norm2() const; // returns the sum of elements double Sum() const; // returns the euclidean distance to the other vector double EuclDist(const TVector& y) const; // returns the underlying list const TFltV& GetVec() const { return Vec; } // returns the underlying list TFltV& GetVec() { return Vec; } // returns this vector as a list of integers TIntV GetIntVec() const; double GetMaxVal() const; // returns the index of the maximum element int GetMaxIdx() const; // returns the index and value of the maximum element TIntFltPr GetMax() const; // returns the index of the minimum element int GetMinIdx() const; void Save(TSOut& SOut) const { TBool(IsColVector).Save(SOut); Vec.Save(SOut); } void Load(TSIn& SIn) { IsColVector = TBool(SIn); Vec.Load(SIn); } }; template <typename TFunc> TVector& TVector::Map(const TFunc& Func) { const int& Dim = Len(); for (int i = 0; i < Dim; i++) { Vec[i] = Func(Vec[i]); } return *this; } template <typename TFunc> TVector TVector::Find(const TFunc& Func) const { TVector Res; Find(Func, Res); return Res; } template <typename TFunc, typename TRes> void TVector::Find(const TFunc& Func, TRes& Res) const { const int& Dim = Len(); for (int i = 0; i < Dim; i++) { if (Func(Vec[i])) { Res.Add(i); } } } ///////////////////////////////////////////////////////////////////////// //// Full-Matrix typedef TTriple<TFullMatrix, TFullMatrix, TFullMatrix> TFullMatrixTr; typedef TTriple<TFullMatrix, TVector, TFullMatrix> TMatVecMatTr; class TFullMatrix : public TMatrix { friend class TVector; private: bool IsWrapper; TFltVV* Mat; public: // constructors/destructors // empty matrix with 0 rows and 0 cols TFullMatrix(); // zero matrix with the specified number of rows and cols TFullMatrix(const int& Rows, const int& Cols); // matrix from TFltVV, if IsWrapper is set to true then the // underlying matrix will not be deleted TFullMatrix(TFltVV& Mat, const bool IsWrapper); TFullMatrix(const TFltVV& Mat); // matrix from vector TFullMatrix(const TVector& Vec); // copy constructor TFullMatrix(const TFullMatrix& Mat); #ifdef GLib_CPP11 // move constructor TFullMatrix(TFullMatrix&& Mat); #endif private: // wraps the matrix and takes control of all the 
cleanup TFullMatrix(TFltVV* Mat); public: // destructor virtual ~TFullMatrix(); // copy constructor TFullMatrix& operator =(const TFullMatrix& Mat); // move constructor TFullMatrix& operator =(TFullMatrix&& _Mat); // identity matrix static TFullMatrix Identity(const int& Dim); // matrix from TVec<TFltV>, each element from the list goes into one row static TFullMatrix RowMatrix(const TVec<TFltV>& Mat); // matrix from TVec<TFltV>, each element from the list goes into one column static TFullMatrix ColMatrix(const TVec<TFltV>& Mat); // get a matrix with the values from the vector are diagonal elements static TFullMatrix Diag(const TVector& Diag); private: void Clr(); protected: virtual void PMultiply(const TFltVV& B, int ColId, TFltV& Result) const; virtual void PMultiply(const TFltV& Vec, TFltV& Result) const; virtual void PMultiplyT(const TFltVV& B, int ColId, TFltV& Result) const; virtual void PMultiplyT(const TFltV& Vec, TFltV& Result) const; virtual void PMultiply(const TFltVV& B, TFltVV& Result) const; virtual void PMultiplyT(const TFltVV& B, TFltVV& Result) const; // getters virtual int PGetRows() const { return Mat->GetRows(); } virtual int PGetCols() const { return Mat->GetCols(); } public: // returns the underlying TFltVV const TFltVV& GetMat() const { return *Mat; } // returns the underlying TFltVV TFltVV& GetMat() { return *Mat; } // transposed virtual void Transpose(); // returns the transpose of this matrix TFullMatrix GetT() const; void GetT(TFltVV& TransposedVV) const; // returns the value at position (i,j) TFlt& At(const int& i, const int& j) { return Mat->operator ()(i, j); } const TFlt& At(const int& i, const int& j) const { return Mat->operator ()(i, j); } // sets the value at position (i,j) void Set(const double& Val, const int& i, const int& j) { Mat->operator ()(i, j) = Val; } // returns true if the matrix is empty bool Empty() const { return Mat->Empty(); } TFullMatrix& AddCol(const TFltV& Col); TFullMatrix& AddCol(const TVector& Col); TFullMatrix& AddCols(const TFullMatrix& Cols); // operators TFlt& operator ()(const int& i, const int& j) { return At(i,j); } const TFlt& operator ()(const int& i, const int& j) const { return At(i,j); } // returns a submatrix specified by RowV and ColV template<class TIdxV1, class TIdxV2> TFullMatrix operator ()(const TIdxV1& RowV, const TIdxV2& ColV) const; template<class TIdxV> TVector operator ()(const int& RowIdx, const TIdxV& ColV) const; // adds matrix B and returns itself TFullMatrix& operator +=(const TFullMatrix& B); // subtracts matrix B and returns itself TFullMatrix& operator -=(const TFullMatrix& B); // add/subtract TFullMatrix operator +(const TFullMatrix& B) const; TFullMatrix operator -(const TFullMatrix& B) const; // multiply TFullMatrix operator *(const TFullMatrix& B) const; TFullMatrix operator *(const TSparseColMatrix& B) const; // multiply the transpose of this matrix with B (e.g. 
A'*B) TFullMatrix MulT(const TFullMatrix& B) const; TFullMatrix MulT(const TFltVV& B) const; // multiplies this matrix with a vector TVector operator *(const TVector& x) const; // multiplies this matrix with a vector represented as TFltV // ignores the vectors orientation TVector operator *(const TFltV& x) const; // scalars // multiplies this matrix by a scalar and returns the result TFullMatrix operator *(const double& Lambda) const; // divides this matrix by a scalar and returns the result TFullMatrix operator /(const double& Lambda) const; // returns the power of this matrix A^n where A is this matrix and n is the argument TFullMatrix Pow(const int& k) const; TFullMatrix operator ^(const int& k) const { return Pow(k); }; // returns the RowIdx-th row TVector GetRow(const int& RowIdx) const; // returns the ColIdx-th column TVector GetCol(const int& ColIdx) const; void SetRow(const int& RowIdx, const TVector& RowV); void SetCol(const int& ColIdx, const TVector& ColV); // applies an element-wise operation on this matrix and returns the matrix itself template<typename TFunc> TFullMatrix& Map(const TFunc& Func); // applies sqrt on all elements of this matrix TFullMatrix& Sqrt() { return Map([](TFlt Val) { return sqrt(Val); }); } // returns the L2 norm of the specified column double ColNorm(const int& ColIdx) const; // returns the squared L2 norm of the specified column double ColNorm2(const int& ColIdx) const; // returns the L2 norm of each column and returns them in a row vector TVector ColNormV() const; // returns the squared L2 norm of each column and returns them in a row vector TVector ColNorm2V() const; // returns the Frobenius norm of this matrix double FromNorm() const; // returns the norm of the i-th row double RowNormL1(const int& i) const; // normalizes the rows using L1 norm void NormalizeRowsL1(); // returns the sum of the i-th row double RowSum(const int& i) const; // returns a vector containing the sum of rows TVector RowSumV() const; // returns a vector containing the minimum values of each column TVector GetColMinV() const; // returns the index of the maximum element in each column in a row vector TVector GetColMaxIdxV() const; // returns the index of the minimum element in each column in a row vector TVector GetColMinIdxV() const; // transforms the rows of the matrix to have mean 0 TFullMatrix& CenterRows(); // returns a matrix which has rows centered around zero (check CenterRows) TFullMatrix GetCenteredRows() const; // computes the singular value decomposition if this matrix X = U*S*V' // returns a triple where U is stored in the first value, S is stored as a vector // in the second value and V is stored in the third value // k represents the number of singular values that are computed TMatVecMatTr Svd(const int& k) const; TMatVecMatTr Svd() const { return Svd(TMath::Mn(GetRows(), GetCols())); } // returns the inverse of this matrix TFullMatrix GetInverse() const; bool HasNan() const; public: void Save(TSOut& SOut) const; void Load(TSIn& SIn); }; template <class TIdxV1, class TIdxV2> TFullMatrix TFullMatrix::operator ()(const TIdxV1& RowV, const TIdxV2& ColV) const { const int Rows = RowV.Len(); const int Cols = ColV.Len(); TFullMatrix Result(Rows, Cols); for (int i = 0; i < Rows; i++) { for (int j = 0; j < Cols; j++) { const int Idx1 = (int) RowV[i]; const int Idx2 = (int) ColV[j]; const TFlt Val = Mat->At(Idx1, Idx2); Result.Mat->PutXY(i, j, Val); } } return Result; } template <class TIdxV> TVector TFullMatrix::operator ()(const int& RowIdx, const TIdxV& ColIdxV) const { 
EAssertR(RowIdx < GetRows(), TStr::Fmt("Invalid row index: %d", RowIdx));

    const int Cols = ColIdxV.Len();

    TVector Result(Cols, false);
    for (int ColIdx = 0; ColIdx < Cols; ColIdx++) {
        // pick out the requested columns; indexing with ColIdx directly would
        // ignore ColIdxV and always return the first Cols columns
        Result[ColIdx] = At(RowIdx, (int) ColIdxV[ColIdx]);
    }

    return Result;
}

template <typename TFunc>
TFullMatrix& TFullMatrix::Map(const TFunc& Func) {
    const int& Rows = GetRows();
    const int& Cols = GetCols();

    for (int i = 0; i < Rows; i++) {
        for (int j = 0; j < Cols; j++) {
            Mat->At(i, j) = Func(Mat->At(i, j));
        }
    }

    return *this;
}

#ifdef LAPACKE
#include "MKLfunctions.h"
#endif

#endif
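// ---------------------------------------------------------------------------
// Editor's usage sketch (an addition, not part of the library above): shows
// how the SVD-based pseudoinverse and the dense matrix-vector multiply from
// this header can be combined to solve a square linear system. The function
// name SolveSquareViaPinv and all variable names are illustrative; a
// TLinAlg::Multiply(const TFltVV&, const TFltV&, TFltV&) overload is assumed
// to exist alongside the matrix-matrix overloads declared earlier.
// ---------------------------------------------------------------------------
inline void SolveSquareViaPinv(const TFltVV& AVV, const TFltV& bV, TFltV& xV) {
    EAssert(AVV.GetRows() == AVV.GetCols() && AVV.GetRows() == bV.Len());
    const int Dim = AVV.GetRows();
    // PinvVV = pinv(A); InverseSVD writes into its output, so pre-size it
    TFltVV PinvVV(Dim, Dim);
    TLinAlg::InverseSVD(AVV, PinvVV);
    // x = pinv(A) * b
    xV.Gen(Dim);
    TLinAlg::Multiply(PinvVV, bV, xV);
}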
GxB_BinaryOp_xtype.c
//------------------------------------------------------------------------------
// GxB_BinaryOp_xtype: return the type of x for z=f(x,y)
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// NOTE: this function is historical.  Use GxB_BinaryOp_xtype_name instead.

#include "GB.h"

GrB_Info GxB_BinaryOp_xtype         // type of x
(
    GrB_Type *xtype,                // return type of input x
    GrB_BinaryOp binaryop           // binary operator to query
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GB_WHERE1 ("GxB_BinaryOp_xtype (&xtype, binaryop)") ;
    GB_RETURN_IF_NULL (xtype) ;
    GB_RETURN_IF_NULL_OR_FAULTY (binaryop) ;
    ASSERT_BINARYOP_OK (binaryop, "binaryop for xtype", GB0) ;

    //--------------------------------------------------------------------------
    // return the xtype
    //--------------------------------------------------------------------------

    (*xtype) = binaryop->xtype ;
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
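//------------------------------------------------------------------------------
// Editor's usage sketch (not part of SuiteSparse:GraphBLAS itself): a minimal
// standalone program that queries the x-type of a built-in binary operator.
// For z = x + y over doubles (GrB_PLUS_FP64), the returned xtype is GrB_FP64.
// Error handling is reduced to a single check for brevity.
//------------------------------------------------------------------------------

#include <stdio.h>
#include "GraphBLAS.h"

int main (void)
{
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Type xtype = NULL ;
    // query the input type of x for the built-in double addition operator
    GrB_Info info = GxB_BinaryOp_xtype (&xtype, GrB_PLUS_FP64) ;
    printf ("query %s; xtype is GrB_FP64: %d\n",
        info == GrB_SUCCESS ? "succeeded" : "failed",
        xtype == GrB_FP64) ;
    GrB_finalize ( ) ;
    return (0) ;
}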
test.c
#include <stdio.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"

#define TRIALS (1)

#define N (1024*3)

#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})

#define ZERO(X) ZERO_ARRAY(N, X)

int main(void) {
  check_offloading();

  int A[N], B[N], C[N], D[N], E[N];
  int S[N];

  INIT();

  long cpuExec = 0;
  #pragma omp target map(tofrom: cpuExec)
  {
    cpuExec = omp_is_initial_device();
  }
  int max_threads = 224;

  //
  // Test: lastprivate clause on omp for.
  //
  for (int t = 0; t <= max_threads; t++) {
    int threads = t;
    TEST({
      S[0] = 0;
      double q0; double q1; double q2; double q3; double q4;
      double q5; double q6; double q7; double q8; double q9;
      q0 = q1 = q2 = q3 = q4 = q5 = q6 = q7 = q8 = q9 = 0;
      _Pragma("omp parallel if(threads > 1) num_threads(threads)")
      {
        _Pragma("omp for lastprivate(conditional: q0)")
        for (int i = 0; i < N; i++) {
          if (D[i] % 10 == 0) q0 = C[i] + D[i];
          A[i] += q0;
        }
        _Pragma("omp for schedule(auto) lastprivate(conditional: q1)")
        for (int i = 0; i < N; i++) {
          if (D[i] % 10 == 0) q1 = C[i] + D[i];
          B[i] += q1;
        }
        _Pragma("omp for schedule(dynamic) lastprivate(conditional: q2)")
        for (int i = 0; i < N; i++) {
          if (D[i] % 10 == 0) q2 = C[i] + D[i];
          A[i] += q2;
        }
        _Pragma("omp for schedule(guided) lastprivate(conditional: q3)")
        for (int i = 0; i < N; i++) {
          if (D[i] % 10 == 0) q3 = C[i] + D[i];
          B[i] += q3;
        }
        _Pragma("omp for schedule(runtime) lastprivate(conditional: q4)")
        for (int i = 0; i < N; i++) {
          if (D[i] % 10 == 0) q4 = C[i] + D[i];
          A[i] += q4;
        }
        _Pragma("omp for schedule(static) lastprivate(conditional: q5)")
        for (int i = 0; i < N; i++) {
          if (D[i] % 10 == 0) q5 = C[i] + D[i];
          B[i] += q5;
        }
        _Pragma("omp for schedule(static,1) lastprivate(conditional: q6)")
        for (int i = 0; i < N; i++) {
          if (D[i] % 10 == 0) q6 = C[i] + D[i];
          A[i] += q6;
        }
        _Pragma("omp for schedule(static,9) lastprivate(conditional: q7)")
        for (int i = 0; i < N; i++) {
          if (D[i] % 10 == 0) q7 = C[i] + D[i];
          B[i] += q7;
        }
        _Pragma("omp for schedule(static,13) lastprivate(conditional: q8)")
        for (int i = 0; i < N; i++) {
          if (D[i] % 10 == 0) q8 = C[i] + D[i];
          A[i] += q8;
        }
        _Pragma("omp for schedule(static,30000) lastprivate(conditional: q9)")
        for (int i = 0; i < N; i++) {
          if (D[i] % 10 == 0) q9 = C[i] + D[i];
          B[i] += q9;
        }
      }
      double tmp = q0 + q1 + q2 + q3 + q4 + \
                   q5 + q6 + q7 + q8 + q9;
      S[0] = tmp;
    }, VERIFY(0, 1, S[0], 30710));
  }

  return 0;
}
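//
// Editor's note: a minimal standalone sketch of the semantics the test above
// exercises. With lastprivate(conditional: q), after the worksharing loop q
// holds the value from the sequentially last iteration that actually assigned
// it, independent of the schedule used. In the test above each q* therefore
// ends at C[3070] + D[3070] = 3071, so the ten variables sum to 30710.
// Requires an OpenMP 5.0 compiler; names below are illustrative only.
//
#include <stdio.h>
#include <omp.h>

int main(void) {
  double q = -1.0;
  #pragma omp parallel for lastprivate(conditional: q)
  for (int i = 0; i < 1024; i++) {
    if (i % 10 == 0) q = (double) i;  // sequentially last write is at i == 1020
  }
  printf("q = %g\n", q);              // prints 1020
  return 0;
}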
fox_floats_timer_caching_omp_fileIO_benchmark.c
/* fox_floats_timer_caching_omp_fileIO_benchmark.c -- uses Fox's algorithm to multiply two square matrices
 *
 * Implementation of parallel matrix multiplication:
 * LaTeX: $C_{i,j} = \sum_{k} A_{i,k}B_{k,j}$
 *
 * Input:
 *     Input Matrix file name: A.dat, B.dat
 *
 * Output:
 *     Output Matrix file name: C.dat
 *     Output Sub-matrices file name: SubMatrices.dat
 *
 * Notes:
 *     1. Assumes the number of processes is a perfect square
 *     2. The array member of the matrices is statically allocated
 *
 * See Chap 7, pp. 113 & ff and pp. 125 & ff in PPMPI */

/* Compiler command:
 *     mpiicc -O3 -qopenmp -qopt-report-phase=vec -qopt-report=3 fox_floats_timer_caching_omp_fileIO_benchmark.c
 *         -o fox_floats_timer_caching_omp_fileIO_benchmark
 *
 * Run command:
 *     mpirun -n 4 ./fox_floats_timer_caching_omp */

/* Header files */
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <mpi.h>
#include <omp.h>

// define problem scale, matrix row/col size
#define PROBLEM_SCALE 8192

// define whether or not to print matrices in the command line
#define PRINT_A 0
#define PRINT_B 0
#define PRINT_C 0
#define PRINT_LOCAL_A 0
#define PRINT_LOCAL_B 0
#define PRINT_LOCAL_C 0

// define float precision, 4 byte single-precision float or 8 byte double-precision float
#define FLOAT double
#define FLOAT_MPI MPI_DOUBLE

// Define the number of threads used to speed up the computing
#define NUM_THREADS 1
// Define threads affinity "scatter" or "compact"
#define AFFINITY "KMP_AFFINITY = compact"

/* Type define structure of process grid */
typedef struct {
    int       p;        /* Total number of processes    */
    MPI_Comm  comm;     /* Communicator for entire grid */
    MPI_Comm  row_comm; /* Communicator for my row      */
    MPI_Comm  col_comm; /* Communicator for my col      */
    int       q;        /* Order of grid                */
    int       my_row;   /* My row number                */
    int       my_col;   /* My column number             */
    int       my_rank;  /* My rank in the grid comm     */
} GRID_INFO_T;

/* Type define structure of local matrix */
#define MAX 2097152 // Maximum number of elements in the array that stores the local matrix (2^21)
typedef struct {
    int n_bar;
#define Order(A) ((A)->n_bar) // definition with parameters
    FLOAT entries[MAX];
#define Entry(A,i,j) (*(((A)->entries) + ((A)->n_bar)*(i) + (j))) // definition with parameters, array dereference
} LOCAL_MATRIX_T;

/* Function Declarations */
LOCAL_MATRIX_T* Local_matrix_allocate(int n_bar);
void Free_local_matrix(LOCAL_MATRIX_T** local_A);
void Read_matrix_A(char* prompt, LOCAL_MATRIX_T* local_A,
    GRID_INFO_T* grid, int n);  // Read matrix A from a file
void Read_matrix_B(char* prompt, LOCAL_MATRIX_T* local_B, // for continuous memory access, local A(i,k)*B(k,j) = A(i,k)*B^{T}(j,k)
    GRID_INFO_T* grid, int n);  // Read matrix B from a file
void Print_matrix_A(char* title, LOCAL_MATRIX_T* local_A,
    GRID_INFO_T* grid, int n);  // Print matrix A in the command line
void Print_matrix_B(char* title, LOCAL_MATRIX_T* local_B, // Special print function for local matrix B^{T}(j,k)
    GRID_INFO_T* grid, int n);  // Print matrix B in the command line
void Print_matrix_C(char* title, LOCAL_MATRIX_T* local_C,
    GRID_INFO_T* grid, int n);  // Print matrix C in the command line
void Set_to_zero(LOCAL_MATRIX_T* local_A);
void Local_matrix_multiply(LOCAL_MATRIX_T* local_A,
    LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C);
void Build_matrix_type(LOCAL_MATRIX_T* local_A);
MPI_Datatype local_matrix_mpi_t;
LOCAL_MATRIX_T* temp_mat; // global LOCAL_MATRIX_T* type pointer
void Print_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A,
    GRID_INFO_T* grid);
void Print_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Special print
function for local matrix B^{T}(j,k) GRID_INFO_T* grid); void Print_local_matrices_C(char* title, LOCAL_MATRIX_T* local_B, GRID_INFO_T* grid); void Write_matrix_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid, int n); // Write matrix multiplication to a file void Write_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); // Write local matrix A to a file void Write_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid); // Write local matrix B to a file void Write_local_matrices_C(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); // Write local matrix C to a file /*********************************************************/ main(int argc, char* argv[]) { FILE *fp; int p; int my_rank; GRID_INFO_T grid; LOCAL_MATRIX_T* local_A; LOCAL_MATRIX_T* local_B; LOCAL_MATRIX_T* local_C; int n; int n_bar; double timer_start; double timer_end; int content; int i; int j; void Setup_grid(GRID_INFO_T* grid); void Fox(int n, GRID_INFO_T* grid, LOCAL_MATRIX_T* local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C); // Matrix Generator fp = fopen("A.dat", "w"); // Generate and print matrix A into a file for (i = 0; i < PROBLEM_SCALE; i++) { for (j = 0; j < PROBLEM_SCALE; j++) if(i == j){ fprintf(fp,"%d ", 1); } else { fprintf(fp,"%d ", 0); } fprintf(fp,"\n"); } fclose(fp); fp = fopen("B.dat", "w"); // Generate and print matrix B into a file for (i = 0; i < PROBLEM_SCALE; i++){ for (j = 0; j < PROBLEM_SCALE; j++) fprintf(fp,"%d ", (i*PROBLEM_SCALE)+j); fprintf(fp, "\n"); } fclose(fp); // SPMD Mode start from here (Processess fork from here) MPI_Init(&argc, &argv); // MPI initializing MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator // Initial OpenMP Environment omp_set_num_threads(NUM_THREADS); kmp_set_defaults(AFFINITY); Setup_grid(&grid); // Set up Processess grid if (my_rank == 0) { fp = fopen("A.dat","r"); n = 0; while((content = fgetc(fp)) != EOF) { //printf("fgetc = %d\n", content); if(content != 0x20 && content != 0x0A) n++; } fclose(fp); n = (int) sqrt((double) n); printf("We read the order of the matrices from A.dat is\n %d\n", n); // while(fgetc(fp) != EOF) n++; // printf("What's the order of the matrices?\n"); // scanf("%d", &n); // Overall Matrix's Order } MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD); // MPI broadcast the overall matrix's order n_bar = n/grid.q; // \bar n is the local matrix's order local_A = Local_matrix_allocate(n_bar); // Allocate local matrix A Order(local_A) = n_bar; // Local matrix A's order Read_matrix_A("Read A from A.dat", local_A, &grid, n); // Read local matrices A from process 0 by using stdin, and send them to each process (Procedure) if (PRINT_A == 1) Print_matrix_A("We read A =", local_A, &grid, n);// Print local matrices A from process 0 by using stdout, and send them to each process (Procedure) local_B = Local_matrix_allocate(n_bar); // Allocate local matrix Order(local_B) = n_bar; // Local matrix B's order Read_matrix_B("Read B from B.dat", local_B, &grid, n); // Read local matrix B as it's local transpose from process 0 by using stdin, and send them to each process (Procedure) if (PRINT_B == 1) Print_matrix_B("We read B =", local_B, &grid, n);// Print local matrix B as it's local transpose from process 0 by using stdout, and send them to each process (Procedure) Build_matrix_type(local_A); // Buid local_A's MPI matrix data type temp_mat = Local_matrix_allocate(n_bar); // Allocate temporary matrix of order n 
$\time$ n local_C = Local_matrix_allocate(n_bar); // Allocate matrix local_C Order(local_C) = n_bar; // Set matrix local_C's order MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier timer_start = MPI_Wtime(); // Get the MPI wall time Fox(n, &grid, local_A, local_B, local_C); // FOX parallel matrix multiplication Algorithm implement function timer_end = MPI_Wtime(); // Get the MPI wall time MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier Write_matrix_C("Write C into the C.dat", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result) if (PRINT_C == 1) Print_matrix_C("The product is", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result) Write_local_matrices_A("Write split of local matrix A into local_A.dat", local_A, &grid); // Write local matrix A into file if (PRINT_LOCAL_A == 1) Print_local_matrices_A("Split of local matrix A", local_A, &grid); // Print matrix A split in processess Write_local_matrices_B("Write split of local matrix B into local_B.dat", local_B, &grid); // Write local matrix B into file, special for row-major storage if (PRINT_LOCAL_B == 1) Print_local_matrices_B("Split of local matrix B", local_B, &grid); // Print matrix B split in processess, special for row-major storage Write_local_matrices_C("Write split of local matrix C into local_C.dat", local_C, &grid); // Print matrix C split in processess if (PRINT_LOCAL_C == 1) Print_local_matrices_C("Split of local matrix C", local_C, &grid); // Print matrix C split in processess Free_local_matrix(&local_A); // Free local matrix local_A Free_local_matrix(&local_B); // Free local matrix local_B Free_local_matrix(&local_C); // Free local matrix local_C if(my_rank == 0) printf("Parallel Fox Matrix Multiplication Elapsed time:\n %30.20E seconds\n", timer_end-timer_start); MPI_Finalize(); // MPI finalize, processes join and resource recycle } /* main */ /*********************************************************/ void Setup_grid( GRID_INFO_T* grid /* out */) { int old_rank; int dimensions[2]; int wrap_around[2]; int coordinates[2]; int free_coords[2]; /* Set up Global Grid Information */ MPI_Comm_size(MPI_COMM_WORLD, &(grid->p)); MPI_Comm_rank(MPI_COMM_WORLD, &old_rank); /* We assume p is a perfect square */ // but what if it's not a perfect square grid->q = (int) sqrt((double) grid->p); dimensions[0] = dimensions[1] = grid->q; /* We want a circular shift in second dimension. 
*/ /* Don't care about first */ wrap_around[0] = wrap_around[1] = 1; MPI_Cart_create(MPI_COMM_WORLD, 2, dimensions, wrap_around, 1, &(grid->comm)); MPI_Comm_rank(grid->comm, &(grid->my_rank)); MPI_Cart_coords(grid->comm, grid->my_rank, 2, coordinates); grid->my_row = coordinates[0]; grid->my_col = coordinates[1]; /* Set up row communicators */ free_coords[0] = 0; free_coords[1] = 1; MPI_Cart_sub(grid->comm, free_coords, &(grid->row_comm)); /* Set up column communicators */ free_coords[0] = 1; free_coords[1] = 0; MPI_Cart_sub(grid->comm, free_coords, &(grid->col_comm)); } /* Setup_grid */ /*********************************************************/ void Fox( int n /* in */, GRID_INFO_T* grid /* in */, LOCAL_MATRIX_T* local_A /* in */, LOCAL_MATRIX_T* local_B /* in */, LOCAL_MATRIX_T* local_C /* out */) { LOCAL_MATRIX_T* temp_A; /* Storage for the sub- */ /* matrix of A used during */ /* the current stage */ int stage; int bcast_root; int n_bar; /* n/sqrt(p) */ int source; int dest; MPI_Status status; n_bar = n/grid->q; Set_to_zero(local_C); /* Calculate addresses for row circular shift of B */ source = (grid->my_row + 1) % grid->q; dest = (grid->my_row + grid->q - 1) % grid->q; /* Set aside storage for the broadcast block of A */ temp_A = Local_matrix_allocate(n_bar); for (stage = 0; stage < grid->q; stage++) { bcast_root = (grid->my_row + stage) % grid->q; if (bcast_root == grid->my_col) { // Process P_{ii} broadcast A_{ii} in process gird's row commnunicator MPI_Bcast(local_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm); Local_matrix_multiply(local_A, local_B, local_C); } else { // temp_A is a buffer for process P_{ij} to store A_{ij} MPI_Bcast(temp_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm); Local_matrix_multiply(temp_A, local_B, local_C); } MPI_Sendrecv_replace(local_B, 1, local_matrix_mpi_t, // MPI send and receive with single buffer dest, 0, source, 0, grid->col_comm, &status); // Circular shift of process grid B's row, after local multiplication operation } /* for */ } /* Fox */ /*********************************************************/ LOCAL_MATRIX_T* Local_matrix_allocate(int local_order) { LOCAL_MATRIX_T* temp; temp = (LOCAL_MATRIX_T*) malloc(sizeof(LOCAL_MATRIX_T)); return temp; } /* Local_matrix_allocate */ /*********************************************************/ void Free_local_matrix( LOCAL_MATRIX_T** local_A_ptr /* in/out */) { free(*local_A_ptr); } /* Free_local_matrix */ /*********************************************************/ /* Read and distribute matrix for matrix A: * foreach global row of the matrix, * foreach grid column * read a block of n_bar floats on process 0 * and send them to the appropriate process. 
*/ void Read_matrix_A( char* prompt /* in */, LOCAL_MATRIX_T* local_A /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int dest; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess fp = fopen("A.dat","r"); temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT)); printf("%s\n", prompt); fflush(stdout); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_A); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &dest); if (dest == 0) { for (mat_col = 0; mat_col < Order(local_A); mat_col++) fscanf(fp, "%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); /* scanf("%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); */ } else { for(mat_col = 0; mat_col < Order(local_A); mat_col++) fscanf(fp,"%lf", temp + mat_col); // scanf("%lf", temp + mat_col); MPI_Send(temp, Order(local_A), FLOAT_MPI, dest, 0, grid->comm); } } } free(temp); fclose(fp); } else { // Other processess receive matrix from process 0 for (mat_row = 0; mat_row < Order(local_A); mat_row++) MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm, &status); } } /* Read_matrix */ /*********************************************************/ /* Read and distribute matrix for local matrix B's transpose: * foreach global row of the matrix, * foreach grid column * read a block of n_bar floats on process 0 * and send them to the appropriate process. */ void Read_matrix_B( char* prompt /* in */, LOCAL_MATRIX_T* local_B /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int dest; int coords[2]; FLOAT *temp; MPI_Status status; if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess fp = fopen("B.dat","r"); temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); printf("%s\n", prompt); fflush(stdout); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_B); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &dest); if (dest == 0) { // process 0 (local) for (mat_col = 0; mat_col < Order(local_B); mat_col++) fscanf(fp, "%lf", (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage /* scanf("%lf", (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage */ /* scanf("%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); */ } else { for(mat_col = 0; mat_col < Order(local_B); mat_col++) fscanf(fp, "%lf", temp + mat_col); // scanf("%lf", temp + mat_col); MPI_Send(temp, Order(local_B), FLOAT_MPI, dest, 0, grid->comm); } } } free(temp); fclose(fp); } else { // Other processess receive matrix from process 0 temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); // switch rows and colums in local_B, for column major storage for (mat_col = 0; mat_col < Order(local_B); mat_col++) { MPI_Recv(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm, &status); // switch rows and colums in local_B, for column major storage for(mat_row = 0; mat_row < Order(local_B); mat_row++) Entry(local_B, mat_row, mat_col) = *(temp + mat_row); // switch rows and colums in local_B, for column major storage /* MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 
0, 0, grid->comm, &status); */ } free(temp); } } /* Read_matrix_B */ /*********************************************************/ /* Recive and Print Matrix A: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_A); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_A), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_A); mat_row++) MPI_Send(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_A */ /*********************************************************/ /* Recive and Print Matrix for local matrix B's transpose: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_B); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", Entry(local_B, mat_col, mat_row)); // switch rows and colums in local_B, for column major storage // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_B), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); for (mat_col = 0; mat_col < Order(local_B); mat_col++) { for(mat_row = 0; mat_row < Order(local_B); mat_row++) *(temp+mat_row) = Entry(local_B, mat_row, mat_col); // switch rows and colums in local_B, for column major storage MPI_Send(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm); } free(temp); } } /* Print_matrix_B */ /*********************************************************/ /* Recive and Print Matrix A: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int 
mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", Entry(local_C, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_C */ /*********************************************************/ /* Recive and Write Matrix C into a file: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Write_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { fp = fopen("C.dat", "w+"); temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) fprintf(fp, "%20.15E ", Entry(local_C, mat_row, mat_col)); // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); mat_col++) fprintf(fp, "%20.15E ", temp[mat_col]); // printf("%20.15E ", temp[mat_col]); } } fprintf(fp,"\n"); } free(temp); fclose(fp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Write_matrix_C */ /*********************************************************/ /* * Set local matrix's element to zero */ void Set_to_zero( LOCAL_MATRIX_T* local_A /* out */) { int i, j; for (i = 0; i < Order(local_A); i++) for (j = 0; j < Order(local_A); j++) Entry(local_A,i,j) = 0.0E0; } /* Set_to_zero */ /*********************************************************/ void Build_matrix_type( LOCAL_MATRIX_T* local_A /* in */) { MPI_Datatype temp_mpi_t; int block_lengths[2]; MPI_Aint displacements[2]; MPI_Datatype typelist[2]; MPI_Aint start_address; MPI_Aint address; MPI_Type_contiguous(Order(local_A)*Order(local_A), FLOAT_MPI, &temp_mpi_t); // Creates a contiguous datatype /* Synopsis int MPI_Type_contiguous(int count, MPI_Datatype oldtype, MPI_Datatype *newtype) Input Parameters count replication count (nonnegative integer) oldtype old datatype (handle) */ block_lengths[0] = block_lengths[1] = 1; typelist[0] = MPI_INT; typelist[1] = temp_mpi_t; MPI_Address(local_A, &start_address); // Gets the address of a location in caller's memory MPI_Address(&(local_A->n_bar), &address); /* Synopsis int MPI_Address(const 
void *location, MPI_Aint *address) Input Parameters location location in caller memory (choice) Output Parameters address address of location (address integer) */ displacements[0] = address - start_address; MPI_Address(local_A->entries, &address); displacements[1] = address - start_address; MPI_Type_struct(2, block_lengths, displacements, typelist, &local_matrix_mpi_t); // Creates a struct datatype /* Synopsis int MPI_Type_struct(int count, const int *array_of_blocklengths, const MPI_Aint *array_of_displacements, const MPI_Datatype *array_of_types, MPI_Datatype *newtype) Input Parameters count number of blocks (integer) -- also number of entries in arrays array_of_types , array_of_displacements and array_of_blocklengths array_of_blocklengths number of elements in each block (array) array_of_displacements byte displacement of each block (array) array_of_types type of elements in each block (array of handles to datatype objects) Output Parameters newtype new datatype (handle) */ MPI_Type_commit(&local_matrix_mpi_t); // Commits the datatype /* Synopsis int MPI_Type_commit(MPI_Datatype *datatype) Input Parameters datatype datatype (handle) */ } /* Build_matrix_type */ /*********************************************************/ /* local matrix multiplication function * withing OpenMP Thread Acceleration */ void Local_matrix_multiply( LOCAL_MATRIX_T* local_A /* in */, LOCAL_MATRIX_T* local_B /* in */, LOCAL_MATRIX_T* local_C /* out */) { int i, j, k; // int my_rank; // MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) // Threads acceleration upgrade, parallel task split for (i = 0; i < Order(local_A); i++) { // printf("Current in the Fox Kernel:\n my process id is %d, my thread id is %d\n",my_rank,omp_get_thread_num()); for (j = 0; j < Order(local_A); j++) for (k = 0; k < Order(local_B); k++) Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) /* Entry(local_C,i,j) = Entry(local_C,i,j) + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper */ } } /* Local_matrix_multiply */ /*********************************************************/ /* Recive and Print Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) printf("%20.15E ", Entry(local_A,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_A, 1, 
local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_A */ /*********************************************************/ /* Recive and Print Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) printf("%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage printf("\n"); } } fflush(stdout); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_B */ /*********************************************************/ /* Recive and Print Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) printf("%20.15E ", Entry(local_C,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_C */ /*********************************************************/ /* Recive and Write Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Write_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_A.dat","w+"); printf("%s\n", title); fprintf(fp,"Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) fprintf(fp,"%20.15E ", Entry(local_A,i,j)); fprintf(fp, 
"\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_A */ /*********************************************************/ /* Recive and Write Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Write_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_B.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) fprintf(fp, "%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_B */ /*********************************************************/ /* Recive and Write Local Matrix C: * Process 0 print local matrix local_C * Other Processess send local matrix local_C to process 0 * And process 0 receive local matrix local_C from other processess */ void Write_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_C.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) fprintf(fp, "%20.15E ", Entry(local_C,i,j)); fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_C */
task-3.c
/* { dg-do run } */ #include <omp.h> extern void abort (); int l = 5; int foo (int i) { int j = 7; const int k = 8; #pragma omp task firstprivate (i) shared (j, l) { #pragma omp critical { j += i; l += k; } } i++; #pragma omp task firstprivate (i) shared (j, l) { #pragma omp critical { j += i; l += k; } } i++; #pragma omp task firstprivate (i) shared (j, l) { #pragma omp critical { j += i; l += k; } } i++; #pragma omp task firstprivate (i) shared (j, l) { #pragma omp critical { j += i; l += k; } } i++; #pragma omp taskwait return (i != 8 * omp_get_thread_num () + 4 || j != 4 * i - 3 || k != 8); } int main (void) { int r = 0; #pragma omp parallel num_threads (4) reduction(+:r) if (omp_get_num_threads () != 4) { #pragma omp master l = 133; } else if (foo (8 * omp_get_thread_num ())) r++; if (r || l != 133) abort (); return 0; }
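/* Minimal sketch (assumed-new, not part of the GCC test above): what the
 * firstprivate(i) clauses in the test rely on. Each task captures its own
 * copy of i at task-creation time, so the later i++ in the generating
 * thread does not affect tasks that were already created. Compile with
 * gcc -fopenmp. */
#include <stdio.h>
#include <omp.h>

int main(void) {
    int i;
    #pragma omp parallel num_threads(2)
    #pragma omp single
    {
        for (i = 0; i < 4; i++) {
            #pragma omp task firstprivate(i)
            printf("task sees i = %d\n", i);  /* prints 0..3, in some order */
        }
        #pragma omp taskwait
    }
    return 0;
}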
nary-search.c
#include <stdio.h>
#include <math.h>
#include <omp.h>

int main(void) {
    int sep[20], array[20], key, i, n, left, right, size, interval, index, break_value = 0, tid;
    printf("Enter the size of array\n");
    scanf("%d", &size);
    printf("Enter the elements of array in ascending order\n");
    for (i = 0; i < size; i++) {
        scanf("%d", &array[i]);
    }
    printf("Enter the key to be searched\n");
    scanf("%d", &key);
    printf("Enter the value of n for n-ary search algorithm\n");
    scanf("%d", &n);
    left = 0;
    right = size - 1;
    if (key >= array[left] && key <= array[right]) {
        while (left != right) {
            // (start) code to find separators
            printf("left=%d, right=%d, size=%d\n", left, right, size);
            if (size <= n) {
                #pragma omp parallel for num_threads(size) private(tid)
                for (i = 0; i < size; i++) {
                    sep[i] = left + i;
                    tid = omp_get_thread_num();
                    printf("Thread %d allocated sep[%d]=%d\n", tid, i, sep[i]);
                }
            } else {
                sep[0] = left;
                interval = ceil((float)size / (float)n);
                #pragma omp parallel for num_threads(n - 1) private(tid)
                for (i = 1; i <= n - 1; i++) {
                    sep[i] = left + interval * i - 1;
                    tid = omp_get_thread_num();
                    printf("Thread %d allocated sep[%d]=%d\n", tid, i, sep[i]);
                }
                sep[n] = right;
            }
            // (end) code to find separators
            // (start) code for comparison
            for (i = 0; i <= n; i++) {
                if (key == array[sep[i]]) {
                    index = sep[i];
                    printf("Element found at position %d\n", index + 1);
                    break_value = 1;
                    break;
                }
                if (key < array[sep[i]]) {
                    right = sep[i];
                    if (i != 0)
                        left = 1 + sep[i - 1];
                    size = right - left + 1;
                    break;
                }
            }
            // (end) code for comparison
            if (break_value == 1)
                break;
        } // End of 'while' loop
    } // End of 'if'
    if (break_value == 0) {
        // The loop above exits without checking the last remaining element
        if (left == right && key == array[left])
            printf("Element found at position %d\n", left + 1);
        else
            printf("Element is not present in the list\n");
    }
    return 0;
} // End of main()
// For compilation: gcc -fopenmp nary-search.c -lm
// (Note: -fopenmp enables the OpenMP pragmas while -lm links the math library for ceil()).
// To run:
// ./a.out
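/* Serial reference (an illustrative sketch, not from the original file) for
 * checking the parallel version above: one round of n-ary search splits
 * [left, right] with separators placed ceil(width/n) apart, compares the key
 * against each separator, and narrows to the sub-interval that brackets it. */
#include <stdio.h>

static int nary_search(const int *a, int size, int key, int n) {
    int left = 0, right = size - 1;
    while (left <= right) {
        int width = right - left + 1;
        int step = (width + n - 1) / n;        /* ceil(width / n) */
        int newleft = left, newright = right, moved = 0;
        for (int i = 1; i <= n && !moved; i++) {
            int sep = left + step * i - 1;
            if (sep > right) sep = right;
            if (key == a[sep]) return sep;
            if (key < a[sep]) {                /* key lies below this separator */
                newright = sep - 1;
                moved = 1;
            } else {
                newleft = sep + 1;             /* key lies above: raise the lower bound */
            }
        }
        left = newleft;
        right = newright;
    }
    return -1;                                 /* not found */
}

int main(void) {
    int a[] = {1, 3, 5, 7, 9, 11, 13};
    printf("index of 9: %d\n", nary_search(a, 7, 9, 3));  /* prints 4 */
    return 0;
}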
DRB012-minusminus-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
   Produced at the Lawrence Livermore National Laboratory
   Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan,
   and Ian Karlin
   (email: [email protected], [email protected], [email protected],
   [email protected], [email protected])
   LLNL-CODE-732144
   All rights reserved.

   This file is part of DataRaceBench. For details, see
   https://github.com/LLNL/dataracebench. Please also see the LICENSE file
   for our additional BSD notice.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   * Redistributions of source code must retain the above copyright
     notice, this list of conditions and the disclaimer below.
   * Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the disclaimer (as noted below)
     in the documentation and/or other materials provided with the
     distribution.
   * Neither the name of the LLNS/LLNL nor the names of its contributors
     may be used to endorse or promote products derived from this software
     without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
   LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
   CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */

/* In the original benchmark the -- operation on numNodes2 is unprotected,
   causing a race condition. Data race pair: numNodes2@75 vs. numNodes2@75.
   The Cetus-inserted reduction clause on the second loop below removes that
   race in this transformed version. */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[]) {
    int i;
    int len = 100;
    int _ret_val_0;
    if (argc > 1) {
        len = atoi(argv[1]);
    }
    /* Parse len before sizing the VLA so x matches the loop bounds. */
    int numNodes = len, numNodes2 = 0;
    int x[len];
    #pragma cetus private(i)
    #pragma loop name main#0
    #pragma cetus parallel
    #pragma omp parallel for private(i)
    for (i = 0; i < len; i++) {
        if ((i % 2) == 0) {
            x[i] = 5;
        } else {
            x[i] = (-5);
        }
    }
    #pragma cetus private(i)
    #pragma loop name main#1
    #pragma cetus reduction(+: numNodes2)
    #pragma cetus parallel
    #pragma omp parallel for private(i) reduction(+: numNodes2)
    for (i = (numNodes - 1); i > (-1); --i) {
        if (x[i] <= 0) {
            numNodes2 += (-1);
        }
    }
    printf("%d\n", numNodes2);
    _ret_val_0 = 0;
    return _ret_val_0;
}
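/* Minimal sketch (assumed-new, not from DataRaceBench): the race pattern
 * above, isolated. numNodes2-- is a read-modify-write; without protection
 * two threads can load the same value and one decrement is lost. A
 * reduction gives each thread a private partial count that is combined
 * deterministically after the loop. Compile with gcc -fopenmp. */
#include <stdio.h>

int main(void) {
    int count = 0;
    int x[100];
    for (int i = 0; i < 100; i++)
        x[i] = (i % 2 == 0) ? 5 : -5;    /* 50 non-positive entries */

    #pragma omp parallel for reduction(+: count)
    for (int i = 99; i > -1; --i) {
        if (x[i] <= 0)
            count += -1;                 /* was the racy count-- */
    }
    printf("%d\n", count);               /* always -50 with the reduction */
    return 0;
}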
O6precIndxDb.c
#include <mpi.h> #include "grid.h" extern struct { char *name; int loc; int dim; union { GVAL *restrict * restrict p2; GVAL *restrict * restrict * restrict p3; } data_pointer; } *gv_grad; extern GVAL *restrict * restrict * restrict gv_precInd; extern int *restrict t6Blk; extern int *restrict t6Ver; extern int *restrict t6Ind; void O6precIndxDb(GRID * g) { { size_t min_block = g->mpi_rank == (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t height_index = (0); height_index < (g->height); height_index++) { for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) { int i = block_index * g->height * g->blkSize + height_index * g->blkSize + edge_index; gv_precInd[t6Blk[i]][t6Ver[i]][t6Ind[i]] = 0.5 * gv_grad->data_pointer.p3[(block_index)][(height_index)][(edge_index)]; } } } } }
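/* Reading aid (an illustrative sketch, not from the original source): the
 * nested conditional expressions above implement a blocked 1-D distribution
 * of g->eBlkCnt blocks over g->mpi_world_size MPI ranks, specialized to the
 * global range [0, eBlkCnt). Written out with named temporaries the logic
 * is the following; lo is the first global block of the range (0 above). */
#include <stdio.h>
#include <stddef.h>

static void local_block_range(size_t lo, size_t nblocks, int rank, int nranks,
                              size_t *min_block, size_t *max_block) {
    /* blocks per rank, rounded up -- mirrors ((eBlkCnt + size - 1) / size) */
    size_t per_rank = (nblocks + (size_t)nranks - 1) / (size_t)nranks;
    size_t first = lo / per_rank;              /* rank owning global block lo          */
    size_t last  = (nblocks - 1) / per_rank;   /* rank owning global block nblocks - 1 */
    *min_block = ((size_t)rank == first) ? lo % per_rank : 0;
    *max_block = ((size_t)rank < first || (size_t)rank > last) ? 0
               : ((size_t)rank == last) ? (nblocks % per_rank ? nblocks % per_rank
                                                              : per_rank)
               : per_rank;                     /* interior ranks take a full slice */
}

int main(void) {
    /* 10 blocks over 3 ranks: ranks 0 and 1 get local range [0,4), rank 2 gets [0,2) */
    for (int r = 0; r < 3; r++) {
        size_t mn, mx;
        local_block_range(0, 10, r, 3, &mn, &mx);
        printf("rank %d: [%zu, %zu)\n", r, mn, mx);
    }
    return 0;
}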
VerletClusterCellsTraversal.h
/** * @file VerletClusterCellsTraversal.h * @author jspahl * @date 25.3.19 */ #pragma once #include <algorithm> #include <vector> #include "VerletClusterTraversalInterface.h" #include "autopas/containers/cellPairTraversals/CellPairTraversal.h" #include "autopas/options/DataLayoutOption.h" #include "autopas/pairwiseFunctors/CellFunctor.h" #include "autopas/utils/CudaDeviceVector.h" #include "autopas/utils/SoAView.h" #if defined(AUTOPAS_CUDA) #include "cuda_runtime.h" #endif namespace autopas { /** * This Traversal is used to interact all clusters in VerletClusterCluster Container. * * @tparam ParticleCell the type of cells * @tparam PairwiseFunctor The functor that defines the interaction of two particles. * @tparam DataLayout * @tparam useNewton3 */ template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3> class VerletClusterCellsTraversal : public CellPairTraversal<ParticleCell>, public VerletClusterTraversalInterface<ParticleCell> { using Particle = typename ParticleCell::ParticleType; public: /** * Constructor for the VerletClusterClusterTraversal. * @param pairwiseFunctor The functor that defines the interaction of two particles. * @param clusterSize Size of the clusters. */ VerletClusterCellsTraversal(PairwiseFunctor *pairwiseFunctor, const unsigned int clusterSize) : CellPairTraversal<ParticleCell>({1, 1, 1}), _functor(pairwiseFunctor), _neighborMatrixDim(nullptr), _clusterSize(clusterSize) {} TraversalOption getTraversalType() const override { return TraversalOption::verletClusterCells; } bool isApplicable() const override { if (dataLayout == DataLayoutOption::cuda) { int nDevices = 0; #if defined(AUTOPAS_CUDA) cudaGetDeviceCount(&nDevices); if (not _functor->getCudaWrapper()) return false; #endif return nDevices > 0 and _functor->isAppropriateClusterSize(_clusterSize, dataLayout); } else { return _functor->isAppropriateClusterSize(_clusterSize, dataLayout); } } bool getUseNewton3() const override { return useNewton3; } DataLayoutOption getDataLayout() const override { return dataLayout; } std::tuple<TraversalOption, DataLayoutOption, bool> getSignature() override { return std::make_tuple(TraversalOption::verletClusterCells, dataLayout, useNewton3); } void setVerletListPointer(std::vector<std::vector<std::vector<std::pair<size_t, size_t>>>> *neighborCellIds, size_t *neighborMatrixDim, utils::CudaDeviceVector<unsigned int> *neighborMatrix) override { _neighborCellIds = neighborCellIds; _neighborMatrixDim = neighborMatrixDim; _neighborMatrix = neighborMatrix; } void rebuildVerlet(const std::array<unsigned long, 3> &dims, std::vector<ParticleCell> &cells, std::vector<std::vector<std::array<double, 6>>> &boundingBoxes, int interactionCellRadius, double distance) override { this->_cellsPerDimension = dims; const size_t cellsSize = cells.size(); _neighborCellIds->clear(); _neighborCellIds->resize(cellsSize, {}); // iterate over all cells within interaction radius in xy plane for (size_t i = 0; i < cellsSize; ++i) { auto pos = utils::ThreeDimensionalMapping::oneToThreeD(i, this->_cellsPerDimension); for (int x = -interactionCellRadius; x <= interactionCellRadius; ++x) { if (0 <= (pos[0] + x) and (pos[0] + x) < this->_cellsPerDimension[0]) { for (int y = -interactionCellRadius; y <= interactionCellRadius; ++y) { if (0 <= (pos[1] + y) and (pos[1] + y) < this->_cellsPerDimension[1]) { // current neighbor cell auto other = utils::ThreeDimensionalMapping::threeToOneD(pos[0] + x, pos[1] + y, (unsigned long)0, this->_cellsPerDimension); // only one 
way interaction when using newton3 if (useNewton3 and other > i) { continue; } // iterate through clusters in own cell for (size_t ownClusterId = 0; ownClusterId < boundingBoxes[i].size(); ++ownClusterId) { (*_neighborCellIds)[i].resize(boundingBoxes[i].size(), {}); const std::array<double, 6> ownBox = boundingBoxes[i][ownClusterId]; // find range of clusters in other cell within range of own cluster auto start = std::find_if(boundingBoxes[other].begin(), boundingBoxes[other].end(), [this, ownBox, distance](const std::array<double, 6> &otherbox) { return getMinDist(ownBox, otherbox) < distance; }); auto end = std::find_if(start, boundingBoxes[other].end(), [this, ownBox, distance](const std::array<double, 6> &otherbox) { return getMinDist(ownBox, otherbox) > distance; }); const size_t size = end - start; if (start != end) { (*_neighborCellIds)[i][ownClusterId].reserve(size); auto indexStart = start - boundingBoxes[other].begin(); if (other == i) { // add clusters to neighbor list when within same cell for (size_t k = 0; k < size; ++k) { if (useNewton3) { if (indexStart + k > ownClusterId) { (*_neighborCellIds)[i][ownClusterId].push_back(std::make_pair(other, indexStart + k)); } } else { if (indexStart + k != ownClusterId) { (*_neighborCellIds)[i][ownClusterId].push_back(std::make_pair(other, indexStart + k)); } } } } else { // add clusters to neighbor list in the form [mycell][mycluster] pair(othercell, othercluster) for (size_t k = 0; k < size; ++k) { (*_neighborCellIds)[i][ownClusterId].push_back(std::make_pair(other, indexStart + k)); } } } } } } } } } // Make neighbor matrix for GPU by linearizing _neighborCellIds if (dataLayout == DataLayoutOption::cuda) { size_t neighborMatrixDim = 0; for (auto &cell : *_neighborCellIds) { for (auto &cluster : cell) { neighborMatrixDim = std::max(neighborMatrixDim, cluster.size()); } } ++neighborMatrixDim; if (not useNewton3) { ++neighborMatrixDim; } *_neighborMatrixDim = neighborMatrixDim; std::vector<size_t> cellSizePartSums(cellsSize + 1, 0); for (size_t i = 0; i < cellsSize; ++i) { cellSizePartSums[i + 1] = boundingBoxes[i].size() + cellSizePartSums[i]; } std::vector<unsigned int> neighborMatrix(cellSizePartSums.back() * neighborMatrixDim, std::numeric_limits<unsigned int>::max()); for (size_t cell = 0; cell < cellsSize; ++cell) { for (size_t cluster = 0; cluster < (*_neighborCellIds)[cell].size(); ++cluster) { size_t i = 0; for (auto &neighbors : (*_neighborCellIds)[cell][cluster]) { neighborMatrix[(cellSizePartSums[cell] + cluster) * neighborMatrixDim + i] = cellSizePartSums[neighbors.first] + neighbors.second; ++i; } if (not useNewton3) { neighborMatrix[(cellSizePartSums[cell] + cluster) * neighborMatrixDim + i] = cellSizePartSums[cell] + cluster; ++i; } } } #ifdef AUTOPAS_CUDA _neighborMatrix->copyHostToDevice(neighborMatrix.size(), neighborMatrix.data()); #endif } } void initTraversal() override { switch (dataLayout) { case DataLayoutOption::aos: { return; } case DataLayoutOption::soa: { for (size_t i = 0; i < (*this->_cells).size(); ++i) { _functor->SoALoader((*this->_cells)[i], (*this->_cells)[i]._particleSoABuffer); } return; } case DataLayoutOption::cuda: { size_t partSum = 0; for (size_t i = 0; i < (*this->_cells).size(); ++i) { _functor->SoALoader((*this->_cells)[i], _storageCell._particleSoABuffer, partSum); partSum += (*this->_cells)[i].numParticles(); } _functor->deviceSoALoader(_storageCell._particleSoABuffer, _storageCell._particleSoABufferDevice); #ifdef AUTOPAS_CUDA 
utils::CudaExceptionHandler::checkErrorCode(cudaDeviceSynchronize()); #endif return; } } } void endTraversal() override { switch (dataLayout) { case DataLayoutOption::aos: { return; } case DataLayoutOption::soa: { #ifdef AUTOPAS_OPENMP #pragma omp parallel for #endif for (size_t i = 0; i < (*this->_cells).size(); ++i) { _functor->SoAExtractor((*this->_cells)[i], (*this->_cells)[i]._particleSoABuffer); } return; } case DataLayoutOption::cuda: { _functor->deviceSoAExtractor(_storageCell._particleSoABuffer, _storageCell._particleSoABufferDevice); #ifdef AUTOPAS_CUDA utils::CudaExceptionHandler::checkErrorCode(cudaDeviceSynchronize()); #endif size_t partSum = 0; for (size_t i = 0; i < (*this->_cells).size(); ++i) { _functor->SoAExtractor((*this->_cells)[i], _storageCell._particleSoABuffer, partSum); partSum += (*this->_cells)[i].numParticles(); } return; } } } void traverseParticlePairs() override { switch (dataLayout) { case DataLayoutOption::aos: { traverseCellPairsAoS(this->_cells); return; } case DataLayoutOption::soa: { traverseCellPairsSoA(this->_cells); return; } case DataLayoutOption::cuda: { traverseCellPairsGPU(); return; } } } private: void traverseCellPairsAoS(std::vector<ParticleCell> *cells) { const auto clusterSize = _clusterSize; // grid for (size_t i = 0; i < cells->size(); ++i) { // clusters for (size_t clusterId = 0; clusterId < (*_neighborCellIds)[i].size(); ++clusterId) { for (auto &neighbor : (*_neighborCellIds)[i][clusterId]) { // loop in cluster for (size_t ownPid = 0; ownPid < clusterSize; ++ownPid) { for (size_t otherPid = 0; otherPid < clusterSize; ++otherPid) { _functor->AoSFunctor((*cells)[i]._particles[clusterSize * clusterId + ownPid], (*cells)[neighbor.first]._particles[clusterSize * neighbor.second + otherPid], useNewton3); } } } // same cluster if (useNewton3) { for (size_t ownPid = 0; ownPid < clusterSize; ++ownPid) { for (size_t otherPid = ownPid + 1; otherPid < clusterSize; ++otherPid) { _functor->AoSFunctor((*cells)[i]._particles[clusterSize * clusterId + ownPid], (*cells)[i]._particles[clusterSize * clusterId + otherPid], useNewton3); } } } else { for (size_t ownPid = 0; ownPid < clusterSize; ++ownPid) { for (size_t otherPid = 0; otherPid < clusterSize; ++otherPid) { if (ownPid != otherPid) { _functor->AoSFunctor((*cells)[i]._particles[clusterSize * clusterId + ownPid], (*cells)[i]._particles[clusterSize * clusterId + otherPid], useNewton3); } } } } } } } void traverseCellPairsSoA(std::vector<ParticleCell> *cells) { auto clusterSize = _clusterSize; // grid for (size_t i = 0; i < cells->size(); ++i) { // clusters for (size_t clusterId = 0; clusterId < (*_neighborCellIds)[i].size(); ++clusterId) { for (auto &neighbor : (*_neighborCellIds)[i][clusterId]) { const size_t c1start = clusterSize * clusterId; SoAView cluster1(&(*cells)[i]._particleSoABuffer, c1start, c1start + clusterSize); const size_t c2start = clusterSize * neighbor.second; SoAView cluster2(&(*cells)[neighbor.first]._particleSoABuffer, c2start, c2start + clusterSize); _functor->SoAFunctor(cluster1, cluster2, useNewton3); } // same cluster SoAView clusterSelf(&(*cells)[i]._particleSoABuffer, clusterId * clusterSize, clusterId * clusterSize + clusterSize); _functor->SoAFunctor(clusterSelf, useNewton3); } } } void traverseCellPairsGPU() { #ifdef AUTOPAS_CUDA if (!_functor->getCudaWrapper()) { _functor->CudaFunctor(_storageCell._particleSoABufferDevice, useNewton3); return; } auto cudaSoA = _functor->createFunctorCudaSoA(_storageCell._particleSoABufferDevice); if (useNewton3) { 
_functor->getCudaWrapper()->CellVerletTraversalN3Wrapper( cudaSoA.get(), _storageCell._particleSoABuffer.getNumParticles() / _clusterSize, _clusterSize, *_neighborMatrixDim, _neighborMatrix->get(), 0); } else { _functor->getCudaWrapper()->CellVerletTraversalNoN3Wrapper( cudaSoA.get(), _storageCell._particleSoABuffer.getNumParticles() / _clusterSize, _clusterSize, *_neighborMatrixDim, _neighborMatrix->get(), 0); } utils::CudaExceptionHandler::checkErrorCode(cudaDeviceSynchronize()); #else utils::ExceptionHandler::exception("VerletClusterCellsTraversal was compiled without Cuda support"); #endif } /** * Returns minimal distance between the two boxes * @param box1 * @param box2 * @return distance */ inline double getMinDist(const std::array<double, 6> &box1, const std::array<double, 6> &box2) const { double sqrDist = 0; for (int i = 0; i < 3; ++i) { if (box2[i + 3] < box1[i]) { double d = box2[i + 3] - box1[i]; sqrDist += d * d; } else if (box2[i] > box1[i + 3]) { double d = box2[i] - box1[i + 3]; sqrDist += d * d; } } return sqrt(sqrDist); } /** * Pairwise functor used in this traversal */ PairwiseFunctor *_functor; /** * SoA Storage cell containing SoAs and device Memory */ ParticleCell _storageCell; // id of neighbor clusters of a clusters std::vector<std::vector<std::vector<std::pair<size_t, size_t>>>> *_neighborCellIds; size_t *_neighborMatrixDim; utils::CudaDeviceVector<unsigned int> *_neighborMatrix; const unsigned int _clusterSize; }; } // namespace autopas
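/* Standalone check (a sketch in plain C, mirroring getMinDist above): the
 * minimum distance between two axis-aligned boxes, each stored as
 * {xmin, ymin, zmin, xmax, ymax, zmax}, accumulates a per-axis gap only
 * when the intervals do not overlap on that axis; overlapping axes
 * contribute 0, so touching or intersecting boxes report distance 0. */
#include <math.h>
#include <stdio.h>

static double box_min_dist(const double b1[6], const double b2[6]) {
    double sqr = 0.0;
    for (int i = 0; i < 3; ++i) {
        if (b2[i + 3] < b1[i]) {          /* b2 entirely below b1 on axis i */
            double d = b2[i + 3] - b1[i];
            sqr += d * d;
        } else if (b2[i] > b1[i + 3]) {   /* b2 entirely above b1 on axis i */
            double d = b2[i] - b1[i + 3];
            sqr += d * d;
        }
    }
    return sqrt(sqr);
}

int main(void) {
    const double a[6] = {0, 0, 0, 1, 1, 1};
    const double b[6] = {2, 0, 0, 3, 1, 1};
    printf("%f\n", box_min_dist(a, b));   /* 1.0: gap on x only */
    return 0;
}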
ep.c
//-------------------------------------------------------------------------// // // // This benchmark is an OpenMP C version of the NPB EP code. This OpenMP // // C version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the OpenMP Fortran versions in // // "NPB3.3-OMP" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this OpenMP C version to // // [email protected] // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: [email protected] // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// //--------------------------------------------------------------------- // program EMBAR //--------------------------------------------------------------------- // M is the Log_2 of the number of complex pairs of uniform (0, 1) random // numbers. MK is the Log_2 of the size of each batch of uniform random // numbers. MK can be set for convenience on a given system, since it does // not affect the results. //--------------------------------------------------------------------- #include <stdio.h> #include <stdlib.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif #include "type.h" #include "npbparams.h" #include "randdp.h" #include "timers.h" #include "print_results.h" #include "read_memory.h" #define MAX(X,Y) (((X) > (Y)) ? (X) : (Y)) #define MK 16 #define MM (M - MK) #define NN (1 << MM) #define NK (1 << MK) #define NQ 10 #define EPSILON 1.0e-8 #define A 1220703125.0 #define S 271828183.0 static double x[2*NK]; static double qq[NQ]; #pragma omp threadprivate(x,qq) static double q[NQ]; int i1=0, i2=0, i3=-1, k_temp = 0; double ssx,ssy; void vranlc_temp( int n, double *x, double a, double y[] ) { //-------------------------------------------------------------------- // // This routine generates N uniform pseudorandom double precision numbers in // the range (0, 1) by using the linear congruential generator // // x_{k+1} = a x_k (mod 2^46) // // where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers // before repeating. The argument A is the same as 'a' in the above formula, // and X is the same as x_0. A and X must be odd double precision integers // in the range (1, 2^46). The N results are placed in Y and are normalized // to be between 0 and 1. X is updated to contain the new seed, so that // subsequent calls to VRANLC using the same arguments will generate a // continuous sequence. If N is zero, only initialization is performed, and // the variables X, A and Y are ignored. // // This routine is the standard version designed for scalar or RISC systems. 
// However, it should produce the same results on any single processor // computer with at least 48 mantissa bits in double precision floating point // data. On 64 bit systems, double precision should be disabled. // //-------------------------------------------------------------------- // r23 = pow(0.5, 23.0); //// pow(0.5, 23.0) = 1.1920928955078125e-07 // r46 = r23 * r23; // t23 = pow(2.0, 23.0); //// pow(2.0, 23.0) = 8.388608e+06 // t46 = t23 * t23; const double r23 = 1.1920928955078125e-07; const double r46 = r23 * r23; const double t23 = 8.388608e+06; const double t46 = t23 * t23; double t1, t2, t3, t4, a1, a2, x1, x2, z; int i; //-------------------------------------------------------------------- // Break A into two parts such that A = 2^23 * A1 + A2. //-------------------------------------------------------------------- t1 = r23 * a; a1 = (int) t1; a2 = a - t23 * a1; //-------------------------------------------------------------------- // Generate N results. This loop is not vectorizable. //-------------------------------------------------------------------- for ( i = i3+1; i < n; i++ ) { //-------------------------------------------------------------------- // Break X into two parts such that X = 2^23 * X1 + X2, compute // Z = A1 * X2 + A2 * X1 (mod 2^23), and then // X = 2^23 * Z + A2 * X2 (mod 2^46). //-------------------------------------------------------------------- t1 = r23 * (*x); x1 = (int) t1; x2 = *x - t23 * x1; t1 = a1 * x2 + a2 * x1; t2 = (int) (r23 * t1); z = t1 - t23 * t2; t3 = t23 * z + a2 * x2; t4 = (int) (r46 * t3) ; *x = t3 - t46 * t4; y[i] = r46 * (*x); i3 = -1; } return; } int main(int argc, char *argv[]) { pid = atoi(argv[1]); printf("pid = %d\n",pid); /* crucial_data(x,"double",2*NK); crucial_data(qq,"double",NQ); crucial_data(q,"double",NQ); */ double Mops, t1, t2, t3, t4, x1, x2; double sx, sy, tm, an, tt, gc; double sx_verify_value, sy_verify_value, sx_err, sy_err; int np; int i, ik, kk, l, k, nit; int k_offset, j; logical verified, timers_enabled; /* consistent_data(&k,"int",1); consistent_data(&i1,"int",1); consistent_data(&i1,"int",1); */ double dum[3] = {1.0, 1.0, 1.0}; char size[16]; FILE *fp; if ((fp = fopen("timer.flag", "r")) == NULL) { timers_enabled = false; } else { timers_enabled = true; fclose(fp); } //-------------------------------------------------------------------- // Because the size of the problem is too large to store in a 32-bit // integer for some classes, we put it into a string (for printing). // Have to strip off the decimal point put in there by the floating // point print statement (internal file) //-------------------------------------------------------------------- sprintf(size, "%15.0lf", pow(2.0, M+1)); j = 14; if (size[j] == '.') j--; size[j+1] = '\0'; printf("\n\n NAS Parallel Benchmarks (NPB3.3-OMP-C) - EP Benchmark\n"); printf("\n Number of random numbers generated: %15s\n", size); printf("\n Number of available threads: %13d\n", omp_get_max_threads()); verified = false; //-------------------------------------------------------------------- // Compute the number of "batches" of random number pairs generated // per processor. Adjust if the number of processors does not evenly // divide the total number //-------------------------------------------------------------------- np = NN; //-------------------------------------------------------------------- // Call the random number generator functions and initialize // the x-array to reduce the effects of paging on the timings. 
// Also, call all mathematical functions that are used. Make // sure these initializations cannot be eliminated as dead code. //-------------------------------------------------------------------- vranlc(0, &dum[0], dum[1], &dum[2]); dum[0] = randlc(&dum[1], dum[2]); #pragma omp parallel default(shared) private(i) { for (i = 0; i < 2 * NK; i++) { x[i] = -1.0e99; } } Mops = log(sqrt(fabs(MAX(1.0, 1.0)))); #pragma omp parallel { timer_clear(0); if (timers_enabled) timer_clear(1); if (timers_enabled) timer_clear(2); } timer_start(0); t1 = A; vranlc(0, &t1, A, x); //-------------------------------------------------------------------- // Compute AN = A ^ (2 * NK) (mod 2^46). //-------------------------------------------------------------------- t1 = A; for (i = 0; i < MK + 1; i++) { t2 = randlc(&t1, t1); } an = t1; tt = S; gc = 0.0; sx = 0.0; sy = 0.0; for (i = 0; i < NQ; i++) { q[i] = 0.0; } //-------------------------------------------------------------------- // Each instance of this loop may be performed independently. We compute // the k offsets separately to take into account the fact that some nodes // have more numbers to generate than others //-------------------------------------------------------------------- k_offset = -1; //FILE *testfile; //testfile = fopen("resultfile1.out","w"); verified = true; if (M == 24) { sx_verify_value = -3.247834652034740e+3; sy_verify_value = -6.958407078382297e+3; } else if (M == 25) { sx_verify_value = -2.863319731645753e+3; sy_verify_value = -6.320053679109499e+3; } else if (M == 28) { sx_verify_value = -4.295875165629892e+3; sy_verify_value = -1.580732573678431e+4; } else if (M == 30) { sx_verify_value = 4.033815542441498e+4; sy_verify_value = -2.660669192809235e+4; } else if (M == 32) { sx_verify_value = 4.764367927995374e+4; sy_verify_value = -8.084072988043731e+4; } else if (M == 36) { sx_verify_value = 1.982481200946593e+5; sy_verify_value = -1.020596636361769e+5; } else if (M == 40) { sx_verify_value = -5.319717441530e+05; sy_verify_value = -3.688834557731e+05; } else { verified = false; } #pragma omp parallel default(shared) private(k,kk,t1,t2,t3,t4,i,ik,x1,x2,l) { for (i = 0; i < NQ; i++) { qq[i] = 0.0; } //flush_whole_cache(); //start_crash(); addr[count_addr++] = &x; addr[count_addr++] = &qq; addr[count_addr++] = &q; addr[count_addr++] = &ssx; addr[count_addr++] = &ssy; addr[count_addr++] = &k_temp; addr[count_addr++] = &i1; addr[count_addr++] = &i2; addr[count_addr++] = &i3; ReadVarriable(addr,count_addr); printf("k=%d\n",k_temp); printf("i1=%d\n",i1); printf("i2=%d\n",i2); printf("i3=%d\n",i3); printf("sx = %lf\n",ssx); printf("sy = %lf\n",ssy); sx = ssx; sy = ssy; //np = np + k_temp; #pragma omp for reduction(+:sx,sy) nowait for (k = k_temp+1; k <= np; k++) { kk = k_offset + k; t1 = S; t2 = an; /*int ijk=0; if(k==k_temp+1){ fprintf(testfile,"before vranlc, t1 = %lf, A = %lf\n",t1, A); for( ijk = 0; ijk<2*NK; ijk++) { fprintf(testfile,"%lf\n",x[ijk]); }}*/ // Find starting seed t1 for this kk. for (i = 1; i <= 100; i++) { ik = kk / 2; if ((2 * ik) != kk) t3 = randlc(&t1, t2); if (ik == 0) break; t3 = randlc(&t2, t2); kk = ik; } //-------------------------------------------------------------------- // Compute uniform pseudorandom numbers. 
//-------------------------------------------------------------------- if (timers_enabled) timer_start(2); vranlc(2 * NK, &t1, A, x); if (timers_enabled) timer_stop(2); /*if(k==2981){ fprintf(testfile,"after vranlc, t1 = %lf, A = %lf\n",t1, A); for(ijk = 0; ijk<2*NK; ijk++) { fprintf(testfile,"%lf\n",x[ijk]); }}*/ //-------------------------------------------------------------------- // Compute Gaussian deviates by acceptance-rejection method and // tally counts in concentri//square annuli. This loop is not // vectorizable. //-------------------------------------------------------------------- if (timers_enabled) timer_start(1); for (i = 0; i < NK; i++) { x1 = 2.0 * x[2*i] - 1.0; x2 = 2.0 * x[2*i+1] - 1.0; t1 = x1 * x1 + x2 * x2; if (t1 <= 1.0) { t2 = sqrt(-2.0 * log(t1) / t1); t3 = (x1 * t2); t4 = (x2 * t2); l = MAX(fabs(t3), fabs(t4)); qq[l] = qq[l] + 1.0; sx = sx + t3; sy = sy + t4; } } if (timers_enabled) timer_stop(1); //if (verified) { sx_err = fabs((sx - sx_verify_value) / sx_verify_value); sy_err = fabs((sy - sy_verify_value) / sy_verify_value); //printf("sx_err = %25.15lE, sy_err =%25.15lE\n",sx_err, sy_err); verified = ((sx_err <= EPSILON) && (sy_err <= EPSILON)); if(verified) printf("k=%d, SUCCESS!\n",k); //} } //end_crash(); for (i = 0; i < NQ; i++) { #pragma omp atomic q[i] += qq[i]; } } //fclose(testfile); for (i = 0; i < NQ; i++) { gc = gc + q[i]; } timer_stop(0); tm = timer_read(0); nit = 0; verified = 1; if (verified) { sx_err = fabs((sx - sx_verify_value) / sx_verify_value); sy_err = fabs((sy - sy_verify_value) / sy_verify_value); printf("sx_err = %25.15lE, sy_err =%25.15lE\n",sx_err, sy_err); verified = ((sx_err <= EPSILON) && (sy_err <= EPSILON)); } FILE *file; char result_file[MAX_FILE_PATH] = "/home/cc/nvc/tests/recompute_result.out.jie"; sprintf(result_file + strlen(result_file), "%d", pid); file = fopen(result_file,"w"); if(verified) { fprintf(file,"SUCCESS\n"); } else{ fprintf(file,"UNSUCCESS\n"); } fclose(file); Mops = pow(2.0, M+1) / tm / 1000000.0; printf("\nEP Benchmark Results:\n\n"); printf("CPU Time =%10.4lf\n", tm); printf("N = 2^%5d\n", M); printf("No. Gaussian Pairs = %15.0lf\n", gc); printf("Sums = %25.15lE %25.15lE\n", sx, sy); printf("Counts: \n"); for (i = 0; i < NQ; i++) { printf("%3d%15.0lf\n", i, q[i]); } print_results("EP", CLASS, M+1, 0, 0, nit, tm, Mops, "Random numbers generated", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7); if (timers_enabled) { if (tm <= 0.0) tm = 1.0; tt = timer_read(0); printf("\nTotal time: %9.3lf (%6.2lf)\n", tt, tt*100.0/tm); tt = timer_read(1); printf("Gaussian pairs: %9.3lf (%6.2lf)\n", tt, tt*100.0/tm); tt = timer_read(2); printf("Random numbers: %9.3lf (%6.2lf)\n", tt, tt*100.0/tm); } return 0; }
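/* Isolated sketch (assumed-new, using rand() instead of EP's vranlc) of the
 * acceptance-rejection step in the main loop above, i.e. Marsaglia's polar
 * method: a uniform pair (x1, x2) in (-1,1)^2 is kept only when
 * t = x1^2 + x2^2 <= 1; each kept pair yields two independent Gaussian
 * deviates t3, t4, which EP sums into sx, sy and tallies into annuli by
 * max(|t3|, |t4|). Compile with -lm. */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    double sx = 0.0, sy = 0.0;
    int accepted = 0, trials = 100000;
    srand(42);
    for (int k = 0; k < trials; k++) {
        double x1 = 2.0 * ((double)rand() / RAND_MAX) - 1.0;
        double x2 = 2.0 * ((double)rand() / RAND_MAX) - 1.0;
        double t = x1 * x1 + x2 * x2;
        if (t <= 1.0 && t > 0.0) {        /* t > 0 guards log(0) with rand() */
            double s = sqrt(-2.0 * log(t) / t);
            sx += x1 * s;                 /* first Gaussian deviate  */
            sy += x2 * s;                 /* second Gaussian deviate */
            accepted++;
        }
    }
    /* acceptance rate approaches pi/4 ~ 0.785 */
    printf("accepted %d of %d (%.3f)\n", accepted, trials, (double)accepted / trials);
    printf("sums: %f %f\n", sx, sy);
    return 0;
}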
IBK_messages.h
/* Copyright (c) 2001-2017, Institut für Bauklimatik, TU Dresden, Germany Written by A. Nicolai, H. Fechner, St. Vogelsang, A. Paepcke, J. Grunewald All rights reserved. This file is part of the IBK Library. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. This library contains derivative work based on other open-source libraries. See OTHER_LICENCES and source code headers for details. */ #ifndef IBK_messagesH #define IBK_messagesH #include <string> #include "IBK_FormatString.h" #include "IBK_MessageHandlerRegistry.h" namespace IBK { #define IBK_MessageFilter(x) if (IBK::MessageHandlerRegistry::instance().messageHandler()->consoleVerbosityLevel() >= (x)) /*! Avoid costly construction of messages that will be disregarded anyway because of too low console verbosity level. Use as drop-in replacement for IBK::IBK_Message, for example: \code // Mind the parantheses... IBK_FastMessage(IBK::VL_DEVELOPER)("message text", ...); \endcode */ #define IBK_FastMessage(x) if (IBK::MessageHandlerRegistry::instance().messageHandler()->consoleVerbosityLevel() >= (x))IBK::IBK_Message /*! Prototype for the message function. The function will be called from certain IBK classes. */ inline void IBK_Message(const std::string& msg, msg_type_t t = MSG_PROGRESS, const char * func_id = NULL, int verbose_level = VL_ALL) { #ifdef _OPENMP #pragma omp master #endif MessageHandlerRegistry::instance().msg(msg, t, func_id, verbose_level); } /*! Prototype for the message function. The function will be called from certain IBK classes. */ inline void IBK_Message(const IBK::FormatString& msg, msg_type_t t = MSG_PROGRESS, const char * func_id = NULL, int verbose_level = VL_ALL) { #ifdef _OPENMP #pragma omp master #endif MessageHandlerRegistry::instance().msg(msg.str(), t, func_id, verbose_level); } /*! Indentor can be used to increase the message indentation level within local function scopes. */ class MessageIndentor { public: /*! Constructor increases indentation. */ MessageIndentor() { ++(MessageHandlerRegistry::instance().messageHandler()->m_indentation); } /*! Destructor decreases indentation. 
*/ ~MessageIndentor() { --(MessageHandlerRegistry::instance().messageHandler()->m_indentation); } }; /*! Foreground and background colors. */ enum ConsoleColor { CF_BLACK = 0x00, CF_BLUE = 0x01, CF_GREEN = 0x02, CF_CYAN = 0x03, CF_RED = 0x04, CF_MAGENTA = 0x05, CF_YELLOW = 0x06, CF_GREY = 0x07, CF_DARK_GREY = 0x08, CF_BRIGHT_BLUE = 0x09, CF_BRIGHT_GREEN = 0x0A, CF_BRIGHT_CYAN = 0x0B, CF_BRIGHT_RED = 0x0C, CF_BRIGHT_MAGENTA = 0x0D, CF_BRIGHT_YELLOW = 0x0E, CF_WHITE = 0x0F, CB_BLACK = 0x00, CB_BLUE = 0x10, CB_GREEN = 0x20, CB_RED = 0x30, CB_MAGENTA = 0x40, CB_CYAN = 0x50, CB_YELLOW = 0x60, CB_GREY = 0x70, CB_DARK_GREY = 0x80, CB_BRIGHT_BLUE = 0x90, CB_BRIGHT_GREEN = 0xA0, CB_BRIGHT_RED = 0xB0, CB_BRIGHT_MAGENTA = 0xC0, CB_BRIGHT_CYAN = 0xD0, CB_BRIGHT_YELLOW = 0xE0, CB_WHITE = 0xF0 }; /*! Sets the color of the console text used for the next output. The color 'c' is a logical or of the foreground and background colors defined in the ConsoleColor enum. To set a bright yellow text color with a blue background use: \code set_console_text_color(CF_BRIGHT_YELLOW | CB_BLUE); \endcode */ void set_console_text_color(ConsoleColor c); /*! \file IBK_messages.h \brief Contains declarations for the IBK_Message() functions and the class MessageIndentor, central include file for IBK-Message system. */ extern const char * const TERMINAL_CODES[16]; } // namespace IBK #endif // IBK_messagesH
spmv_float_avx2.c
////Example of sparse matrix-vector multiply, using CSR (compressed sparse row format).
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Add timing support
#include <sys/timeb.h>

#define REAL float

double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

//#define DEFAULT_DIMSIZE 256

void print_array(char *title, char *name, REAL *A, int n, int m) {
    printf("%s:\n", title);
    int i, j;
    for (i = 0; i < n; i++) {
        for (j = 0; j < m; j++) {
            printf("%s[%d][%d]:%f ", name, i, j, A[i * m + j]);
        }
        printf("\n");
    }
    printf("\n");
}

int main(int argc, char *argv[]) {
    int *ia, *ja;
    REAL *a, *x, *y;
    int row, i, j, idx, n, nnzMax, nnz, nrows;
    n = 10240;
    //n = 24;
    if (argc > 1) n = atoi(argv[1]);
    nrows = n * n;
    nnzMax = nrows * 5;                            // 5-point stencil: at most 5 nonzeros per row
    ia = (int*)malloc((nrows + 1) * sizeof(int));  // nrows+1 entries: ia[nrows] marks the end of the last row
    ja = (int*)malloc(nnzMax * sizeof(int));
    a = (REAL*)malloc(nnzMax * sizeof(REAL));
    /* Allocate the source and result vectors */
    x = (REAL*)malloc(nrows * sizeof(REAL));
    y = (REAL*)malloc(nrows * sizeof(REAL));
    row = 0;
    nnz = 0;
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            ia[row] = nnz;
            if (i > 0)     { ja[nnz] = row - n; a[nnz] = -1.0; nnz++; }
            if (j > 0)     { ja[nnz] = row - 1; a[nnz] = -1.0; nnz++; }
            ja[nnz] = row; a[nnz] = 4.0; nnz++;
            if (j < n - 1) { ja[nnz] = row + 1; a[nnz] = -1.0; nnz++; }
            if (i < n - 1) { ja[nnz] = row + n; a[nnz] = -1.0; nnz++; }
            row++;
        }
    }
    ia[row] = nnz;
    /* Create the source (x) vector */
    for (i = 0; i < nrows; i++) x[i] = 1.0;
    double elapsed = read_timer();
    for (row = 0; row < nrows; row++) {
        REAL sum = 0.0;
        #pragma omp simd simdlen(8) reduction(+:sum)
        for (idx = ia[row]; idx < ia[row + 1]; idx++) {
            sum += a[idx] * x[ja[idx]];
        }
        y[row] = sum;
    }
    elapsed = read_timer() - elapsed;
    printf("spmv elapsed time(s): %.4f\n", elapsed);
    int errors = 0;
    for (row = 0; row < nrows; row++) {
        if (y[row] < 0) {
            //fprintf(stderr,"y[%d]=%f, fails consistency test\n", row, y[row]);
            ++errors;
        }
    }
    printf("Errors: %d\n", errors);
    free(ia); free(ja); free(a); free(x); free(y);
    return 0;
}
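/* Worked CSR example (an illustrative sketch): the 3x3 tridiagonal matrix
 *     [ 4 -1  0]
 *     [-1  4 -1]
 *     [ 0 -1  4]
 * stored in the same ia/ja/a convention as above -- ia[row] points at the
 * first stored entry of each row, ja holds column indices, a the values --
 * followed by one y = A*x product over it. */
#include <stdio.h>

int main(void) {
    int   ia[4] = {0, 2, 5, 7};
    int   ja[7] = {0, 1, 0, 1, 2, 1, 2};
    float a[7]  = {4, -1, -1, 4, -1, -1, 4};
    float x[3]  = {1, 1, 1}, y[3];
    for (int row = 0; row < 3; row++) {
        float sum = 0.0f;
        for (int idx = ia[row]; idx < ia[row + 1]; idx++)
            sum += a[idx] * x[ja[idx]];
        y[row] = sum;
    }
    printf("y = %g %g %g\n", y[0], y[1], y[2]);  /* 3 2 3 */
    return 0;
}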
GB_binop__land_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__land_int16) // A.*B function (eWiseMult): GB (_AemultB_08__land_int16) // A.*B function (eWiseMult): GB (_AemultB_02__land_int16) // A.*B function (eWiseMult): GB (_AemultB_04__land_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__land_int16) // A*D function (colscale): GB (_AxD__land_int16) // D*A function (rowscale): GB (_DxB__land_int16) // C+=B function (dense accum): GB (_Cdense_accumB__land_int16) // C+=b function (dense accum): GB (_Cdense_accumb__land_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_int16) // C=scalar+B GB (_bind1st__land_int16) // C=scalar+B' GB (_bind1st_tran__land_int16) // C=A+scalar GB (_bind2nd__land_int16) // C=A'+scalar GB (_bind2nd_tran__land_int16) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 0 // BinaryOp: cij = ((aij != 0) && (bij != 0)) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) && (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LAND || GxB_NO_INT16 || GxB_NO_LAND_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__land_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__land_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__land_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__land_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__land_int16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__land_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) alpha_scalar_in)) ; beta_scalar 
= (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__land_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__land_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__land_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__land_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__land_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = ((x != 0) && (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__land_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = ((aij != 0) && (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((x != 0) && (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__land_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t 
*restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) && (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__land_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
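To make the macro scheme above concrete: every template included by this file ultimately evaluates GB_BINOP, which for LAND over int16 is `z = ((x != 0) && (y != 0))`. A standalone sketch of that elementwise semantics (illustrative only, not GraphBLAS internals):

/* Standalone demo of the LAND_INT16 operator the generated file hard-codes:
 * each output is 1 iff both inputs are nonzero. */
#include <stdio.h>
#include <stdint.h>
int main(void) {
    int16_t a[] = {0, 2, -3, 0};
    int16_t b[] = {5, 0, -1, 0};
    for (int p = 0; p < 4; p++) {
        int16_t z = ((a[p] != 0) && (b[p] != 0));  /* GB_BINOP for LAND */
        printf("land(%d, %d) = %d\n", a[p], b[p], z);
    }
    return 0;
}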
GB_binop__pair_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__pair_int16 // A.*B function (eWiseMult): GB_AemultB__pair_int16 // A*D function (colscale): GB_AxD__pair_int16 // D*A function (rowscale): GB_DxB__pair_int16 // C+=B function (dense accum): GB_Cdense_accumB__pair_int16 // C+=b function (dense accum): GB_Cdense_accumb__pair_int16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pair_int16 // C=scalar+B (none) // C=scalar+B' (none) // C=A+scalar (none) // C=A'+scalar (none) // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = 1 #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = 1 ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PAIR || GxB_NO_INT16 || GxB_NO_PAIR_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__pair_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__pair_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__pair_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__pair_int16 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__pair_int16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__pair_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__pair_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info (none) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
Accessor.h
//=== libompx/Accessor.h - Accessor class definition ----*- C++ -*-===//
//
// Part of the OMP-UL Project, under the MIT License.
// See libompx/LICENSE.txt for license information
//
//===-------------------------------------------------------------===//
///
/// \file
/// This file contains the declaration of the Accessor class, which is
/// used to wrap pointers to dynamically allocated regions and provide
/// transparent data movement through OpenMP declare mapper
///
//===-------------------------------------------------------------===//

#ifndef LIBOMPX_ACCESSOR_H
#define LIBOMPX_ACCESSOR_H

#include <cstddef> // size_t

namespace libompx {

/// enum to hold different access modes
enum access_mode { READ, WRITE, READ_WRITE, ALLOC };

/// Base Accessor class declaration; not intended for direct use by the user
template <access_mode A, typename T>
class Accessor_Base {
protected:
  /// Constructor to be used only by child classes
  Accessor_Base(T *data, size_t len) : _data(data), _len(len){};

public:
  // _data and _len need to be public to be used in declare mapper
  /// Pointer to the original data allocated by user
  T *_data;
  /// Length of user-allocated data region
  size_t _len;
  /// Returns the length of the user-specified data region
  size_t getLen() const { return _len; };
  /// Returns a pointer to the start of the data region
  const T *begin() const { return _data; };
  /// Returns a pointer to the end of the data region
  const T *end() const { return _data + _len; };
};

/// Child Accessor 1: all access modes except READ
template <access_mode A, typename T>
class Accessor : public Accessor_Base<A, T> {
public:
  /// Accessor constructor
  Accessor(T *data, size_t len) : Accessor_Base<A, T>(data, len){};
  /// Returns a pointer to the user-specified data region
  T *getData() const { return this->_data; };
  /// [] operator overloading returns a reference to a data element
  /// to allow data manipulation in WRITE/READ_WRITE modes
  T &operator[](const int idx) const { return this->_data[idx]; };
};

/// Child Accessor 2: access_mode specialized for READ
template <typename T>
class Accessor<READ, T> : public Accessor_Base<READ, T> {
public:
  /// Accessor constructor
  Accessor(T *data, size_t len) : Accessor_Base<READ, T>(data, len){};
  /// Returns a const pointer to the user-specified data region
  /// to prevent modification of data elements (since access_mode is READ)
  const T *getData() const { return this->_data; };
  /// [] operator overloading returns a const reference to prevent
  /// modification of data elements (since access_mode is READ)
  const T &operator[](const int idx) const { return this->_data[idx]; };
};

} // namespace libompx

/// different mappers based on access mode
#pragma omp declare mapper(libompx::Accessor<libompx::READ, double> a) map(to: a._data[0:a._len])
#pragma omp declare mapper(libompx::Accessor<libompx::WRITE, double> a) map(from: a._data[0:a._len])
#pragma omp declare mapper(libompx::Accessor<libompx::READ_WRITE, double> a) map(tofrom: a._data[0:a._len])
#pragma omp declare mapper(libompx::Accessor<libompx::ALLOC, double> a) map(alloc: a._data[0:a._len])
#pragma omp declare mapper(libompx::Accessor<libompx::READ, float> a) map(to: a._data[0:a._len])
#pragma omp declare mapper(libompx::Accessor<libompx::WRITE, float> a) map(from: a._data[0:a._len])
#pragma omp declare mapper(libompx::Accessor<libompx::READ_WRITE, float> a) map(tofrom: a._data[0:a._len])
#pragma omp declare mapper(libompx::Accessor<libompx::ALLOC, float> a) map(alloc: a._data[0:a._len])
#pragma omp declare mapper(libompx::Accessor<libompx::READ, int> a) map(to: a._data[0:a._len])
#pragma omp declare mapper(libompx::Accessor<libompx::WRITE, int> a) map(from: a._data[0:a._len])
#pragma omp declare mapper(libompx::Accessor<libompx::READ_WRITE, int> a) map(tofrom: a._data[0:a._len])
#pragma omp declare mapper(libompx::Accessor<libompx::ALLOC, int> a) map(alloc: a._data[0:a._len])

#endif // LIBOMPX_ACCESSOR_H

///===--- vim: set ft=cpp sw=2 ts=2 sts=2 et: ----------------------===///
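A hypothetical driver for the header above (the doubling kernel, variable names, and sizes are illustrative; assumes a compiler with OpenMP 5.x declare-mapper support): mapping an Accessor in a target construct triggers the matching mapper, which moves `_data[0:_len]` in the direction encoded by the access mode, so no explicit array-section map clauses are needed.

// Hedged usage sketch for libompx::Accessor; not part of the library.
#include <cstdio>
#include <cstdlib>
#include "Accessor.h"

int main() {
    const int n = 1024;
    double *in  = (double *) std::malloc(n * sizeof(double));
    double *out = (double *) std::malloc(n * sizeof(double));
    for (int i = 0; i < n; i++) in[i] = 1.0;

    libompx::Accessor<libompx::READ,  double> a(in,  n);  // mapper: map(to: ...)
    libompx::Accessor<libompx::WRITE, double> b(out, n);  // mapper: map(from: ...)

    // The declare mappers above move a._data / b._data automatically.
    #pragma omp target teams distribute parallel for map(a) map(b)
    for (int i = 0; i < n; i++)
        b[i] = 2.0 * a[i];

    std::printf("out[0] = %f (expect 2.0)\n", out[0]);
    std::free(in);
    std::free(out);
    return 0;
}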
mm_p_collapse.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

int main(int argc, char* argv[]) {
    printf("entered main function!\n");
    int size = 1024;
    int (*matrix_A)[size] = malloc(sizeof(int[size][size]));
    int (*matrix_B)[size] = malloc(sizeof(int[size][size]));
    int (*result)[size] = malloc(sizeof(int[size][size]));
    printf("const set-up done!\n");
    // Initialize matrices
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            matrix_A[i][j] = rand();
            matrix_B[i][j] = rand();
            result[i][j] = 0;
        }
    }
    printf("matrix initialization done!\n");
    // Matrix multiplication
    double t1 = omp_get_wtime();
    omp_set_num_threads(16);
    // collapse(2), not collapse(3): collapsing the k loop as well would let
    // different threads update the same result[i][j] concurrently (a data race).
    #pragma omp parallel for collapse(2) shared(matrix_A, matrix_B, result)
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            for (int k = 0; k < size; k++) {
                result[i][j] += matrix_A[i][k] * matrix_B[k][j];
            }
        }
    }
    double t2 = omp_get_wtime();
    printf("matrix multiplication done!\n");
    printf("%f\n", t2 - t1);
    free(matrix_A);
    free(matrix_B);
    free(result);
    return 0;
}
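If collapsing all three loops is ever desired (say, to expose more parallelism when the matrices are tiny relative to the thread count), the shared accumulation itself must be protected, because k iterations belonging to one (i, j) may then run on different threads. A self-contained sketch of that variant (illustrative only; usually slower than collapse(2) because of the atomics):

/* Sketch: race-free collapse(3) via an atomic update. Assumption: small
 * fixed N and all-ones inputs, chosen only to make the check obvious. */
#include <stdio.h>
#define N 64
static int A[N][N], B[N][N], C[N][N];
int main(void) {
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++) { A[i][j] = 1; B[i][j] = 1; C[i][j] = 0; }
    #pragma omp parallel for collapse(3)
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            for (int k = 0; k < N; k++) {
                int t = A[i][k] * B[k][j];
                #pragma omp atomic      /* protects the shared accumulation */
                C[i][j] += t;
            }
    printf("C[0][0] = %d (expect %d)\n", C[0][0], N);
    return 0;
}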
mmp_cons.c
#include "XSbench_header.h" #ifdef MPI #include<mpi.h> #endif int main( int argc, char* argv[] ) { // ===================================================================== // Initialization & Command Line Read-In // ===================================================================== int version = 19; int mype = 0; double omp_start, omp_end; int nprocs = 1; unsigned long long verification; #ifdef MPI MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); MPI_Comm_rank(MPI_COMM_WORLD, &mype); #endif // Process CLI Fields -- store in "Inputs" structure Inputs in = read_CLI( argc, argv ); // Set number of OpenMP Threads #ifdef OPENMP omp_set_num_threads(in.nthreads); #endif // Print-out of Input Summary if( mype == 0 ) print_inputs( in, nprocs, version ); // ===================================================================== // Prepare Nuclide Energy Grids, Unionized Energy Grid, & Material Data // This is not reflective of a real Monte Carlo simulation workload, // therefore, do not profile this region! // ===================================================================== SimulationData SD; // If read from file mode is selected, skip initialization and load // all simulation data structures from file instead if( in.binary_mode == READ ) SD = binary_read(in); else SD = grid_init_do_not_profile( in, mype ); // If writing from file mode is selected, write all simulation data // structures to file if( in.binary_mode == WRITE && mype == 0 ) binary_write(in, SD); // ===================================================================== // Cross Section (XS) Parallel Lookup Simulation // This is the section that should be profiled, as it reflects a // realistic continuous energy Monte Carlo macroscopic cross section // lookup kernel. // ===================================================================== if( mype == 0 ) { printf("\n"); border_print(); center_print("SIMULATION", 79); border_print(); } // Start Simulation Timer omp_start = get_time(); // Run simulation if( in.simulation_method == EVENT_BASED ) { if( in.kernel_id == 0 ) verification = run_event_based_simulation(in, SD, mype); else if( in.kernel_id == 1 ) verification = run_event_based_simulation_optimization_1(in, SD, mype); else { printf("Error: No kernel ID %d found!\n", in.kernel_id); exit(1); } } else verification = run_history_based_simulation(in, SD, mype); if( mype == 0) { printf("\n" ); printf("Simulation complete.\n" ); } // End Simulation Timer omp_end = get_time(); // ===================================================================== // Output Results & Finalize // ===================================================================== // Final Hash Step verification = verification % 999983; // Print / Save Results and Exit int is_invalid_result = print_results( in, mype, omp_end-omp_start, nprocs, verification ); #ifdef MPI MPI_Finalize(); #endif return is_invalid_result; } //io.c // Prints program logo void logo(int version) { border_print(); printf( " __ __ ___________ _ \n" " \\ \\ / // ___| ___ \\ | | \n" " \\ V / \\ `--.| |_/ / ___ _ __ ___| |__ \n" " / \\ `--. 
\\ ___ \\/ _ \\ '_ \\ / __| '_ \\ \n" " / /^\\ \\/\\__/ / |_/ / __/ | | | (__| | | | \n" " \\/ \\/\\____/\\____/ \\___|_| |_|\\___|_| |_| \n\n" ); border_print(); center_print("Developed at Argonne National Laboratory", 79); char v[100]; sprintf(v, "Version: %d", version); center_print(v, 79); border_print(); } // Prints Section titles in center of 80 char terminal void center_print(const char *s, int width) { int length = strlen(s); int i; for (i=0; i<=(width-length)/2; i++) { fputs(" ", stdout); } fputs(s, stdout); fputs("\n", stdout); } int print_results( Inputs in, int mype, double runtime, int nprocs, unsigned long long vhash ) { // Calculate Lookups per sec int lookups = 0; if( in.simulation_method == HISTORY_BASED ) lookups = in.lookups * in.particles; else if( in.simulation_method == EVENT_BASED ) lookups = in.lookups; int lookups_per_sec = (int) ((double) lookups / runtime); // If running in MPI, reduce timing statistics and calculate average #ifdef MPI int total_lookups = 0; MPI_Barrier(MPI_COMM_WORLD); MPI_Reduce(&lookups_per_sec, &total_lookups, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD); #endif int is_invalid_result = 1; // Print output if( mype == 0 ) { border_print(); center_print("RESULTS", 79); border_print(); // Print the results printf("Threads: %d\n", in.nthreads); #ifdef MPI printf("MPI ranks: %d\n", nprocs); #endif #ifdef MPI printf("Total Lookups/s: "); fancy_int(total_lookups); printf("Avg Lookups/s per MPI rank: "); fancy_int(total_lookups / nprocs); #else printf("Runtime: %.3lf seconds\n", runtime); printf("Lookups: "); fancy_int(lookups); printf("Lookups/s: "); fancy_int(lookups_per_sec); #endif } unsigned long long large = 0; unsigned long long small = 0; if( in.simulation_method == EVENT_BASED ) { small = 945990; large = 952131; } else if( in.simulation_method == HISTORY_BASED ) { small = 941535; large = 954318; } if( strcmp(in.HM, "large") == 0 ) { if( vhash == large ) is_invalid_result = 0; } else if( strcmp(in.HM, "small") == 0 ) { if( vhash == small ) is_invalid_result = 0; } if(mype == 0 ) { if( is_invalid_result ) printf("Verification checksum: %llu (WARNING - INAVALID CHECKSUM!)\n", vhash); else printf("Verification checksum: %llu (Valid)\n", vhash); border_print(); } return is_invalid_result; } void print_inputs(Inputs in, int nprocs, int version ) { // Calculate Estimate of Memory Usage int mem_tot = estimate_mem_usage( in ); logo(version); center_print("INPUT SUMMARY", 79); border_print(); if( in.simulation_method == EVENT_BASED ) printf("Simulation Method: Event Based\n"); else printf("Simulation Method: History Based\n"); if( in.grid_type == NUCLIDE ) printf("Grid Type: Nuclide Grid\n"); else if( in.grid_type == UNIONIZED ) printf("Grid Type: Unionized Grid\n"); else printf("Grid Type: Hash\n"); printf("Materials: %d\n", 12); printf("H-M Benchmark Size: %s\n", in.HM); printf("Total Nuclides: %ld\n", in.n_isotopes); printf("Gridpoints (per Nuclide): "); fancy_int(in.n_gridpoints); if( in.grid_type == HASH ) { printf("Hash Bins: "); fancy_int(in.hash_bins); } if( in.grid_type == UNIONIZED ) { printf("Unionized Energy Gridpoints: "); fancy_int(in.n_isotopes*in.n_gridpoints); } if( in.simulation_method == HISTORY_BASED ) { printf("Particle Histories: "); fancy_int(in.particles); printf("XS Lookups per Particle: "); fancy_int(in.lookups); } printf("Total XS Lookups: "); fancy_int(in.lookups); #ifdef MPI printf("MPI Ranks: %d\n", nprocs); printf("OMP Threads per MPI Rank: %d\n", in.nthreads); printf("Mem Usage per MPI Rank (MB): "); fancy_int(mem_tot); #else 
printf("Threads: %d\n", in.nthreads); printf("Est. Memory Usage (MB): "); fancy_int(mem_tot); #endif printf("Binary File Mode: "); if( in.binary_mode == NONE ) printf("Off\n"); else if( in.binary_mode == READ) printf("Read\n"); else printf("Write\n"); border_print(); center_print("INITIALIZATION - DO NOT PROFILE", 79); border_print(); } void border_print(void) { printf( "===================================================================" "=============\n"); } // Prints comma separated integers - for ease of reading void fancy_int( long a ) { if( a < 1000 ) printf("%ld\n",a); else if( a >= 1000 && a < 1000000 ) printf("%ld,%03ld\n", a / 1000, a % 1000); else if( a >= 1000000 && a < 1000000000 ) printf("%ld,%03ld,%03ld\n",a / 1000000,(a % 1000000) / 1000,a % 1000 ); else if( a >= 1000000000 ) printf("%ld,%03ld,%03ld,%03ld\n", a / 1000000000, (a % 1000000000) / 1000000, (a % 1000000) / 1000, a % 1000 ); else printf("%ld\n",a); } void print_CLI_error(void) { printf("Usage: ./XSBench <options>\n"); printf("Options include:\n"); printf(" -m <simulation method> Simulation method (history, event)\n"); printf(" -t <threads> Number of OpenMP threads to run\n"); printf(" -s <size> Size of H-M Benchmark to run (small, large, XL, XXL)\n"); printf(" -g <gridpoints> Number of gridpoints per nuclide (overrides -s defaults)\n"); printf(" -G <grid type> Grid search type (unionized, nuclide, hash). Defaults to unionized.\n"); printf(" -p <particles> Number of particle histories\n"); printf(" -l <lookups> History Based: Number of Cross-section (XS) lookups per particle. Event Based: Total number of XS lookups.\n"); printf(" -h <hash bins> Number of hash bins (only relevant when used with \"-G hash\")\n"); printf(" -b <binary mode> Read or write all data structures to file. If reading, this will skip initialization phase. (read, write)\n"); printf(" -k <kernel ID> Specifies which kernel to run. 0 is baseline, 1, 2, etc are optimized variants. 
(0 is default.)\n"); printf("Default is equivalent to: -m history -s large -l 34 -p 500000 -G unionized\n"); printf("See readme for full description of default run values\n"); exit(4); } Inputs read_CLI( int argc, char * argv[] ) { Inputs input; // defaults to the history based simulation method input.simulation_method = HISTORY_BASED; // defaults to max threads on the system #ifdef OPENMP //input.nthreads = omp_get_num_procs(); input.nthreads = #P0; #else input.nthreads = 1; #endif // defaults to 355 (corresponding to H-M Large benchmark) input.n_isotopes = 355; // defaults to 11303 (corresponding to H-M Large benchmark) input.n_gridpoints = 11303; // defaults to 500,000 input.particles = 500000; // defaults to 34 input.lookups = 34; // default to unionized grid input.grid_type = UNIONIZED; // default to unionized grid input.hash_bins = 10000; // default to no binary read/write input.binary_mode = NONE; // defaults to baseline kernel input.kernel_id = 0; // defaults to H-M Large benchmark input.HM = (char *) malloc( 6 * sizeof(char) ); input.HM[0] = 'l' ; input.HM[1] = 'a' ; input.HM[2] = 'r' ; input.HM[3] = 'g' ; input.HM[4] = 'e' ; input.HM[5] = '\0'; // Check if user sets these int user_g = 0; int default_lookups = 1; int default_particles = 1; // Collect Raw Input for( int i = 1; i < argc; i++ ) { char * arg = argv[i]; // nthreads (-t) if( strcmp(arg, "-t") == 0 ) { if( ++i < argc ) input.nthreads = atoi(argv[i]); else print_CLI_error(); } // n_gridpoints (-g) else if( strcmp(arg, "-g") == 0 ) { if( ++i < argc ) { user_g = 1; input.n_gridpoints = atol(argv[i]); } else print_CLI_error(); } // Simulation Method (-m) else if( strcmp(arg, "-m") == 0 ) { char * sim_type; if( ++i < argc ) sim_type = argv[i]; else print_CLI_error(); if( strcmp(sim_type, "history") == 0 ) input.simulation_method = HISTORY_BASED; else if( strcmp(sim_type, "event") == 0 ) { input.simulation_method = EVENT_BASED; // Also resets default # of lookups if( default_lookups && default_particles ) { input.lookups = input.lookups * input.particles; input.particles = 0; } } else print_CLI_error(); } // lookups (-l) else if( strcmp(arg, "-l") == 0 ) { if( ++i < argc ) { input.lookups = atoi(argv[i]); default_lookups = 0; } else print_CLI_error(); } // hash bins (-h) else if( strcmp(arg, "-h") == 0 ) { if( ++i < argc ) input.hash_bins = atoi(argv[i]); else print_CLI_error(); } // particles (-p) else if( strcmp(arg, "-p") == 0 ) { if( ++i < argc ) { input.particles = atoi(argv[i]); default_particles = 0; } else print_CLI_error(); } // HM (-s) else if( strcmp(arg, "-s") == 0 ) { if( ++i < argc ) input.HM = argv[i]; else print_CLI_error(); } // grid type (-G) else if( strcmp(arg, "-G") == 0 ) { char * grid_type; if( ++i < argc ) grid_type = argv[i]; else print_CLI_error(); if( strcmp(grid_type, "unionized") == 0 ) input.grid_type = UNIONIZED; else if( strcmp(grid_type, "nuclide") == 0 ) input.grid_type = NUCLIDE; else if( strcmp(grid_type, "hash") == 0 ) input.grid_type = HASH; else print_CLI_error(); } // binary mode (-b) else if( strcmp(arg, "-b") == 0 ) { char * binary_mode; if( ++i < argc ) binary_mode = argv[i]; else print_CLI_error(); if( strcmp(binary_mode, "read") == 0 ) input.binary_mode = READ; else if( strcmp(binary_mode, "write") == 0 ) input.binary_mode = WRITE; else print_CLI_error(); } // kernel optimization selection (-k) else if( strcmp(arg, "-k") == 0 ) { if( ++i < argc ) { input.kernel_id = atoi(argv[i]); } else print_CLI_error(); } else print_CLI_error(); } // Validate Input // Validate nthreads if( 
input.nthreads < 1 ) print_CLI_error(); // Validate n_isotopes if( input.n_isotopes < 1 ) print_CLI_error(); // Validate n_gridpoints if( input.n_gridpoints < 1 ) print_CLI_error(); // Validate lookups if( input.lookups < 1 ) print_CLI_error(); // Validate Hash Bins if( input.hash_bins < 1 ) print_CLI_error(); // Validate HM size if( strcasecmp(input.HM, "small") != 0 && strcasecmp(input.HM, "large") != 0 && strcasecmp(input.HM, "XL") != 0 && strcasecmp(input.HM, "XXL") != 0 ) print_CLI_error(); // Set HM size specific parameters // (defaults to large) if( strcasecmp(input.HM, "small") == 0 ) input.n_isotopes = 68; else if( strcasecmp(input.HM, "XL") == 0 && user_g == 0 ) input.n_gridpoints = 238847; // sized to make 120 GB XS data else if( strcasecmp(input.HM, "XXL") == 0 && user_g == 0 ) input.n_gridpoints = 238847 * 2.1; // 252 GB XS data // Return input struct return input; } void binary_write( Inputs in, SimulationData SD ) { char * fname = "XS_data.dat"; printf("Writing all data structures to binary file %s...\n", fname); FILE * fp = fopen(fname, "w"); // Write SimulationData Object. Include pointers, even though we won't be using them. fwrite(&SD, sizeof(SimulationData), 1, fp); // Write heap arrays in SimulationData Object fwrite(SD.num_nucs, sizeof(int), SD.length_num_nucs, fp); fwrite(SD.concs, sizeof(double), SD.length_concs, fp); fwrite(SD.mats, sizeof(int), SD.length_mats, fp); fwrite(SD.nuclide_grid, sizeof(NuclideGridPoint), SD.length_nuclide_grid, fp); fwrite(SD.index_grid, sizeof(int), SD.length_index_grid, fp); fwrite(SD.unionized_energy_array, sizeof(double), SD.length_unionized_energy_array, fp); fclose(fp); } SimulationData binary_read( Inputs in ) { SimulationData SD; char * fname = "XS_data.dat"; printf("Reading all data structures from binary file %s...\n", fname); FILE * fp = fopen(fname, "r"); assert(fp != NULL); // Read SimulationData Object. Include pointers, even though we won't be using them. fread(&SD, sizeof(SimulationData), 1, fp); // Allocate space for arrays on heap SD.num_nucs = (int *) malloc(SD.length_num_nucs * sizeof(int)); SD.concs = (double *) malloc(SD.length_concs * sizeof(double)); SD.mats = (int *) malloc(SD.length_mats * sizeof(int)); SD.nuclide_grid = (NuclideGridPoint *) malloc(SD.length_nuclide_grid * sizeof(NuclideGridPoint)); SD.index_grid = (int *) malloc( SD.length_index_grid * sizeof(int)); SD.unionized_energy_array = (double *) malloc( SD.length_unionized_energy_array * sizeof(double)); // Read heap arrays into SimulationData Object fread(SD.num_nucs, sizeof(int), SD.length_num_nucs, fp); fread(SD.concs, sizeof(double), SD.length_concs, fp); fread(SD.mats, sizeof(int), SD.length_mats, fp); fread(SD.nuclide_grid, sizeof(NuclideGridPoint), SD.length_nuclide_grid, fp); fread(SD.index_grid, sizeof(int), SD.length_index_grid, fp); fread(SD.unionized_energy_array, sizeof(double), SD.length_unionized_energy_array, fp); fclose(fp); return SD; } //Simulation.c //////////////////////////////////////////////////////////////////////////////////// // BASELINE FUNCTIONS //////////////////////////////////////////////////////////////////////////////////// // All "baseline" code is at the top of this file. The baseline code is a simple // implementation of the algorithm, with only minor CPU optimizations in place. // Following these functions are a number of optimized variants, // which each deploy a different combination of optimizations strategies. By // default, XSBench will only run the baseline implementation. 
Optimized variants // must be specifically selected using the "-k <optimized variant ID>" command // line argument. //////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation(Inputs in, SimulationData SD, int mype) { if( mype == 0) printf("Beginning event based simulation...\n"); //////////////////////////////////////////////////////////////////////////////// // SUMMARY: Simulation Data Structure Manifest for "SD" Object // Here we list all heap arrays (and lengths) in SD that would need to be // offloaded manually if using an accelerator with a seperate memory space //////////////////////////////////////////////////////////////////////////////// // int * num_nucs; // Length = length_num_nucs; // double * concs; // Length = length_concs // int * mats; // Length = length_mats // double * unionized_energy_array; // Length = length_unionized_energy_array // int * index_grid; // Length = length_index_grid // NuclideGridPoint * nuclide_grid; // Length = length_nuclide_grid // // Note: "unionized_energy_array" and "index_grid" can be of zero length // depending on lookup method. // // Note: "Lengths" are given as the number of objects in the array, not the // number of bytes. //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Begin Actual Simulation Loop //////////////////////////////////////////////////////////////////////////////// unsigned long long verification = 0; //#pragma omp parallel for schedule(dynamic,#P1) reduction(+:verification) #pragma omp parallel for schedule(#P1) reduction(+:verification) for( int i = 0; i < in.lookups; i++ ) { // Set the initial seed value uint64_t seed = STARTING_SEED; // Forward seed to lookup index (we need 2 samples per lookup) seed = fast_forward_LCG(seed, 2*i); // Randomly pick an energy and material for the particle double p_energy = LCG_random_double(&seed); int mat = pick_mat(&seed); double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( p_energy, // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation SD.num_nucs, // 1-D array with number of nuclides per material SD.concs, // Flattened 2-D array with concentration of each nuclide in each material SD.unionized_energy_array, // 1-D Unionized energy array SD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level SD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation SD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) SD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we prevent thread // contention by using an OMP reduction on the verification value. 
// For accelerators, a different approach might be required // (e.g., atomics, reduction of thread-specific values in large // array via CUDA thrust, etc). double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } verification += max_idx+1; } return verification; } unsigned long long run_history_based_simulation(Inputs in, SimulationData SD, int mype) { if( mype == 0) printf("Beginning history based simulation...\n"); //////////////////////////////////////////////////////////////////////////////// // SUMMARY: Simulation Data Structure Manifest for "SD" Object // Here we list all heap arrays (and lengths) in SD that would need to be // offloaded manually if using an accelerator with a seperate memory space //////////////////////////////////////////////////////////////////////////////// // int * num_nucs; // Length = length_num_nucs; // double * concs; // Length = length_concs // int * mats; // Length = length_mats // double * unionized_energy_array; // Length = length_unionized_energy_array // int * index_grid; // Length = length_index_grid // NuclideGridPoint * nuclide_grid; // Length = length_nuclide_grid // // Note: "unionized_energy_array" and "index_grid" can be of zero length // depending on lookup method. // // Note: "Lengths" are given as the number of objects in the array, not the // number of bytes. //////////////////////////////////////////////////////////////////////////////// unsigned long long verification = 0; // Begin outer lookup loop over particles. This loop is independent. //#pragma omp parallel for schedule(dynamic, #P1) reduction(+:verification) #pragma omp parallel for schedule(#P1) reduction(+:verification) for( int p = 0; p < in.particles; p++ ) { // Set the initial seed value uint64_t seed = STARTING_SEED; // Forward seed to lookup index (we need 2 samples per lookup, and // we may fast forward up to 5 times after each lookup) seed = fast_forward_LCG(seed, p*in.lookups*2*5); // Randomly pick an energy and material for the particle double p_energy = LCG_random_double(&seed); int mat = pick_mat(&seed); // Inner XS Lookup Loop // This loop is dependent! // i.e., Next iteration uses data computed in previous iter. 
for( int i = 0; i < in.lookups; i++ ) { double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( p_energy, // Sampled neutron energy (in lethargy) mat, // Sampled material type neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation SD.num_nucs, // 1-D array with number of nuclides per material SD.concs, // Flattened 2-D array with concentration of each nuclide in each material SD.unionized_energy_array, // 1-D Unionized energy array SD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level SD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation SD.mats, // Flattened 2-D array with nuclide indices for each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookups) SD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we prevent thread // contention by using an OMP reduction on it. For other accelerators, // a different approach might be required (e.g., atomics, reduction // of thread-specific values in large array via CUDA thrust, etc) double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } verification += max_idx+1; // Randomly pick next energy and material for the particle // Also incorporates results from macro_xs lookup to // enforce loop dependency. // In a real MC app, this dependency is expressed in terms // of branching physics sampling, whereas here we are just // artificially enforcing this dependence based on fast // forwarding the LCG state uint64_t n_forward = 0; for( int j = 0; j < 5; j++ ) if( macro_xs_vector[j] > 1.0 ) n_forward++; if( n_forward > 0 ) seed = fast_forward_LCG(seed, n_forward); p_energy = LCG_random_double(&seed); mat = pick_mat(&seed); } } return verification; } // Calculates the microscopic cross section for a given nuclide & energy void calculate_micro_xs( double p_energy, int nuc, long n_isotopes, long n_gridpoints, double * restrict egrid, int * restrict index_data, NuclideGridPoint * restrict nuclide_grids, long idx, double * restrict xs_vector, int grid_type, int hash_bins ){ // Variables double f; NuclideGridPoint * low, * high; // If using only the nuclide grid, we must perform a binary search // to find the energy location in this particular nuclide's grid. if( grid_type == NUCLIDE ) { // Perform binary search on the Nuclide Grid to find the index idx = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], 0, n_gridpoints-1); // pull ptr from nuclide grid and check to ensure that // we're not reading off the end of the nuclide's grid if( idx == n_gridpoints - 1 ) low = &nuclide_grids[nuc*n_gridpoints + idx - 1]; else low = &nuclide_grids[nuc*n_gridpoints + idx]; } else if( grid_type == UNIONIZED) // Unionized Energy Grid - we already know the index, no binary search needed. 
{ // pull ptr from energy grid and check to ensure that // we're not reading off the end of the nuclide's grid if( index_data[idx * n_isotopes + nuc] == n_gridpoints - 1 ) low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc] - 1]; else low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc]]; } else // Hash grid { // load lower bounding index int u_low = index_data[idx * n_isotopes + nuc]; // Determine higher bounding index int u_high; if( idx == hash_bins - 1 ) u_high = n_gridpoints - 1; else u_high = index_data[(idx+1)*n_isotopes + nuc] + 1; // Check edge cases to make sure energy is actually between these // Then, if things look good, search for gridpoint in the nuclide grid // within the lower and higher limits we've calculated. double e_low = nuclide_grids[nuc*n_gridpoints + u_low].energy; double e_high = nuclide_grids[nuc*n_gridpoints + u_high].energy; int lower; if( p_energy <= e_low ) lower = 0; else if( p_energy >= e_high ) lower = n_gridpoints - 1; else lower = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], u_low, u_high); if( lower == n_gridpoints - 1 ) low = &nuclide_grids[nuc*n_gridpoints + lower - 1]; else low = &nuclide_grids[nuc*n_gridpoints + lower]; } high = low + 1; // calculate the re-useable interpolation factor f = (high->energy - p_energy) / (high->energy - low->energy); // Total XS xs_vector[0] = high->total_xs - f * (high->total_xs - low->total_xs); // Elastic XS xs_vector[1] = high->elastic_xs - f * (high->elastic_xs - low->elastic_xs); // Absorbtion XS xs_vector[2] = high->absorbtion_xs - f * (high->absorbtion_xs - low->absorbtion_xs); // Fission XS xs_vector[3] = high->fission_xs - f * (high->fission_xs - low->fission_xs); // Nu Fission XS xs_vector[4] = high->nu_fission_xs - f * (high->nu_fission_xs - low->nu_fission_xs); } // Calculates macroscopic cross section based on a given material & energy void calculate_macro_xs( double p_energy, int mat, long n_isotopes, long n_gridpoints, int * restrict num_nucs, double * restrict concs, double * restrict egrid, int * restrict index_data, NuclideGridPoint * restrict nuclide_grids, int * restrict mats, double * restrict macro_xs_vector, int grid_type, int hash_bins, int max_num_nucs ){ int p_nuc; // the nuclide we are looking up long idx = -1; double conc; // the concentration of the nuclide in the material // cleans out macro_xs_vector for( int k = 0; k < 5; k++ ) macro_xs_vector[k] = 0; // If we are using the unionized energy grid (UEG), we only // need to perform 1 binary search per macroscopic lookup. // If we are using the nuclide grid search, it will have to be // done inside of the "calculate_micro_xs" function for each different // nuclide in the material. if( grid_type == UNIONIZED ) idx = grid_search( n_isotopes * n_gridpoints, p_energy, egrid); else if( grid_type == HASH ) { double du = 1.0 / hash_bins; idx = p_energy / du; } // Once we find the pointer array on the UEG, we can pull the data // from the respective nuclide grids, as well as the nuclide // concentration data for the material // Each nuclide from the material needs to have its micro-XS array // looked up & interpolatied (via calculate_micro_xs). Then, the // micro XS is multiplied by the concentration of that nuclide // in the material, and added to the total macro XS array. 
// (Independent -- though if parallelizing, must use atomic operations // or otherwise control access to the xs_vector and macro_xs_vector to // avoid simultaneous writing to the same data structure) for( int j = 0; j < num_nucs[mat]; j++ ) { double xs_vector[5]; p_nuc = mats[mat*max_num_nucs + j]; conc = concs[mat*max_num_nucs + j]; calculate_micro_xs( p_energy, p_nuc, n_isotopes, n_gridpoints, egrid, index_data, nuclide_grids, idx, xs_vector, grid_type, hash_bins ); for( int k = 0; k < 5; k++ ) macro_xs_vector[k] += xs_vector[k] * conc; } } // binary search for energy on unionized energy grid // returns lower index long grid_search( long n, double quarry, double * restrict A) { long lowerLimit = 0; long upperLimit = n-1; long examinationPoint; long length = upperLimit - lowerLimit; while( length > 1 ) { examinationPoint = lowerLimit + ( length / 2 ); if( A[examinationPoint] > quarry ) upperLimit = examinationPoint; else lowerLimit = examinationPoint; length = upperLimit - lowerLimit; } return lowerLimit; } // binary search for energy on nuclide energy grid long grid_search_nuclide( long n, double quarry, NuclideGridPoint * A, long low, long high) { long lowerLimit = low; long upperLimit = high; long examinationPoint; long length = upperLimit - lowerLimit; while( length > 1 ) { examinationPoint = lowerLimit + ( length / 2 ); if( A[examinationPoint].energy > quarry ) upperLimit = examinationPoint; else lowerLimit = examinationPoint; length = upperLimit - lowerLimit; } return lowerLimit; } // picks a material based on a probabilistic distribution int pick_mat( uint64_t * seed ) { // I have a nice spreadsheet supporting these numbers. They are // the fractions (by volume) of material in the core. Not a // *perfect* approximation of where XS lookups are going to occur, // but this will do a good job of biasing the system nonetheless.
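// (Sketch of the selection logic below: a uniform roll in [0,1) is compared
// against partial sums of dist[]; the inner loop accumulates dist[i]..dist[1],
// and material 0 acts as the fallback when no partial sum exceeds the roll.)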
double dist[12]; dist[0] = 0.140; // fuel dist[1] = 0.052; // cladding dist[2] = 0.275; // cold, borated water dist[3] = 0.134; // hot, borated water dist[4] = 0.154; // RPV dist[5] = 0.064; // Lower, radial reflector dist[6] = 0.066; // Upper reflector / top plate dist[7] = 0.055; // bottom plate dist[8] = 0.008; // bottom nozzle dist[9] = 0.015; // top nozzle dist[10] = 0.025; // top of fuel assemblies dist[11] = 0.013; // bottom of fuel assemblies double roll = LCG_random_double(seed); // makes a pick based on the distro for( int i = 0; i < 12; i++ ) { double running = 0; for( int j = i; j > 0; j-- ) running += dist[j]; if( roll < running ) return i; } return 0; } double LCG_random_double(uint64_t * seed) { // LCG parameters const uint64_t m = 9223372036854775808ULL; // 2^63 const uint64_t a = 2806196910506780709ULL; const uint64_t c = 1ULL; *seed = (a * (*seed) + c) % m; return (double) (*seed) / (double) m; } uint64_t fast_forward_LCG(uint64_t seed, uint64_t n) { // LCG parameters const uint64_t m = 9223372036854775808ULL; // 2^63 uint64_t a = 2806196910506780709ULL; uint64_t c = 1ULL; n = n % m; uint64_t a_new = 1; uint64_t c_new = 0; while(n > 0) { if(n & 1) { a_new *= a; c_new = c_new * a + c; } c *= (a + 1); a *= a; n >>= 1; } return (a_new * seed + c_new) % m; } //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// // OPTIMIZED VARIANT FUNCTIONS //////////////////////////////////////////////////////////////////////////////////// // This section contains a number of optimized variants of some of the above // functions, each of which deploys a different combination of optimization strategies. // By default, XSBench will not run any of these variants. They // must be specifically selected using the "-k <optimized variant ID>" command // line argument. // // As fast parallel sorting will be required for these optimizations, we will // first define a set of key-value parallel quicksort routines. //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// // Parallel Quicksort Key-Value Sorting Algorithms //////////////////////////////////////////////////////////////////////////////////// // // These algorithms are based on the parallel quicksort implementation by // Eduard Lopez published at https://github.com/eduardlopez/quicksort-parallel // // Eduard's original version was for an integer type quicksort, but I have modified // it to form two different versions that can sort key-value pairs together without // having to bundle them into a separate object. Additionally, I have modified the // optimal chunk sizes and restricted the number of threads for the array sizing // that XSBench will be using by default.
// // Eduard's original implementation carries the following license, which applies to // the following functions only: // // void quickSort_parallel_internal_i_d(int* key,double * value, int left, int right, int cutoff) // void quickSort_parallel_i_d(int* key,double * value, int lenArray, int numThreads) // void quickSort_parallel_internal_d_i(double* key,int * value, int left, int right, int cutoff) // void quickSort_parallel_d_i(double* key,int * value, int lenArray, int numThreads) // // The MIT License (MIT) // // Copyright (c) 2016 Eduard López // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. // //////////////////////////////////////////////////////////////////////////////////// void quickSort_parallel_internal_i_d(int* key,double * value, int left, int right, int cutoff) { int i = left, j = right; int tmp; int pivot = key[(left + right) / 2]; { while (i <= j) { while (key[i] < pivot) i++; while (key[j] > pivot) j--; if (i <= j) { tmp = key[i]; key[i] = key[j]; key[j] = tmp; double tmp_v = value[i]; value[i] = value[j]; value[j] = tmp_v; i++; j--; } } } if ( ((right-left)<cutoff) ){ if (left < j){ quickSort_parallel_internal_i_d(key, value, left, j, cutoff); } if (i < right){ quickSort_parallel_internal_i_d(key, value, i, right, cutoff); } }else{ #pragma omp task { quickSort_parallel_internal_i_d(key, value, left, j, cutoff); } #pragma omp task { quickSort_parallel_internal_i_d(key, value, i, right, cutoff); } } } void quickSort_parallel_i_d(int* key,double * value, int lenArray, int numThreads){ // Set minimum problem size to still spawn threads for int cutoff = 10000; // For this problem size, more than 16 threads on CPU is not helpful if( numThreads > 16 ) numThreads = 16; #pragma omp parallel num_threads(numThreads) { #pragma omp single nowait { quickSort_parallel_internal_i_d(key,value, 0, lenArray-1, cutoff); } } } void quickSort_parallel_internal_d_i(double* key,int * value, int left, int right, int cutoff) { int i = left, j = right; double tmp; double pivot = key[(left + right) / 2]; { while (i <= j) { while (key[i] < pivot) i++; while (key[j] > pivot) j--; if (i <= j) { tmp = key[i]; key[i] = key[j]; key[j] = tmp; int tmp_v = value[i]; value[i] = value[j]; value[j] = tmp_v; i++; j--; } } } if ( ((right-left)<cutoff) ){ if (left < j){ quickSort_parallel_internal_d_i(key, value, left, j, cutoff); } if (i < right){ quickSort_parallel_internal_d_i(key, value, i, right, cutoff); } }else{ #pragma omp task { quickSort_parallel_internal_d_i(key, value, left, j, cutoff); }
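// (After partitioning, the subranges [left, j] and [i, right] are disjoint,
// so each can safely be sorted in its own OpenMP task.)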
#pragma omp task { quickSort_parallel_internal_d_i(key, value, i, right, cutoff); } } } void quickSort_parallel_d_i(double* key,int * value, int lenArray, int numThreads){ // Set minimum problem size to still spawn threads for int cutoff = 10000; // For this problem size, more than 16 threads on CPU is not helpful if( numThreads > 16 ) numThreads = 16; #pragma omp parallel num_threads(numThreads) { #pragma omp single nowait { quickSort_parallel_internal_d_i(key,value, 0, lenArray-1, cutoff); } } } //////////////////////////////////////////////////////////////////////////////////// // Optimization 1 -- Event-based Sample/XS Lookup kernel splitting + Sorting // lookups by material and energy //////////////////////////////////////////////////////////////////////////////////// // This kernel separates out the sampling and lookup regions of the event-based // model, and then sorts the lookups by material type and energy. The goal of this // optimization is to allow for greatly improved cache locality, and XS indices // loaded from memory may be re-used for multiple lookups. // // As efficient sorting is key for performance, we also must implement an // efficient key-value parallel sorting algorithm. We also experimented with using // the C++ version of thrust for these purposes, but found that our own implementation // was slightly faster than the thrust library version, so for speed and // simplicity we do not add the thrust dependency. //////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_1(Inputs in, SimulationData SD, int mype) { char * optimization_name = "Optimization 1 - Kernel splitting + full material & energy sort"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional data required by optimized kernel...\n"); size_t sz; size_t total_sz = 0; double start, stop; sz = in.lookups * sizeof(double); SD.p_energy_samples = (double *) malloc(sz); total_sz += sz; SD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); SD.mat_samples = (int *) malloc(sz); total_sz += sz; SD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Begin Actual Simulation //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Sample Materials and Energies //////////////////////////////////////////////////////////////////////////////// //#pragma omp parallel for schedule(dynamic, #P1) #pragma omp parallel for schedule(#P1) for( int i = 0; i < in.lookups; i++ ) { // Set the initial seed value uint64_t seed = STARTING_SEED; // Forward seed to lookup index (we need 2 samples per lookup) seed = fast_forward_LCG(seed, 2*i); // Randomly pick an energy and material for the particle double p_energy = LCG_random_double(&seed); int mat = pick_mat(&seed); SD.p_energy_samples[i] = p_energy; SD.mat_samples[i] = mat; } if(mype == 0) printf("finished sampling...\n"); //////////////////////////////////////////////////////////////////////////////// // Sort by Material
//////////////////////////////////////////////////////////////////////////////// start = get_time(); quickSort_parallel_i_d(SD.mat_samples, SD.p_energy_samples, in.lookups, in.nthreads); stop = get_time(); if(mype == 0) printf("Material sort took %.3lf seconds\n", stop-start); //////////////////////////////////////////////////////////////////////////////// // Sort by Energy //////////////////////////////////////////////////////////////////////////////// start = get_time(); // Count up number of each type of sample. int num_samples_per_mat[12] = {0}; for( int l = 0; l < in.lookups; l++ ) num_samples_per_mat[ SD.mat_samples[l] ]++; // Determine offsets int offsets[12] = {0}; for( int m = 1; m < 12; m++ ) offsets[m] = offsets[m-1] + num_samples_per_mat[m-1]; stop = get_time(); if(mype == 0) printf("Counting samples and offsets took %.3lf seconds\n", stop-start); start = stop; // Sort each material type by energy level int offset = 0; for( int m = 0; m < 12; m++ ) quickSort_parallel_d_i(SD.p_energy_samples + offsets[m],SD.mat_samples + offsets[m], num_samples_per_mat[m], in.nthreads); stop = get_time(); if(mype == 0) printf("Energy Sorts took %.3lf seconds\n", stop-start); //////////////////////////////////////////////////////////////////////////////// // Perform lookups for each material separately //////////////////////////////////////////////////////////////////////////////// start = get_time(); unsigned long long verification = 0; // Individual Materials offset = 0; for( int m = 0; m < 12; m++ ) { //#pragma omp parallel for schedule(dynamic,#P1) reduction(+:verification) #pragma omp parallel for schedule(#P1) reduction(+:verification) for( int i = offset; i < offset + num_samples_per_mat[m]; i++) { // load pre-sampled energy and material for the particle double p_energy = SD.p_energy_samples[i]; int mat = SD.mat_samples[i]; double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( p_energy, // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation SD.num_nucs, // 1-D array with number of nuclides per material SD.concs, // Flattened 2-D array with concentration of each nuclide in each material SD.unionized_energy_array, // 1-D Unionized energy array SD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level SD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation SD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) SD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we prevent thread // contention by using an OMP reduction on the verification value. // For accelerators, a different approach might be required // (e.g., atomics, reduction of thread-specific values in large // array via CUDA thrust, etc). 
double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } verification += max_idx+1; } offset += num_samples_per_mat[m]; } stop = get_time(); if(mype == 0) printf("XS Lookups took %.3lf seconds\n", stop-start); return verification; } //GridInit.c SimulationData grid_init_do_not_profile( Inputs in, int mype ) { // Structure to hold all allocated simulation data arrays SimulationData SD; // Keep track of how much data we're allocating size_t nbytes = 0; // Set the initial seed value uint64_t seed = 42; //////////////////////////////////////////////////////////////////// // Initialize Nuclide Grids //////////////////////////////////////////////////////////////////// if(mype == 0) printf("Initializing nuclide grids...\n"); // First, we need to initialize our nuclide grid. This comes in the form // of a flattened 2D array that holds all the information we need to define // the cross sections for all isotopes in the simulation. // The grid is composed of "NuclideGridPoint" structures, which hold the // energy level of the grid point and all associated XS data at that level. // An array of structures (AOS) is used instead of // a structure of arrays, as the grid points themselves are accessed in // a random order, but all cross section interaction channels and the // energy level are read whenever the gridpoint is accessed, meaning the // AOS is more cache efficient. // Initialize Nuclide Grid SD.length_nuclide_grid = in.n_isotopes * in.n_gridpoints; SD.nuclide_grid = (NuclideGridPoint *) malloc( SD.length_nuclide_grid * sizeof(NuclideGridPoint)); assert(SD.nuclide_grid != NULL); nbytes += SD.length_nuclide_grid * sizeof(NuclideGridPoint); for( int i = 0; i < SD.length_nuclide_grid; i++ ) { SD.nuclide_grid[i].energy = LCG_random_double(&seed); SD.nuclide_grid[i].total_xs = LCG_random_double(&seed); SD.nuclide_grid[i].elastic_xs = LCG_random_double(&seed); SD.nuclide_grid[i].absorbtion_xs = LCG_random_double(&seed); SD.nuclide_grid[i].fission_xs = LCG_random_double(&seed); SD.nuclide_grid[i].nu_fission_xs = LCG_random_double(&seed); } // Sort so that each nuclide has data stored in ascending energy order.
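// NGP_compare is defined elsewhere in XSBench; it must order NuclideGridPoint
// entries by their 'energy' field, roughly like this sketch (the exact body
// below is an assumption, not the project's code):
// int NGP_compare( const void * a, const void * b )
// {
// 	double ea = ((const NuclideGridPoint *) a)->energy;
// 	double eb = ((const NuclideGridPoint *) b)->energy;
// 	return (ea > eb) - (ea < eb);
// }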
#P2 for( int i = 0; i < in.n_isotopes; i++ ) qsort( &SD.nuclide_grid[i*in.n_gridpoints], in.n_gridpoints, sizeof(NuclideGridPoint), NGP_compare); // error debug check /* #P2 for( int i = 0; i < in.n_isotopes; i++ ) { printf("NUCLIDE %d ==============================\n", i); for( int j = 0; j < in.n_gridpoints; j++ ) printf("E%d = %lf\n", j, SD.nuclide_grid[i * in.n_gridpoints + j].energy); } */ //////////////////////////////////////////////////////////////////// // Initialize Acceleration Structure //////////////////////////////////////////////////////////////////// if( in.grid_type == NUCLIDE ) { SD.length_unionized_energy_array = 0; SD.length_index_grid = 0; } if( in.grid_type == UNIONIZED ) { if(mype == 0) printf("Initializing unionized grid...\n"); // Allocate space to hold the union of all nuclide energy data SD.length_unionized_energy_array = in.n_isotopes * in.n_gridpoints; SD.unionized_energy_array = (double *) malloc( SD.length_unionized_energy_array * sizeof(double)); assert(SD.unionized_energy_array != NULL ); nbytes += SD.length_unionized_energy_array * sizeof(double); // Copy energy data over from the nuclide energy grid #P2 for( int i = 0; i < SD.length_unionized_energy_array; i++ ) SD.unionized_energy_array[i] = SD.nuclide_grid[i].energy; // Sort unionized energy array qsort( SD.unionized_energy_array, SD.length_unionized_energy_array, sizeof(double), double_compare); // Allocate space to hold the acceleration grid indices SD.length_index_grid = SD.length_unionized_energy_array * in.n_isotopes; SD.index_grid = (int *) malloc( SD.length_index_grid * sizeof(int)); assert(SD.index_grid != NULL); nbytes += SD.length_index_grid * sizeof(int); // Generates the double indexing grid int * idx_low = (int *) calloc( in.n_isotopes, sizeof(int)); assert(idx_low != NULL ); double * energy_high = (double *) malloc( in.n_isotopes * sizeof(double)); assert(energy_high != NULL ); #P2 for( int i = 0; i < in.n_isotopes; i++ ) energy_high[i] = SD.nuclide_grid[i * in.n_gridpoints + 1].energy; for( long e = 0; e < SD.length_unionized_energy_array; e++ ) { for( long i = 0; i < in.n_isotopes; i++ ) { double unionized_energy = SD.unionized_energy_array[e]; if( unionized_energy < energy_high[i] ) SD.index_grid[e * in.n_isotopes + i] = idx_low[i]; else if( idx_low[i] == in.n_gridpoints - 2 ) SD.index_grid[e * in.n_isotopes + i] = idx_low[i]; else { idx_low[i]++; SD.index_grid[e * in.n_isotopes + i] = idx_low[i]; energy_high[i] = SD.nuclide_grid[i * in.n_gridpoints + idx_low[i] + 1].energy; } } } free(idx_low); free(energy_high); } if( in.grid_type == HASH ) { if(mype == 0) printf("Initializing hash grid...\n"); SD.length_unionized_energy_array = 0; SD.length_index_grid = in.hash_bins * in.n_isotopes; SD.index_grid = (int *) malloc( SD.length_index_grid * sizeof(int)); assert(SD.index_grid != NULL); nbytes += SD.length_index_grid * sizeof(int); double du = 1.0 / in.hash_bins; // For each energy level in the hash table #pragma omp parallel for for( long e = 0; e < in.hash_bins; e++ ) { double energy = e * du; // We need to determine the bounding energy levels for all isotopes for( long i = 0; i < in.n_isotopes; i++ ) { SD.index_grid[e * in.n_isotopes + i] = grid_search_nuclide( in.n_gridpoints, energy, SD.nuclide_grid + i * in.n_gridpoints, 0, in.n_gridpoints-1); } } } //////////////////////////////////////////////////////////////////// // Initialize Materials and Concentrations //////////////////////////////////////////////////////////////////// if(mype == 0) printf("Initializing material data...\n");
// Set the number of nuclides in each material SD.num_nucs = load_num_nucs(in.n_isotopes); SD.length_num_nucs = 12; // There are always 12 materials in XSBench // Initialize the flattened 2D grid of material data. The grid holds // a list of nuclide indices for each of the 12 material types. The // grid is allocated as a full square grid, even though not all // materials have the same number of nuclides. SD.mats = load_mats(SD.num_nucs, in.n_isotopes, &SD.max_num_nucs); SD.length_mats = SD.length_num_nucs * SD.max_num_nucs; // Initialize the flattened 2D grid of nuclide concentration data. The grid holds // a list of nuclide concentrations for each of the 12 material types. The // grid is allocated as a full square grid, even though not all // materials have the same number of nuclides. SD.concs = load_concs(SD.num_nucs, SD.max_num_nucs); SD.length_concs = SD.length_mats; if(mype == 0) printf("Initialization complete. Allocated %.0lf MB of data.\n", nbytes/1024.0/1024.0 ); return SD; }
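// A minimal end-to-end sketch (not part of XSBench itself): initialize the
// grids once, then run the optimized event-based kernel defined above.
// 'example_driver' is a hypothetical name; the Inputs and SimulationData
// types are the same ones used throughout this file.
unsigned long long example_driver( Inputs in, int mype )
{
	SimulationData SD = grid_init_do_not_profile( in, mype );
	return run_event_based_simulation_optimization_1( in, SD, mype );
}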
CGNSUtils.h
// Gmsh - Copyright (C) 1997-2019 C. Geuzaine, J.-F. Remacle // // See the LICENSE.txt file for license information. Please report all // issues on https://gitlab.onelab.info/gmsh/gmsh/issues. // // CGNSUtils.h - Copyright (C) 2008 S. Guzik, C. Geuzaine, J.-F. Remacle // FIXME: the contents of this file will be removed in a future release of Gmsh. #ifndef CGNS_UTILS_H #define CGNS_UTILS_H #include <map> #include <vector> #include <cstdlib> #include <cstring> #include <cstddef> #include "MElement.h" #include "GFace.h" #include "GRegion.h" #include "MEdgeHash.h" #include "MFaceHash.h" #include "GmshMessage.h" // FIXME: This code is faaaar too complicated for what it does - as is the whole // CGNS mess. Until we rewrite this whole thing, let's quiet GCC about the // reinterpret_casts dereferencing type-punned pointer, which will break // strict-aliasing rules: #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wstrict-aliasing" /******************************************************************************* * * This lightweight pool class attempts to reduce memory usage by having the * unused elements save links to each other using whatever memory they have * available. * * The elements (type T) need to implement the member functions: * * void set_pool_prev(void *const) - save an address * void *get_pool_prev() - return the saved address * * For example, * *----------* struct Box { int i; int j; void set_pool_prev(void *const p) { *reinterpret_cast<void**>(&i) = p; } void *get_pool_prev() { return *reinterpret_cast<void**>(&i); } }; *----------* * * Ensure that the memory used for each type T is at least 64 bits and note * that this Pool does not do any construction or destruction of the elements. * ******************************************************************************/ namespace CCon // "Custom Container" { template <typename T> class Block; /*============================================================================== * * Class Pool * * Purpose * ======= * * Interface to the pool * *============================================================================*/ template <typename T> class Pool { public: // Constructor Pool(const unsigned _blockSize = 128) : tailBlock(0), tailElement(0), blockSize(_blockSize), numUsedElement(0) { } // Destructor ~Pool() { delete_all_blocks(); } // Get an element void *get() { if(!tailElement) create_block(); void *const rval = tailElement; tailElement = static_cast<T *>(tailElement->get_pool_prev()); ++numUsedElement; return rval; } // Return an element void remit(T *const elem) { elem->set_pool_prev(tailElement); tailElement = elem; --numUsedElement; } // Free memory used by the pool void free_memory() { if(numUsedElement == 0) delete_all_blocks(); else Msg::Debug("Request to delete pool with used elements in " "CustomContainer.h"); } private: // Data Block<T> *tailBlock; T *tailElement; unsigned blockSize; unsigned numUsedElement; // Create a new block void create_block() { tailBlock = new Block<T>(tailBlock, blockSize); const unsigned back = blockSize - 1; tailBlock->array[back].set_pool_prev(tailElement); tailElement = &tailBlock->array[back]; for(int n = back; n--;) { T *const prev = tailElement--; tailElement->set_pool_prev(prev); } } // Delete all blocks void delete_all_blocks() { while(tailBlock) { Block<T> *const block = tailBlock; tailBlock = block->prev; delete block; } tailElement = 0; } // Copy and assignment are not permitted Pool(const Pool &); Pool &operator=(const Pool &); }; 
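// Usage sketch (mirroring the Box example documented above); the pool hands
// back raw storage and never runs constructors or destructors:
//
//   CCon::Pool<Box> pool(128);
//   Box *b = static_cast<Box *>(pool.get());
//   b->i = 1; b->j = 2;    // caller initializes the raw memory
//   pool.remit(b);         // storage is linked back into the free list
//   pool.free_memory();    // frees the blocks only if no element is in use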
/*============================================================================== * * Class Block * * Purpose * ======= * * Implements a block of uninitialized memory for type T * *============================================================================*/ template <typename T> class Block { // Data Block *const prev; T *array; // Constructor Block(Block *const _prev, const unsigned blockSize) : prev(_prev) { array = static_cast<T *>(std::malloc(sizeof(T) * blockSize)); } // Destructor ~Block() { std::free(array); } friend class Pool<T>; }; /******************************************************************************* * * This allocator-like class is used to implement many small vectors which are * sized according to the number of boundary vertices commonly expected to * connect to a vertex in 2D and 3D zones: * 2 - expected faces for vertex on zone edge in 2D * 6 - expected faces for vertex on zone face in 3D * 8 - overflow faces for vertex on zone face in 3D * 16 - approximate faces for vertices on zone edges and corners in 3D * Memory for the above sizes is pooled for reuse. Larger sizes are * permitted and will be allocated using 'std::malloc' and 'std::free'. * ******************************************************************************/ /*============================================================================== * * Class FaceAllocator * * Purpose * ======= * * Lightweight allocator for class FaceVector * * Notes * ===== * * - This lightweight allocator expects only primitive types. Constructors * and destructors are not called for type T. * - The memory is encapsulated in the Face* structures. Handles to these * structures use the data array. A pointer to the structure is regained * using the handle and an offset. This is mostly cautionary as the * offset is normally expected to be zero. * - No per-object data is stored as per normal requirements for allocators. * Critical OpenMP sections are defined since multiple threads can access * the allocator. * - If set_offsets is to be used, T must have a default constructor.
* *============================================================================*/ template <typename T> class FaceAllocator { public: // Memory structures struct Face2 { T faces[2]; void set_pool_prev(void *const p) { *reinterpret_cast<void **>(faces) = p; } void *get_pool_prev() { return *reinterpret_cast<void **>(faces); } ptrdiff_t get_offset() const { return reinterpret_cast<const char *>(this) - reinterpret_cast<const char *>(faces); } }; struct Face6 { T faces[6]; void set_pool_prev(void *const p) { *reinterpret_cast<void **>(faces) = p; } void *get_pool_prev() { return *reinterpret_cast<void **>(faces); } ptrdiff_t get_offset() const { return reinterpret_cast<const char *>(this) - reinterpret_cast<const char *>(faces); } }; struct Face8 { T faces[8]; void set_pool_prev(void *const p) { *reinterpret_cast<void **>(faces) = p; } void *get_pool_prev() { return *reinterpret_cast<void **>(faces); } ptrdiff_t get_offset() const { return reinterpret_cast<const char *>(this) - reinterpret_cast<const char *>(faces); } }; struct Face16 { T faces[16]; void set_pool_prev(void *const p) { *reinterpret_cast<void **>(faces) = p; } void *get_pool_prev() { return *reinterpret_cast<void **>(faces); } ptrdiff_t get_offset() const { return reinterpret_cast<const char *>(this) - reinterpret_cast<const char *>(faces); } }; // Set offsets static void set_offsets() { Face2 f2; offset2 = f2.get_offset(); Face6 f6; offset6 = f6.get_offset(); Face8 f8; offset8 = f8.get_offset(); Face16 f16; offset16 = f16.get_offset(); } // Release memory used by the pools static void free_pool_memory() { face2Pool.free_memory(); face6Pool.free_memory(); face8Pool.free_memory(); face16Pool.free_memory(); } // Allocate the array void allocate(const unsigned short nCapacity, T *&faces) { #if defined(_OPENMP1) #pragma omp critical(FaceAllocator_allocate) #endif { switch(nCapacity) { case 0: faces = 0; break; case 2: { Face2 *f2 = static_cast<Face2 *>(face2Pool.get()); faces = f2->faces; } break; case 6: { Face6 *f6 = static_cast<Face6 *>(face6Pool.get()); faces = f6->faces; } break; case 8: { Face8 *f8 = static_cast<Face8 *>(face8Pool.get()); faces = f8->faces; } break; case 16: { Face16 *f16 = static_cast<Face16 *>(face16Pool.get()); faces = f16->faces; } break; default: { faces = static_cast<T *>( std::malloc(sizeof(T) * nCapacity + sizeof(void *))); } break; } } // omp: end critical } // Grow the array of faces to capacity 2, 6, 8, 16, then 32 and doubling // thereafter. Pools are used for arrays of size 2, 6, 8, and 16.
void grow(unsigned short &nCapacity, T *&faces) { #if defined(_OPENMP1) #pragma omp critical(FaceAllocator_grow) #endif { switch(nCapacity) { case 0: { Face2 *f2 = static_cast<Face2 *>(face2Pool.get()); faces = f2->faces; nCapacity = 2; } break; case 2: { Face6 *f6 = static_cast<Face6 *>(face6Pool.get()); std::memcpy(f6->faces, faces, 2 * sizeof(T)); Face2 *f2 = reinterpret_cast<Face2 *>(faces + offset2); face2Pool.remit(f2); faces = f6->faces; nCapacity = 6; } break; case 6: { Face8 *f8 = static_cast<Face8 *>(face8Pool.get()); std::memcpy(f8->faces, faces, 6 * sizeof(T)); Face6 *f6 = reinterpret_cast<Face6 *>(faces + offset6); face6Pool.remit(f6); faces = f8->faces; nCapacity = 8; } break; case 8: { Face16 *f16 = static_cast<Face16 *>(face16Pool.get()); std::memcpy(f16->faces, faces, 8 * sizeof(T)); Face8 *f8 = reinterpret_cast<Face8 *>(faces + offset8); face8Pool.remit(f8); faces = f16->faces; nCapacity = 16; } break; // Allocate outside pool for more than 16 faces case 16: { Face16 *f16 = reinterpret_cast<Face16 *>(faces + offset16); faces = static_cast<T *>(std::malloc(sizeof(T) * 32 + sizeof(void *))); std::memcpy(faces, f16->faces, 16 * sizeof(T)); face16Pool.remit(f16); nCapacity = 32; } break; default: { T *newFace = static_cast<T *>( std::malloc(sizeof(T) * 2 * nCapacity + sizeof(void *))); std::memcpy(newFace, faces, nCapacity * sizeof(T)); std::free(faces); faces = newFace; nCapacity *= 2; } break; } } // omp: end critical } // Deallocate an array void deallocate(unsigned short &nCapacity, T *const faces) { #if defined(_OPENMP1) #pragma omp critical(FaceAllocator_deallocate) #endif { switch(nCapacity) { case 0: break; case 2: { Face2 *const f2 = reinterpret_cast<Face2 *>(faces + offset2); face2Pool.remit(f2); } break; case 6: { Face6 *const f6 = reinterpret_cast<Face6 *>(faces + offset6); face6Pool.remit(f6); } break; case 8: { Face8 *const f8 = reinterpret_cast<Face8 *>(faces + offset8); face8Pool.remit(f8); } break; case 16: { Face16 *const f16 = reinterpret_cast<Face16 *>(faces + offset16); face16Pool.remit(f16); } break; default: { std::free(faces); } break; } } // omp: end critical nCapacity = 0; } private: // Data static Pool<Face2> face2Pool; static Pool<Face6> face6Pool; static Pool<Face8> face8Pool; static Pool<Face16> face16Pool; static ptrdiff_t offset2; static ptrdiff_t offset6; static ptrdiff_t offset8; static ptrdiff_t offset16; }; // Definitions for static data members of class FaceAllocator template <typename T> Pool<typename FaceAllocator<T>::Face2> FaceAllocator<T>::face2Pool; template <typename T> Pool<typename FaceAllocator<T>::Face6> FaceAllocator<T>::face6Pool; template <typename T> Pool<typename FaceAllocator<T>::Face8> FaceAllocator<T>::face8Pool; template <typename T> Pool<typename FaceAllocator<T>::Face16> FaceAllocator<T>::face16Pool; template <typename T> ptrdiff_t FaceAllocator<T>::offset2 = 0; template <typename T> ptrdiff_t FaceAllocator<T>::offset6 = 0; template <typename T> ptrdiff_t FaceAllocator<T>::offset8 = 0; template <typename T> ptrdiff_t FaceAllocator<T>::offset16 = 0; /******************************************************************************* * * This class is like a std::vector but optimized for small vectors containing * elements with only primitive data. 
* ******************************************************************************/ /*============================================================================== * * Class FaceVector * * Purpose * ======= * * Lightweight small vectors * * Notes * ===== * * - The only way to add elements is by 'push_back' * - Erasing may reorder the elements. * - T must only contain primitive types * - init_memory() should be called before constructing any class * FaceVector<T> and release_memory() should be called after all classes * FaceVector<T> have been destroyed. These routines explicitly manage * memory used by pools in the allocator. * *============================================================================*/ template <typename T> class FaceVector : public FaceAllocator<T> { protected: using FaceAllocator<T>::deallocate; using FaceAllocator<T>::grow; public: // Constructor FaceVector() : _size(0), _capacity(0) {} // Unlike std::vector, the following sets the capacity to 'n'. The size is // still 0. FaceVector(const unsigned short n) : _size(0) { _capacity = valid_capacity(n); allocate(_capacity, faces); } // Destructor ~FaceVector() { deallocate(_capacity, faces); } // Index the vector const T &operator[](const int i) const { return faces[i]; } T &operator[](const int i) { return faces[i]; } // Add element to end T &push_back(const T val) { if(_size == _capacity) grow(_capacity, faces); return faces[_size++] = val; } // Just increment the size (push an empty element) T &push_empty() { if(_size == _capacity) grow(_capacity, faces); return faces[_size++]; } // Erase an element void erase(const int i) { faces[i] = faces[--_size]; } // Vector size and capacity unsigned size() const { return _size; } unsigned capacity() const { return _capacity; } // Memory management // Init sets offsets to ensure pointers can be recovered. It should be // called once before using FaceVector<T> static void init_memory() { FaceAllocator<T>::set_offsets(); } // This releases memory used by the pools if no pool elements are in use. It // should be called after use of FaceVector<T> is finished and all // FaceVector<T> classes have been destroyed. static void release_memory() { FaceAllocator<T>::free_pool_memory(); } private: // Data T *faces; unsigned short _size; unsigned short _capacity; // Get a valid capacity size (returns n above 16) unsigned valid_capacity(unsigned n) const { if(n == 0) return 0; if(n <= 2) return 2; if(n <= 6) return 6; if(n <= 8) return 8; if(n <= 16) return 16; return n; } }; } // End of namespace CCon #pragma GCC diagnostic pop /******************************************************************************* * * The classes in this file are used to define and generate representations of * zones. * ******************************************************************************/ /*============================================================================== * Forward declarations *============================================================================*/ template <unsigned DIM> class MZoneBoundary; /*============================================================================== * Required types *============================================================================*/ //--Record of unique elements struct ElemData { MElement *element; int index; ElemData(MElement *const _element) : element(_element), index(0) {} }; typedef std::vector<ElemData> ElementVec; //--Record of unique vertices typedef std::map<MVertex *, int, std::less<MVertex *> > VertexMap; //--Data for each face.
//--Ultimately, only faces on the boundary of the zone are //--stored. Value type for 'BoFaceMap'. struct FaceData { MElement *parentElement; int parentFace; int parentElementIndex; int faceIndex; FaceData(MElement *const _parentElement, const int _parentFace, const int _parentElementIndex) : parentElement(_parentElement), parentFace(_parentFace), parentElementIndex(_parentElementIndex), faceIndex(0) { } }; //--Provides information on boundary faces connected to a vertex. Value type //--for 'BoVertexMap' template <typename BFMapIt> struct ZoneVertexData { CCon::FaceVector<BFMapIt> faces; // Vector optimized for storing faces int index; }; /*--------------------------------------------------------------------* * User I/O *--------------------------------------------------------------------*/ struct ElementConnectivity { std::vector<int> connectivity; int numElem; int numBoElem; int iConn; // Constructor ElementConnectivity() : numElem(0), numBoElem(0), iConn(0) {} // Member functions void add_to_connectivity(const int i) { connectivity[iConn++] = i; } void clear() { connectivity.clear(); numElem = 0; numBoElem = 0; iConn = 0; } }; //--Output type for zone element connectivity typedef std::vector<ElementConnectivity> ElementConnectivityVec; //--Output type for vertices in the zone typedef std::vector<MVertex *> VertexVec; /*============================================================================== * Traits classes - that return information about a type *============================================================================*/ template <typename FaceT> struct LFaceTr; template <> struct LFaceTr<MEdge> { typedef std::map<MEdge, FaceData, Less_Edge> BoFaceMap; }; template <> struct LFaceTr<MFace> { typedef std::map<MFace, FaceData, Less_Face> BoFaceMap; }; /******************************************************************************* * * class: MZone * * Purpose * ======= * * Generates a definition of a zone based on entities and/or partitions. * * Template parameters: * DIM - dimension of the problem * * Notes * ===== * * - explicitly instantiated in 'MZone.cpp' * - this class uses some explicit memory management. Call preInit() before * constructing any class MZone and postDestroy() after all MZone classes * have been destroyed. * ******************************************************************************/ template <unsigned DIM> class MZone { /*============================================================================== * Internal types *============================================================================*/ public: typedef typename DimTr<DIM>::FaceT FaceT; typedef typename LFaceTr<FaceT>::BoFaceMap BoFaceMap; typedef typename std::map<const MVertex *, ZoneVertexData<typename BoFaceMap::const_iterator>, std::less<const MVertex *> > BoVertexMap; /*============================================================================== * Member functions *============================================================================*/ public: //--Default constructor. MZone() : numBoVert(0) { elemVec.reserve(8192); } /*--------------------------------------------------------------------* * Elements added from entities. * Note: It is much easier to keep these in the .cpp file but that * requires explicit instantiations for each Ent and EntIter.
* Currently, instantiations only exist for containers of type: * vector * More can be added as required at the end of MZone.cpp *--------------------------------------------------------------------*/ //--Add all elements in a container of entities. The specific type of entity //--is not known and must be specified as parameter 'Ent'. template <typename EntIter> void add_elements_in_entities(EntIter begin, EntIter end, const int partition = -1); //--Add elements in a single entity. template <typename EntPtr> void add_elements_in_entity(EntPtr entity, const int partition = -1); /*--------------------------------------------------------------------* * Reset the database *--------------------------------------------------------------------*/ void clear() { elemVec.clear(); vertMap.clear(); boFaceMap.clear(); boVertMap.clear(); zoneVertVec.clear(); for(int i = 0; i != MSH_MAX_NUM; ++i) { zoneElemConn[i].clear(); } numBoVert = 0; } /*--------------------------------------------------------------------* * Process/query the zone - only after all elements have been added! *--------------------------------------------------------------------*/ //--Compute the zone data int zoneData(); //--Total number of elements int totalElements() const { int numElem = 0; for(int iElemType = 0; iElemType != MSH_MAX_NUM; ++iElemType) numElem += zoneElemConn[iElemType].numElem; return numElem; } //--Number of element types int numElementTypes() const { int numElemType = 0; for(int iElemType = 0; iElemType != MSH_MAX_NUM; ++iElemType) if(zoneElemConn[iElemType].numElem > 0) ++numElemType; return numElemType; } //--Memory management static void preInit() { CCon::FaceVector<typename BoFaceMap::const_iterator>::init_memory(); } static void postDestroy() { CCon::FaceVector<typename BoFaceMap::const_iterator>::release_memory(); } /*============================================================================== * Member data *============================================================================*/ private: //--Data members ElementVec elemVec; // Set of unique elements VertexMap vertMap; // Set of unique vertices and associated // numbers in the zone BoFaceMap boFaceMap; // Map of boundary faces BoVertexMap boVertMap; // Map of boundary vertices public: // I/O VertexVec zoneVertVec; ElementConnectivity zoneElemConn[MSH_MAX_NUM]; // Connectivity for each type of element int numBoVert; /*============================================================================== * Friends *============================================================================*/ friend class MZoneBoundary<DIM>; }; /******************************************************************************* * * - The classes in this file are used to determine connectivity between * multiple zones and, eventually, the domain boundary vertices * ******************************************************************************/ /*============================================================================== * Required types *============================================================================*/ /*--------------------------------------------------------------------* * Internal zone connectivity *--------------------------------------------------------------------*/ //--Interface between two zones for connectivity struct ZonePair { int zone1; int zone2; ZonePair(const int _zone1, const int _zone2) { if(_zone1 < _zone2) { zone1 = _zone1; zone2 = _zone2; } else { zone1 = _zone2; zone2 = _zone1; } } }; inline bool operator==(const ZonePair &zpA, const ZonePair 
&zpB) { return (zpA.zone1 == zpB.zone1 && zpA.zone2 == zpB.zone2); } // Less than for std::map struct Less_ZonePair : public std::binary_function<ZonePair, ZonePair, bool> { bool operator()(const ZonePair &zpA, const ZonePair &zpB) const { if(zpA.zone1 < zpB.zone1) return true; if(zpA.zone1 > zpB.zone1) return false; if(zpA.zone2 < zpB.zone2) return true; return false; } }; //--Definition of the zone connectivity (a vector of vertex pairs for 2 zones). struct ZoneConnectivity { // Internal structures struct VertexPair // Pairs of vertices. Ordered based on // zone indices. { MVertex *vertex; int vertexIndex1; int vertexIndex2; // Constructors VertexPair() : vertex(0), vertexIndex1(0), vertexIndex2(0) {} VertexPair(MVertex *const _vertex, const int zone1, const int zone2, const int _vertexIndex1, const int _vertexIndex2) : vertex(_vertex), vertexIndex1(_vertexIndex1), vertexIndex2(_vertexIndex2) { if(zone2 < zone1) { vertexIndex1 = _vertexIndex2; vertexIndex2 = _vertexIndex1; } } }; // Data std::vector<VertexPair> vertexPairVec; // Constructor ZoneConnectivity() { vertexPairVec.reserve(32); // Avoid small reallocations by push_back() } }; struct ZoneConnectivityByElem { // Internal structures struct ElementPair // Pairs of elements. Ordered based on // zone indices { int elemIndex1; int elemIndex2; // Constructors ElementPair() : elemIndex1(0), elemIndex2(0) {} ElementPair(const int zone1, const int zone2, const int _elemIndex1, const int _elemIndex2) : elemIndex1(_elemIndex1), elemIndex2(_elemIndex2) { if(zone2 < zone1) { elemIndex1 = _elemIndex2; elemIndex2 = _elemIndex1; } } }; // Data std::vector<ElementPair> elemPairVec; // Constructor ZoneConnectivityByElem() { elemPairVec.reserve(32); // Avoid small reallocations by push_back() } }; //--Output type for zone connectivity typedef std::map<ZonePair, ZoneConnectivity, Less_ZonePair> ZoneConnMap; /*--------------------------------------------------------------------* * Boundaries at the domain extent *--------------------------------------------------------------------*/ struct VertexBoundary { int zoneIndex; int bcPatchIndex; SVector3 normal; MVertex *vertex; int vertexIndex; // Constructors VertexBoundary() : vertex(0), vertexIndex(0) {} VertexBoundary(const int _zoneIndex, const int _bcPatchIndex, const SVector3 &_normal, MVertex *const _vertex, const int _vertexIndex) : zoneIndex(_zoneIndex), bcPatchIndex(_bcPatchIndex), normal(_normal), vertex(_vertex), vertexIndex(_vertexIndex) { } }; typedef std::vector<VertexBoundary> ZoneBoVec; //--Function object for sorting the ZoneBoVec vector by zone and then BC patch //--index struct ZoneBoVecSort { bool operator()(const int i0, const int i1) { if(zoneBoVec[i0].zoneIndex == zoneBoVec[i1].zoneIndex) return zoneBoVec[i0].bcPatchIndex < zoneBoVec[i1].bcPatchIndex; return zoneBoVec[i0].zoneIndex < zoneBoVec[i1].zoneIndex; } ZoneBoVecSort(const ZoneBoVec &_zoneBoVec) : zoneBoVec(_zoneBoVec) {} private: const ZoneBoVec &zoneBoVec; }; struct ElementBoundary { int zoneIndex; int bcPatchIndex; SVector3 normal; int elemIndex; // Constructors ElementBoundary() : elemIndex(0) {} ElementBoundary(const int _zoneIndex, const int _bcPatchIndex, const SVector3 &_normal, const int _elemIndex) : zoneIndex(_zoneIndex), bcPatchIndex(_bcPatchIndex), normal(_normal), elemIndex(_elemIndex) { } }; /******************************************************************************* * * class: MZoneBoundary * * Purpose * ======= * * Determines the connectivity between zones (internal boundaries) and * vertices/elements at 
the extent of the domain (external boundaries) * * Template parameters: * DIM - dimension of the problem * * Notes * ===== * * - explicitly instantiated in 'MZoneBoundary.cpp' * - this class uses some explicit memory management. Call preInit() before * constructing any class MZoneBoundary and postDestroy() after all * MZoneBoundary classes have been destroyed. * ******************************************************************************/ template <unsigned DIM> class MZoneBoundary { /*============================================================================== * Internal types *============================================================================*/ //--Type of face (MEdge or MFace) private: typedef typename DimTr<DIM>::FaceT FaceT; //--Data stored for connectivity of vertices public: template <typename FaceT> struct GlobalVertexData { struct FaceDataB { // NBN: cannot use a FaceT object in FaceVector. // class FaceT has embedded std::vector objects; // custom allocator for FaceVector<T> does not call ctors, // but std:: dtors will try to delete _v, _si // // Simple fix: use a pointer to FaceT, then build the // FaceT object once the FaceDataB structure has been // safely added to the container (push_back) // FaceT face; // NBN: FaceT contains std:: containers FaceT *face; // NBN: use FaceT* (then init in two steps) MElement *parentElement; int parentFace; int faceIndex; int zoneIndex; FaceDataB(const int _zoneIndex, const typename MZone<DIM>::BoFaceMap::const_iterator bFMapIt) : // face(bFMapIt->first), face(nullptr), // NBN: need to load this after insertion into container parentElement(bFMapIt->second.parentElement), parentFace(bFMapIt->second.parentFace), faceIndex(bFMapIt->second.faceIndex), zoneIndex(_zoneIndex) { } // private: // Default constructor should be private ... but // currently // fails on some compilers (earlier versions of g++?) // The default constructor is required by 'set_offsets()' in // class 'FaceAllocator'. This is invoked by preInit() below. FaceDataB(); friend class CCon::FaceAllocator<FaceDataB>; }; struct ZoneData { int vertexIndex; int zoneIndex; ZoneData(const int _vertexIndex, const int _zoneIndex) : vertexIndex(_vertexIndex), zoneIndex(_zoneIndex) { } // private: // Default constructor should be private ... but // currently // fails on some compilers (earlier versions of g++?) // The default constructor is required by 'set_offsets()' in // class 'FaceAllocator'. This is invoked by preInit() below. ZoneData() : vertexIndex(0), zoneIndex(0) // NBN: init members { } friend class CCon::FaceAllocator<ZoneData>; }; CCon::FaceVector<FaceDataB> faces; CCon::FaceVector<ZoneData> zoneData; // A 'FaceVector' is not strictly // optimized for the vertices but should // still work quite well. // Constructor GlobalVertexData() {} }; private: typedef std::map<const MVertex *, GlobalVertexData<FaceT>, std::less<const MVertex *> > GlobalBoVertexMap; /*============================================================================== * Member functions *============================================================================*/ public: //--Reset the database void clear() { // NBN: using FaceT* so need to dealloc: int icount = 0; typename GlobalBoVertexMap::iterator itEnd = globalBoVertMap.end(); for(typename GlobalBoVertexMap::iterator itBoV = globalBoVertMap.begin(); itBoV != itEnd; ++itBoV) { // ... 
// clear the faces GlobalVertexData<FaceT> &ref = itBoV->second; std::size_t nf = ref.faces.size(); for(std::size_t i = 0; i < nf; ++i) { ++icount; FaceT *p = ref.faces[i].face; if(p) { delete(p); p = nullptr; } } } Msg::Info("cleared %d faces.", icount); // finally, clear the container globalBoVertMap.clear(); } //--Add a zone to the global map of boundary vertices and return connectivity //--between zones. int interiorBoundaryVertices(const int newZoneIndex, const MZone<DIM> &mZone, ZoneConnMap &zoneConnMap); //--Return exterior boundary vertices (unconnected vertices at the extent of //--the domain) int exteriorBoundaryVertices(const int normalSource, ZoneBoVec &zoneBoVec); //--Memory management static void preInit() { CCon::FaceVector< typename GlobalVertexData<FaceT>::FaceDataB>::init_memory(); CCon::FaceVector<typename GlobalVertexData<FaceT>::ZoneData>::init_memory(); } static void postDestroy() { CCon::FaceVector< typename GlobalVertexData<FaceT>::FaceDataB>::release_memory(); CCon::FaceVector< typename GlobalVertexData<FaceT>::ZoneData>::release_memory(); } /*============================================================================== * Member data *============================================================================*/ private: //--Data members GlobalBoVertexMap globalBoVertMap; // Map of unconnected boundary vertices // for the entire domain }; #endif
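// Usage sketch for FaceVector<T> (T must contain only primitive data, per the
// notes above); init_memory()/release_memory() must bracket all instances:
//
//   CCon::FaceVector<int>::init_memory();
//   { CCon::FaceVector<int> v; v.push_back(3); v.push_back(7); v.erase(0); }
//   CCon::FaceVector<int>::release_memory();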
if-clause.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> int main(int argc, char **argv) { int i, n=20, tid; int a[n],suma=0,sumalocal; if(argc < 2) { fprintf(stderr,"[ERROR]-Missing number of iterations\n"); exit(-1); } n = atoi(argv[1]); if (n>20) n=20; for (i=0; i<n; i++) { a[i] = i; } #pragma omp parallel if(n>4) default(none) \ private(sumalocal,tid) shared(a,suma,n) // Will not execute in parallel if n <= 4 { sumalocal=0; tid=omp_get_thread_num(); #pragma omp for private(i) schedule(static) nowait for (i=0; i<n; i++) { sumalocal += a[i]; printf(" thread %d sum of a[%d]=%d sumalocal=%d \n", tid,i,a[i],sumalocal); } #pragma omp atomic suma += sumalocal; #pragma omp barrier #pragma omp master printf("master thread=%d prints suma=%d\n",tid,suma); } return(0); }
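/* A shorter equivalent of the manual sum above (a sketch): OpenMP's reduction
 * clause replaces the sumalocal/atomic pattern, and the same if(n>4) clause
 * still serializes execution for small inputs. */
static int sum_with_reduction(const int *a, int n)
{
    int i, suma = 0;
    #pragma omp parallel for reduction(+:suma) if(n>4) schedule(static)
    for (i = 0; i < n; i++)
        suma += a[i];
    return suma;
}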
ten_tusscher_2004_epi_S2_6.c
//Original Ten Tusscher #include <assert.h> #include <stdlib.h> #include "ten_tusscher_2004_epi_S2_6.h" GET_CELL_MODEL_DATA(init_cell_model_data) { assert(cell_model); if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } //TODO: this should be called only once for the whole mesh, like in the GPU code SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.4894492745523,0.00131195917143657,0.777766728508175,0.777585290631298,0.000176758153308886,0.484269955752236,0.00295685208736128,0.999998321836575,1.95906508827956e-08,1.91104228355852e-05,0.999780202348919,1.00758709900846,0.999999232044404,3.34860480724804e-05,1.14889991276436,9.63921436951658,139.651596269085}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = i; for (int j = 0; j < num_steps; ++j) { solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu(real dt, real *sv, real stim_current) { assert(sv); real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; ///#ifdef EPI real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL /// real Gks=0.062; ///#endif //Parameters for Ik1 real GK1=5.405; //Parameters for Ito //#ifdef EPI real Gto=0.294; //#endif // #ifdef ENDO // real Gto=0.073; //#endif //#ifdef MCELL // real Gto=0.294; ///#endif //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters 
[]={13.9638301021152,0.000517766924545127,0.000156086968039485,0.000506187557537279,0.270335599525829,0.157560173074360,0.171504329558663,4.25106989337320,0.0167287504279960,2.10808214267720,1099.67407156799,0.000527967816332930,0.290348999546199,0.0200000000000000,0.00458996698216110,9.00005264542678e-06}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; 
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; 
rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
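/*
 * A minimal sketch of the gate-update rule used above. Each Hodgkin-Huxley
 * gate obeys dx/dt = (x_inf - x)/tau, whose exact solution over one step dt
 * is x(t+dt) = x_inf - (x_inf - x(t))*exp(-dt/tau) -- the Rush-Larsen update
 * applied to rDY_[1..10] (and, with precomputed exponentials, to sfca and sg),
 * while the membrane voltage itself is advanced with plain forward Euler.
 * The helper name below is illustrative, not part of the model header.
 */
#include <math.h>

static inline double rush_larsen_step(double x, double x_inf,
                                      double tau, double dt) {
    return x_inf - (x_inf - x) * exp(-dt / tau);
}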
stencil_opt4.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include "malloc2D.h" #include "timer.h" #define SWAP_PTR(xnew,xold,xtmp) (xtmp=xnew, xnew=xold, xold=xtmp) int main(int argc, char *argv[]) { #pragma omp parallel if (omp_get_thread_num() == 0) printf("Running with %d thread(s)\n",omp_get_num_threads()); struct timespec tstart_init, tstart_flush, tstart_stencil, tstart_total; double init_time = 0.0, flush_time = 0.0, stencil_time = 0.0, total_time = 0.0; int imax=2002, jmax = 2002; double** xtmp; double** x = malloc2D(jmax, imax); double** xnew = malloc2D(jmax, imax); int *flush = (int *)malloc(jmax*imax*sizeof(int)*4); cpu_timer_start(&tstart_total); cpu_timer_start(&tstart_init); #pragma omp parallel for for (int j = 0; j < jmax; j++){ for (int i = 0; i < imax; i++){ xnew[j][i] = 0.0; x[j][i] = 5.0; } } #pragma omp parallel for for (int j = jmax/2 - 5; j < jmax/2 + 5; j++){ for (int i = imax/2 - 5; i < imax/2 -1; i++){ x[j][i] = 400.0; } } init_time += cpu_timer_stop(tstart_init); #pragma omp parallel { int thread_id = omp_get_thread_num(); for (int iter = 0; iter < 10000; iter++){ if (thread_id ==0) cpu_timer_start(&tstart_flush); #pragma omp for nowait for (int l = 1; l < jmax*imax*4; l++){ flush[l] = 1; } if (thread_id == 0){ flush_time += cpu_timer_stop(tstart_flush); cpu_timer_start(&tstart_stencil); } #pragma omp for for (int j = 1; j < jmax-1; j++){ for (int i = 1; i < imax-1; i++){ xnew[j][i] = ( x[j][i] + x[j][i-1] + x[j][i+1] + x[j-1][i] + x[j+1][i] )/5.0; } } if (thread_id == 0){ stencil_time += cpu_timer_stop(tstart_stencil); SWAP_PTR(xnew, x, xtmp); if (iter%1000 == 0) printf("Iter %d\n",iter); } #pragma omp barrier /* make thread 0's pointer swap visible to all threads before the next sweep */ } } // end omp parallel total_time += cpu_timer_stop(tstart_total); printf("Timing is init %f flush %f stencil %f total %f\n",init_time,flush_time,stencil_time,total_time); free(x); free(xnew); free(flush); }
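/*
 * A minimal sketch of the double-buffer pointer swap behind the SWAP_PTR
 * macro above: rather than copying xnew back into x after every sweep, the
 * two array pointers are exchanged in O(1). The comma-operator macro is
 * equivalent to the three assignments spelled out here.
 */
void swap_ptrs(double ***xnew, double ***x) {
    double **xtmp = *xnew;   /* save the just-computed buffer        */
    *xnew = *x;              /* old data becomes the scratch buffer  */
    *x = xtmp;               /* new data becomes the next input      */
}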
omp_taskloop_num_tasks.c
// RUN: %libomp-compile-and-run // RUN: %libomp-compile && env KMP_TASKLOOP_MIN_TASKS=1 %libomp-run // These compilers don't support the taskloop construct // UNSUPPORTED: gcc-4, gcc-5, icc-16 /* * Test for taskloop * Method: calculate how many times the iteration space is dispatched * and check that the number of dispatched chunks does not exceed the requested num_tasks * It is possible for two adjacent chunks to be executed by the same thread */ #include <stdio.h> #include <omp.h> #include <stdlib.h> #include "omp_testsuite.h" #define CFDMAX_SIZE 1120 int test_omp_taskloop_num_tasks() { int i; int *tids; int *tidsArray; int count; int result = 0; int num_tasks; for (num_tasks = 1; num_tasks < 120; ++num_tasks) { count = 0; tidsArray = (int *)malloc(sizeof(int) * CFDMAX_SIZE); tids = tidsArray; #pragma omp parallel shared(tids) { int i; #pragma omp master #pragma omp taskloop num_tasks(num_tasks) for (i = 0; i < CFDMAX_SIZE; i++) { tids[i] = omp_get_thread_num(); } } for (i = 0; i < CFDMAX_SIZE - 1; ++i) { if (tids[i] != tids[i + 1]) { count++; } } if (count > num_tasks) { fprintf(stderr, "counted too many tasks: (wanted %d, got %d)\n", num_tasks, count); result++; } free(tidsArray); } return (result==0); } int main() { int i; int num_failed=0; for (i = 0; i < REPETITIONS; i++) { if (!test_omp_taskloop_num_tasks()) { num_failed++; } } return num_failed; }
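/*
 * A minimal sketch of the companion clause (illustrative, not part of the
 * test above): where num_tasks(n) fixes the number of generated tasks,
 * grainsize(g) instead fixes roughly the number of iterations per task, so
 * the task count becomes about size/g.
 */
#include <omp.h>

void taskloop_grainsize_demo(int *tids, int size) {
    #pragma omp parallel
    #pragma omp master
    #pragma omp taskloop grainsize(32)
    for (int i = 0; i < size; i++) {
        tids[i] = omp_get_thread_num();  /* each task covers ~32 iterations */
    }
}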
grid_ao_drv.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <[email protected]> */ #include <stdlib.h> #include <math.h> #include <complex.h> #include "config.h" #include "grid_ao_drv.h" #define MIN(X,Y) ((X)<(Y)?(X):(Y)) #define MAX(X,Y) ((X)>(Y)?(X):(Y)) double CINTcommon_fac_sp(int l); void GTOnabla1(double *fx1, double *fy1, double *fz1, double *fx0, double *fy0, double *fz0, int l, double a) { int i, n; double a2 = -2 * a; for (n = 0; n < SIMDD; n++) { fx1[n] = a2*fx0[SIMDD+n]; fy1[n] = a2*fy0[SIMDD+n]; fz1[n] = a2*fz0[SIMDD+n]; } for (i = 1; i <= l; i++) { for (n = 0; n < SIMDD; n++) { fx1[i*SIMDD+n] = i*fx0[(i-1)*SIMDD+n] + a2*fx0[(i+1)*SIMDD+n]; fy1[i*SIMDD+n] = i*fy0[(i-1)*SIMDD+n] + a2*fy0[(i+1)*SIMDD+n]; fz1[i*SIMDD+n] = i*fz0[(i-1)*SIMDD+n] + a2*fz0[(i+1)*SIMDD+n]; } } } /* * r - R_O = (r-R_i) + ri, ri = (x,y,z) = R_i - R_O */ void GTOx1(double *fx1, double *fy1, double *fz1, double *fx0, double *fy0, double *fz0, int l, double *ri) { int i, n; for (i = 0; i <= l; i++) { for (n = 0; n < SIMDD; n++) { fx1[i*SIMDD+n] = ri[0] * fx0[i*SIMDD+n] + fx0[(i+1)*SIMDD+n]; fy1[i*SIMDD+n] = ri[1] * fy0[i*SIMDD+n] + fy0[(i+1)*SIMDD+n]; fz1[i*SIMDD+n] = ri[2] * fz0[i*SIMDD+n] + fz0[(i+1)*SIMDD+n]; } } } int GTOprim_exp(double *eprim, double *coord, double *alpha, double *coeff, int l, int nprim, int nctr, size_t ngrids, double fac) { int i, j; double arr, maxc; double logcoeff[nprim]; double rr[ngrids]; double *gridx = coord; double *gridy = coord+BLKSIZE; double *gridz = coord+BLKSIZE*2; int not0 = 0; // the maximum value of the coefficients for each pGTO for (j = 0; j < nprim; j++) { maxc = 0; for (i = 0; i < nctr; i++) { maxc = MAX(maxc, fabs(coeff[i*nprim+j])); } logcoeff[j] = log(maxc); } for (i = 0; i < ngrids; i++) { rr[i] = gridx[i]*gridx[i] + gridy[i]*gridy[i] + gridz[i]*gridz[i]; } for (j = 0; j < nprim; j++) { for (i = 0; i < ngrids; i++) { arr = alpha[j] * rr[i]; if (arr-logcoeff[j] < EXPCUTOFF) { eprim[j*BLKSIZE+i] = exp(-arr) * fac; not0 = 1; } else { eprim[j*BLKSIZE+i] = 0; } } } return not0; } // grid2atm[atm_id,xyz,grid_id] static void _fill_grid2atm(double *grid2atm, double *coord, size_t bgrids, size_t ngrids, int *atm, int natm, int *bas, int nbas, double *env) { int atm_id; size_t ig; double *r_atm; for (atm_id = 0; atm_id < natm; atm_id++) { r_atm = env + atm[PTR_COORD+atm_id*ATM_SLOTS]; for (ig = 0; ig < bgrids; ig++) { grid2atm[0*BLKSIZE+ig] = coord[0*ngrids+ig] - r_atm[0]; } for (ig = 0; ig < bgrids; ig++) { grid2atm[1*BLKSIZE+ig] = coord[1*ngrids+ig] - r_atm[1]; } for (ig = 0; ig < bgrids; ig++) { grid2atm[2*BLKSIZE+ig] = coord[2*ngrids+ig] - r_atm[2]; } grid2atm += 3*BLKSIZE; } } static void _dset0(double *out, size_t odim, size_t bgrids, int counts) { size_t i, j; for (i = 0; i < counts; i++) { for (j = 0; j < bgrids; j++) { out[i*odim+j] = 0; } } } static void _zset0(double complex *out, size_t odim, size_t bgrids, int counts) { size_t i, j; for (i = 0; i < counts; i++) { for (j = 0; j < bgrids; j++) { out[i*odim+j] = 0; } } } 
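/*
 * A minimal sketch of the screening test used in GTOprim_exp above: a
 * primitive contribution c*exp(-alpha*r^2) is negligible once
 * alpha*r^2 - log|c|_max exceeds the cutoff, i.e. once |c|_max*exp(-alpha*r^2)
 * drops below exp(-EXPCUTOFF). Comparing in log space skips the exp() call
 * for grid points whose contribution would vanish anyway.
 */
static int primitive_is_significant(double alpha, double rr,
                                    double log_maxcoeff, double cutoff) {
    /* equivalent to: maxcoeff * exp(-alpha*rr) >= exp(-cutoff) */
    return (alpha * rr - log_maxcoeff) < cutoff;
}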
void GTOeval_sph_iter(FPtr_eval feval, FPtr_exp fexp, double fac, size_t nao, size_t ngrids, size_t bgrids, int param[], int *shls_slice, int *ao_loc, double *buf, double *ao, double *coord, uint8_t *non0table, int *atm, int natm, int *bas, int nbas, double *env) { const int ncomp = param[TENSOR]; const int sh0 = shls_slice[0]; const int sh1 = shls_slice[1]; const int atmstart = bas[sh0*BAS_SLOTS+ATOM_OF]; const int atmend = bas[(sh1-1)*BAS_SLOTS+ATOM_OF]+1; const int atmcount = atmend - atmstart; int i, k, l, np, nc, atm_id, bas_id, deg, dcart, ao_id; size_t di; double fac1; double *p_exp, *pcoeff, *pcoord, *pcart, *ri, *pao; double *grid2atm = ALIGN8_UP(buf); // [atm_id,xyz,grid] double *eprim = grid2atm + atmcount*3*BLKSIZE; double *cart_gto = eprim + NPRIMAX*BLKSIZE*2; _fill_grid2atm(grid2atm, coord, bgrids, ngrids, atm+atmstart*ATM_SLOTS, atmcount, bas, nbas, env); for (bas_id = sh0; bas_id < sh1; bas_id++) { np = bas[bas_id*BAS_SLOTS+NPRIM_OF]; nc = bas[bas_id*BAS_SLOTS+NCTR_OF ]; l = bas[bas_id*BAS_SLOTS+ANG_OF ]; deg = l * 2 + 1; fac1 = fac * CINTcommon_fac_sp(l); p_exp = env + bas[bas_id*BAS_SLOTS+PTR_EXP]; pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF]; atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF]; pcoord = grid2atm + (atm_id - atmstart) * 3*BLKSIZE; ao_id = ao_loc[bas_id] - ao_loc[sh0]; if (non0table[bas_id] && (*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac1)) { dcart = (l+1)*(l+2)/2; di = nc * dcart; ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS]; if (l <= 1) { // s, p functions (*feval)(ao+ao_id*ngrids, ri, eprim, pcoord, p_exp, pcoeff, env, l, np, nc, nao, ngrids, bgrids); } else { (*feval)(cart_gto, ri, eprim, pcoord, p_exp, pcoeff, env, l, np, nc, di, bgrids, bgrids); pcart = cart_gto; for (i = 0; i < ncomp; i++) { pao = ao + (i*nao+ao_id)*ngrids; for (k = 0; k < nc; k++) { CINTc2s_ket_sph1(pao, pcart, ngrids, bgrids, l); pao += deg * ngrids; pcart += dcart * bgrids; } } } } else { for (i = 0; i < ncomp; i++) { _dset0(ao+(i*nao+ao_id)*ngrids, ngrids, bgrids, nc*deg); } } } } void GTOeval_cart_iter(FPtr_eval feval, FPtr_exp fexp, double fac, size_t nao, size_t ngrids, size_t bgrids, int param[], int *shls_slice, int *ao_loc, double *buf, double *ao, double *coord, uint8_t *non0table, int *atm, int natm, int *bas, int nbas, double *env) { const int ncomp = param[TENSOR]; const int sh0 = shls_slice[0]; const int sh1 = shls_slice[1]; const int atmstart = bas[sh0*BAS_SLOTS+ATOM_OF]; const int atmend = bas[(sh1-1)*BAS_SLOTS+ATOM_OF]+1; const int atmcount = atmend - atmstart; int i, l, np, nc, atm_id, bas_id, deg, ao_id; double fac1; double *p_exp, *pcoeff, *pcoord, *ri; double *grid2atm = ALIGN8_UP(buf); // [atm_id,xyz,grid] double *eprim = grid2atm + atmcount*3*BLKSIZE; _fill_grid2atm(grid2atm, coord, bgrids, ngrids, atm+atmstart*ATM_SLOTS, atmcount, bas, nbas, env); for (bas_id = sh0; bas_id < sh1; bas_id++) { np = bas[bas_id*BAS_SLOTS+NPRIM_OF]; nc = bas[bas_id*BAS_SLOTS+NCTR_OF ]; l = bas[bas_id*BAS_SLOTS+ANG_OF ]; deg = (l+1)*(l+2)/2; fac1 = fac * CINTcommon_fac_sp(l); p_exp = env + bas[bas_id*BAS_SLOTS+PTR_EXP]; pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF]; atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF]; pcoord = grid2atm + (atm_id - atmstart) * 3*BLKSIZE; ao_id = ao_loc[bas_id] - ao_loc[sh0]; if (non0table[bas_id] && (*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac1)) { ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS]; (*feval)(ao+ao_id*ngrids, ri, eprim, pcoord, p_exp, pcoeff, env, l, np, nc, nao, ngrids, bgrids); } else { for (i = 0; i < ncomp; i++) { 
_dset0(ao+(i*nao+ao_id)*ngrids, ngrids, bgrids, nc*deg); } } } } void GTOeval_spinor_iter(FPtr_eval feval, FPtr_exp fexp, void (*c2s)(), double fac, size_t nao, size_t ngrids, size_t bgrids, int param[], int *shls_slice, int *ao_loc, double *buf, double complex *ao, double *coord, uint8_t *non0table, int *atm, int natm, int *bas, int nbas, double *env) { const int ncomp_e1 = param[POS_E1]; const int ncomp = param[TENSOR]; const int sh0 = shls_slice[0]; const int sh1 = shls_slice[1]; const int atmstart = bas[sh0*BAS_SLOTS+ATOM_OF]; const int atmend = bas[(sh1-1)*BAS_SLOTS+ATOM_OF]+1; const int atmcount = atmend - atmstart; int i, l, np, nc, atm_id, bas_id, deg, kappa, dcart, ao_id; size_t off, di; double fac1; double *p_exp, *pcoeff, *pcoord, *pcart, *ri; double complex *aoa = ao; double complex *aob = ao + ncomp*nao*ngrids; double *grid2atm = ALIGN8_UP(buf); // [atm_id,xyz,grid] double *eprim = grid2atm + atmcount*3*BLKSIZE; double *cart_gto = eprim + NPRIMAX*BLKSIZE*2; _fill_grid2atm(grid2atm, coord, bgrids, ngrids, atm+atmstart*ATM_SLOTS, atmcount, bas, nbas, env); for (bas_id = sh0; bas_id < sh1; bas_id++) { np = bas[bas_id*BAS_SLOTS+NPRIM_OF]; nc = bas[bas_id*BAS_SLOTS+NCTR_OF ]; l = bas[bas_id*BAS_SLOTS+ANG_OF ]; deg = CINTlen_spinor(bas_id, bas); fac1 = fac * CINTcommon_fac_sp(l); p_exp = env + bas[bas_id*BAS_SLOTS+PTR_EXP]; pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF]; atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF]; pcoord = grid2atm + (atm_id - atmstart) * 3*BLKSIZE; ao_id = ao_loc[bas_id] - ao_loc[sh0]; if (non0table[bas_id] && (*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac1)) { kappa = bas[bas_id*BAS_SLOTS+KAPPA_OF]; dcart = (l+1)*(l+2)/2; di = nc * dcart; ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS]; (*feval)(cart_gto, ri, eprim, pcoord, p_exp, pcoeff, env, l, np, nc, di, bgrids, bgrids); for (i = 0; i < ncomp; i++) { pcart = cart_gto + i * di*bgrids*ncomp_e1; off = (i*nao+ao_id)*ngrids; (*c2s)(aoa+off, aob+off, pcart, ngrids, bgrids, nc, kappa, l); } } else { for (i = 0; i < ncomp; i++) { off = (i*nao+ao_id)*ngrids; _zset0(aoa+off, ngrids, bgrids, nc*deg); _zset0(aob+off, ngrids, bgrids, nc*deg); } } } } int GTOshloc_by_atom(int *shloc, int *shls_slice, int *ao_loc, int *atm, int *bas) { const int sh0 = shls_slice[0]; const int sh1 = shls_slice[1]; int ish, nshblk, lastatm; shloc[0] = sh0; nshblk = 1; lastatm = bas[BAS_SLOTS*sh0+ATOM_OF]; for (ish = sh0; ish < sh1; ish++) { if (lastatm != bas[BAS_SLOTS*ish+ATOM_OF]) { lastatm = bas[BAS_SLOTS*ish+ATOM_OF]; shloc[nshblk] = ish; nshblk++; } } shloc[nshblk] = sh1; return nshblk; } /* * non0table[ngrids/blksize,natm] is the T/F table for ao values to * screen the ao evaluation for each shell */ void GTOeval_loop(void (*fiter)(), FPtr_eval feval, FPtr_exp fexp, double fac, int ngrids, int param[], int *shls_slice, int *ao_loc, double *ao, double *coord, uint8_t *non0table, int *atm, int natm, int *bas, int nbas, double *env) { int shloc[shls_slice[1]-shls_slice[0]+1]; const int nshblk = GTOshloc_by_atom(shloc, shls_slice, ao_loc, atm, bas); const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE; const size_t Ngrids = ngrids; #pragma omp parallel { const int sh0 = shls_slice[0]; const int sh1 = shls_slice[1]; const size_t nao = ao_loc[sh1] - ao_loc[sh0]; int ip, ib, k, iloc, ish; size_t aoff, bgrids; int ncart = NCTR_CART * param[TENSOR] * param[POS_E1]; double *buf = malloc(sizeof(double) * BLKSIZE*(NPRIMAX*2+ncart)); #pragma omp for schedule(dynamic, 4) for (k = 0; k < nblk*nshblk; k++) { iloc = k / nblk; ish = shloc[iloc]; aoff = 
ao_loc[ish] - ao_loc[sh0]; ib = k - iloc * nblk; ip = ib * BLKSIZE; bgrids = MIN(ngrids-ip, BLKSIZE); (*fiter)(feval, fexp, fac, nao, Ngrids, bgrids, param, shloc+iloc, ao_loc, buf, ao+aoff*Ngrids+ip, coord+ip, non0table+ib*nbas, atm, natm, bas, nbas, env); } free(buf); } } void GTOeval_sph_drv(FPtr_eval feval, FPtr_exp fexp, double fac, int ngrids, int param[], int *shls_slice, int *ao_loc, double *ao, double *coord, uint8_t *non0table, int *atm, int natm, int *bas, int nbas, double *env) { GTOeval_loop(GTOeval_sph_iter, feval, fexp, fac, ngrids, param, shls_slice, ao_loc, ao, coord, non0table, atm, natm, bas, nbas, env); } void GTOeval_cart_drv(FPtr_eval feval, FPtr_exp fexp, double fac, int ngrids, int param[], int *shls_slice, int *ao_loc, double *ao, double *coord, uint8_t *non0table, int *atm, int natm, int *bas, int nbas, double *env) { GTOeval_loop(GTOeval_cart_iter, feval, fexp, fac, ngrids, param, shls_slice, ao_loc, ao, coord, non0table, atm, natm, bas, nbas, env); } void GTOeval_spinor_drv(FPtr_eval feval, FPtr_exp fexp, void (*c2s)(), double fac, int ngrids, int param[], int *shls_slice, int *ao_loc, double complex *ao, double *coord, uint8_t *non0table, int *atm, int natm, int *bas, int nbas, double *env) { int shloc[shls_slice[1]-shls_slice[0]+1]; const int nshblk = GTOshloc_by_atom(shloc, shls_slice, ao_loc, atm, bas); const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE; const size_t Ngrids = ngrids; #pragma omp parallel { const int sh0 = shls_slice[0]; const int sh1 = shls_slice[1]; const size_t nao = ao_loc[sh1] - ao_loc[sh0]; int ip, ib, k, iloc, ish; size_t aoff, bgrids; int ncart = NCTR_CART * param[TENSOR] * param[POS_E1]; double *buf = malloc(sizeof(double) * BLKSIZE*(NPRIMAX*2+ncart)); #pragma omp for schedule(dynamic, 4) for (k = 0; k < nblk*nshblk; k++) { iloc = k / nblk; ish = shloc[iloc]; aoff = ao_loc[ish] - ao_loc[sh0]; ib = k - iloc * nblk; ip = ib * BLKSIZE; bgrids = MIN(ngrids-ip, BLKSIZE); GTOeval_spinor_iter(feval, fexp, c2s, fac, nao, Ngrids, bgrids, param, shloc+iloc, ao_loc, buf, ao+aoff*Ngrids+ip, coord+ip, non0table+ib*nbas, atm, natm, bas, nbas, env); } free(buf); } }
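/*
 * A minimal sketch of the work decomposition shared by GTOeval_loop and
 * GTOeval_spinor_drv above: one flattened index k enumerates every
 * (shell-block, grid-block) pair, so a single dynamically scheduled loop
 * balances both dimensions; the last grid block may be shorter than BLKSIZE.
 */
void decode_work_item(int k, int nblk, int blksize, int ngrids,
                      int *iloc, int *ib, int *ip, int *bgrids) {
    *iloc = k / nblk;                /* which shell block (same-atom shells) */
    *ib   = k - (*iloc) * nblk;      /* which block of grid points           */
    *ip   = (*ib) * blksize;         /* first grid point of this block       */
    *bgrids = (ngrids - *ip < blksize) ? ngrids - *ip : blksize;  /* tail */
}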
GB_unop__asin_fc32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__asin_fc32_fc32) // op(A') function: GB (_unop_tran__asin_fc32_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = casinf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = casinf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = casinf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ASIN || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__asin_fc32_fc32) ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = casinf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = casinf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__asin_fc32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
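/*
 * A minimal standalone sketch of the dense apply kernel above, stripped of
 * the code-generation macros: Cx = casinf(Ax) over a float-complex array
 * (GxB_FC32_t is float complex), parallelized the same way as the
 * non-bitmap branch of GB (_unop_apply__asin_fc32_fc32).
 */
#include <complex.h>

void apply_casinf(float complex *Cx, const float complex *Ax,
                  long n, int nthreads) {
    long p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < n; p++) {
        Cx[p] = casinf(Ax[p]);   /* complex arcsine, single precision */
    }
}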
Interp1PrimFifthOrderWENO.c
/*! @file Interp1PrimFifthOrderWENO.c * @brief WENO5 Scheme (Component-wise application to vectors). * @author Debojyoti Ghosh */ #include <stdlib.h> #include <basic.h> #include <arrayfunctions.h> #include <mathfunctions.h> #include <interpolation.h> #include <mpivars.h> #include <hypar.h> #ifdef with_omp #include <omp.h> #endif #undef _MINIMUM_GHOSTS_ /*! \def _MINIMUM_GHOSTS_ * Minimum number of ghost points required for this interpolation * method. */ #define _MINIMUM_GHOSTS_ 3 /*! @brief 5th order WENO reconstruction (component-wise) on a uniform grid Computes the interpolated values of the first primitive of a function \f${\bf f}\left({\bf u}\right)\f$ at the interfaces from the cell-centered values of the function using the fifth order WENO scheme on a uniform grid. The first primitive is defined as a function \f${\bf h}\left({\bf u}\right)\f$ that satisfies: \f{equation}{ {\bf f}\left({\bf u}\left(x\right)\right) = \frac{1}{\Delta x} \int_{x-\Delta x/2}^{x+\Delta x/2} {\bf h}\left({\bf u}\left(\zeta\right)\right)d\zeta, \f} where \f$x\f$ is the spatial coordinate along the dimension of the interpolation. This function computes the 5th order WENO numerical approximation \f$\hat{\bf f}_{j+1/2} \approx {\bf h}_{j+1/2}\f$ as the convex combination of three 3rd order methods: \f{align}{ &\ \omega_1\ \times\ \left[ \hat{\bf f}_{j+1/2}^1 = \frac{1}{3} {\bf f}_{j-2} - \frac{7}{6} {\bf f}_{j-1} + \frac{11}{6} {\bf f}_j \right]\\ + &\ \omega_2\ \times\ \left[ \hat{\bf f}_{j+1/2}^2 = -\frac{1}{6} {\bf f}_{j-1} + \frac{5}{6} {\bf f}_j + \frac{1}{3} {\bf f}_{j+1} \right]\\ + &\ \omega_3\ \times\ \left[ \hat{\bf f}_{j+1/2}^3 = \frac{1}{3} {\bf f}_j + \frac{5}{6} {\bf f}_{j+1} - \frac{1}{6} {\bf f}_{j+2} \right]\\ \Rightarrow &\ \hat{\bf f}_{j+1/2} = \frac{\omega_1}{3} {\bf f}_{j-2} - \frac{1}{6}(7\omega_1+\omega_2){\bf f}_{j-1} + \frac{1}{6}(11\omega_1+5\omega_2+2\omega_3){\bf f}_j + \frac{1}{6}(2\omega_2+5\omega_3){\bf f}_{j+1} - \frac{\omega_3}{6}{\bf f}_{j+2}, \f} where \f$\omega_k; k=1,2,3\f$ are the nonlinear WENO weights computed in WENOFifthOrderCalculateWeights() (note that the \f$\omega\f$ are different for each component of the vector \f$\hat{\bf f}\f$). \b Implementation \b Notes: + This method assumes a uniform grid in the spatial dimension corresponding to the interpolation. + The method described above corresponds to a left-biased interpolation. The corresponding right-biased interpolation can be obtained by reflecting the equations about interface j+1/2. + The scalar interpolation method is applied to the vector function in a component-wise manner. + The function computes the interpolant for the entire grid in one call. It loops over all the grid lines along the interpolation direction and carries out the 1D interpolation along these grid lines. + Location of cell-centers and cell interfaces along the spatial dimension of the interpolation is shown in the following figure: @image html chap1_1Ddomain.png @image latex chap1_1Ddomain.eps width=0.9\textwidth \b Function \b arguments: Argument | Type | Explanation --------- | --------- | --------------------------------------------- fI | double* | Array to hold the computed interpolant at the grid interfaces. This array must have the same layout as the solution, but with \b no \b ghost \b points. Its size should be the same as u in all dimensions, except dir (the dimension along which to interpolate) along which it should be larger by 1 (number of interfaces is 1 more than the number of interior cell centers). 
fC | double* | Array with the cell-centered values of the flux function \f${\bf f}\left({\bf u}\right)\f$. This array must have the same layout and size as the solution, \b with \b ghost \b points. u | double* | The solution array \f${\bf u}\f$ (with ghost points). If the interpolation is characteristic based, this is needed to compute the eigendecomposition. For a multidimensional problem, the layout is as follows: u is a contiguous 1D array of size (nvars*dim[0]*dim[1]*...*dim[D-1]) corresponding to the multi-dimensional solution, with the following ordering - nvars, dim[0], dim[1], ..., dim[D-1], where nvars is the number of solution components (#HyPar::nvars), dim is the local size (#HyPar::dim_local), D is the number of spatial dimensions. x | double* | The grid array (with ghost points). This is used only by non-uniform-grid interpolation methods. For multidimensional problems, the layout is as follows: x is a contiguous 1D array of size (dim[0]+dim[1]+...+dim[D-1]), with the spatial coordinates along dim[0] stored from 0,...,dim[0]-1, the spatial coordinates along dim[1] stored along dim[0],...,dim[0]+dim[1]-1, and so forth. upw | int | Upwinding direction: if positive, a left-biased interpolant will be computed; if negative, a right-biased interpolant will be computed. If the interpolation method is central, then this has no effect. dir | int | Spatial dimension along which to interpolate (e.g., 0 for 1D; 0 or 1 for 2D; 0,1 or 2 for 3D) s | void* | Solver object of type #HyPar: the following variables are needed - #HyPar::ghosts, #HyPar::ndims, #HyPar::nvars, #HyPar::dim_local. m | void* | MPI object of type #MPIVariables: this is needed only by compact interpolation methods that need to solve a global implicit system across MPI ranks. uflag | int | A flag indicating if the function being interpolated \f${\bf f}\f$ is the solution itself \f${\bf u}\f$ (if 1, \f${\bf f}\left({\bf u}\right) \equiv {\bf u}\f$). \b Reference: + Jiang, G.-S., Shu, C.-W., Efficient Implementation of Weighted ENO Schemes, J. Comput. Phys., 126 (1), 1996, pp. 202-228, http://dx.doi.org/10.1006/jcph.1996.0130 */ int Interp1PrimFifthOrderWENO( double *fI, /*!< Array of interpolated function values at the interfaces */ double *fC, /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */ double *u, /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */ double *x, /*!< Grid coordinates */ int upw, /*!< Upwind direction (left or right biased) */ int dir, /*!< Spatial dimension along which to interpolate */ void *s, /*!< Object of type #HyPar containing solver-related variables */ void *m, /*!< Object of type #MPIVariables containing MPI-related variables */ int uflag /*!< Flag to indicate if \f$f(u) \equiv u\f$, i.e., if the solution is being reconstructed */ ) { HyPar *solver = (HyPar*) s; WENOParameters *weno = (WENOParameters*) solver->interp; int ghosts = solver->ghosts; int ndims = solver->ndims; int nvars = solver->nvars; int *dim = solver->dim_local; int *stride= solver->stride_with_ghosts; /* define some constants */ static const double one_sixth = 1.0/6.0; double *ww1, *ww2, *ww3; ww1 = weno->w1 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir]; ww2 = weno->w2 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir]; ww3 = weno->w3 + (upw < 0 ? 2*weno->size : 0) + (uflag ?
weno->size : 0) + weno->offset[dir]; /* create index and bounds for the outer loop, i.e., to loop over all 1D lines along dimension "dir" */ int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims]; _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1; _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1; int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer); int i; #pragma omp parallel for schedule(auto) default(shared) private(i,index_outer,indexC,indexI) for (i=0; i<N_outer; i++) { _ArrayIndexnD_(ndims,i,bounds_outer,index_outer,0); _ArrayCopy1D_(index_outer,indexC,ndims); _ArrayCopy1D_(index_outer,indexI,ndims); for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) { int qm1,qm2,qm3,qp1,qp2,p; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p); if (upw > 0) { indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1); qm3 = qm1 - 2*stride[dir]; qm2 = qm1 - stride[dir]; qp1 = qm1 + stride[dir]; qp2 = qm1 + 2*stride[dir]; } else { indexC[dir] = indexI[dir] ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1); qm3 = qm1 + 2*stride[dir]; qm2 = qm1 + stride[dir]; qp1 = qm1 - stride[dir]; qp2 = qm1 - 2*stride[dir]; } /* Defining stencil points */ double *fm3, *fm2, *fm1, *fp1, *fp2; fm3 = (fC+qm3*nvars); fm2 = (fC+qm2*nvars); fm1 = (fC+qm1*nvars); fp1 = (fC+qp1*nvars); fp2 = (fC+qp2*nvars); /* Candidate stencils and their optimal weights*/ double f1[nvars], f2[nvars], f3[nvars]; _ArrayAXBYCZ_(f1,(2*one_sixth),fm3,(-7*one_sixth) ,fm2,(11*one_sixth) ,fm1,nvars); _ArrayAXBYCZ_(f2,(-one_sixth) ,fm2,(5*one_sixth) ,fm1,(2*one_sixth) ,fp1,nvars); _ArrayAXBYCZ_(f3,(2*one_sixth),fm1,(5*one_sixth) ,fp1,(-one_sixth) ,fp2,nvars); /* calculate WENO weights */ double *w1,*w2,*w3; w1 = (ww1+p*nvars); w2 = (ww2+p*nvars); w3 = (ww3+p*nvars); _ArrayMultiply3Add1D_((fI+p*nvars),w1,f1,w2,f2,w3,f3,nvars); } } return(0); }
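/*
 * A minimal scalar sketch of the nonlinear weights used above (HyPar computes
 * them separately in WENOFifthOrderCalculateWeights(); this standalone version
 * follows Jiang & Shu, JCP 126 (1996), with the same stencil naming as the
 * code: fm1 = f_j, fm2 = f_{j-1}, fm3 = f_{j-2}, fp1 = f_{j+1}, fp2 = f_{j+2}).
 * eps guards the division in smooth regions; HyPar reads it from the input
 * parameters, the 1e-6 here is illustrative.
 */
static void weno5_weights(double fm3, double fm2, double fm1,
                          double fp1, double fp2,
                          double *w1, double *w2, double *w3) {
    const double eps = 1.0e-6;
    /* Jiang-Shu smoothness indicators of the three candidate stencils */
    double b1 = (13.0/12.0)*(fm3-2*fm2+fm1)*(fm3-2*fm2+fm1)
              + (1.0/4.0)*(fm3-4*fm2+3*fm1)*(fm3-4*fm2+3*fm1);
    double b2 = (13.0/12.0)*(fm2-2*fm1+fp1)*(fm2-2*fm1+fp1)
              + (1.0/4.0)*(fm2-fp1)*(fm2-fp1);
    double b3 = (13.0/12.0)*(fm1-2*fp1+fp2)*(fm1-2*fp1+fp2)
              + (1.0/4.0)*(3*fm1-4*fp1+fp2)*(3*fm1-4*fp1+fp2);
    /* optimal linear weights 1/10, 6/10, 3/10, sharpened by smoothness */
    double a1 = 0.1/((eps+b1)*(eps+b1));
    double a2 = 0.6/((eps+b2)*(eps+b2));
    double a3 = 0.3/((eps+b3)*(eps+b3));
    double s  = a1 + a2 + a3;
    *w1 = a1/s;  *w2 = a2/s;  *w3 = a3/s;
}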
structDef-postpass.c
typedef unsigned int size_t; typedef unsigned char __u_char; typedef unsigned short int __u_short; typedef unsigned int __u_int; typedef unsigned long int __u_long; typedef signed char __int8_t; typedef unsigned char __uint8_t; typedef signed short int __int16_t; typedef unsigned short int __uint16_t; typedef signed int __int32_t; typedef unsigned int __uint32_t; __extension__ typedef signed long long int __int64_t; __extension__ typedef unsigned long long int __uint64_t; __extension__ typedef long long int __quad_t; __extension__ typedef unsigned long long int __u_quad_t; __extension__ typedef __u_quad_t __dev_t; __extension__ typedef unsigned int __uid_t; __extension__ typedef unsigned int __gid_t; __extension__ typedef unsigned long int __ino_t; __extension__ typedef __u_quad_t __ino64_t; __extension__ typedef unsigned int __mode_t; __extension__ typedef unsigned int __nlink_t; __extension__ typedef long int __off_t; __extension__ typedef __quad_t __off64_t; __extension__ typedef int __pid_t; __extension__ struct stUn_imopVarPre0 { int __val[2]; } ; __extension__ typedef struct stUn_imopVarPre0 __fsid_t; __extension__ typedef long int __clock_t; __extension__ typedef unsigned long int __rlim_t; __extension__ typedef __u_quad_t __rlim64_t; __extension__ typedef unsigned int __id_t; __extension__ typedef long int __time_t; __extension__ typedef unsigned int __useconds_t; __extension__ typedef long int __suseconds_t; __extension__ typedef int __daddr_t; __extension__ typedef int __key_t; __extension__ typedef int __clockid_t; __extension__ typedef void *__timer_t; __extension__ typedef long int __blksize_t; __extension__ typedef long int __blkcnt_t; __extension__ typedef __quad_t __blkcnt64_t; __extension__ typedef unsigned long int __fsblkcnt_t; __extension__ typedef __u_quad_t __fsblkcnt64_t; __extension__ typedef unsigned long int __fsfilcnt_t; __extension__ typedef __u_quad_t __fsfilcnt64_t; __extension__ typedef int __fsword_t; __extension__ typedef int __ssize_t; __extension__ typedef long int __syscall_slong_t; __extension__ typedef unsigned long int __syscall_ulong_t; typedef __off64_t __loff_t; typedef __quad_t *__qaddr_t; typedef char *__caddr_t; __extension__ typedef int __intptr_t; __extension__ typedef unsigned int __socklen_t; struct _IO_FILE ; typedef struct _IO_FILE FILE; typedef struct _IO_FILE __FILE; struct stUn_imopVarPre2 { int __count; union stUn_imopVarPre1 { unsigned int __wch; char __wchb[4]; } __value; } ; typedef struct stUn_imopVarPre2 __mbstate_t; struct stUn_imopVarPre3 { __off_t __pos; __mbstate_t __state; } ; typedef struct stUn_imopVarPre3 _G_fpos_t; struct stUn_imopVarPre4 { __off64_t __pos; __mbstate_t __state; } ; typedef struct stUn_imopVarPre4 _G_fpos64_t; typedef __builtin_va_list __gnuc_va_list; struct _IO_jump_t ; struct _IO_FILE ; typedef void _IO_lock_t; struct _IO_marker { struct _IO_marker *_next; struct _IO_FILE *_sbuf; int _pos; } ; enum __codecvt_result { __codecvt_ok, __codecvt_partial , __codecvt_error , __codecvt_noconv } ; struct _IO_FILE { int _flags; char *_IO_read_ptr; char *_IO_read_end; char *_IO_read_base; char *_IO_write_base; char *_IO_write_ptr; char *_IO_write_end; char *_IO_buf_base; char *_IO_buf_end; char *_IO_save_base; char *_IO_backup_base; char *_IO_save_end; struct _IO_marker *_markers; struct _IO_FILE *_chain; int _fileno; int _flags2; __off_t _old_offset; unsigned short _cur_column; signed char _vtable_offset; char _shortbuf[1]; _IO_lock_t *_lock; __off64_t _offset; void *__pad1; void *__pad2; void *__pad3; void 
*__pad4; size_t __pad5; int _mode; char _unused2[15 * sizeof(int) - 4 * sizeof(void *) - sizeof(size_t)]; } ; typedef struct _IO_FILE _IO_FILE; struct _IO_FILE_plus ; typedef __gnuc_va_list va_list; typedef __off_t off_t; typedef __ssize_t ssize_t; typedef _G_fpos_t fpos_t; extern struct _IO_FILE *stdout; extern struct _IO_FILE *stderr; extern int fclose(FILE *__stream); extern int fflush(FILE *__stream); extern FILE *fopen(const char *__restrict __filename, const char *__restrict __modes); extern int fprintf(FILE *__restrict __stream, const char *__restrict __format, ...); extern int sprintf(char *__restrict __s, const char *__restrict __format, ...); extern int snprintf(char *__restrict __s, size_t __maxlen , const char *__restrict __format, ...); extern int fscanf(FILE *__restrict __stream, const char *__restrict __format, ...); extern int fscanf(FILE *__restrict __stream, const char *__restrict __format, ...); typedef unsigned int wchar_t; enum enum_imopVarPre5 { P_ALL, P_PID , P_PGID } ; typedef enum enum_imopVarPre5 idtype_t; union wait { int w_status; struct stUn_imopVarPre6 { unsigned int __w_termsig: 7; unsigned int __w_coredump: 1; unsigned int __w_retcode: 8; unsigned int :16; } __wait_terminated; struct stUn_imopVarPre7 { unsigned int __w_stopval: 8; unsigned int __w_stopsig: 8; unsigned int :16; } __wait_stopped; } ; union stUn_imopVarPre8 { union wait *__uptr; int *__iptr; } ; typedef union stUn_imopVarPre8 __WAIT_STATUS; struct stUn_imopVarPre9 { int quot; int rem; } ; typedef struct stUn_imopVarPre9 div_t; struct stUn_imopVarPre10 { long int quot; long int rem; } ; typedef struct stUn_imopVarPre10 ldiv_t; __extension__ struct stUn_imopVarPre11 { long long int quot; long long int rem; } ; __extension__ typedef struct stUn_imopVarPre11 lldiv_t; extern int atoi(const char *__nptr); typedef __u_char u_char; typedef __u_short u_short; typedef __u_int u_int; typedef __u_long u_long; typedef __quad_t quad_t; typedef __u_quad_t u_quad_t; typedef __fsid_t fsid_t; typedef __loff_t loff_t; typedef __ino_t ino_t; typedef __dev_t dev_t; typedef __gid_t gid_t; typedef __mode_t mode_t; typedef __nlink_t nlink_t; typedef __uid_t uid_t; typedef __pid_t pid_t; typedef __id_t id_t; typedef __daddr_t daddr_t; typedef __caddr_t caddr_t; typedef __key_t key_t; typedef __clock_t clock_t; typedef __time_t time_t; typedef __clockid_t clockid_t; typedef __timer_t timer_t; typedef unsigned long int ulong; typedef unsigned short int ushort; typedef unsigned int uint; typedef int int8_t; typedef int int16_t; typedef int int32_t; typedef int int64_t; typedef unsigned int u_int8_t; typedef unsigned int u_int16_t; typedef unsigned int u_int32_t; typedef unsigned int u_int64_t; typedef int register_t; typedef int __sig_atomic_t; struct stUn_imopVarPre12 { unsigned long int __val[(1024 / (8 * sizeof(unsigned long int)))]; } ; typedef struct stUn_imopVarPre12 __sigset_t; typedef __sigset_t sigset_t; struct timespec { __time_t tv_sec; __syscall_slong_t tv_nsec; } ; struct timeval { __time_t tv_sec; __suseconds_t tv_usec; } ; typedef __suseconds_t suseconds_t; typedef long int __fd_mask; struct stUn_imopVarPre13 { __fd_mask __fds_bits[1024 / (8 * (int) sizeof(__fd_mask))]; } ; typedef struct stUn_imopVarPre13 fd_set; typedef __fd_mask fd_mask; typedef __blksize_t blksize_t; typedef __blkcnt_t blkcnt_t; typedef __fsblkcnt_t fsblkcnt_t; typedef __fsfilcnt_t fsfilcnt_t; typedef unsigned long int pthread_t; union pthread_attr_t { char __size[36]; long int __align; } ; typedef union pthread_attr_t pthread_attr_t; 
struct __pthread_internal_slist { struct __pthread_internal_slist *__next; } ; typedef struct __pthread_internal_slist __pthread_slist_t; union stUn_imopVarPre14 { struct __pthread_mutex_s { int __lock; unsigned int __count; int __owner; int __kind; unsigned int __nusers; } __data; char __size[24]; long int __align; } ; typedef union stUn_imopVarPre14 pthread_mutex_t; union stUn_imopVarPre15 { char __size[4]; long int __align; } ; typedef union stUn_imopVarPre15 pthread_mutexattr_t; union stUn_imopVarPre17 { struct stUn_imopVarPre16 { int __lock; unsigned int __futex; __extension__ unsigned long long int __total_seq; __extension__ unsigned long long int __wakeup_seq; __extension__ unsigned long long int __woken_seq; void *__mutex; unsigned int __nwaiters; unsigned int __broadcast_seq; } __data; char __size[48]; __extension__ long long int __align; } ; typedef union stUn_imopVarPre17 pthread_cond_t; union stUn_imopVarPre18 { char __size[4]; long int __align; } ; typedef union stUn_imopVarPre18 pthread_condattr_t; typedef unsigned int pthread_key_t; typedef int pthread_once_t; union stUn_imopVarPre20 { struct stUn_imopVarPre19 { int __lock; unsigned int __nr_readers; unsigned int __readers_wakeup; unsigned int __writer_wakeup; unsigned int __nr_readers_queued; unsigned int __nr_writers_queued; unsigned char __flags; unsigned char __shared; unsigned char __pad1; unsigned char __pad2; int __writer; } __data; char __size[32]; long int __align; } ; typedef union stUn_imopVarPre20 pthread_rwlock_t; union stUn_imopVarPre21 { char __size[8]; long int __align; } ; typedef union stUn_imopVarPre21 pthread_rwlockattr_t; typedef volatile int pthread_spinlock_t; union stUn_imopVarPre22 { char __size[20]; long int __align; } ; typedef union stUn_imopVarPre22 pthread_barrier_t; union stUn_imopVarPre23 { char __size[4]; int __align; } ; typedef union stUn_imopVarPre23 pthread_barrierattr_t; struct random_data { int32_t *fptr; int32_t *rptr; int32_t *state; int rand_type; int rand_deg; int rand_sep; int32_t *end_ptr; } ; struct drand48_data { unsigned short int __x[3]; unsigned short int __old_x[3]; unsigned short int __c; unsigned short int __init; __extension__ unsigned long long int __a; } ; extern void *malloc(size_t __size); extern void exit(int __status); typedef int ( *__compar_fn_t )(const void *, const void *); extern int getloadavg(double __loadavg[], int __nelem); typedef float float_t; typedef double double_t; extern double pow(double __x, double __y); enum enum_imopVarPre24 { FP_NAN = 0, FP_INFINITE = 1 , FP_ZERO = 2 , FP_SUBNORMAL = 3 , FP_NORMAL = 4 } ; enum enum_imopVarPre25 { _IEEE_ = -1, _SVID_ , _XOPEN_ , _POSIX_ , _ISOC_ } ; typedef enum enum_imopVarPre25 _LIB_VERSION_TYPE; struct exception { int type; char *name; double arg1; double arg2; double retval; } ; typedef int ptrdiff_t; struct stUn_imopVarPre26 { long long __max_align_ll; long double __max_align_ld; } ; typedef struct stUn_imopVarPre26 max_align_t; extern char *strcpy(char *__restrict __dest, const char *__restrict __src); struct __locale_struct { struct __locale_data *__locales[13]; const unsigned short int *__ctype_b; const int *__ctype_tolower; const int *__ctype_toupper; const char *__names[13]; } ; typedef struct __locale_struct *__locale_t; typedef __locale_t locale_t; struct timezone { int tz_minuteswest; int tz_dsttime; } ; typedef struct timezone *__restrict __timezone_ptr_t; extern int gettimeofday(struct timeval *__restrict __tv, __timezone_ptr_t __tz); enum __itimer_which { ITIMER_REAL = 0, ITIMER_VIRTUAL = 1 , 
ITIMER_PROF = 2 } ; struct itimerval { struct timeval it_interval; struct timeval it_value; } ; typedef int __itimer_which_t; struct tm { int tm_sec; int tm_min; int tm_hour; int tm_mday; int tm_mon; int tm_year; int tm_wday; int tm_yday; int tm_isdst; long int tm_gmtoff; const char *tm_zone; } ; struct itimerspec { struct timespec it_interval; struct timespec it_value; } ; struct sigevent ; extern time_t time(time_t *__timer); extern size_t strftime(char *__restrict __s, size_t __maxsize , const char *__restrict __format , const struct tm *__restrict __tp); extern struct tm *gmtime(const time_t *__timer); struct utsname { char sysname[65]; char nodename[65]; char release[65]; char version[65]; char machine[65]; char __domainname[65]; } ; extern int uname(struct utsname *__name); enum __rlimit_resource { RLIMIT_CPU = 0, RLIMIT_FSIZE = 1 , RLIMIT_DATA = 2 , RLIMIT_STACK = 3 , RLIMIT_CORE = 4 , __RLIMIT_RSS = 5 , RLIMIT_NOFILE = 7 , __RLIMIT_OFILE = RLIMIT_NOFILE , RLIMIT_AS = 9 , __RLIMIT_NPROC = 6 , __RLIMIT_MEMLOCK = 8 , __RLIMIT_LOCKS = 10 , __RLIMIT_SIGPENDING = 11 , __RLIMIT_MSGQUEUE = 12 , __RLIMIT_NICE = 13 , __RLIMIT_RTPRIO = 14 , __RLIMIT_RTTIME = 15 , __RLIMIT_NLIMITS = 16 , __RLIM_NLIMITS = __RLIMIT_NLIMITS } ; typedef __rlim_t rlim_t; struct rlimit { rlim_t rlim_cur; rlim_t rlim_max; } ; enum __rusage_who { RUSAGE_SELF = 0, RUSAGE_CHILDREN = -1 } ; struct rusage { struct timeval ru_utime; struct timeval ru_stime; } ; enum __priority_which { PRIO_PROCESS = 0, PRIO_PGRP = 1 , PRIO_USER = 2 } ; typedef int __rlimit_resource_t; typedef int __rusage_who_t; typedef int __priority_which_t; extern char *__xpg_basename(char *__path); typedef __useconds_t useconds_t; typedef __intptr_t intptr_t; typedef __socklen_t socklen_t; enum enum_imopVarPre27 { _PC_LINK_MAX, _PC_MAX_CANON , _PC_MAX_INPUT , _PC_NAME_MAX , _PC_PATH_MAX , _PC_PIPE_BUF , _PC_CHOWN_RESTRICTED , _PC_NO_TRUNC , _PC_VDISABLE , _PC_SYNC_IO , _PC_ASYNC_IO , _PC_PRIO_IO , _PC_SOCK_MAXBUF , _PC_FILESIZEBITS , _PC_REC_INCR_XFER_SIZE , _PC_REC_MAX_XFER_SIZE , _PC_REC_MIN_XFER_SIZE , _PC_REC_XFER_ALIGN , _PC_ALLOC_SIZE_MIN , _PC_SYMLINK_MAX , _PC_2_SYMLINKS } ; enum enum_imopVarPre28 { _SC_ARG_MAX, _SC_CHILD_MAX , _SC_CLK_TCK , _SC_NGROUPS_MAX , _SC_OPEN_MAX , _SC_STREAM_MAX , _SC_TZNAME_MAX , _SC_JOB_CONTROL , _SC_SAVED_IDS , _SC_REALTIME_SIGNALS , _SC_PRIORITY_SCHEDULING , _SC_TIMERS , _SC_ASYNCHRONOUS_IO , _SC_PRIORITIZED_IO , _SC_SYNCHRONIZED_IO , _SC_FSYNC , _SC_MAPPED_FILES , _SC_MEMLOCK , _SC_MEMLOCK_RANGE , _SC_MEMORY_PROTECTION , _SC_MESSAGE_PASSING , _SC_SEMAPHORES , _SC_SHARED_MEMORY_OBJECTS , _SC_AIO_LISTIO_MAX , _SC_AIO_MAX , _SC_AIO_PRIO_DELTA_MAX , _SC_DELAYTIMER_MAX , _SC_MQ_OPEN_MAX , _SC_MQ_PRIO_MAX , _SC_VERSION , _SC_PAGESIZE , _SC_RTSIG_MAX , _SC_SEM_NSEMS_MAX , _SC_SEM_VALUE_MAX , _SC_SIGQUEUE_MAX , _SC_TIMER_MAX , _SC_BC_BASE_MAX , _SC_BC_DIM_MAX , _SC_BC_SCALE_MAX , _SC_BC_STRING_MAX , _SC_COLL_WEIGHTS_MAX , _SC_EQUIV_CLASS_MAX , _SC_EXPR_NEST_MAX , _SC_LINE_MAX , _SC_RE_DUP_MAX , _SC_CHARCLASS_NAME_MAX , _SC_2_VERSION , _SC_2_C_BIND , _SC_2_C_DEV , _SC_2_FORT_DEV , _SC_2_FORT_RUN , _SC_2_SW_DEV , _SC_2_LOCALEDEF , _SC_PII , _SC_PII_XTI , _SC_PII_SOCKET , _SC_PII_INTERNET , _SC_PII_OSI , _SC_POLL , _SC_SELECT , _SC_UIO_MAXIOV , _SC_IOV_MAX = _SC_UIO_MAXIOV , _SC_PII_INTERNET_STREAM , _SC_PII_INTERNET_DGRAM , _SC_PII_OSI_COTS , _SC_PII_OSI_CLTS , _SC_PII_OSI_M , _SC_T_IOV_MAX , _SC_THREADS , _SC_THREAD_SAFE_FUNCTIONS , _SC_GETGR_R_SIZE_MAX , _SC_GETPW_R_SIZE_MAX , _SC_LOGIN_NAME_MAX , _SC_TTY_NAME_MAX , 
_SC_THREAD_DESTRUCTOR_ITERATIONS , _SC_THREAD_KEYS_MAX , _SC_THREAD_STACK_MIN , _SC_THREAD_THREADS_MAX , _SC_THREAD_ATTR_STACKADDR , _SC_THREAD_ATTR_STACKSIZE , _SC_THREAD_PRIORITY_SCHEDULING , _SC_THREAD_PRIO_INHERIT , _SC_THREAD_PRIO_PROTECT , _SC_THREAD_PROCESS_SHARED , _SC_NPROCESSORS_CONF , _SC_NPROCESSORS_ONLN , _SC_PHYS_PAGES , _SC_AVPHYS_PAGES , _SC_ATEXIT_MAX , _SC_PASS_MAX , _SC_XOPEN_VERSION , _SC_XOPEN_XCU_VERSION , _SC_XOPEN_UNIX , _SC_XOPEN_CRYPT , _SC_XOPEN_ENH_I18N , _SC_XOPEN_SHM , _SC_2_CHAR_TERM , _SC_2_C_VERSION , _SC_2_UPE , _SC_XOPEN_XPG2 , _SC_XOPEN_XPG3 , _SC_XOPEN_XPG4 , _SC_CHAR_BIT , _SC_CHAR_MAX , _SC_CHAR_MIN , _SC_INT_MAX , _SC_INT_MIN , _SC_LONG_BIT , _SC_WORD_BIT , _SC_MB_LEN_MAX , _SC_NZERO , _SC_SSIZE_MAX , _SC_SCHAR_MAX , _SC_SCHAR_MIN , _SC_SHRT_MAX , _SC_SHRT_MIN , _SC_UCHAR_MAX , _SC_UINT_MAX , _SC_ULONG_MAX , _SC_USHRT_MAX , _SC_NL_ARGMAX , _SC_NL_LANGMAX , _SC_NL_MSGMAX , _SC_NL_NMAX , _SC_NL_SETMAX , _SC_NL_TEXTMAX , _SC_XBS5_ILP32_OFF32 , _SC_XBS5_ILP32_OFFBIG , _SC_XBS5_LP64_OFF64 , _SC_XBS5_LPBIG_OFFBIG , _SC_XOPEN_LEGACY , _SC_XOPEN_REALTIME , _SC_XOPEN_REALTIME_THREADS , _SC_ADVISORY_INFO , _SC_BARRIERS , _SC_BASE , _SC_C_LANG_SUPPORT , _SC_C_LANG_SUPPORT_R , _SC_CLOCK_SELECTION , _SC_CPUTIME , _SC_THREAD_CPUTIME , _SC_DEVICE_IO , _SC_DEVICE_SPECIFIC , _SC_DEVICE_SPECIFIC_R , _SC_FD_MGMT , _SC_FIFO , _SC_PIPE , _SC_FILE_ATTRIBUTES , _SC_FILE_LOCKING , _SC_FILE_SYSTEM , _SC_MONOTONIC_CLOCK , _SC_MULTI_PROCESS , _SC_SINGLE_PROCESS , _SC_NETWORKING , _SC_READER_WRITER_LOCKS , _SC_SPIN_LOCKS , _SC_REGEXP , _SC_REGEX_VERSION , _SC_SHELL , _SC_SIGNALS , _SC_SPAWN , _SC_SPORADIC_SERVER , _SC_THREAD_SPORADIC_SERVER , _SC_SYSTEM_DATABASE , _SC_SYSTEM_DATABASE_R , _SC_TIMEOUTS , _SC_TYPED_MEMORY_OBJECTS , _SC_USER_GROUPS , _SC_USER_GROUPS_R , _SC_2_PBS , _SC_2_PBS_ACCOUNTING , _SC_2_PBS_LOCATE , _SC_2_PBS_MESSAGE , _SC_2_PBS_TRACK , _SC_SYMLOOP_MAX , _SC_STREAMS , _SC_2_PBS_CHECKPOINT , _SC_V6_ILP32_OFF32 , _SC_V6_ILP32_OFFBIG , _SC_V6_LP64_OFF64 , _SC_V6_LPBIG_OFFBIG , _SC_HOST_NAME_MAX , _SC_TRACE , _SC_TRACE_EVENT_FILTER , _SC_TRACE_INHERIT , _SC_TRACE_LOG , _SC_LEVEL1_ICACHE_SIZE , _SC_LEVEL1_ICACHE_ASSOC , _SC_LEVEL1_ICACHE_LINESIZE , _SC_LEVEL1_DCACHE_SIZE , _SC_LEVEL1_DCACHE_ASSOC , _SC_LEVEL1_DCACHE_LINESIZE , _SC_LEVEL2_CACHE_SIZE , _SC_LEVEL2_CACHE_ASSOC , _SC_LEVEL2_CACHE_LINESIZE , _SC_LEVEL3_CACHE_SIZE , _SC_LEVEL3_CACHE_ASSOC , _SC_LEVEL3_CACHE_LINESIZE , _SC_LEVEL4_CACHE_SIZE , _SC_LEVEL4_CACHE_ASSOC , _SC_LEVEL4_CACHE_LINESIZE , _SC_IPV6 = _SC_LEVEL1_ICACHE_SIZE + 50 , _SC_RAW_SOCKETS , _SC_V7_ILP32_OFF32 , _SC_V7_ILP32_OFFBIG , _SC_V7_LP64_OFF64 , _SC_V7_LPBIG_OFFBIG , _SC_SS_REPL_MAX , _SC_TRACE_EVENT_NAME_MAX , _SC_TRACE_NAME_MAX , _SC_TRACE_SYS_MAX , _SC_TRACE_USER_EVENT_MAX , _SC_XOPEN_STREAMS , _SC_THREAD_ROBUST_PRIO_INHERIT , _SC_THREAD_ROBUST_PRIO_PROTECT } ; enum enum_imopVarPre29 { _CS_PATH, _CS_V6_WIDTH_RESTRICTED_ENVS , _CS_GNU_LIBC_VERSION , _CS_GNU_LIBPTHREAD_VERSION , _CS_V5_WIDTH_RESTRICTED_ENVS , _CS_V7_WIDTH_RESTRICTED_ENVS , _CS_LFS_CFLAGS = 1000 , _CS_LFS_LDFLAGS , _CS_LFS_LIBS , _CS_LFS_LINTFLAGS , _CS_LFS64_CFLAGS , _CS_LFS64_LDFLAGS , _CS_LFS64_LIBS , _CS_LFS64_LINTFLAGS , _CS_XBS5_ILP32_OFF32_CFLAGS = 1100 , _CS_XBS5_ILP32_OFF32_LDFLAGS , _CS_XBS5_ILP32_OFF32_LIBS , _CS_XBS5_ILP32_OFF32_LINTFLAGS , _CS_XBS5_ILP32_OFFBIG_CFLAGS , _CS_XBS5_ILP32_OFFBIG_LDFLAGS , _CS_XBS5_ILP32_OFFBIG_LIBS , _CS_XBS5_ILP32_OFFBIG_LINTFLAGS , _CS_XBS5_LP64_OFF64_CFLAGS , _CS_XBS5_LP64_OFF64_LDFLAGS , _CS_XBS5_LP64_OFF64_LIBS , 
_CS_XBS5_LP64_OFF64_LINTFLAGS , _CS_XBS5_LPBIG_OFFBIG_CFLAGS , _CS_XBS5_LPBIG_OFFBIG_LDFLAGS , _CS_XBS5_LPBIG_OFFBIG_LIBS , _CS_XBS5_LPBIG_OFFBIG_LINTFLAGS , _CS_POSIX_V6_ILP32_OFF32_CFLAGS , _CS_POSIX_V6_ILP32_OFF32_LDFLAGS , _CS_POSIX_V6_ILP32_OFF32_LIBS , _CS_POSIX_V6_ILP32_OFF32_LINTFLAGS , _CS_POSIX_V6_ILP32_OFFBIG_CFLAGS , _CS_POSIX_V6_ILP32_OFFBIG_LDFLAGS , _CS_POSIX_V6_ILP32_OFFBIG_LIBS , _CS_POSIX_V6_ILP32_OFFBIG_LINTFLAGS , _CS_POSIX_V6_LP64_OFF64_CFLAGS , _CS_POSIX_V6_LP64_OFF64_LDFLAGS , _CS_POSIX_V6_LP64_OFF64_LIBS , _CS_POSIX_V6_LP64_OFF64_LINTFLAGS , _CS_POSIX_V6_LPBIG_OFFBIG_CFLAGS , _CS_POSIX_V6_LPBIG_OFFBIG_LDFLAGS , _CS_POSIX_V6_LPBIG_OFFBIG_LIBS , _CS_POSIX_V6_LPBIG_OFFBIG_LINTFLAGS , _CS_POSIX_V7_ILP32_OFF32_CFLAGS , _CS_POSIX_V7_ILP32_OFF32_LDFLAGS , _CS_POSIX_V7_ILP32_OFF32_LIBS , _CS_POSIX_V7_ILP32_OFF32_LINTFLAGS , _CS_POSIX_V7_ILP32_OFFBIG_CFLAGS , _CS_POSIX_V7_ILP32_OFFBIG_LDFLAGS , _CS_POSIX_V7_ILP32_OFFBIG_LIBS , _CS_POSIX_V7_ILP32_OFFBIG_LINTFLAGS , _CS_POSIX_V7_LP64_OFF64_CFLAGS , _CS_POSIX_V7_LP64_OFF64_LDFLAGS , _CS_POSIX_V7_LP64_OFF64_LIBS , _CS_POSIX_V7_LP64_OFF64_LINTFLAGS , _CS_POSIX_V7_LPBIG_OFFBIG_CFLAGS , _CS_POSIX_V7_LPBIG_OFFBIG_LDFLAGS , _CS_POSIX_V7_LPBIG_OFFBIG_LIBS , _CS_POSIX_V7_LPBIG_OFFBIG_LINTFLAGS , _CS_V6_ENV , _CS_V7_ENV } ; extern long int sysconf(int __name); void bots_get_date(char *str); void bots_get_architecture(char *str); void bots_get_load_average(char *str); void bots_print_results(void ); struct stUn_imopVarPre30 { unsigned char _x[4]; } ; typedef struct stUn_imopVarPre30 omp_lock_t; struct stUn_imopVarPre31 { unsigned char _x[8 + sizeof(void *)]; } ; typedef struct stUn_imopVarPre31 omp_nest_lock_t; enum omp_sched_t { omp_sched_static = 1, omp_sched_dynamic = 2 , omp_sched_guided = 3 , omp_sched_auto = 4 } ; typedef enum omp_sched_t omp_sched_t; enum omp_proc_bind_t { omp_proc_bind_false = 0, omp_proc_bind_true = 1 , omp_proc_bind_master = 2 , omp_proc_bind_close = 3 , omp_proc_bind_spread = 4 } ; typedef enum omp_proc_bind_t omp_proc_bind_t; enum omp_lock_hint_t { omp_lock_hint_none = 0, omp_lock_hint_uncontended = 1 , omp_lock_hint_contended = 2 , omp_lock_hint_nonspeculative = 4 , omp_lock_hint_speculative = 8 } ; typedef enum omp_lock_hint_t omp_lock_hint_t; extern int omp_get_max_threads(void ); extern void omp_init_lock(omp_lock_t *); extern void omp_set_lock(omp_lock_t *); extern void omp_unset_lock(omp_lock_t *); void bots_print_usage(void ); void bots_set_info(); void bots_get_params_common(int argc, char **argv); void bots_get_params(int argc, char **argv); void energymonitor__setfilename(char *profFileName); void energymonitor__init(int cores, float sleeptime); void energymonitor__startprofiling(); void energymonitor__stopprofiling(); void energymonitor__trackpoweronly(); struct Results { long hosps_number; long hosps_personnel; long total_patients; long total_in_village; long total_waiting; long total_assess; long total_inside; long total_time; long total_hosps_v; } ; extern int sim_level; struct Patient { int id; int32_t seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } ; struct Hosp { int personnel; int free_personnel; struct Patient *waiting; struct Patient *assess; struct Patient *inside; struct Patient *realloc; omp_lock_t realloc_lock; } ; struct Village { int id; struct Village *back; struct Village *next; struct Village *forward; struct Patient *population; struct Hosp hosp; int level; int32_t seed; } ; float my_rand(int32_t 
*seed); void put_in_hosp(struct Hosp *hosp, struct Patient *patient); void addList(struct Patient **list, struct Patient *patient); void removeList(struct Patient **list, struct Patient *patient); void check_patients_inside(struct Village *village); void check_patients_waiting(struct Village *village); void check_patients_realloc(struct Village *village); void check_patients_assess_par(struct Village *village); struct Results get_results(struct Village *village); void read_input_data(char *filename); void allocate_village(struct Village **capital, struct Village *back , struct Village *next , int level , int32_t vid); void sim_village_main_par(struct Village *top); void sim_village_par(struct Village *village); int check_village(struct Village *top); void check_patients_population(struct Village *village); void my_print(struct Village *village); extern int bots_sequential_flag; extern int bots_check_flag; extern int bots_result; extern int bots_output_format; extern int bots_print_header; extern char bots_name[]; extern char bots_parameters[]; extern char bots_model[]; extern char bots_resources[]; extern char bots_exec_date[]; extern char bots_exec_message[]; extern char bots_comp_date[]; extern char bots_comp_message[]; extern char bots_cc[]; extern char bots_cflags[]; extern char bots_ld[]; extern char bots_ldflags[]; extern double bots_time_program; extern double bots_time_sequential; extern unsigned long long bots_number_of_tasks; extern char bots_cutoff[]; extern int bots_cutoff_value; long bots_usecs(); void bots_error(int error, char *message); enum enum_imopVarPre32 { BOTS_VERBOSE_NONE = 0, BOTS_VERBOSE_DEFAULT , BOTS_VERBOSE_DEBUG } ; typedef enum enum_imopVarPre32 bots_verbose_mode_t; extern bots_verbose_mode_t bots_verbose_mode; int sim_level; int sim_cities; int sim_population_ratio; int sim_time; int sim_assess_time; int sim_convalescence_time; int32_t sim_seed; float sim_get_sick_p; float sim_convalescence_p; float sim_realloc_p; int sim_pid = 0; int res_population; int res_hospitals; int res_personnel; int res_checkin; int res_village; int res_waiting; int res_assess; int res_inside; float res_avg_stay; float my_rand(int32_t *seed) { int32_t k; int32_t idum = *seed; idum ^= 123459876; k = idum / 127773; idum = 16807 * (idum - k * 127773) - 2836 * k; idum ^= 123459876; if (idum < 0) { idum += 2147483647; } *seed = idum * 2147483647; return (float) (1.0 / 2147483647) * idum; } void addList(struct Patient **list, struct Patient *patient) { if (*list == ((void *) 0)) { *list = patient; patient->back = ((void *) 0); patient->forward = ((void *) 0); } else { struct Patient *aux = *list; while (aux->forward != ((void *) 0)) { aux = aux->forward; } aux->forward = patient; patient->back = aux; patient->forward = ((void *) 0); } } void removeList(struct Patient **list, struct Patient *patient) { if (patient->back != ((void *) 0)) { patient->back->forward = patient->forward; } else { *list = patient->forward; } if (patient->forward != ((void *) 0)) { patient->forward->back = patient->back; } } void allocate_village(struct Village **capital, struct Village *back , struct Village *next , int level , int32_t vid) { int i; int population; int personnel; struct Village *current; struct Village *inext; struct Patient *patient; if (level == 0) { *capital = ((void *) 0); } else { double _imopVarPre111; _imopVarPre111 = pow(2, level); personnel = (int) _imopVarPre111; population = personnel * sim_population_ratio; unsigned long int _imopVarPre114; void *_imopVarPre115; _imopVarPre114 = 
sizeof(struct Village); _imopVarPre115 = malloc(_imopVarPre114); *capital = (struct Village *) _imopVarPre115; (*capital)->back = back; (*capital)->next = next; (*capital)->level = level; (*capital)->id = vid; (*capital)->seed = vid * (127773 + sim_seed); (*capital)->population = ((void *) 0); for (i = 0; i < population; i++) { unsigned long int _imopVarPre118; void *_imopVarPre119; _imopVarPre118 = sizeof(struct Patient); _imopVarPre119 = malloc(_imopVarPre118); patient = (struct Patient *) _imopVarPre119; patient->id = sim_pid++; patient->seed = (*capital)->seed; int *_imopVarPre121; _imopVarPre121 = &((*capital)->seed); my_rand(_imopVarPre121); patient->hosps_visited = 0; patient->time = 0; patient->time_left = 0; patient->home_village = *capital; struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } **_imopVarPre123; _imopVarPre123 = &((*capital)->population); addList(_imopVarPre123, patient); } (*capital)->hosp.personnel = personnel; (*capital)->hosp.free_personnel = personnel; (*capital)->hosp.assess = ((void *) 0); (*capital)->hosp.waiting = ((void *) 0); (*capital)->hosp.inside = ((void *) 0); (*capital)->hosp.realloc = ((void *) 0); struct stUn_imopVarPre30 *_imopVarPre125; _imopVarPre125 = &(*capital)->hosp.realloc_lock; omp_init_lock(_imopVarPre125); // #pragma omp dummyFlush LOCK_WRITE_END written([heapCell#1, sim_seed, sim_pid, bots_exec_message.f, bots_output_format, bots_time_program, heapCell#2, sim_assess_time, res_personnel, bots_comp_message.f, bots_cutoff_value, bots_resources.f, sim_get_sick_p, res_waiting, bots_verbose_mode, nullCell, bots_arg_file.f, res_avg_stay, sim_convalescence_p, bots_ld.f, res_population, bots_check_flag, top, res_village, res_assess, bots_parameters.f, bots_model.f, sim_convalescence_time, res_inside, res_hospitals, sim_population_ratio, res_checkin, bots_result, bots_comp_date.f, bots_exec_date.f, sim_time, bots_cutoff.f, bots_cc.f, bots_cflags.f, sim_level, bots_name.f, sim_cities, bots_print_header, sim_realloc_p, bots_ldflags.f, bots_execname.f]) read([&sim_convalescence_time, sim_seed, bots_exec_date, bots_exec_message.f, &res_checkin, heapCell#2, &sim_level, removeList, bots_verbose_mode, bots_usecs, check_patients_waiting, top, &sim_realloc_p, res_assess, bots_parameters.f, heapCell#0, bots_result, bots_exec_date.f, &sim_assess_time, &sim_get_sick_p, bots_sequential_flag, fprintf, bots_get_architecture, check_patients_inside, &res_waiting, _imopVarPre144, bots_parameters, heapCell#1, &sim_population_ratio, &sim_time, bots_error, atoi, &heapCell#1, bots_cutoff_value, bots_execname, stderr, vlist, sim_get_sick_p, free_BOTS_VERBOSE_DEFAULT, bots_name, res_avg_stay, vlist, read_input_data, bots_check_flag, put_in_hosp, res_village, get_results, getloadavg, check_patients_population, sim_village_main_par, res_hospitals, malloc, fclose, bots_cflags, bots_print_results, bots_cflags.f, sim_level, pow, energymonitor__startprofiling, bots_arg_file, energymonitor__init, allocate_village, sim_cities, snprintf, bots_print_usage, &res_population, error, gettimeofday, &sim_convalescence_p, bots_ld, bots_execname.f, &res_hospitals, &res_assess, stdout, bots_cutoff, bots_time_program, bots_output_format, fopen, bots_arg_file.f, bots_cc, check_village, bots_ld.f, bots_model.f, sim_convalescence_time, res_inside, fscanf, sim_population_ratio, res_checkin, &sim_seed, bots_number_of_tasks, bots_cutoff.f, &res_personnel, bots_comp_message, bots_cc.f, 
bots_get_load_average, sysconf, bots_resources, bots_name.f, energymonitor__setfilename, sim_realloc_p, free__SC_NPROCESSORS_CONF, bots_ldflags.f, &res_village, sim_village_par, bots_model, energymonitor__trackpoweronly, addList, sim_pid, sim_assess_time, res_personnel, bots_comp_date, bots_comp_message.f, bots_set_info, i, my_print, bots_resources.f, res_waiting, nullCell, &sim_cities, sim_convalescence_p, res_population, top, bots_ldflags, omp_get_max_threads, my_rand, exit, bots_comp_date.f, bots_exec_message, sim_time, uname, omp_unset_lock, omp_init_lock, energymonitor__stopprofiling, vlist, fflush, &res_avg_stay, sprintf, bots_time_sequential, strcpy, bots_print_header, &res_inside, check_patients_assess_par]) inext = ((void *) 0); for (i = sim_cities; i > 0; i--) { int32_t city = (int32_t) sim_cities; int _imopVarPre130; int _imopVarPre131; struct Village *_imopVarPre132; struct Village { int id; struct Village *back; struct Village *next; struct Village *forward; struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } *population; struct Hosp { int personnel; int free_personnel; struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } *waiting; struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } *assess; struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } *inside; struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } *realloc; struct stUn_imopVarPre30 { unsigned char _x[4]; } realloc_lock; } hosp; int level; int seed; } **_imopVarPre133; _imopVarPre130 = (vid * city) + (int32_t) i; _imopVarPre131 = level - 1; _imopVarPre132 = *capital; _imopVarPre133 = &current; allocate_village(_imopVarPre133, _imopVarPre132, inext, _imopVarPre131, _imopVarPre130); inext = current; } (*capital)->forward = current; } } struct Results get_results(struct Village *village) { struct Village *vlist; struct Patient *p; struct Results t_res; struct Results p_res; t_res.hosps_number = 0.0; t_res.hosps_personnel = 0.0; t_res.total_patients = 0.0; t_res.total_in_village = 0.0; t_res.total_waiting = 0.0; t_res.total_assess = 0.0; t_res.total_inside = 0.0; t_res.total_hosps_v = 0.0; t_res.total_time = 0.0; if (village == ((void *) 0)) { return t_res; } vlist = village->forward; while (vlist) { p_res = get_results(vlist); t_res.hosps_number += p_res.hosps_number; t_res.hosps_personnel += p_res.hosps_personnel; t_res.total_patients += p_res.total_patients; t_res.total_in_village += p_res.total_in_village; t_res.total_waiting += p_res.total_waiting; t_res.total_assess += p_res.total_assess; t_res.total_inside += p_res.total_inside; t_res.total_hosps_v += p_res.total_hosps_v; t_res.total_time += p_res.total_time; vlist = vlist->next; } t_res.hosps_number += 1.0; t_res.hosps_personnel += village->hosp.personnel; p = village->population; while (p != ((void *) 0)) { t_res.total_patients += 1.0; t_res.total_in_village += 1.0; t_res.total_hosps_v += (float) (p->hosps_visited); t_res.total_time += (float) (p->time); p = p->forward; } p = village->hosp.waiting; while (p != ((void *) 0)) { t_res.total_patients += 
1.0; t_res.total_waiting += 1.0; t_res.total_hosps_v += (float) (p->hosps_visited); t_res.total_time += (float) (p->time); p = p->forward; } p = village->hosp.assess; while (p != ((void *) 0)) { t_res.total_patients += 1.0; t_res.total_assess += 1.0; t_res.total_hosps_v += (float) (p->hosps_visited); t_res.total_time += (float) (p->time); p = p->forward; } p = village->hosp.inside; while (p != ((void *) 0)) { t_res.total_patients += 1.0; t_res.total_inside += 1.0; t_res.total_hosps_v += (float) (p->hosps_visited); t_res.total_time += (float) (p->time); p = p->forward; } return t_res; } void check_patients_inside(struct Village *village) { struct Patient *list = village->hosp.inside; struct Patient *p; while (list != ((void *) 0)) { p = list; list = list->forward; p->time_left--; if (p->time_left == 0) { village->hosp.free_personnel++; struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } **_imopVarPre135; _imopVarPre135 = &(village->hosp.inside); removeList(_imopVarPre135, p); struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } **_imopVarPre137; _imopVarPre137 = &(village->population); addList(_imopVarPre137, p); } } } void check_patients_assess_par(struct Village *village) { struct Patient *list = village->hosp.assess; float rand; struct Patient *p; while (list != ((void *) 0)) { p = list; list = list->forward; p->time_left--; if (p->time_left == 0) { int *_imopVarPre139; float _imopVarPre140; _imopVarPre139 = &(p->seed); _imopVarPre140 = my_rand(_imopVarPre139); rand = _imopVarPre140; if (rand < sim_convalescence_p) { int *_imopVarPre142; float _imopVarPre143; _imopVarPre142 = &(p->seed); _imopVarPre143 = my_rand(_imopVarPre142); rand = _imopVarPre143; int _imopVarPre144; _imopVarPre144 = rand > sim_realloc_p; if (!_imopVarPre144) { _imopVarPre144 = village->level == sim_level; } if (_imopVarPre144) { struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } **_imopVarPre146; _imopVarPre146 = &(village->hosp.assess); removeList(_imopVarPre146, p); struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } **_imopVarPre148; _imopVarPre148 = &(village->hosp.inside); addList(_imopVarPre148, p); p->time_left = sim_convalescence_time; p->time += p->time_left; } else { village->hosp.free_personnel++; struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } **_imopVarPre150; _imopVarPre150 = &(village->hosp.assess); removeList(_imopVarPre150, p); struct stUn_imopVarPre30 *_imopVarPre152; _imopVarPre152 = &(village->back->hosp.realloc_lock); /* the lock must be the parent's: sibling tasks append to village->back->hosp.realloc concurrently, so locking each child's own realloc_lock would not exclude them from each other */ // #pragma omp dummyFlush LOCK_MODIFY_START written([heapCell#1, sim_seed, sim_pid, bots_exec_message.f, bots_output_format, bots_time_program, heapCell#2, sim_assess_time, res_personnel, bots_comp_message.f, bots_cutoff_value, i, bots_resources.f, sim_get_sick_p, res_waiting, bots_verbose_mode, nullCell, bots_arg_file.f, res_avg_stay, sim_convalescence_p, bots_ld.f, res_population, bots_check_flag, top, res_village, res_assess, bots_parameters.f, bots_model.f, sim_convalescence_time, res_inside, res_hospitals, sim_population_ratio, res_checkin, bots_result, 
bots_comp_date.f, bots_exec_date.f, sim_time, bots_cutoff.f, bots_cc.f, bots_cflags.f, sim_level, bots_name.f, sim_cities, bots_print_header, sim_realloc_p, bots_ldflags.f, bots_execname.f]) read([omp_set_lock]) omp_set_lock(_imopVarPre152); // #pragma omp dummyFlush LOCK_MODIFY_END written([]) read([addList, omp_unset_lock]) struct Village *backvill = village->back; struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } **_imopVarPre154; _imopVarPre154 = &(backvill->hosp.realloc); addList(_imopVarPre154, p); struct stUn_imopVarPre30 *_imopVarPre156; _imopVarPre156 = &(village->back->hosp.realloc_lock); omp_unset_lock(_imopVarPre156); // #pragma omp dummyFlush LOCK_WRITE_END written([]) read([sim_village_par, heapCell#1, addList, sim_pid, heapCell#2, sim_assess_time, &heapCell#1, i, removeList, sim_get_sick_p, check_patients_waiting, nullCell, top, sim_convalescence_p, top, put_in_hosp, check_patients_population, sim_convalescence_time, my_rand, malloc, sim_time, omp_unset_lock, omp_init_lock, sim_level, vlist, sim_realloc_p, check_patients_inside, _imopVarPre144, check_patients_assess_par]) } } else { village->hosp.free_personnel++; struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } **_imopVarPre158; _imopVarPre158 = &(village->hosp.assess); removeList(_imopVarPre158, p); struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } **_imopVarPre160; _imopVarPre160 = &(village->population); addList(_imopVarPre160, p); } } } } void check_patients_waiting(struct Village *village) { struct Patient *list = village->hosp.waiting; struct Patient *p; while (list != ((void *) 0)) { p = list; list = list->forward; if (village->hosp.free_personnel > 0) { village->hosp.free_personnel--; p->time_left = sim_assess_time; p->time += p->time_left; struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } **_imopVarPre162; _imopVarPre162 = &(village->hosp.waiting); removeList(_imopVarPre162, p); struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } **_imopVarPre164; _imopVarPre164 = &(village->hosp.assess); addList(_imopVarPre164, p); } else { p->time++; } } } void check_patients_realloc(struct Village *village) { struct Patient *p; struct Patient *s; while (village->hosp.realloc != ((void *) 0)) { p = s = village->hosp.realloc; while (p != ((void *) 0)) { if (p->id < s->id) { s = p; } p = p->forward; } struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } **_imopVarPre166; _imopVarPre166 = &(village->hosp.realloc); removeList(_imopVarPre166, s); struct Hosp *_imopVarPre168; _imopVarPre168 = &(village->hosp); put_in_hosp(_imopVarPre168, s); } } void check_patients_population(struct Village *village) { struct Patient *list = village->population; struct Patient *p; float rand; while (list != ((void *) 0)) { p = list; list = list->forward; int *_imopVarPre170; float _imopVarPre171; _imopVarPre170 = &(p->seed); _imopVarPre171 = my_rand(_imopVarPre170); rand = _imopVarPre171; if (rand < sim_get_sick_p) { struct 
Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } **_imopVarPre173; _imopVarPre173 = &(village->population); removeList(_imopVarPre173, p); struct Hosp *_imopVarPre175; _imopVarPre175 = &(village->hosp); put_in_hosp(_imopVarPre175, p); } } } void put_in_hosp(struct Hosp *hosp, struct Patient *patient) { (patient->hosps_visited)++; if (hosp->free_personnel > 0) { hosp->free_personnel--; struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } **_imopVarPre177; _imopVarPre177 = &(hosp->assess); addList(_imopVarPre177, patient); patient->time_left = sim_assess_time; patient->time += patient->time_left; } else { struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } **_imopVarPre179; _imopVarPre179 = &(hosp->waiting); addList(_imopVarPre179, patient); } } void sim_village_par(struct Village *village) { struct Village *vlist; if (village == ((void *) 0)) { return; } vlist = village->forward; while (vlist) { // #pragma omp dummyFlush TASK_START written([heapCell#1, sim_seed, sim_pid, bots_exec_message.f, bots_output_format, bots_time_program, heapCell#2, sim_assess_time, res_personnel, bots_comp_message.f, bots_cutoff_value, i, bots_resources.f, sim_get_sick_p, res_waiting, bots_verbose_mode, nullCell, bots_arg_file.f, res_avg_stay, sim_convalescence_p, bots_ld.f, res_population, bots_check_flag, top, res_village, res_assess, bots_parameters.f, bots_model.f, sim_convalescence_time, res_inside, res_hospitals, sim_population_ratio, res_checkin, bots_result, bots_comp_date.f, bots_exec_date.f, sim_time, bots_cutoff.f, bots_cc.f, bots_cflags.f, sim_level, bots_name.f, sim_cities, bots_print_header, sim_realloc_p, bots_ldflags.f, bots_execname.f]) read([sim_village_par, sim_convalescence_time, addList, my_rand, sim_assess_time, bots_cutoff_value, removeList, sim_level, vlist, check_patients_waiting, sim_convalescence_p, sim_realloc_p, check_patients_inside, _imopVarPre144, check_patients_assess_par]) #pragma omp task if((sim_level - village->level) < bots_cutoff_value) { sim_village_par(vlist); } // #pragma omp dummyFlush TASK_END written([]) read([sim_convalescence_time, addList, my_rand, sim_assess_time, removeList, sim_level, vlist, check_patients_waiting, sim_convalescence_p, sim_realloc_p, check_patients_inside, _imopVarPre144, check_patients_assess_par]) vlist = vlist->next; } check_patients_inside(village); check_patients_assess_par(village); check_patients_waiting(village); // #pragma omp dummyFlush TASKWAIT_START written([heapCell#1, sim_seed, sim_pid, bots_exec_message.f, bots_output_format, bots_time_program, heapCell#2, sim_assess_time, res_personnel, bots_comp_message.f, bots_cutoff_value, i, bots_resources.f, sim_get_sick_p, res_waiting, bots_verbose_mode, nullCell, bots_arg_file.f, res_avg_stay, sim_convalescence_p, bots_ld.f, res_population, bots_check_flag, top, res_village, res_assess, bots_parameters.f, bots_model.f, sim_convalescence_time, res_inside, res_hospitals, sim_population_ratio, res_checkin, bots_result, bots_comp_date.f, bots_exec_date.f, sim_time, bots_cutoff.f, bots_cc.f, bots_cflags.f, sim_level, bots_name.f, sim_cities, bots_print_header, sim_realloc_p, bots_ldflags.f, bots_execname.f]) read([sim_village_par, heapCell#1, addList, sim_pid, heapCell#2, sim_assess_time, 
&heapCell#1, i, removeList, sim_get_sick_p, check_patients_waiting, nullCell, top, sim_convalescence_p, top, put_in_hosp, check_patients_population, check_patients_realloc, sim_convalescence_time, my_rand, malloc, sim_time, omp_unset_lock, omp_init_lock, sim_level, vlist, sim_realloc_p, check_patients_inside, _imopVarPre144, check_patients_assess_par]) #pragma omp taskwait check_patients_realloc(village); check_patients_population(village); } void my_print(struct Village *village) { struct Village *vlist; struct Patient *plist; if (village == ((void *) 0)) { return; } vlist = village->forward; while (vlist) { my_print(vlist); vlist = vlist->next; } plist = village->population; while (plist != ((void *) 0)) { ; plist = plist->forward; } ; } void read_input_data(char *filename) { FILE *fin; int res; struct _IO_FILE *_imopVarPre181; _imopVarPre181 = fopen(filename, "r"); if ((fin = _imopVarPre181) == ((void *) 0)) { if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { fprintf(stdout, "Could not open sequence file (%s)\n", filename); } ; int _imopVarPre183; _imopVarPre183 = -1; exit(_imopVarPre183); } float *_imopVarPre203; int *_imopVarPre204; int *_imopVarPre205; int *_imopVarPre206; int *_imopVarPre207; int *_imopVarPre208; int *_imopVarPre209; int *_imopVarPre210; int *_imopVarPre211; float *_imopVarPre212; float *_imopVarPre213; float *_imopVarPre214; int *_imopVarPre215; int *_imopVarPre216; int *_imopVarPre217; int *_imopVarPre218; int *_imopVarPre219; int *_imopVarPre220; int *_imopVarPre221; int _imopVarPre222; _imopVarPre203 = &res_avg_stay; _imopVarPre204 = &res_inside; _imopVarPre205 = &res_assess; _imopVarPre206 = &res_waiting; _imopVarPre207 = &res_village; _imopVarPre208 = &res_checkin; _imopVarPre209 = &res_personnel; _imopVarPre210 = &res_hospitals; _imopVarPre211 = &res_population; _imopVarPre212 = &sim_realloc_p; _imopVarPre213 = &sim_convalescence_p; _imopVarPre214 = &sim_get_sick_p; _imopVarPre215 = &sim_seed; _imopVarPre216 = &sim_convalescence_time; _imopVarPre217 = &sim_assess_time; _imopVarPre218 = &sim_time; _imopVarPre219 = &sim_population_ratio; _imopVarPre220 = &sim_cities; _imopVarPre221 = &sim_level; /* sim_seed is declared int32_t, so it is scanned with %d; %ld would overwrite adjacent storage on LP64 targets */ _imopVarPre222 = fscanf(fin, "%d %d %d %d %d %d %d %f %f %f %d %d %d %d %d %d %d %d %f", _imopVarPre221, _imopVarPre220, _imopVarPre219, _imopVarPre218, _imopVarPre217, _imopVarPre216, _imopVarPre215, _imopVarPre214, _imopVarPre213, _imopVarPre212, _imopVarPre211, _imopVarPre210, _imopVarPre209, _imopVarPre208, _imopVarPre207, _imopVarPre206, _imopVarPre205, _imopVarPre204, _imopVarPre203); res = _imopVarPre222; if (res == (-1)) { if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { fprintf(stdout, "Bogus input file (%s)\n", filename); } ; int _imopVarPre224; _imopVarPre224 = -1; exit(_imopVarPre224); } fclose(fin); if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { fprintf(stdout, "\n"); } ; if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { int _imopVarPre226; _imopVarPre226 = (int) sim_level; fprintf(stdout, "Number of levels = %d\n", _imopVarPre226); } ; if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { int _imopVarPre228; _imopVarPre228 = (int) sim_cities; fprintf(stdout, "Cities per level = %d\n", _imopVarPre228); } ; if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { int _imopVarPre230; _imopVarPre230 = (int) sim_population_ratio; fprintf(stdout, "Population ratio = %d\n", _imopVarPre230); } ; if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { int _imopVarPre232; _imopVarPre232 = (int) sim_time; fprintf(stdout, "Simulation time = %d\n", _imopVarPre232); } ; if 
(bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { int _imopVarPre234; _imopVarPre234 = (int) sim_assess_time; fprintf(stdout, "Assess time = %d\n", _imopVarPre234); } ; if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { int _imopVarPre236; _imopVarPre236 = (int) sim_convalescence_time; fprintf(stdout, "Convalescence time = %d\n", _imopVarPre236); } ; if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { int _imopVarPre238; _imopVarPre238 = (int) sim_seed; fprintf(stdout, "Initial seed = %d\n", _imopVarPre238); } ; if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { float _imopVarPre240; _imopVarPre240 = (float) sim_get_sick_p; fprintf(stdout, "Get sick prob. = %f\n", _imopVarPre240); } ; if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { float _imopVarPre242; _imopVarPre242 = (float) sim_convalescence_p; fprintf(stdout, "Convalescence prob. = %f\n", _imopVarPre242); } ; if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { float _imopVarPre244; _imopVarPre244 = (float) sim_realloc_p; fprintf(stdout, "Realloc prob. = %f\n", _imopVarPre244); } ; } int check_village(struct Village *top) { struct Results _imopVarPre246; _imopVarPre246 = get_results(top); struct Results result = _imopVarPre246; int answer = 1; if (res_population != result.total_patients) { answer = 2; } if (res_hospitals != result.hosps_number) { answer = 2; } if (res_personnel != result.hosps_personnel) { answer = 2; } if (res_checkin != result.total_hosps_v) { answer = 2; } if (res_village != result.total_in_village) { answer = 2; } if (res_waiting != result.total_waiting) { answer = 2; } if (res_assess != result.total_assess) { answer = 2; } if (res_inside != result.total_inside) { answer = 2; } if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { fprintf(stdout, "\n"); } ; if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { fprintf(stdout, "Sim. 
Variables = expect / result\n"); } ; if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { int _imopVarPre249; int _imopVarPre250; _imopVarPre249 = (int) result.total_patients; _imopVarPre250 = (int) res_population; fprintf(stdout, "Total population = %6d / %6d people\n", _imopVarPre250, _imopVarPre249); } ; if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { int _imopVarPre253; int _imopVarPre254; _imopVarPre253 = (int) result.hosps_number; _imopVarPre254 = (int) res_hospitals; fprintf(stdout, "Hospitals = %6d / %6d people\n", _imopVarPre254, _imopVarPre253); } ; if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { int _imopVarPre257; int _imopVarPre258; _imopVarPre257 = (int) result.hosps_personnel; _imopVarPre258 = (int) res_personnel; fprintf(stdout, "Personnel = %6d / %6d people\n", _imopVarPre258, _imopVarPre257); } ; if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { int _imopVarPre261; int _imopVarPre262; _imopVarPre261 = (int) result.total_hosps_v; _imopVarPre262 = (int) res_checkin; fprintf(stdout, "Check-in's = %6d / %6d people\n", _imopVarPre262, _imopVarPre261); } ; if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { int _imopVarPre265; int _imopVarPre266; _imopVarPre265 = (int) result.total_in_village; _imopVarPre266 = (int) res_village; fprintf(stdout, "In Villages = %6d / %6d people\n", _imopVarPre266, _imopVarPre265); } ; if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { int _imopVarPre269; int _imopVarPre270; _imopVarPre269 = (int) result.total_waiting; _imopVarPre270 = (int) res_waiting; fprintf(stdout, "In Waiting List = %6d / %6d people\n", _imopVarPre270, _imopVarPre269); } ; if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { int _imopVarPre273; int _imopVarPre274; _imopVarPre273 = (int) result.total_assess; _imopVarPre274 = (int) res_assess; fprintf(stdout, "In Assess = %6d / %6d people\n", _imopVarPre274, _imopVarPre273); } ; if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { int _imopVarPre277; int _imopVarPre278; _imopVarPre277 = (int) result.total_inside; _imopVarPre278 = (int) res_inside; fprintf(stdout, "Inside Hospital = %6d / %6d people\n", _imopVarPre278, _imopVarPre277); } ; if (bots_verbose_mode >= BOTS_VERBOSE_DEFAULT) { float _imopVarPre281; float _imopVarPre282; _imopVarPre281 = (float) result.total_time / result.total_patients; _imopVarPre282 = (float) res_avg_stay; fprintf(stdout, "Average Stay = %6f / %6f u/time\n", _imopVarPre282, _imopVarPre281); } ; my_print(top); return answer; } void sim_village_main_par(struct Village *top) { long i; #pragma omp parallel { #pragma omp single nowait { // #pragma omp dummyFlush TASK_START written([sim_seed, bots_exec_message.f, bots_output_format, bots_time_program, sim_assess_time, res_personnel, bots_comp_message.f, bots_cutoff_value, bots_resources.f, sim_get_sick_p, res_waiting, bots_verbose_mode, nullCell, bots_arg_file.f, res_avg_stay, sim_convalescence_p, bots_ld.f, res_population, bots_check_flag, top, res_village, res_assess, bots_parameters.f, bots_model.f, sim_convalescence_time, res_inside, res_hospitals, sim_population_ratio, res_checkin, bots_result, bots_comp_date.f, bots_exec_date.f, sim_time, bots_cutoff.f, bots_cc.f, bots_cflags.f, sim_level, bots_name.f, sim_cities, bots_print_header, sim_realloc_p, bots_ldflags.f, bots_execname.f]) read([sim_village_par, sim_convalescence_time, addList, my_rand, sim_time, sim_assess_time, i, removeList, sim_level, vlist, check_patients_waiting, top, sim_convalescence_p, sim_realloc_p, check_patients_inside, _imopVarPre144, check_patients_assess_par]) #pragma omp task { for (i = 0; 
i < sim_time; i++) { sim_village_par(top); } } // #pragma omp dummyFlush TASK_END written([i]) read([]) // #pragma omp dummyFlush TASK_START written([]) read([]) } // #pragma omp dummyFlush BARRIER_START written([]) read([&sim_convalescence_time, sim_seed, bots_exec_date, bots_exec_message.f, &res_checkin, heapCell#2, &sim_level, removeList, bots_verbose_mode, bots_usecs, check_patients_waiting, top, &sim_realloc_p, res_assess, bots_parameters.f, heapCell#0, bots_result, bots_exec_date.f, &sim_assess_time, &sim_get_sick_p, bots_sequential_flag, fprintf, bots_get_architecture, check_patients_inside, &res_waiting, _imopVarPre144, bots_parameters, heapCell#1, &sim_population_ratio, &sim_time, bots_error, atoi, &heapCell#1, bots_cutoff_value, bots_execname, stderr, vlist, sim_get_sick_p, free_BOTS_VERBOSE_DEFAULT, bots_name, res_avg_stay, vlist, read_input_data, bots_check_flag, put_in_hosp, res_village, get_results, getloadavg, check_patients_population, sim_village_main_par, res_hospitals, malloc, fclose, bots_cflags, bots_print_results, bots_cflags.f, sim_level, pow, energymonitor__startprofiling, bots_arg_file, energymonitor__init, allocate_village, sim_cities, snprintf, bots_print_usage, &res_population, error, gettimeofday, &sim_convalescence_p, bots_ld, bots_execname.f, &res_hospitals, &res_assess, stdout, bots_cutoff, bots_time_program, bots_output_format, fopen, bots_arg_file.f, bots_cc, check_village, bots_ld.f, bots_model.f, sim_convalescence_time, res_inside, fscanf, sim_population_ratio, res_checkin, &sim_seed, bots_number_of_tasks, bots_cutoff.f, &res_personnel, bots_comp_message, bots_cc.f, bots_get_load_average, sysconf, bots_resources, bots_name.f, energymonitor__setfilename, sim_realloc_p, free__SC_NPROCESSORS_CONF, bots_ldflags.f, &res_village, sim_village_par, bots_model, energymonitor__trackpoweronly, addList, sim_pid, sim_assess_time, res_personnel, bots_comp_date, bots_comp_message.f, bots_set_info, i, my_print, bots_resources.f, res_waiting, nullCell, &sim_cities, sim_convalescence_p, res_population, top, bots_ldflags, omp_get_max_threads, my_rand, exit, bots_comp_date.f, bots_exec_message, sim_time, uname, omp_unset_lock, omp_init_lock, energymonitor__stopprofiling, vlist, fflush, &res_avg_stay, sprintf, bots_time_sequential, strcpy, bots_print_header, &res_inside, check_patients_assess_par]) #pragma omp barrier } } int bots_sequential_flag = 0; int bots_check_flag = 0; bots_verbose_mode_t bots_verbose_mode = BOTS_VERBOSE_DEFAULT; int bots_result = 3; int bots_output_format = 1; int bots_print_header = 0; char bots_name[256]; char bots_execname[256]; char bots_parameters[256]; char bots_model[256]; char bots_resources[256]; char bots_exec_date[256]; char bots_exec_message[256]; char bots_comp_date[256]; char bots_comp_message[256]; char bots_cc[256]; char bots_cflags[256]; char bots_ld[256]; char bots_ldflags[256]; char bots_cutoff[256]; double bots_time_program = 0.0; double bots_time_sequential = 0.0; unsigned long long bots_number_of_tasks = 0; char bots_arg_file[255] = ""; int bots_cutoff_value = 2; void bots_print_usage() { fprintf(stderr, "\n"); fprintf(stderr, "Usage: %s -[options]\n", bots_execname); fprintf(stderr, "\n"); fprintf(stderr, "Where options are:\n"); fprintf(stderr, " -f <file> : " "Health input file (mandatory)" "\n"); fprintf(stderr, " -x <value> : OpenMP tasks cut-off value (default=%d)\n", 2); fprintf(stderr, "\n"); fprintf(stderr, " -e <str> : Include 'str' execution message.\n"); fprintf(stderr, " -v <level> : Set verbose level (default = 
1).\n"); fprintf(stderr, " 0 - none.\n"); fprintf(stderr, " 1 - default.\n"); fprintf(stderr, " 2 - debug.\n"); fprintf(stderr, " -o <value> : Set output format mode (default = 1).\n"); fprintf(stderr, " 0 - no benchmark output.\n"); fprintf(stderr, " 1 - detailed list format.\n"); fprintf(stderr, " 2 - detailed row format.\n"); fprintf(stderr, " 3 - abridged list format.\n"); fprintf(stderr, " 4 - abridged row format.\n"); fprintf(stderr, " -z : Print row header (if output format is a row variant).\n"); fprintf(stderr, "\n"); fprintf(stderr, " -c : Check mode ON.\n"); fprintf(stderr, "\n"); fprintf(stderr, " -h : Print program's usage (this help).\n"); fprintf(stderr, "\n"); } void bots_get_params_common(int argc, char **argv) { int i; char *_imopVarPre287; char *_imopVarPre288; _imopVarPre287 = argv[0]; _imopVarPre288 = __xpg_basename(_imopVarPre287); strcpy(bots_execname, _imopVarPre288); bots_get_date(bots_exec_date); strcpy(bots_exec_message, ""); for (i = 1; i < argc; i++) { if (argv[i][0] == '-') { switch (argv[i][1]) { case 'c': argv[i][1] = '*'; bots_check_flag = 1; break; case 'e': argv[i][1] = '*'; i++; if (argc == i) { bots_print_usage(); exit(100); } char *_imopVarPre290; _imopVarPre290 = argv[i]; strcpy(bots_exec_message, _imopVarPre290); break; case 'f': argv[i][1] = '*'; i++; if (argc == i) { bots_print_usage(); exit(100); } char *_imopVarPre292; _imopVarPre292 = argv[i]; strcpy(bots_arg_file, _imopVarPre292); break; case 'h': argv[i][1] = '*'; bots_print_usage(); exit(100); case 'o': argv[i][1] = '*'; i++; if (argc == i) { bots_print_usage(); exit(100); } char *_imopVarPre294; int _imopVarPre295; _imopVarPre294 = argv[i]; _imopVarPre295 = atoi(_imopVarPre294); bots_output_format = _imopVarPre295; break; case 'v': argv[i][1] = '*'; i++; if (argc == i) { bots_print_usage(); exit(100); } char *_imopVarPre298; int _imopVarPre299; _imopVarPre298 = argv[i]; _imopVarPre299 = atoi(_imopVarPre298); bots_verbose_mode = (bots_verbose_mode_t) _imopVarPre299; if (bots_verbose_mode > 1) { fprintf(stderr, "Error: Configure the suite using '--debug' option in order to use a verbose level greater than 1.\n"); exit(100); } break; case 'x': argv[i][1] = '*'; i++; if (argc == i) { bots_print_usage(); exit(100); } char *_imopVarPre301; int _imopVarPre302; _imopVarPre301 = argv[i]; _imopVarPre302 = atoi(_imopVarPre301); bots_cutoff_value = _imopVarPre302; break; case 'z': argv[i][1] = '*'; bots_print_header = 1; break; default: fprintf(stderr, "Error: Unrecognized parameter.\n"); bots_print_usage(); exit(100); } } else { fprintf(stderr, "Error: Unrecognized parameter.\n"); bots_print_usage(); exit(100); } } } void bots_get_params(int argc, char **argv) { bots_get_params_common(argc, argv); } void bots_set_info() { snprintf(bots_name, 256, "Health"); snprintf(bots_parameters, 256, "%s", bots_arg_file); snprintf(bots_model, 256, "OpenMP (using tasks)"); int _imopVarPre304; _imopVarPre304 = omp_get_max_threads(); snprintf(bots_resources, 256, "%d", _imopVarPre304); snprintf(bots_comp_date, 256, "7MAY2018"); snprintf(bots_comp_message, 256, "bots"); snprintf(bots_cc, 256, "gcc"); snprintf(bots_cflags, 256, "-fopenmp"); snprintf(bots_ld, 256, "gcc"); snprintf(bots_ldflags, 256, "-lm"); snprintf(bots_cutoff, 256, "pragma-if (%d)", bots_cutoff_value); } int main(int argc, char *argv[]) { long bots_t_start; long bots_t_end; bots_get_params(argc, argv); struct Village *top; read_input_data(bots_arg_file); ; bots_set_info(); void *_imopVarPre308; void *_imopVarPre309; struct Village { int id; struct 
Village *back; struct Village *next; struct Village *forward; struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } *population; struct Hosp { int personnel; int free_personnel; struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } *waiting; struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } *assess; struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } *inside; struct Patient { int id; int seed; int time; int time_left; int hosps_visited; struct Village *home_village; struct Patient *back; struct Patient *forward; } *realloc; struct stUn_imopVarPre30 { unsigned char _x[4]; } realloc_lock; } hosp; int level; int seed; } **_imopVarPre310; _imopVarPre308 = ((void *) 0); _imopVarPre309 = ((void *) 0); _imopVarPre310 = &top; allocate_village(_imopVarPre310, _imopVarPre309, _imopVarPre308, sim_level, 0); ; int cores = 255; energymonitor__setfilename("prof.csv"); energymonitor__init(cores, 1); energymonitor__trackpoweronly(); energymonitor__startprofiling(); bots_t_start = bots_usecs(); sim_village_main_par(top); ; bots_t_end = bots_usecs(); energymonitor__stopprofiling(); bots_time_program = ((double) (bots_t_end - bots_t_start)) / 1000000; ; if (bots_check_flag) { bots_result = check_village(top); ; } ; bots_print_results(); return 0; } void bots_error(int error, char *message) { if (message == ((void *) 0)) { switch (error) { case 0: fprintf(stderr, "Error (%d): %s\n", error, "Unspecified error."); break; case 1: fprintf(stderr, "Error (%d): %s\n", error, "Not enough memory."); break; case 2: fprintf(stderr, "Error (%d): %s\n", error, "Unrecognized parameter."); bots_print_usage(); break; default: fprintf(stderr, "Error (%d): %s\n", error, "Invalid error code."); break; } } else { fprintf(stderr, "Error (%d): %s\n", error, message); } int _imopVarPre312; _imopVarPre312 = 100 + error; exit(_imopVarPre312); } long bots_usecs(void ) { struct timeval t; void *_imopVarPre315; struct timeval *_imopVarPre316; _imopVarPre315 = ((void *) 0); _imopVarPre316 = &t; gettimeofday(_imopVarPre316, _imopVarPre315); return t.tv_sec * 1000000 + t.tv_usec; } void bots_get_date(char *str) { time_t now; signed long int *_imopVarPre318; _imopVarPre318 = &now; time(_imopVarPre318); signed long int *_imopVarPre323; struct tm *_imopVarPre324; _imopVarPre323 = &now; _imopVarPre324 = gmtime(_imopVarPre323); strftime(str, 32, "%Y/%m/%d;%H:%M", _imopVarPre324); } void bots_get_architecture(char *str) { signed long int _imopVarPre326; _imopVarPre326 = sysconf(_SC_NPROCESSORS_CONF); int ncpus = _imopVarPre326; struct utsname architecture; struct utsname *_imopVarPre328; _imopVarPre328 = &architecture; uname(_imopVarPre328); char ( *_imopVarPre331 ); char ( *_imopVarPre332 ); _imopVarPre331 = architecture.machine; _imopVarPre332 = architecture.sysname; snprintf(str, 256, "%s-%s;%d", _imopVarPre332, _imopVarPre331, ncpus); } void bots_get_load_average(char *str) { double loadavg[3]; getloadavg(loadavg, 3); double _imopVarPre336; double _imopVarPre337; double _imopVarPre338; _imopVarPre336 = loadavg[2]; _imopVarPre337 = loadavg[1]; _imopVarPre338 = loadavg[0]; snprintf(str, 256, "%.2f;%.2f;%.2f", _imopVarPre338, 
_imopVarPre337, _imopVarPre336); } void bots_print_results() { char str_name[256]; char str_parameters[256]; char str_model[256]; char str_resources[256]; char str_result[15]; char str_time_program[15]; char str_time_sequential[15]; char str_speed_up[15]; char str_number_of_tasks[15]; char str_number_of_tasks_per_second[15]; char str_exec_date[256]; char str_exec_message[256]; char str_architecture[256]; char str_load_avg[256]; char str_comp_date[256]; char str_comp_message[256]; char str_cc[256]; char str_cflags[256]; char str_ld[256]; char str_ldflags[256]; char str_cutoff[256]; sprintf(str_name, "%s", bots_name); sprintf(str_parameters, "%s", bots_parameters); sprintf(str_model, "%s", bots_model); sprintf(str_cutoff, "%s", bots_cutoff); sprintf(str_resources, "%s", bots_resources); switch (bots_result) { case 0: sprintf(str_result, "n/a"); break; case 1: sprintf(str_result, "successful"); break; case 2: sprintf(str_result, "UNSUCCESSFUL"); break; case 3: sprintf(str_result, "Not requested"); break; default: sprintf(str_result, "error"); break; } sprintf(str_time_program, "%f", bots_time_program); if (bots_sequential_flag) { sprintf(str_time_sequential, "%f", bots_time_sequential); } else { sprintf(str_time_sequential, "n/a"); } if (bots_sequential_flag) { double _imopVarPre340; _imopVarPre340 = bots_time_sequential / bots_time_program; sprintf(str_speed_up, "%3.2f", _imopVarPre340); } else { sprintf(str_speed_up, "n/a"); } float _imopVarPre342; _imopVarPre342 = (float) bots_number_of_tasks; sprintf(str_number_of_tasks, "%3.2f", _imopVarPre342); double _imopVarPre344; _imopVarPre344 = (float) bots_number_of_tasks / bots_time_program; sprintf(str_number_of_tasks_per_second, "%3.2f", _imopVarPre344); sprintf(str_exec_date, "%s", bots_exec_date); sprintf(str_exec_message, "%s", bots_exec_message); bots_get_architecture(str_architecture); bots_get_load_average(str_load_avg); sprintf(str_comp_date, "%s", bots_comp_date); sprintf(str_comp_message, "%s", bots_comp_message); sprintf(str_cc, "%s", bots_cc); sprintf(str_cflags, "%s", bots_cflags); sprintf(str_ld, "%s", bots_ld); sprintf(str_ldflags, "%s", bots_ldflags); if (bots_print_header) { switch (bots_output_format) { case 0: break; case 1: break; case 2: fprintf(stdout, "Benchmark;Parameters;Model;Cutoff;Resources;Result;Time;Sequential;Speed-up;Nodes;Nodes/Sec;Exec Date;Exec Time;Exec Message;Architecture;Processors;Load Avg-1;Load Avg-5;Load Avg-15;Comp Date;Comp Time;Comp Message;CC;CFLAGS;LD;LDFLAGS\n"); break; case 3: break; case 4: fprintf(stdout, "Benchmark;Parameters;Model;Cutoff;Resources;Result;Time;Sequential;Speed-up;Nodes;Nodes/Sec;\n"); break; default: break; } } switch (bots_output_format) { case 0: break; case 1: fprintf(stdout, "\n"); fprintf(stdout, "Program = %s\n", str_name); fprintf(stdout, "Parameters = %s\n", str_parameters); fprintf(stdout, "Model = %s\n", str_model); fprintf(stdout, "Embedded cut-off = %s\n", str_cutoff); fprintf(stdout, "# of Threads = %s\n", str_resources); fprintf(stdout, "Verification = %s\n", str_result); fprintf(stdout, "Time Program = %s seconds\n", str_time_program); if (bots_sequential_flag) { fprintf(stdout, "Time Sequential = %s seconds\n", str_time_sequential); fprintf(stdout, "Speed-up = %s\n", str_speed_up); } if (bots_number_of_tasks > 0) { fprintf(stdout, "Nodes = %s\n", str_number_of_tasks); fprintf(stdout, "Nodes/Sec = %s\n", str_number_of_tasks_per_second); } fprintf(stdout, "Execution Date = %s\n", str_exec_date); fprintf(stdout, "Execution Message = %s\n", str_exec_message); 
fprintf(stdout, "Architecture = %s\n", str_architecture); fprintf(stdout, "Load Avg [1:5:15] = %s\n", str_load_avg); fprintf(stdout, "Compilation Date = %s\n", str_comp_date); fprintf(stdout, "Compilation Message = %s\n", str_comp_message); fprintf(stdout, "Compiler = %s\n", str_cc); fprintf(stdout, "Compiler Flags = %s\n", str_cflags); fprintf(stdout, "Linker = %s\n", str_ld); fprintf(stdout, "Linker Flags = %s\n", str_ldflags); fflush(stdout); break; case 2: fprintf(stdout, "%s;%s;%s;%s;%s;%s;", str_name, str_parameters, str_model, str_cutoff, str_resources, str_result); fprintf(stdout, "%s;%s;%s;", str_time_program, str_time_sequential, str_speed_up); fprintf(stdout, "%s;%s;", str_number_of_tasks, str_number_of_tasks_per_second); fprintf(stdout, "%s;%s;", str_exec_date, str_exec_message); fprintf(stdout, "%s;%s;", str_architecture, str_load_avg); fprintf(stdout, "%s;%s;", str_comp_date, str_comp_message); fprintf(stdout, "%s;%s;%s;%s;", str_cc, str_cflags, str_ld, str_ldflags); fprintf(stdout, "\n"); break; case 3: fprintf(stdout, "\n"); fprintf(stdout, "Program = %s\n", str_name); fprintf(stdout, "Parameters = %s\n", str_parameters); fprintf(stdout, "Model = %s\n", str_model); fprintf(stdout, "Embedded cut-off = %s\n", str_cutoff); fprintf(stdout, "# of Threads = %s\n", str_resources); fprintf(stdout, "Verification = %s\n", str_result); fprintf(stdout, "Time Program = %s seconds\n", str_time_program); if (bots_sequential_flag) { fprintf(stdout, "Time Sequential = %s seconds\n", str_time_sequential); fprintf(stdout, "Speed-up = %s\n", str_speed_up); } if (bots_number_of_tasks > 0) { fprintf(stdout, "Nodes = %s\n", str_number_of_tasks); fprintf(stdout, "Nodes/Sec = %s\n", str_number_of_tasks_per_second); } break; case 4: fprintf(stdout, "%s;%s;%s;%s;%s;%s;", str_name, str_parameters, str_model, str_cutoff, str_resources, str_result); fprintf(stdout, "%s;%s;%s;", str_time_program, str_time_sequential, str_speed_up); fprintf(stdout, "%s;%s;", str_number_of_tasks, str_number_of_tasks_per_second); fprintf(stdout, "\n"); break; default: bots_error(0, "No valid output format\n"); break; } }
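/*
 * The task-parallel tree walk above (sim_village_par) throttles task creation
 * with the if() clause: a task is only deferred while
 * (sim_level - village->level) < bots_cutoff_value, i.e. near the root of the
 * village tree, and deeper recursion runs inline in the encountering task.
 * A minimal self-contained sketch of that cutoff idiom on a generic tree
 * follows; cutoff_node and cutoff_walk are illustrative names, not part of
 * the benchmark.
 */
struct cutoff_node { struct cutoff_node *child; struct cutoff_node *next; };
static void cutoff_walk(struct cutoff_node *n, int depth, int cutoff) {
    struct cutoff_node *c;
    if (n == ((void *) 0)) { return; }
    for (c = n->child; c != ((void *) 0); c = c->next) {
        /* deferred task above the cutoff depth, inline execution below it */
        #pragma omp task firstprivate(c) if(depth < cutoff)
        cutoff_walk(c, depth + 1, cutoff);
    }
    /* all children must finish before this node's post-processing, mirroring
     * the taskwait that precedes check_patients_realloc(village) above */
    #pragma omp taskwait
}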
boxFilter_OPSAT_AoS.h
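// Note on boxFilter_OPSAT_AoS::filter() below: the PARALLEL_FOR_ branch
// unrolls the channel loop into eight hand-written omp sections, a fixed
// 8-way partition of [0, loop). Because the chunk bounds are computed with
// integer division (loop / 8, loop / 4, loop / 8 * 3, ...), the chunks can
// overlap when loop is not a multiple of 8 (e.g. loop == 12 filters channels
// 5, 7 and 8 twice), which is harmless for an idempotent per-channel filter
// but wasted work. A minimal sketch of the equivalent, overlap-free loop
// form; forEachChannelParallel and its callback are illustrative, not part
// of this header:
#include <functional>
inline void forEachChannelParallel(int loop, const std::function<void(int)>& perChannel)
{
#pragma omp parallel for
	for (int i = 0; i < loop; i++)
		perChannel(i); // e.g. the per-channel filter_impl(i) call
}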
#pragma once #include "boxFilter.hpp" //one pass box filtering AoS class boxFilter_OPSAT_AoS { protected: cv::Mat src; cv::Mat dest; int r; int parallelType; float div; int row; int col; int cn; int loop; virtual void filter_impl(int cnNum); public: boxFilter_OPSAT_AoS(cv::Mat& _src, cv::Mat& _dest, int _r, int _parallelType) : src(_src), dest(_dest), r(_r), parallelType(_parallelType) { div = 1.f / ((2 * r + 1)*(2 * r + 1)); row = src.rows; col = src.cols; cn = src.channels(); init(); } virtual void init() { loop = cn; } void filter() { if (parallelType == ParallelTypes::NAIVE) { for (int i = 1; i <= loop; i++) { filter_impl(i - 1); } } else if (parallelType == ParallelTypes::OMP) { #pragma omp parallel for for (int i = 1; i <= loop; i++) { filter_impl(i - 1); } } else if (parallelType == PARALLEL_FOR_) { #pragma omp parallel sections { #pragma omp section { for (int i = 0; i < loop / 8; i++) filter_impl(i); } #pragma omp section { for (int i = loop / 8; i < loop / 4; i++) filter_impl(i); } #pragma omp section { for (int i = loop / 4; i < loop / 8 * 3; i++) filter_impl(i); } #pragma omp section { for (int i = loop / 8 * 3; i < loop / 2; i++) filter_impl(i); } #pragma omp section { for (int i = loop / 2; i < loop / 8 * 5; i++) filter_impl(i); } #pragma omp section { for (int i = loop / 8 * 5; i < loop / 4 * 3; i++) filter_impl(i); } #pragma omp section { for (int i = loop / 4 * 3; i < loop / 8 * 7; i++) filter_impl(i); } #pragma omp section { for (int i = loop / 8 * 7; i < loop; i++) filter_impl(i); } } } } }; class boxFilter_OPSAT_AoS_SSE : public boxFilter_OPSAT_AoS { private: __m128 mDiv; __m128 mBorder; void filter_impl(int cnNum) override; public: boxFilter_OPSAT_AoS_SSE(cv::Mat& _src, cv::Mat& _dest, int _r, int _parallelType) : boxFilter_OPSAT_AoS(_src, _dest, _r, _parallelType) { init(); } void init() override { loop = cn / 4; mDiv = _mm_set1_ps(div); mBorder = _mm_set1_ps(static_cast<float>(r + 1)); } }; class boxFilter_OPSAT_AoS_AVX : public boxFilter_OPSAT_AoS { private: __m256 mDiv; __m256 mBorder; void filter_impl(int cnNum) override; public: boxFilter_OPSAT_AoS_AVX(cv::Mat& _src, cv::Mat& _dest, int _r, int _parallelType) : boxFilter_OPSAT_AoS(_src, _dest, _r, _parallelType) { init(); } void init() override { loop = cn / 8; mDiv = _mm256_set1_ps(div); mBorder = _mm256_set1_ps(static_cast<float>(r + 1)); } }; // 3channel loop unroll class boxFilter_OPSAT_BGR { private: cv::Mat src; cv::Mat temp; cv::Mat dest; int r; int parallelType; float div; int row; int col; int cn; __m128 mBorder; __m128 mDiv; void filter_impl(); public: boxFilter_OPSAT_BGR(cv::Mat& _src, cv::Mat& _dest, int _r, int _parallelType) : src(_src), dest(_dest), r(_r), parallelType(_parallelType) { div = 1.f / ((2 * r + 1)*(2 * r + 1)); row = src.rows; col = src.cols; cn = src.channels(); mBorder = _mm_set1_ps(static_cast<float>(r + 1)); mDiv = _mm_set1_ps(div); temp.create(src.rows, src.cols + 1, CV_32FC3); } void filter() { if (parallelType == ParallelTypes::NAIVE) { filter_impl(); } } }; class boxFilter_OPSAT_BGRA { private: cv::Mat src; cv::Mat srcBGRA; cv::Mat destBGRA; cv::Mat dest; int r; int parallelType; float div; int row; int col; int cn; __m128 mBorder; __m128 mDiv; void filter_impl(); public: boxFilter_OPSAT_BGRA(cv::Mat& _src, cv::Mat& _dest, int _r, int _parallelType) : src(_src), dest(_dest), r(_r), parallelType(_parallelType) { div = 1.f / ((2 * r + 1)*(2 * r + 1)); row = src.rows; col = src.cols; cn = src.channels(); mBorder = _mm_set1_ps(static_cast<float>(r + 1)); mDiv = _mm_set1_ps(div); 
srcBGRA.create(src.size(), CV_32FC4); destBGRA.create(src.size(), CV_32FC4); } void filter() { if (parallelType == ParallelTypes::NAIVE) { filter_impl(); } } };
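// The filter_impl() bodies are defined out of line and are not part of this
// header. For reference, this is a minimal scalar sketch of a one-pass box
// filter built on running column sums, the usual O(1)-per-pixel scheme that
// the OPSAT ("one-pass SAT") variants above vectorize with SSE/AVX. Plain
// float buffers and replicate borders are simplifying assumptions here, not
// necessarily the exact scheme of filter_impl().
#include <algorithm>
#include <vector>
inline void boxFilterScalarSketch(const float* src, float* dst, int row, int col, int r)
{
	const float div = 1.f / ((2 * r + 1) * (2 * r + 1)); // same normalization as above
	std::vector<float> colSum(col, 0.f);
	// prime the vertical window with rows -r..r (clamped at the top border)
	for (int y = -r; y <= r; y++)
	{
		const float* s = src + std::min(std::max(y, 0), row - 1) * col;
		for (int x = 0; x < col; x++) colSum[x] += s[x];
	}
	for (int y = 0; y < row; y++)
	{
		// horizontal sliding window over the column sums
		float win = 0.f;
		for (int x = -r; x <= r; x++) win += colSum[std::min(std::max(x, 0), col - 1)];
		for (int x = 0; x < col; x++)
		{
			dst[y * col + x] = win * div;
			win += colSum[std::min(x + r + 1, col - 1)] - colSum[std::max(x - r, 0)];
		}
		// slide the vertical window down one row (clamped at the bottom border)
		const float* sAdd = src + std::min(y + r + 1, row - 1) * col;
		const float* sDel = src + std::max(y - r, 0) * col;
		for (int x = 0; x < col; x++) colSum[x] += sAdd[x] - sDel[x];
	}
}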
convolution_3x3_pack4.h
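// The 8x3 array `ktm` in the transform routine below is the kernel-transform
// matrix G of Winograd F(6x6, 3x3): each 3x3 kernel g is lifted once, offline,
// to an 8x8 tile U = G * g * G^T, so the per-tile convolution at run time
// reduces to an elementwise multiply-accumulate in the transform domain. A
// minimal scalar reference of that lift (the routine below computes the same
// product in a transposed two-pass form before packing for NEON); this helper
// is illustrative only and is not part of ncnn:
static inline void winograd63_kernel_transform_ref(const float g[3][3], float U[8][8])
{
    static const float G[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };
    float Gg[8][3]; // Gg = G * g (8x3)
    for (int i = 0; i < 8; i++)
        for (int j = 0; j < 3; j++)
            Gg[i][j] = G[i][0] * g[0][j] + G[i][1] * g[1][j] + G[i][2] * g[2][j];
    // U = Gg * G^T (8x8)
    for (int i = 0; i < 8; i++)
        for (int j = 0; j < 8; j++)
            U[i][j] = Gg[i][0] * G[j][0] + Gg[i][1] * G[j][1] + Gg[i][2] * G[j][2];
}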
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd63_transform_kernel_pack4_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = 4b-4a-inch/4a-64-outch/4b; #if __aarch64__ kernel_tm_pack4.create(2 * inch / 4, 64, (outch / 4) / 2 + (outch / 4) % 2, (size_t)4u * 16, 16); #else kernel_tm_pack4.create(inch / 4, 64, outch / 4, (size_t)4u * 16, 16); #endif int q = 0; #if __aarch64__ for (; q + 7 < outch; q += 8) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); const Mat k4 = kernel_tm.channel(q + 4); const Mat k5 = kernel_tm.channel(q + 5); const Mat k6 = kernel_tm.channel(q + 6); const Mat k7 = kernel_tm.channel(q + 7); Mat g0 = kernel_tm_pack4.channel(q / 8); for (int k = 0; k < 64; k++) { float* g00 = g0.row(k); for (int p = 0; p + 3 < inch; p += 4) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); const float* k40 = k4.row(p); const float* k41 = k4.row(p + 1); const float* k42 = k4.row(p + 2); const float* k43 = k4.row(p + 3); const float* k50 = k5.row(p); const float* k51 = k5.row(p + 1); const 
float* k52 = k5.row(p + 2); const float* k53 = k5.row(p + 3); const float* k60 = k6.row(p); const float* k61 = k6.row(p + 1); const float* k62 = k6.row(p + 2); const float* k63 = k6.row(p + 3); const float* k70 = k7.row(p); const float* k71 = k7.row(p + 1); const float* k72 = k7.row(p + 2); const float* k73 = k7.row(p + 3); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k40[k]; g00[5] = k50[k]; g00[6] = k60[k]; g00[7] = k70[k]; g00[8] = k01[k]; g00[9] = k11[k]; g00[10] = k21[k]; g00[11] = k31[k]; g00[12] = k41[k]; g00[13] = k51[k]; g00[14] = k61[k]; g00[15] = k71[k]; g00[16] = k02[k]; g00[17] = k12[k]; g00[18] = k22[k]; g00[19] = k32[k]; g00[20] = k42[k]; g00[21] = k52[k]; g00[22] = k62[k]; g00[23] = k72[k]; g00[24] = k03[k]; g00[25] = k13[k]; g00[26] = k23[k]; g00[27] = k33[k]; g00[28] = k43[k]; g00[29] = k53[k]; g00[30] = k63[k]; g00[31] = k73[k]; g00 += 32; } } } #endif // __aarch64__ for (; q + 3 < outch; q += 4) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); #if __aarch64__ Mat g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4); #else Mat g0 = kernel_tm_pack4.channel(q / 4); #endif for (int k = 0; k < 64; k++) { float* g00 = g0.row(k); for (int p = 0; p + 3 < inch; p += 4) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k01[k]; g00[5] = k11[k]; g00[6] = k21[k]; g00[7] = k31[k]; g00[8] = k02[k]; g00[9] = k12[k]; g00[10] = k22[k]; g00[11] = k32[k]; g00[12] = k03[k]; g00[13] = k13[k]; g00[14] = k23[k]; g00[15] = k33[k]; g00 += 16; } } } } static void conv3x3s1_winograd63_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tiles = outw / 6; int h_tiles = outh / 6; int tiles = w_tiles * h_tiles; bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); conv3x3s1_winograd63_transform_input_pack4_neon(bottom_blob_bordered, bottom_blob_tm, opt); } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __aarch64__ if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, 
elemsize, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator); #else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { float* tm2p = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v8.4s}, [%1], #16 \n" "sub %0, %0, #128 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v10.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v11.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); r0 += bottom_blob_tm.cstep * 4; } } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8); #else float* tm2p = tm2.row(i / 8); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" "sub %0, %0, #64 \n" "st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "pld [%0, #512] \n" "vldm %0, {d16-d23} \n" // transpose 8x4 "vtrn.32 q0, q1 \n" "vtrn.32 q2, q3 \n" "vtrn.32 q8, q9 \n" "vtrn.32 q10, q11 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vswp d17, d20 \n" "vswp d19, d22 \n" "vswp q1, q8 \n" "vswp q3, q10 \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "sub %0, %0, #64 \n" "vst1.f32 {d4-d7}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"); #endif r0 += bottom_blob_tm.cstep * 4; } } for (; i + 3 < tiles; i += 4) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" "vstm %1!, {d0-d7} \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i + 1 < tiles; i += 2) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1"); #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0 :128] \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i < tiles; i++) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, elemsize, elempack, opt.workspace_allocator); int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ int nn_outch = 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel01_tm = kernel_tm.channel(pp); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w0011_01 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "fmla v20.4s, v5.4s, v0.s[0] \n" "fmla v21.4s, v5.4s, v0.s[1] \n" "fmla v22.4s, v5.4s, v0.s[2] \n" "fmla v23.4s, v5.4s, v0.s[3] \n" "fmla v24.4s, v5.4s, v1.s[0] \n" "fmla v25.4s, v5.4s, v1.s[1] \n" "fmla v26.4s, v5.4s, v1.s[2] \n" "fmla v27.4s, v5.4s, v1.s[3] \n" "fmla v28.4s, v5.4s, v2.s[0] \n" "fmla v29.4s, v5.4s, v2.s[1] \n" "fmla v30.4s, v5.4s, v2.s[2] \n" "fmla v31.4s, v5.4s, v2.s[3] \n" "fmla v8.4s, v6.4s, v3.s[0] \n" "fmla v9.4s, v6.4s, v3.s[1] \n" "fmla v10.4s, v6.4s, v3.s[2] \n" "fmla v11.4s, v6.4s, v3.s[3] \n" "fmla v20.4s, v7.4s, v3.s[0] \n" "fmla v21.4s, v7.4s, v3.s[1] \n" "fmla v22.4s, v7.4s, v3.s[2] \n" "fmla v23.4s, v7.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v12.4s, v6.4s, v0.s[0] \n" "fmla v13.4s, v6.4s, v0.s[1] \n" "fmla v14.4s, v6.4s, v0.s[2] \n" "fmla v15.4s, v6.4s, v0.s[3] \n" "fmla v16.4s, v6.4s, v1.s[0] \n" "fmla v17.4s, v6.4s, v1.s[1] \n" "fmla v18.4s, v6.4s, v1.s[2] \n" "fmla v19.4s, v6.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v0.s[0] \n" "fmla v25.4s, v7.4s, v0.s[1] \n" "fmla v26.4s, v7.4s, v0.s[2] \n" "fmla v27.4s, v7.4s, v0.s[3] \n" "fmla v28.4s, v7.4s, v1.s[0] \n" "fmla v29.4s, v7.4s, v1.s[1] \n" "fmla v30.4s, v7.4s, v1.s[2] \n" "fmla v31.4s, v7.4s, 
v1.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w2233_01 "fmla v8.4s, v4.4s, v2.s[0] \n" "fmla v9.4s, v4.4s, v2.s[1] \n" "fmla v10.4s, v4.4s, v2.s[2] \n" "fmla v11.4s, v4.4s, v2.s[3] \n" "fmla v12.4s, v4.4s, v3.s[0] \n" "fmla v13.4s, v4.4s, v3.s[1] \n" "fmla v14.4s, v4.4s, v3.s[2] \n" "fmla v15.4s, v4.4s, v3.s[3] \n" "fmla v20.4s, v5.4s, v2.s[0] \n" "fmla v21.4s, v5.4s, v2.s[1] \n" "fmla v22.4s, v5.4s, v2.s[2] \n" "fmla v23.4s, v5.4s, v2.s[3] \n" "fmla v24.4s, v5.4s, v3.s[0] \n" "fmla v25.4s, v5.4s, v3.s[1] \n" "fmla v26.4s, v5.4s, v3.s[2] \n" "fmla v27.4s, v5.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v4.4s, v0.s[1] \n" "fmla v18.4s, v4.4s, v0.s[2] \n" "fmla v19.4s, v4.4s, v0.s[3] \n" "fmla v28.4s, v5.4s, v0.s[0] \n" "fmla v29.4s, v5.4s, v0.s[1] \n" "fmla v30.4s, v5.4s, v0.s[2] \n" "fmla v31.4s, v5.4s, v0.s[3] \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v9.4s, v6.4s, v1.s[1] \n" "fmla v10.4s, v6.4s, v1.s[2] \n" "fmla v11.4s, v6.4s, v1.s[3] \n" "fmla v12.4s, v6.4s, v2.s[0] \n" "fmla v13.4s, v6.4s, v2.s[1] \n" "fmla v14.4s, v6.4s, v2.s[2] \n" "fmla v15.4s, v6.4s, v2.s[3] \n" "fmla v16.4s, v6.4s, v3.s[0] \n" "fmla v17.4s, v6.4s, v3.s[1] \n" "fmla v18.4s, v6.4s, v3.s[2] \n" "fmla v19.4s, v6.4s, v3.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.4s, v7.4s, v1.s[0] \n" "fmla v21.4s, v7.4s, v1.s[1] \n" "fmla v22.4s, v7.4s, v1.s[2] \n" "fmla v23.4s, v7.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v2.s[0] \n" "fmla v25.4s, v7.4s, v2.s[1] \n" "fmla v26.4s, v7.4s, v2.s[2] \n" "fmla v27.4s, v7.4s, v2.s[3] \n" "fmla v28.4s, v7.4s, v3.s[0] \n" "fmla v29.4s, v7.4s, v3.s[1] \n" "fmla v30.4s, v7.4s, v3.s[2] \n" "fmla v31.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r4 r5 r6 r7 "fmla v16.4s, 
v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v24.4s, v9.4s, v0.s[0] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v26.4s, v9.4s, v2.s[0] \n" "fmla v27.4s, v9.4s, v3.s[0] \n" "fmla v28.4s, v9.4s, v4.s[0] \n" "fmla v29.4s, v9.4s, v5.s[0] \n" "fmla v30.4s, v9.4s, v6.s[0] \n" "fmla v31.4s, v9.4s, v7.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v10.4s, v4.s[1] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v6.s[1] \n" "fmla v23.4s, v10.4s, v7.s[1] \n" "fmla v24.4s, v11.4s, v0.s[1] \n" "fmla v25.4s, v11.4s, v1.s[1] \n" "fmla v26.4s, v11.4s, v2.s[1] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v28.4s, v11.4s, v4.s[1] \n" "fmla v29.4s, v11.4s, v5.s[1] \n" "fmla v30.4s, v11.4s, v6.s[1] \n" "fmla v31.4s, v11.4s, v7.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v21.4s, v12.4s, v5.s[2] \n" "fmla v22.4s, v12.4s, v6.s[2] \n" "fmla v23.4s, v12.4s, v7.s[2] \n" "fmla v24.4s, v13.4s, v0.s[2] \n" "fmla v25.4s, v13.4s, v1.s[2] \n" "fmla v26.4s, v13.4s, v2.s[2] \n" "fmla v27.4s, v13.4s, v3.s[2] \n" "fmla v28.4s, v13.4s, v4.s[2] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v30.4s, v13.4s, v6.s[2] \n" "fmla v31.4s, v13.4s, v7.s[2] \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v14.4s, v4.s[3] \n" "fmla v21.4s, v14.4s, v5.s[3] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v23.4s, v14.4s, v7.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.4s, v15.4s, v0.s[3] \n" "fmla v25.4s, v15.4s, v1.s[3] \n" "fmla v26.4s, v15.4s, v2.s[3] \n" "fmla v27.4s, v15.4s, v3.s[3] \n" "fmla v28.4s, v15.4s, v4.s[3] \n" "fmla v29.4s, v15.4s, v5.s[3] \n" "fmla v30.4s, v15.4s, v6.s[3] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 
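// 4-tile microkernel: v0-v3 hold four transformed input tiles (pack4);
// v8/v10/v12/v14 are the weight vectors for output channel p and
// v9/v11/v13/v15 those for p+1; v16-v19 accumulate out0 and v20-v23 out1,
// consuming one pack4 input channel (four scalar channels) per iteration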
"fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v9.4s, v0.s[0] \n" "fmla v21.4s, v9.4s, v1.s[0] \n" "fmla v22.4s, v9.4s, v2.s[0] \n" "fmla v23.4s, v9.4s, v3.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v11.4s, v0.s[1] \n" "fmla v21.4s, v11.4s, v1.s[1] \n" "fmla v22.4s, v11.4s, v2.s[1] \n" "fmla v23.4s, v11.4s, v3.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v13.4s, v0.s[2] \n" "fmla v21.4s, v13.4s, v1.s[2] \n" "fmla v22.4s, v13.4s, v2.s[2] \n" "fmla v23.4s, v13.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v15.4s, v0.s[3] \n" "fmla v21.4s, v15.4s, v1.s[3] \n" "fmla v22.4s, v15.4s, v2.s[3] \n" "fmla v23.4s, v15.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4s, v1.4s}, [%3], #32 \n" // r0 r1 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v1.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v11.4s, v0.s[1] \n" "fmla v19.4s, v11.4s, v1.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v13.4s, v0.s[2] \n" "fmla v19.4s, v13.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v15.4s, v0.s[3] \n" "fmla v19.4s, v15.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4s}, [%3], #16 \n" // r0 "prfm 
pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v11.4s, v0.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v13.4s, v0.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v17.4s}, [%2], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"); } } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); #if __aarch64__ const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2); #else const Mat kernel0_tm = kernel_tm.channel(p); #endif for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // w0123_0 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "fmla v8.4s, v5.4s, v3.s[0] \n" "fmla v9.4s, v5.4s, v3.s[1] \n" "fmla v10.4s, v5.4s, v3.s[2] \n" "fmla v11.4s, v5.4s, v3.s[3] \n" "fmla v12.4s, v5.4s, v20.s[0] \n" "fmla v13.4s, v5.4s, v20.s[1] \n" "fmla v14.4s, v5.4s, v20.s[2] \n" "fmla v15.4s, v5.4s, v20.s[3] \n" "fmla v16.4s, v5.4s, v21.s[0] \n" "fmla v17.4s, v5.4s, v21.s[1] \n" "fmla v18.4s, v5.4s, v21.s[2] \n" "fmla v19.4s, v5.4s, v21.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "fmla v8.4s, v6.4s, v22.s[0] \n" "fmla v9.4s, v6.4s, v22.s[1] \n" "fmla v10.4s, v6.4s, v22.s[2] \n" "fmla v11.4s, v6.4s, v22.s[3] \n" "fmla v12.4s, v6.4s, v23.s[0] \n" "fmla v13.4s, v6.4s, v23.s[1] \n" "fmla v14.4s, v6.4s, v23.s[2] \n" "fmla v15.4s, v6.4s, v23.s[3] \n" "fmla v16.4s, v6.4s, v24.s[0] \n" "fmla v17.4s, v6.4s, v24.s[1] \n" "fmla v18.4s, v6.4s, v24.s[2] \n" "fmla v19.4s, v6.4s, v24.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v7.4s, v25.s[0] \n" "fmla v9.4s, v7.4s, v25.s[1] \n" "fmla v10.4s, v7.4s, v25.s[2] \n" "fmla v11.4s, v7.4s, v25.s[3] \n" "fmla v12.4s, v7.4s, v26.s[0] \n" "fmla v13.4s, 
v7.4s, v26.s[1] \n" "fmla v14.4s, v7.4s, v26.s[2] \n" "fmla v15.4s, v7.4s, v26.s[3] \n" "fmla v16.4s, v7.4s, v27.s[0] \n" "fmla v17.4s, v7.4s, v27.s[1] \n" "fmla v18.4s, v7.4s, v27.s[2] \n" "fmla v19.4s, v7.4s, v27.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8); #else const float* r0 = bb2.row(i / 8); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r4 r5 r6 r7 "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v20.4s, v9.4s, v4.s[1] \n" "fmla v21.4s, v9.4s, v5.s[1] \n" "fmla v22.4s, v9.4s, v6.s[1] \n" "fmla v23.4s, v9.4s, v7.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "fmla v20.4s, v10.4s, v4.s[2] \n" "fmla v21.4s, v10.4s, v5.s[2] \n" "fmla v22.4s, v10.4s, v6.s[2] \n" "fmla v23.4s, v10.4s, v7.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "fmla v20.4s, v11.4s, v4.s[3] \n" "fmla v21.4s, v11.4s, v5.s[3] \n" "fmla v22.4s, v11.4s, v6.s[3] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "veor q12, q12 \n" "veor q13, q13 \n" "veor q14, q14 \n" "veor q15, q15 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 
q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d4[0] \n" "vmla.f32 q9, q7, d4[1] \n" "vmla.f32 q10, q7, d5[0] \n" "vmla.f32 q11, q7, d5[1] \n" "vmla.f32 q12, q7, d6[0] \n" "vmla.f32 q13, q7, d6[1] \n" "vmla.f32 q14, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" "vstm %1!, {d24-d31} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; i + 3 < tiles; i += 4) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q10, q4, d4[0] \n" "vmla.f32 q11, q4, d6[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d4[1] \n" "vmla.f32 q11, q5, d6[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d7[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "vmla.f32 q10, q7, d5[1] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif } for (; i + 1 < tiles; i += 2) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else const 
float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4s, v1.4s}, [%2], #32 \n" // r0 r1 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "0: \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "bne 0b \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9"); #endif } for (; i < tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" // r0 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16"); #else asm volatile( "veor q8, q8 \n" "0: \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "vst1.f32 {d16-d17}, [%1 :128]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8"); #endif } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { conv3x3s1_winograd63_transform_output_pack4_neon(top_blob_tm, top_blob_bordered, bias, opt); } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_winograd43_transform_kernel_pack4_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt) { // winograd43 transform kernel Mat kernel_tm(6 * 6, inch, outch); const float ktm[6][3] = { {1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6}, {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[6][3]; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 36-inch-outch // dst = 4b-4a-inch/4a-36-outch/4b; #if __aarch64__ kernel_tm_pack4.create(2 * inch / 4, 36, (outch / 4) / 2 + (outch / 4) % 2, (size_t)4u * 16, 16); #else kernel_tm_pack4.create(inch / 4, 36, outch / 4, (size_t)4u * 16, 16); #endif int q = 0; #if __aarch64__ for (; q + 7 < outch; q += 8) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); const Mat k4 = kernel_tm.channel(q + 4); const Mat k5 = kernel_tm.channel(q + 5); const Mat k6 = kernel_tm.channel(q + 6); const Mat k7 = kernel_tm.channel(q + 7); Mat g0 = kernel_tm_pack4.channel(q / 8); for (int k = 0; k < 36; k++) { float* g00 = g0.row(k); for (int p = 0; p + 3 < inch; p += 4) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); const float* k40 = k4.row(p); const float* k41 = k4.row(p + 1); const float* k42 = k4.row(p + 2); const float* k43 = k4.row(p + 3); const float* k50 = k5.row(p); const float* k51 = k5.row(p + 1); const float* k52 = k5.row(p + 2); const float* k53 = k5.row(p + 3); const float* k60 = k6.row(p); 
const float* k61 = k6.row(p + 1); const float* k62 = k6.row(p + 2); const float* k63 = k6.row(p + 3); const float* k70 = k7.row(p); const float* k71 = k7.row(p + 1); const float* k72 = k7.row(p + 2); const float* k73 = k7.row(p + 3); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k40[k]; g00[5] = k50[k]; g00[6] = k60[k]; g00[7] = k70[k]; g00[8] = k01[k]; g00[9] = k11[k]; g00[10] = k21[k]; g00[11] = k31[k]; g00[12] = k41[k]; g00[13] = k51[k]; g00[14] = k61[k]; g00[15] = k71[k]; g00[16] = k02[k]; g00[17] = k12[k]; g00[18] = k22[k]; g00[19] = k32[k]; g00[20] = k42[k]; g00[21] = k52[k]; g00[22] = k62[k]; g00[23] = k72[k]; g00[24] = k03[k]; g00[25] = k13[k]; g00[26] = k23[k]; g00[27] = k33[k]; g00[28] = k43[k]; g00[29] = k53[k]; g00[30] = k63[k]; g00[31] = k73[k]; g00 += 32; } } } #endif // __aarch64__ for (; q + 3 < outch; q += 4) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); #if __aarch64__ Mat g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4); #else Mat g0 = kernel_tm_pack4.channel(q / 4); #endif for (int k = 0; k < 36; k++) { float* g00 = g0.row(k); for (int p = 0; p + 3 < inch; p += 4) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k01[k]; g00[5] = k11[k]; g00[6] = k21[k]; g00[7] = k31[k]; g00[8] = k02[k]; g00[9] = k12[k]; g00[10] = k22[k]; g00[11] = k32[k]; g00[12] = k03[k]; g00[13] = k13[k]; g00[14] = k23[k]; g00[15] = k33[k]; g00 += 16; } } } } static void conv3x3s1_winograd43_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 4n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tiles = outw / 4; int h_tiles = outh / 4; int tiles = w_tiles * h_tiles; bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); conv3x3s1_winograd43_transform_input_pack4_neon(bottom_blob_bordered, bottom_blob_tm, opt); } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __aarch64__ if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 
* inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, elemsize, elempack, opt.workspace_allocator); #else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, elemsize, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { float* tm2p = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v8.4s}, [%1], #16 \n" "sub %0, %0, #128 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v10.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v11.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); r0 += bottom_blob_tm.cstep * 4; } } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8); #else float* tm2p = tm2.row(i / 8); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" "sub %0, %0, #64 \n" "st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "pld [%0, #512] \n" "vldm %0, {d16-d23} \n" // transpose 8x4 "vtrn.32 q0, q1 \n" "vtrn.32 q2, q3 \n" "vtrn.32 q8, q9 \n" "vtrn.32 q10, q11 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vswp d17, d20 \n" "vswp d19, d22 \n" "vswp q1, q8 \n" "vswp q3, q10 \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "sub %0, %0, #64 \n" "vst1.f32 {d4-d7}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"); #endif r0 += bottom_blob_tm.cstep * 4; } } for (; i + 3 < tiles; i += 4) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" "vstm %1!, {d0-d7} \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i + 1 < tiles; i += 2) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1"); #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0 :128] \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i < tiles; i++) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, elemsize, elempack, opt.workspace_allocator); int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ int nn_outch = 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel01_tm = kernel_tm.channel(pp); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w0011_01 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "fmla v20.4s, v5.4s, v0.s[0] \n" "fmla v21.4s, v5.4s, v0.s[1] \n" "fmla v22.4s, v5.4s, v0.s[2] \n" "fmla v23.4s, v5.4s, v0.s[3] \n" "fmla v24.4s, v5.4s, v1.s[0] \n" "fmla v25.4s, v5.4s, v1.s[1] \n" "fmla v26.4s, v5.4s, v1.s[2] \n" "fmla v27.4s, v5.4s, v1.s[3] \n" "fmla v28.4s, v5.4s, v2.s[0] \n" "fmla v29.4s, v5.4s, v2.s[1] \n" "fmla v30.4s, v5.4s, v2.s[2] \n" "fmla v31.4s, v5.4s, v2.s[3] \n" "fmla v8.4s, v6.4s, v3.s[0] \n" "fmla v9.4s, v6.4s, v3.s[1] \n" "fmla v10.4s, v6.4s, v3.s[2] \n" "fmla v11.4s, v6.4s, v3.s[3] \n" "fmla v20.4s, v7.4s, v3.s[0] \n" "fmla v21.4s, v7.4s, v3.s[1] \n" "fmla v22.4s, v7.4s, v3.s[2] \n" "fmla v23.4s, v7.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v12.4s, v6.4s, v0.s[0] \n" "fmla v13.4s, v6.4s, v0.s[1] \n" "fmla v14.4s, v6.4s, v0.s[2] \n" "fmla v15.4s, v6.4s, v0.s[3] \n" "fmla v16.4s, v6.4s, v1.s[0] \n" "fmla v17.4s, v6.4s, v1.s[1] \n" "fmla v18.4s, v6.4s, v1.s[2] \n" "fmla v19.4s, v6.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v0.s[0] \n" "fmla v25.4s, v7.4s, v0.s[1] \n" "fmla v26.4s, v7.4s, v0.s[2] \n" "fmla v27.4s, v7.4s, v0.s[3] \n" "fmla v28.4s, v7.4s, v1.s[0] \n" "fmla v29.4s, v7.4s, v1.s[1] \n" "fmla v30.4s, v7.4s, v1.s[2] \n" "fmla v31.4s, v7.4s, 
v1.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w2233_01 "fmla v8.4s, v4.4s, v2.s[0] \n" "fmla v9.4s, v4.4s, v2.s[1] \n" "fmla v10.4s, v4.4s, v2.s[2] \n" "fmla v11.4s, v4.4s, v2.s[3] \n" "fmla v12.4s, v4.4s, v3.s[0] \n" "fmla v13.4s, v4.4s, v3.s[1] \n" "fmla v14.4s, v4.4s, v3.s[2] \n" "fmla v15.4s, v4.4s, v3.s[3] \n" "fmla v20.4s, v5.4s, v2.s[0] \n" "fmla v21.4s, v5.4s, v2.s[1] \n" "fmla v22.4s, v5.4s, v2.s[2] \n" "fmla v23.4s, v5.4s, v2.s[3] \n" "fmla v24.4s, v5.4s, v3.s[0] \n" "fmla v25.4s, v5.4s, v3.s[1] \n" "fmla v26.4s, v5.4s, v3.s[2] \n" "fmla v27.4s, v5.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v4.4s, v0.s[1] \n" "fmla v18.4s, v4.4s, v0.s[2] \n" "fmla v19.4s, v4.4s, v0.s[3] \n" "fmla v28.4s, v5.4s, v0.s[0] \n" "fmla v29.4s, v5.4s, v0.s[1] \n" "fmla v30.4s, v5.4s, v0.s[2] \n" "fmla v31.4s, v5.4s, v0.s[3] \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v9.4s, v6.4s, v1.s[1] \n" "fmla v10.4s, v6.4s, v1.s[2] \n" "fmla v11.4s, v6.4s, v1.s[3] \n" "fmla v12.4s, v6.4s, v2.s[0] \n" "fmla v13.4s, v6.4s, v2.s[1] \n" "fmla v14.4s, v6.4s, v2.s[2] \n" "fmla v15.4s, v6.4s, v2.s[3] \n" "fmla v16.4s, v6.4s, v3.s[0] \n" "fmla v17.4s, v6.4s, v3.s[1] \n" "fmla v18.4s, v6.4s, v3.s[2] \n" "fmla v19.4s, v6.4s, v3.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.4s, v7.4s, v1.s[0] \n" "fmla v21.4s, v7.4s, v1.s[1] \n" "fmla v22.4s, v7.4s, v1.s[2] \n" "fmla v23.4s, v7.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v2.s[0] \n" "fmla v25.4s, v7.4s, v2.s[1] \n" "fmla v26.4s, v7.4s, v2.s[2] \n" "fmla v27.4s, v7.4s, v2.s[3] \n" "fmla v28.4s, v7.4s, v3.s[0] \n" "fmla v29.4s, v7.4s, v3.s[1] \n" "fmla v30.4s, v7.4s, v3.s[2] \n" "fmla v31.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r4 r5 r6 r7 "fmla v16.4s, 
v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v24.4s, v9.4s, v0.s[0] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v26.4s, v9.4s, v2.s[0] \n" "fmla v27.4s, v9.4s, v3.s[0] \n" "fmla v28.4s, v9.4s, v4.s[0] \n" "fmla v29.4s, v9.4s, v5.s[0] \n" "fmla v30.4s, v9.4s, v6.s[0] \n" "fmla v31.4s, v9.4s, v7.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v10.4s, v4.s[1] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v6.s[1] \n" "fmla v23.4s, v10.4s, v7.s[1] \n" "fmla v24.4s, v11.4s, v0.s[1] \n" "fmla v25.4s, v11.4s, v1.s[1] \n" "fmla v26.4s, v11.4s, v2.s[1] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v28.4s, v11.4s, v4.s[1] \n" "fmla v29.4s, v11.4s, v5.s[1] \n" "fmla v30.4s, v11.4s, v6.s[1] \n" "fmla v31.4s, v11.4s, v7.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v21.4s, v12.4s, v5.s[2] \n" "fmla v22.4s, v12.4s, v6.s[2] \n" "fmla v23.4s, v12.4s, v7.s[2] \n" "fmla v24.4s, v13.4s, v0.s[2] \n" "fmla v25.4s, v13.4s, v1.s[2] \n" "fmla v26.4s, v13.4s, v2.s[2] \n" "fmla v27.4s, v13.4s, v3.s[2] \n" "fmla v28.4s, v13.4s, v4.s[2] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v30.4s, v13.4s, v6.s[2] \n" "fmla v31.4s, v13.4s, v7.s[2] \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v14.4s, v4.s[3] \n" "fmla v21.4s, v14.4s, v5.s[3] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v23.4s, v14.4s, v7.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.4s, v15.4s, v0.s[3] \n" "fmla v25.4s, v15.4s, v1.s[3] \n" "fmla v26.4s, v15.4s, v2.s[3] \n" "fmla v27.4s, v15.4s, v3.s[3] \n" "fmla v28.4s, v15.4s, v4.s[3] \n" "fmla v29.4s, v15.4s, v5.s[3] \n" "fmla v30.4s, v15.4s, v6.s[3] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 
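// same 4-tile microkernel as the winograd63 dot above, here iterated over
// the 36 transform positions of F(4x4,3x3): v16-v19 accumulate out0, v20-v23 out1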
"fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v9.4s, v0.s[0] \n" "fmla v21.4s, v9.4s, v1.s[0] \n" "fmla v22.4s, v9.4s, v2.s[0] \n" "fmla v23.4s, v9.4s, v3.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v11.4s, v0.s[1] \n" "fmla v21.4s, v11.4s, v1.s[1] \n" "fmla v22.4s, v11.4s, v2.s[1] \n" "fmla v23.4s, v11.4s, v3.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v13.4s, v0.s[2] \n" "fmla v21.4s, v13.4s, v1.s[2] \n" "fmla v22.4s, v13.4s, v2.s[2] \n" "fmla v23.4s, v13.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v15.4s, v0.s[3] \n" "fmla v21.4s, v15.4s, v1.s[3] \n" "fmla v22.4s, v15.4s, v2.s[3] \n" "fmla v23.4s, v15.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4s, v1.4s}, [%3], #32 \n" // r0 r1 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v1.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v11.4s, v0.s[1] \n" "fmla v19.4s, v11.4s, v1.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v13.4s, v0.s[2] \n" "fmla v19.4s, v13.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v15.4s, v0.s[3] \n" "fmla v19.4s, v15.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4s}, [%3], #16 \n" // r0 "prfm 
pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v11.4s, v0.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v13.4s, v0.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v17.4s}, [%2], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"); } } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); #if __aarch64__ const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2); #else const Mat kernel0_tm = kernel_tm.channel(p); #endif for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // w0123_0 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "fmla v8.4s, v5.4s, v3.s[0] \n" "fmla v9.4s, v5.4s, v3.s[1] \n" "fmla v10.4s, v5.4s, v3.s[2] \n" "fmla v11.4s, v5.4s, v3.s[3] \n" "fmla v12.4s, v5.4s, v20.s[0] \n" "fmla v13.4s, v5.4s, v20.s[1] \n" "fmla v14.4s, v5.4s, v20.s[2] \n" "fmla v15.4s, v5.4s, v20.s[3] \n" "fmla v16.4s, v5.4s, v21.s[0] \n" "fmla v17.4s, v5.4s, v21.s[1] \n" "fmla v18.4s, v5.4s, v21.s[2] \n" "fmla v19.4s, v5.4s, v21.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "fmla v8.4s, v6.4s, v22.s[0] \n" "fmla v9.4s, v6.4s, v22.s[1] \n" "fmla v10.4s, v6.4s, v22.s[2] \n" "fmla v11.4s, v6.4s, v22.s[3] \n" "fmla v12.4s, v6.4s, v23.s[0] \n" "fmla v13.4s, v6.4s, v23.s[1] \n" "fmla v14.4s, v6.4s, v23.s[2] \n" "fmla v15.4s, v6.4s, v23.s[3] \n" "fmla v16.4s, v6.4s, v24.s[0] \n" "fmla v17.4s, v6.4s, v24.s[1] \n" "fmla v18.4s, v6.4s, v24.s[2] \n" "fmla v19.4s, v6.4s, v24.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v7.4s, v25.s[0] \n" "fmla v9.4s, v7.4s, v25.s[1] \n" "fmla v10.4s, v7.4s, v25.s[2] \n" "fmla v11.4s, v7.4s, v25.s[3] \n" "fmla v12.4s, v7.4s, v26.s[0] \n" "fmla v13.4s, 
v7.4s, v26.s[1] \n" "fmla v14.4s, v7.4s, v26.s[2] \n" "fmla v15.4s, v7.4s, v26.s[3] \n" "fmla v16.4s, v7.4s, v27.s[0] \n" "fmla v17.4s, v7.4s, v27.s[1] \n" "fmla v18.4s, v7.4s, v27.s[2] \n" "fmla v19.4s, v7.4s, v27.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8); #else const float* r0 = bb2.row(i / 8); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r4 r5 r6 r7 "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v20.4s, v9.4s, v4.s[1] \n" "fmla v21.4s, v9.4s, v5.s[1] \n" "fmla v22.4s, v9.4s, v6.s[1] \n" "fmla v23.4s, v9.4s, v7.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "fmla v20.4s, v10.4s, v4.s[2] \n" "fmla v21.4s, v10.4s, v5.s[2] \n" "fmla v22.4s, v10.4s, v6.s[2] \n" "fmla v23.4s, v10.4s, v7.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "fmla v20.4s, v11.4s, v4.s[3] \n" "fmla v21.4s, v11.4s, v5.s[3] \n" "fmla v22.4s, v11.4s, v6.s[3] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "veor q12, q12 \n" "veor q13, q13 \n" "veor q14, q14 \n" "veor q15, q15 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 
q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d4[0] \n" "vmla.f32 q9, q7, d4[1] \n" "vmla.f32 q10, q7, d5[0] \n" "vmla.f32 q11, q7, d5[1] \n" "vmla.f32 q12, q7, d6[0] \n" "vmla.f32 q13, q7, d6[1] \n" "vmla.f32 q14, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" "vstm %1!, {d24-d31} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; i + 3 < tiles; i += 4) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q10, q4, d4[0] \n" "vmla.f32 q11, q4, d6[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d4[1] \n" "vmla.f32 q11, q5, d6[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d7[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "vmla.f32 q10, q7, d5[1] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif } for (; i + 1 < tiles; i += 2) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else const 
float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4s, v1.4s}, [%2], #32 \n" // r0 r1 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "0: \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "bne 0b \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9"); #endif } for (; i < tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" // r0 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16"); #else asm volatile( "veor q8, q8 \n" "0: \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "vst1.f32 {d16-d17}, [%1 :128]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8"); #endif } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { conv3x3s1_winograd43_transform_output_pack4_neon(top_blob_tm, top_blob_bordered, bias, opt); } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = (w - 2 * outw + w) * 4; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob.channel(p); float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); out0.fill(_bias0); for (int q = 0; q < inch; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); const float* kptr = (const float*)kernel.channel(p).row(q); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0] \n" // sum0 sum1 sum2 sum3 "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" // r04 r05 r06 r07 "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v28.4s}, [%1] \n" // r08 "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla 
v23.4s, v16.4s, v28.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v28.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v28.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v28.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n" // r14 r15 r16 r17 "fmla v20.4s, v24.4s, v8.s[0] \n" "fmla v21.4s, v24.4s, v10.s[0] \n" "fmla v22.4s, v24.4s, v12.s[0] \n" "fmla v23.4s, v24.4s, v14.s[0] \n" "fmla v20.4s, v25.4s, v8.s[1] \n" "fmla v21.4s, v25.4s, v10.s[1] \n" "fmla v22.4s, v25.4s, v12.s[1] \n" "fmla v23.4s, v25.4s, v14.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v20.4s, v26.4s, v8.s[2] \n" "fmla v21.4s, v26.4s, v10.s[2] \n" "fmla v22.4s, v26.4s, v12.s[2] \n" "fmla v23.4s, v26.4s, v14.s[2] \n" "fmla v20.4s, v27.4s, v8.s[3] \n" "fmla v21.4s, v27.4s, v10.s[3] \n" "fmla v22.4s, v27.4s, v12.s[3] \n" "fmla v23.4s, v27.4s, v14.s[3] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v28.4s}, [%2] \n" // r18 "fmla v20.4s, v16.4s, v9.s[0] \n" "fmla v21.4s, v16.4s, v11.s[0] \n" "fmla v22.4s, v16.4s, v13.s[0] \n" "fmla v23.4s, v16.4s, v15.s[0] \n" "fmla v20.4s, v17.4s, v9.s[1] \n" "fmla v21.4s, v17.4s, v11.s[1] \n" "fmla v22.4s, v17.4s, v13.s[1] \n" "fmla v23.4s, v17.4s, v15.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v20.4s, v18.4s, v9.s[2] \n" "fmla v21.4s, v18.4s, v11.s[2] \n" "fmla v22.4s, v18.4s, v13.s[2] \n" "fmla v23.4s, v18.4s, v15.s[2] \n" "fmla v20.4s, v19.4s, v9.s[3] \n" "fmla v21.4s, v19.4s, v11.s[3] \n" "fmla v22.4s, v19.4s, v13.s[3] \n" "fmla v23.4s, v19.4s, v15.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v20.4s, v24.4s, v10.s[0] \n" "fmla v21.4s, v24.4s, v12.s[0] \n" "fmla v22.4s, v24.4s, v14.s[0] \n" "fmla v23.4s, v24.4s, v28.s[0] \n" "fmla v20.4s, v25.4s, v10.s[1] \n" "fmla v21.4s, v25.4s, v12.s[1] \n" "fmla v22.4s, v25.4s, v14.s[1] \n" "fmla v23.4s, v25.4s, v28.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v20.4s, v26.4s, v10.s[2] \n" "fmla v21.4s, v26.4s, v12.s[2] \n" "fmla v22.4s, v26.4s, v14.s[2] \n" "fmla v23.4s, v26.4s, v28.s[2] \n" "fmla v20.4s, v27.4s, v10.s[3] \n" "fmla v21.4s, v27.4s, v12.s[3] \n" "fmla v22.4s, v27.4s, v14.s[3] \n" "fmla v23.4s, v27.4s, v28.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r24 r25 r26 r27 "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v28.4s}, [%3] \n" // r28 "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla 
v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" // "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4] \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v28.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v28.s[1] \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v28.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v28.s[3] \n" "sub %4, %4, #512 \n" // kptr -= 8 * 16; "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28"); #else // __aarch64__ asm volatile( "pld [%0, #512] \n" "vldm %0, {d24-d31} \n" // sum0 sum1 sum2 sum3 "pld [%1, #512] \n" "vldm %1!, {d0-d7} \n" // r00 r01 r02 r03 "pld [%1, #512] \n" "vldm %1!, {d8-d15} \n" // r04 r05 r06 r07 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "pld [%1, #128] \n" "vld1.f32 {d0-d1}, [%1 :128] \n" // r08 "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d15[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%2, #512] \n" "vldm %2!, {d8-d15} \n" // r10 r11 r12 r13 "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" // r14 r15 r16 r17 
"pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d12[0] \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d13[0] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "pld [%2, #128] \n" "vld1.f32 {d8-d9}, [%2 :128] \n" // r18 "vmla.f32 q12, q8, d10[0] \n" "vmla.f32 q13, q8, d14[0] \n" "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d10[1] \n" "vmla.f32 q13, q9, d14[1] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d11[0] \n" "vmla.f32 q13, q10, d15[0] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d11[1] \n" "vmla.f32 q13, q11, d15[1] \n" "vmla.f32 q14, q11, d3[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q12, q8, d12[0] \n" "vmla.f32 q13, q8, d0[0] \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d12[1] \n" "vmla.f32 q13, q9, d0[1] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d13[0] \n" "vmla.f32 q13, q10, d1[0] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d13[1] \n" "vmla.f32 q13, q11, d1[1] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d9[1] \n" "pld [%3, #512] \n" "vldm %3!, {d0-d7} \n" // r20 r21 r22 r23 "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" // r24 r25 r26 r27 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "pld [%3, #128] \n" "vld1.f32 {d0-d1}, [%3 :128] \n" // r28 "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d15[1] \n" // "pld [%4, #512] \n" "vldm %4, {d16-d23} \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "sub %4, %4, #512 \n" // kptr -= 8 * 16; "vstm %0!, {d24-d31} \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), 
"1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v20.4s, v21.4s}, [%0] \n" // sum0 sum1 "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmul v22.4s, v16.4s, v0.s[0] \n" "fmul v23.4s, v16.4s, v2.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v4.4s}, [%1] \n" // r04 "fmla v22.4s, v24.4s, v1.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v22.4s, v24.4s, v0.s[0] \n" "fmla v23.4s, v24.4s, v2.s[0] \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v22.4s, v26.4s, v0.s[2] \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v4.4s}, [%2] \n" // r14 "fmla v22.4s, v16.4s, v1.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v22.4s, v18.4s, v1.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v22.4s, v16.4s, v0.s[0] \n" "fmla v23.4s, v16.4s, v2.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4s}, [%3] \n" // r24 "fmla v22.4s, v24.4s, v1.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" // "prfm pldl1keep, [%4, #512] \n" 
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4] \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "sub %4, %4, #512 \n" // kptr -= 8 * 16; "st1 {v20.4s, v21.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); #else // __aarch64__ asm volatile( "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128] \n" // sum0 sum1 "pld [%1, #512] \n" "vldm %1!, {d0-d7} \n" // r00 r01 r02 r03 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmul.f32 q14, q8, d0[0] \n" "vmul.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "pld [%1, #128] \n" "vld1.f32 {d8-d9}, [%1 :128] \n" // r04 "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" // r10 r11 r12 r13 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "pld [%2, #128] \n" "vld1.f32 {d8-d9}, [%2 :128] \n" // r14 "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "pld [%3, #512] \n" "vldm %3!, {d0-d7} \n" // r20 r21 r22 r23 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "pld [%3, #128] \n" "vld1.f32 {d8-d9}, [%3 :128] \n" // r24 "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, 
q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" // "pld [%4, #512] \n" "vldm %4, {d16-d23} \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vadd.f32 q12, q12, q14 \n" "vadd.f32 q13, q13, q15 \n" "sub %4, %4, #512 \n" // kptr -= 8 * 16; "vst1.f32 {d24-d27}, [%0 :128]! \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j < outw; j++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n" // sum0 "prfm pldl1keep, [%1, #384] \n" "ld1 {v0.4s, v1.4s, v2.4s}, [%1] \n" // r00 r01 r02 "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmul v21.4s, v16.4s, v0.s[0] \n" "fmul v22.4s, v17.4s, v0.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmul v23.4s, v18.4s, v0.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v3.4s, v4.4s, v5.4s}, [%2] \n" // r10 r11 r12 "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v0.4s, v1.4s, v2.4s}, [%3] \n" // r20 r21 r22 "fmla v21.4s, v24.4s, v5.s[0] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "fmla v20.4s, v27.4s, v5.s[3] \n" "fmla v21.4s, v16.4s, v0.s[0] \n" "fmla v22.4s, v17.4s, v0.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v23.4s, v18.4s, v0.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "fmla v22.4s, v25.4s, v1.s[1] \n" // "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4] \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "add %1, %1, #32 \n" "fadd v22.4s, v21.4s, v22.4s \n" "add %2, %2, #32 \n" "fadd v23.4s, v23.4s, v22.4s \n" "add %3, %3, #32 \n" "fadd v20.4s, v20.4s, v23.4s \n" "sub %4, %4, #512 \n" // kptr -= 8 * 16; "st1 {v20.4s}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", 
"v21", "v22", "v23", "v24", "v25", "v26", "v27"); #else // __aarch64__ asm volatile( "pld [%0, #128] \n" "vld1.f32 {d24-d25}, [%0 :128] \n" // sum0 "pld [%1, #384] \n" "vldm %1, {d0-d5} \n" // r00 r01 r02 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmul.f32 q13, q8, d0[0] \n" "vmul.f32 q14, q9, d0[1] \n" "vmul.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d3[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d5[1] \n" "pld [%2, #384] \n" "vldm %2, {d0-d5} \n" // r10 r11 r12 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d0[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d3[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d5[1] \n" "pld [%3, #384] \n" "vldm %3, {d0-d5} \n" // r20 r21 r22 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d0[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d3[1] \n" // "pld [%4, #512] \n" "vldm %4, {d16-d23} \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vadd.f32 q14, q14, q13 \n" "add %1, %1, #32 \n" "vadd.f32 q15, q15, q14 \n" "add %2, %2, #32 \n" "vadd.f32 q12, q12, q15 \n" "add %3, %3, #32 \n" "sub %4, %4, #512 \n" // kptr -= 8 * 16; "vst1.f32 {d24-d25}, [%0 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } } } static void conv3x3s2_im2col_sgemm_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; // im2col Mat bottom_im2col(size, 9, inch, 16u, 4, opt.workspace_allocator); { const int gap = (w * 2 - outw * 2) * 4; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); Mat out = bottom_im2col.channel(p); float* ptr0 = out.row(0); float* ptr1 = out.row(1); float* ptr2 = out.row(2); float* ptr3 = out.row(3); float* ptr4 = out.row(4); float* ptr5 = out.row(5); float* ptr6 = out.row(6); float* ptr7 = out.row(7); float* ptr8 = out.row(8); const float* r0 = img.row(0); const float* r1 = img.row(1); const float* r2 = img.row(2); for (int i = 0; i < outh; i++) { int j = 0; for (; j + 1 < outw; j += 2) { float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 16); vst1q_f32(ptr0, _r00); vst1q_f32(ptr0 + 4, _r02); vst1q_f32(ptr1, _r01); vst1q_f32(ptr1 + 4, _r03); vst1q_f32(ptr2, _r02); vst1q_f32(ptr2 + 4, _r04); vst1q_f32(ptr3, _r10); vst1q_f32(ptr3 + 4, _r12); vst1q_f32(ptr4, _r11); vst1q_f32(ptr4 + 4, _r13); vst1q_f32(ptr5, _r12); vst1q_f32(ptr5 + 4, _r14); vst1q_f32(ptr6, _r20); vst1q_f32(ptr6 + 4, _r22); vst1q_f32(ptr7, _r21); vst1q_f32(ptr7 + 4, _r23); vst1q_f32(ptr8, _r22); vst1q_f32(ptr8 + 4, _r24); r0 += 16; r1 += 16; r2 += 16; ptr0 += 8; ptr1 += 8; ptr2 += 8; ptr3 += 8; ptr4 += 8; ptr5 += 8; ptr6 += 8; ptr7 += 8; ptr8 += 8; } for (; j < outw; j++) { float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); vst1q_f32(ptr0, _r00); vst1q_f32(ptr1, _r01); vst1q_f32(ptr2, _r02); vst1q_f32(ptr3, _r10); vst1q_f32(ptr4, _r11); vst1q_f32(ptr5, _r12); vst1q_f32(ptr6, _r20); vst1q_f32(ptr7, _r21); vst1q_f32(ptr8, _r22); r0 += 8; r1 += 8; r2 += 8; ptr0 += 4; ptr1 += 4; ptr2 += 4; ptr3 += 4; ptr4 += 4; ptr5 += 4; ptr6 += 4; ptr7 += 4; ptr8 += 4; } r0 += gap; r1 += gap; r2 += gap; } } } im2col_sgemm_pack4_neon(bottom_im2col, top_blob, kernel, _bias, opt); }
opencl_encfs_fmt_plug.c
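/*
 * John the Ripper OpenCL format plugin for EncFS volumes: the
 * PBKDF2-HMAC-SHA1 key derivation runs on the GPU (split into init/loop/final
 * kernels so the shared autotuner can size the work), while the AES/Blowfish
 * key-checksum verification runs on the CPU. A stand-alone sketch of that
 * verification step follows the plugin source below.
 */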
/*
 * Modified by Dhiru Kholia <dhiru at openwall.com> for EncFS format.
 *
 * This software is Copyright (c) 2012 Lukas Odzioba <[email protected]>
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */
#ifdef HAVE_OPENCL

#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_encfs;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_encfs);
#else

#include <stdint.h>
#include <string.h>
#include <openssl/opensslv.h>
#include <openssl/crypto.h>
#include <openssl/ssl.h>
#include <openssl/bio.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/engine.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#include "common-opencl.h"
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "encfs_common.h"
#include "options.h"
#include "misc.h"

#define OUTLEN (32 + 16)
#include "opencl_pbkdf2_hmac_sha1.h"

#define FORMAT_LABEL            "encfs-opencl"
#define FORMAT_NAME             "EncFS"
#define OCL_ALGORITHM_NAME      "PBKDF2-SHA1 OpenCL"
#define CPU_ALGORITHM_NAME      " AES/Blowfish"
#define ALGORITHM_NAME          OCL_ALGORITHM_NAME CPU_ALGORITHM_NAME
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#define BINARY_SIZE             0
#define PLAINTEXT_LENGTH        64
#define SALT_SIZE               sizeof(*cur_salt)
#define BINARY_ALIGN            MEM_ALIGN_WORD
#define SALT_ALIGN              MEM_ALIGN_WORD

/* This handles all widths */
#define GETPOS(i, index) (((index) % ocl_v_width) * 4 + ((i) & ~3U) * ocl_v_width + (((i) & 3) ^ 3) + ((index) / ocl_v_width) * 64 * ocl_v_width)

static int *cracked;
static int any_cracked;

static const int KEY_CHECKSUM_BYTES = 4;

static encfs_common_custom_salt *cur_salt;

static struct fmt_tests tests[] = {
	{"$encfs$192*181474*0*20*f1c413d9a20f7fdbc068c5a41524137a6e3fb231*44*9c0d4e2b990fac0fd78d62c3d2661272efa7d6c1744ee836a702a11525958f5f557b7a973aaad2fd14387b4f", "openwall"},
	{"$encfs$128*181317*0*20*e9a6d328b4c75293d07b093e8ec9846d04e22798*36*b9e83adb462ac8904695a60de2f3e6d57018ccac2227251d3f8fc6a8dd0cd7178ce7dc3f", "Jupiter"},
	{"$encfs$256*714949*0*20*472a967d35760775baca6aefd1278f026c0e520b*52*ac3b7ee4f774b4db17336058186ab78d209504f8a58a4272b5ebb25e868a50eaf73bcbc5e3ffd50846071c882feebf87b5a231b6", "Valient Gough"},
	{"$encfs$256*120918*0*20*e6eb9a85ee1c348bc2b507b07680f4f220caa763*52*9f75473ade3887bca7a7bb113fbc518ffffba631326a19c1e7823b4564ae5c0d1e4c7e4aec66d16924fa4c341cd52903cc75eec4", "Alo3San1t@nats"},
	{NULL}
};

static size_t key_buf_size;
static unsigned int *inbuffer;
static pbkdf2_out *output;
static pbkdf2_salt currentsalt;
static cl_mem mem_in, mem_out, mem_salt, mem_state;
static int new_keys;
static struct fmt_main *self;

static cl_kernel pbkdf2_init, pbkdf2_loop, pbkdf2_final;

#define cracked_size (sizeof(*cracked) * global_work_size * ocl_v_width)

/*
 * HASH_LOOPS should ideally be a factor of (iteration count - 1) and should
 * be chosen for a kernel duration of not more than 200 ms
 */
#define HASH_LOOPS (3 * 251)
#define ITERATIONS 181474 /* Just for auto tune */
#define LOOP_COUNT (((currentsalt.iterations - 1 + HASH_LOOPS - 1)) / HASH_LOOPS)
#define STEP 0
#define SEED 128

static const char *warn[] = {
	"P xfer: ", ", init: ", ", loop: ", ", final: ", ", res xfer: "
};

static int split_events[] = { 2, -1, -1 };

// This file contains auto-tuning routine(s). It has to be included after the format definitions.
#include "opencl_autotune.h"
#include "memdbg.h"

/* ------- Helper functions ------- */
static size_t get_task_max_work_group_size()
{
	size_t s;

	s = autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_init);
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_loop));
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_final));
	return s;
}

#if 0
struct fmt_main *me;
#endif

static void create_clobj(size_t gws, struct fmt_main *self)
{
	gws *= ocl_v_width;
	key_buf_size = PLAINTEXT_LENGTH * gws;

	/// Allocate memory
	inbuffer = mem_calloc(1, key_buf_size);
	output = mem_alloc(sizeof(pbkdf2_out) * gws);
	cracked = mem_calloc(1, cracked_size);

	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, key_buf_size, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem in");
	mem_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, sizeof(pbkdf2_salt), NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem setting");
	mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, sizeof(pbkdf2_out) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem out");
	mem_state = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(pbkdf2_state) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem_state");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 1, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_loop, 0, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 0, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");
}

static void release_clobj(void)
{
	if (cracked) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_state), "Release mem state");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(output);
		MEM_FREE(cracked);
	}
}

static void done(void)
{
	if (autotuned) {
		release_clobj();

		HANDLE_CLERROR(clReleaseKernel(pbkdf2_init), "Release kernel");
		HANDLE_CLERROR(clReleaseKernel(pbkdf2_loop), "Release kernel");
		HANDLE_CLERROR(clReleaseKernel(pbkdf2_final), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");

		autotuned--;
	}
}

static void init(struct fmt_main *_self)
{
	static char valgo[sizeof(ALGORITHM_NAME) + 8] = "";

	self = _self;

	opencl_prepare_dev(gpu_id);
	/* VLIW5 does better with just 2x vectors due to GPR pressure */
	if (!options.v_width && amd_vliw5(device_info[gpu_id]))
		ocl_v_width = 2;
	else
		ocl_v_width = opencl_get_vector_width(gpu_id, sizeof(cl_int));

	if (ocl_v_width > 1) {
		/* Run vectorized kernel */
		snprintf(valgo, sizeof(valgo),
		         OCL_ALGORITHM_NAME " %ux" CPU_ALGORITHM_NAME, ocl_v_width);
		self->params.algorithm_name = valgo;
	}
}

static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts),
		         "-DHASH_LOOPS=%u -DOUTLEN=%u "
		         "-DPLAINTEXT_LENGTH=%u -DV_WIDTH=%u",
		         HASH_LOOPS, OUTLEN, PLAINTEXT_LENGTH, ocl_v_width);
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_kernel.cl", gpu_id, build_opts);

		pbkdf2_init = clCreateKernel(program[gpu_id], "pbkdf2_init", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");
		crypt_kernel = pbkdf2_loop = clCreateKernel(program[gpu_id], "pbkdf2_loop", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");
		pbkdf2_final = clCreateKernel(program[gpu_id], "pbkdf2_final", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 2 * HASH_LOOPS, split_events, warn, 2, self,
		                       create_clobj, release_clobj,
		                       ocl_v_width * sizeof(pbkdf2_state), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 2 * (ITERATIONS - 1) + 4, 0,
		             (cpu(device_info[gpu_id]) ? 1000000000 : 10000000000ULL));
	}
}

static void set_salt(void *salt)
{
	cur_salt = (encfs_common_custom_salt*)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->saltLen);
	currentsalt.length = cur_salt->saltLen;
	currentsalt.iterations = cur_salt->iterations;
	currentsalt.outlen = cur_salt->keySize + cur_salt->ivLength;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0, sizeof(pbkdf2_salt), &currentsalt, 0, NULL, NULL), "Copy salt to gpu");
}

static void clear_keys(void)
{
	memset(inbuffer, 0, key_buf_size);
}

static void set_key(char *key, int index)
{
	int i;
	int length = strlen(key);

	for (i = 0; i < length; i++)
		((char*)inbuffer)[GETPOS(i, index)] = key[i];

	new_keys = 1;
}

static char* get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	int i = 0;

	while (i < PLAINTEXT_LENGTH &&
	       (ret[i] = ((char*)inbuffer)[GETPOS(i, index)]))
		i++;
	ret[i] = 0;

	return ret;
}

static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int i, j, index;
	size_t scalar_gws;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER_VW(count, local_work_size);
	scalar_gws = global_work_size * ocl_v_width;

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/// Copy data to gpu
	if (ocl_autotune_running || new_keys) {
		BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, key_buf_size, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu");
		new_keys = 0;
	}

	/// Run kernels
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_init, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run initial kernel");

	for (j = 0; j < (ocl_autotune_running ? 1 : (currentsalt.outlen + 19) / 20); j++) {
		for (i = 0; i < (ocl_autotune_running ? 1 : LOOP_COUNT); i++) {
			BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_loop, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[2]), "Run loop kernel");
			BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel");
			opencl_process_event();
		}

		BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_final, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[3]), "Run intermediate kernel");
	}

	/// Read the result back
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, sizeof(pbkdf2_out) * scalar_gws, output, 0, NULL, multi_profilingEvent[4]), "Copy result back");

	if (!ocl_autotune_running) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
		for (index = 0; index < count; index++) {
			int i;
			unsigned char master[MAX_KEYLENGTH + MAX_IVLENGTH];
			unsigned char tmpBuf[cur_salt->dataLen];
			unsigned int checksum = 0;
			unsigned int checksum2 = 0;

			memcpy(master, output[index].dk, cur_salt->keySize + cur_salt->ivLength);

			// First N bytes are checksum bytes.
			for (i = 0; i < KEY_CHECKSUM_BYTES; ++i)
				checksum = (checksum << 8) | (unsigned int)cur_salt->data[i];
			memcpy(tmpBuf, cur_salt->data + KEY_CHECKSUM_BYTES, cur_salt->keySize + cur_salt->ivLength);
			encfs_common_streamDecode(cur_salt, tmpBuf, cur_salt->keySize + cur_salt->ivLength, checksum, master);
			checksum2 = encfs_common_MAC_32(cur_salt, tmpBuf, cur_salt->keySize + cur_salt->ivLength, master);

			if (checksum2 == checksum) {
				cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
				any_cracked |= 1;
			}
		}
	}

	return count;
}

static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

struct fmt_main fmt_opencl_encfs = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ "iteration count" },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		encfs_common_valid,
		fmt_default_split,
		fmt_default_binary,
		encfs_common_get_salt,
		{ encfs_common_iteration_count },
		fmt_default_source,
		{ fmt_default_binary_hash },
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		clear_keys,
		crypt_all,
		{ fmt_default_get_hash },
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
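/*
 * A stand-alone restatement of the CPU-side verification in crypt_all()
 * above, assuming the helpers and salt layout used by the plugin: the first
 * KEY_CHECKSUM_BYTES of the stored blob are a big-endian checksum over the
 * wrapped volume key that follows, and a candidate password is correct when
 * decrypting with its PBKDF2-derived key reproduces that checksum. The
 * function name encfs_key_matches is hypothetical, for illustration only.
 */
static int encfs_key_matches(encfs_common_custom_salt *s, unsigned char *master)
{
	const int keylen = s->keySize + s->ivLength;   /* volume key + IV */
	unsigned char tmpBuf[MAX_KEYLENGTH + MAX_IVLENGTH];
	unsigned int checksum = 0;
	int i;

	/* stored checksum, big-endian */
	for (i = 0; i < KEY_CHECKSUM_BYTES; ++i)
		checksum = (checksum << 8) | (unsigned int)s->data[i];

	/* decrypt the wrapped volume key with the candidate master key */
	memcpy(tmpBuf, s->data + KEY_CHECKSUM_BYTES, keylen);
	encfs_common_streamDecode(s, tmpBuf, keylen, checksum, master);

	/* recompute the MAC over the decrypted key; equality means a match */
	return encfs_common_MAC_32(s, tmpBuf, keylen, master) == checksum;
}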
ast-dump-openmp-target-parallel-for-simd.c
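// Clang lit test: dumps the AST for '#pragma omp target parallel for simd'
// over single, nested, and collapse(1)/collapse(2) loop nests, and checks the
// nesting of the implicitly generated captured regions with the FileCheck
// lines below.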
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp target parallel for simd for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp target parallel for simd for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp target parallel for simd collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp target parallel for simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp target parallel for simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-parallel-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:4:1, col:37> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:10:1, col:37> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7> // 
CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 
'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | 
`-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit 
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:17:1, col:49> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:38, col:48> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:47> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:47> 'int' 1 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // 
CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} 
<col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:24:1, col:49> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:38, col:48> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:47> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:47> 'int' 2 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | 
`-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | 
`-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPTargetParallelForSimdDirective {{.*}} <line:31:1, col:49> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:38, col:48> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:47> 'int' // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:47> 'int' 2 // CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | 
`-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt 
{{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // 
CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> 
openmp_structured_block // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
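// For reference, a minimal sketch of the function these CHECK lines appear to
// exercise, reconstructed from the <line:31>..<line:35> source locations in
// the dump above. The function name is an assumption; collapse(2) is inferred
// from the innermost ForStmt (line 34) being marked openmp_structured_block,
// i.e. only the two outer loops are associated with the directive while the
// third loop forms its body. Each nested loop legally redeclares 'i' in its
// own scope, matching the three distinct VarDecls captured in the dump.
void test_five(int x, int y, int z) {
#pragma omp target parallel for simd collapse(2)
  for (int i = 0; i < x; i++)     // bound 'x' captured (line 32)
    for (int i = 0; i < y; i++)   // bound 'y' captured (line 33)
      for (int i = 0; i < z; i++) // bound 'z' captured (line 34)
        ;                         // NullStmt body (line 35)
}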
GB_binop__pow_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pow_fp64) // A.*B function (eWiseMult): GB (_AemultB_08__pow_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__pow_fp64) // A.*B function (eWiseMult): GB (_AemultB_04__pow_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_fp64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pow_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__pow_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_fp64) // C=scalar+B GB (_bind1st__pow_fp64) // C=scalar+B' GB (_bind1st_tran__pow_fp64) // C=A+scalar GB (_bind2nd__pow_fp64) // C=A'+scalar GB (_bind2nd_tran__pow_fp64) // C type: double // A type: double // A pattern? 0 // B type: double // B pattern? 0 // BinaryOp: cij = GB_pow (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_pow (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_FP64 || GxB_NO_POW_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pow_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pow_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pow_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pow_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; double alpha_scalar ; double beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((double *) alpha_scalar_in)) ; beta_scalar = (*((double 
*) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__pow_fp64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__pow_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__pow_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__pow_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__pow_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = GB_pow (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__pow_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = GB_pow (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow (x, aij) ; \ } GrB_Info GB (_bind1st_tran__pow_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
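//------------------------------------------------------------------------------
// standalone sketch of the bind2nd pattern used above
//------------------------------------------------------------------------------

// This mirrors the loop in GB (_bind2nd__pow_fp64) with the GraphBLAS bitmap
// (Ab) and iso-value machinery stripped out: Cx [p] = pow (Ax [p], y) over a
// dense array. apply_pow_bind2nd is an illustrative name, and GB_pow is
// assumed to reduce to the C library pow for built-in fp64 inputs.

#include <math.h>
#include <stdint.h>

static void apply_pow_bind2nd
(
    double *Cx,         // output array; may alias Ax, as in the kernel above
    const double *Ax,   // input array
    double y,           // scalar bound as the 2nd operand
    int64_t anz,        // number of entries
    int nthreads
)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // cij = GB_pow (aij, bij) with bij bound to the scalar y
        Cx [p] = pow (Ax [p], y) ;
    }
}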
NeighborhoodGraph.h
#ifndef _SPTAG_COMMON_NG_H_ #define _SPTAG_COMMON_NG_H_ #include "../VectorIndex.h" #include "CommonUtils.h" #include "Dataset.h" #include "FineGrainedLock.h" #include "QueryResultSet.h" namespace SPTAG { namespace COMMON { class NeighborhoodGraph { public: NeighborhoodGraph(): m_iTPTNumber(32), m_iTPTLeafSize(2000), m_iSamples(1000), m_numTopDimensionTPTSplit(5), m_iNeighborhoodSize(32), m_iNeighborhoodScale(16), m_iCEFScale(4), m_iRefineIter(0), m_iCEF(1000), m_iMaxCheckForRefineGraph(10000) {} ~NeighborhoodGraph() {} virtual void InsertNeighbors(VectorIndex* index, const int node, int insertNode, float insertDist) = 0; virtual void RebuildNeighbors(VectorIndex* index, const int node, int* nodes, const BasicResult* queryResults, const int numResults) = 0; virtual float GraphAccuracyEstimation(VectorIndex* index, const int samples, const std::unordered_map<int, int>* idmap = nullptr) = 0; template <typename T> void BuildGraph(VectorIndex* index, const std::unordered_map<int, int>* idmap = nullptr) { std::cout << "build RNG graph!" << std::endl; m_iGraphSize = index->GetNumSamples(); m_iNeighborhoodSize = m_iNeighborhoodSize * m_iNeighborhoodScale; m_pNeighborhoodGraph.Initialize(m_iGraphSize, m_iNeighborhoodSize); m_dataUpdateLock.resize(m_iGraphSize); if (m_iGraphSize < 1000) { RefineGraph<T>(index, idmap); std::cout << "Build RNG Graph end!" << std::endl; return; } { COMMON::Dataset<float> NeighborhoodDists(m_iGraphSize, m_iNeighborhoodSize); std::vector<std::vector<int>> TptreeDataIndices(m_iTPTNumber, std::vector<int>(m_iGraphSize)); std::vector<std::vector<std::pair<int, int>>> TptreeLeafNodes(m_iTPTNumber, std::vector<std::pair<int, int>>()); for (int i = 0; i < m_iGraphSize; i++) for (int j = 0; j < m_iNeighborhoodSize; j++) (NeighborhoodDists)[i][j] = MaxDist; std::cout << "Parallel TpTree Partition begin " << std::endl; #pragma omp parallel for schedule(dynamic) for (int i = 0; i < m_iTPTNumber; i++) { Sleep(i * 100); std::srand(clock()); for (int j = 0; j < m_iGraphSize; j++) TptreeDataIndices[i][j] = j; std::random_shuffle(TptreeDataIndices[i].begin(), TptreeDataIndices[i].end()); PartitionByTptree<T>(index, TptreeDataIndices[i], 0, m_iGraphSize - 1, TptreeLeafNodes[i]); std::cout << "Finish Getting Leaves for Tree " << i << std::endl; } std::cout << "Parallel TpTree Partition done" << std::endl; for (int i = 0; i < m_iTPTNumber; i++) { #pragma omp parallel for schedule(dynamic) for (int j = 0; j < TptreeLeafNodes[i].size(); j++) { int start_index = TptreeLeafNodes[i][j].first; int end_index = TptreeLeafNodes[i][j].second; if (omp_get_thread_num() == 0) std::cout << "\rProcessing Tree " << i << ' ' << j * 100 / TptreeLeafNodes[i].size() << '%'; for (int x = start_index; x < end_index; x++) { for (int y = x + 1; y <= end_index; y++) { int p1 = TptreeDataIndices[i][x]; int p2 = TptreeDataIndices[i][y]; float dist = index->ComputeDistance(index->GetSample(p1), index->GetSample(p2)); if (idmap != nullptr) { p1 = (idmap->find(p1) == idmap->end()) ? p1 : idmap->at(p1); p2 = (idmap->find(p2) == idmap->end()) ? 
p2 : idmap->at(p2); } COMMON::Utils::AddNeighbor(p2, dist, (m_pNeighborhoodGraph)[p1], (NeighborhoodDists)[p1], m_iNeighborhoodSize); COMMON::Utils::AddNeighbor(p1, dist, (m_pNeighborhoodGraph)[p2], (NeighborhoodDists)[p2], m_iNeighborhoodSize); } } } TptreeDataIndices[i].clear(); TptreeLeafNodes[i].clear(); std::cout << std::endl; } TptreeDataIndices.clear(); TptreeLeafNodes.clear(); } if (m_iMaxCheckForRefineGraph > 0) { RefineGraph<T>(index, idmap); } } template <typename T> void RefineGraph(VectorIndex* index, const std::unordered_map<int, int>* idmap = nullptr) { m_iCEF *= m_iCEFScale; m_iMaxCheckForRefineGraph *= m_iCEFScale; #pragma omp parallel for schedule(dynamic) for (int i = 0; i < m_iGraphSize; i++) { RefineNode<T>(index, i, false); } std::cout << "Refine RNG, graph acc:" << GraphAccuracyEstimation(index, 100, idmap) << std::endl; m_iCEF /= m_iCEFScale; m_iMaxCheckForRefineGraph /= m_iCEFScale; m_iNeighborhoodSize /= m_iNeighborhoodScale; #pragma omp parallel for schedule(dynamic) for (int i = 0; i < m_iGraphSize; i++) { RefineNode<T>(index, i, false); } std::cout << "Refine RNG, graph acc:" << GraphAccuracyEstimation(index, 100, idmap) << std::endl; if (idmap != nullptr) { for (auto iter = idmap->begin(); iter != idmap->end(); iter++) if (iter->first < 0) { m_pNeighborhoodGraph[-1 - iter->first][m_iNeighborhoodSize - 1] = -2 - iter->second; } } } template <typename T> ErrorCode RefineGraph(VectorIndex* index, std::vector<int>& indices, std::vector<int>& reverseIndices, std::string graphFileName, const std::unordered_map<int, int>* idmap = nullptr) { int R = (int)indices.size(); #pragma omp parallel for schedule(dynamic) for (int i = 0; i < R; i++) { RefineNode<T>(index, indices[i], false); int* nodes = m_pNeighborhoodGraph[indices[i]]; for (int j = 0; j < m_iNeighborhoodSize; j++) { if (nodes[j] < 0) nodes[j] = -1; else nodes[j] = reverseIndices[nodes[j]]; } if (idmap == nullptr || idmap->find(-1 - indices[i]) == idmap->end()) continue; nodes[m_iNeighborhoodSize - 1] = -2 - idmap->at(-1 - indices[i]); } std::ofstream graphOut(graphFileName, std::ios::binary); if (!graphOut.is_open()) return ErrorCode::FailedCreateFile; graphOut.write((char*)&R, sizeof(int)); graphOut.write((char*)&m_iNeighborhoodSize, sizeof(int)); for (int i = 0; i < R; i++) { graphOut.write((char*)m_pNeighborhoodGraph[indices[i]], sizeof(int) * m_iNeighborhoodSize); } graphOut.close(); return ErrorCode::Success; } template <typename T> void RefineNode(VectorIndex* index, const int node, bool updateNeighbors) { COMMON::QueryResultSet<T> query((const T*)index->GetSample(node), m_iCEF + 1); index->SearchIndex(query); RebuildNeighbors(index, node, m_pNeighborhoodGraph[node], query.GetResults(), m_iCEF + 1); if (updateNeighbors) { // update neighbors for (int j = 0; j <= m_iCEF; j++) { BasicResult* item = query.GetResult(j); if (item->VID < 0) break; if (item->VID == node) continue; std::lock_guard<std::mutex> lock(m_dataUpdateLock[item->VID]); InsertNeighbors(index, item->VID, node, item->Dist); } } } template <typename T> void PartitionByTptree(VectorIndex* index, std::vector<int>& indices, const int first, const int last, std::vector<std::pair<int, int>> & leaves) { if (last - first <= m_iTPTLeafSize) { leaves.push_back(std::make_pair(first, last)); } else { std::vector<float> Mean(index->GetFeatureDim(), 0); int iIteration = 100; int end = min(first + m_iSamples, last); int count = end - first + 1; // calculate the mean of each dimension for (int j = first; j <= end; j++) { const T* v = (const 
T*)index->GetSample(indices[j]); for (int k = 0; k < index->GetFeatureDim(); k++) { Mean[k] += v[k]; } } for (int k = 0; k < index->GetFeatureDim(); k++) { Mean[k] /= count; } std::vector<BasicResult> Variance; Variance.reserve(index->GetFeatureDim()); for (int j = 0; j < index->GetFeatureDim(); j++) { Variance.push_back(BasicResult(j, 0)); } // calculate the variance of each dimension for (int j = first; j <= end; j++) { const T* v = (const T*)index->GetSample(indices[j]); for (int k = 0; k < index->GetFeatureDim(); k++) { float dist = v[k] - Mean[k]; Variance[k].Dist += dist*dist; } } std::sort(Variance.begin(), Variance.end(), COMMON::Compare); std::vector<int> indexs(m_numTopDimensionTPTSplit); std::vector<float> weight(m_numTopDimensionTPTSplit), bestweight(m_numTopDimensionTPTSplit); float bestvariance = Variance[index->GetFeatureDim() - 1].Dist; for (int i = 0; i < m_numTopDimensionTPTSplit; i++) { indexs[i] = Variance[index->GetFeatureDim() - 1 - i].VID; bestweight[i] = 0; } bestweight[0] = 1; float bestmean = Mean[indexs[0]]; std::vector<float> Val(count); for (int i = 0; i < iIteration; i++) { float sumweight = 0; for (int j = 0; j < m_numTopDimensionTPTSplit; j++) { weight[j] = float(rand() % 10000) / 5000.0f - 1.0f; sumweight += weight[j] * weight[j]; } sumweight = sqrt(sumweight); for (int j = 0; j < m_numTopDimensionTPTSplit; j++) { weight[j] /= sumweight; } float mean = 0; for (int j = 0; j < count; j++) { Val[j] = 0; const T* v = (const T*)index->GetSample(indices[first + j]); for (int k = 0; k < m_numTopDimensionTPTSplit; k++) { Val[j] += weight[k] * v[indexs[k]]; } mean += Val[j]; } mean /= count; float var = 0; for (int j = 0; j < count; j++) { float dist = Val[j] - mean; var += dist * dist; } if (var > bestvariance) { bestvariance = var; bestmean = mean; for (int j = 0; j < m_numTopDimensionTPTSplit; j++) { bestweight[j] = weight[j]; } } } int i = first; int j = last; // decide which child each point belongs to while (i <= j) { float val = 0; const T* v = (const T*)index->GetSample(indices[i]); for (int k = 0; k < m_numTopDimensionTPTSplit; k++) { val += bestweight[k] * v[indexs[k]]; } if (val < bestmean) { i++; } else { std::swap(indices[i], indices[j]); j--; } } // if all the points in the node are equal, equally split the node into 2 if ((i == first) || (i == last + 1)) { i = (first + last + 1) / 2; } Mean.clear(); Variance.clear(); Val.clear(); indexs.clear(); weight.clear(); bestweight.clear(); PartitionByTptree<T>(index, indices, first, i - 1, leaves); PartitionByTptree<T>(index, indices, i, last, leaves); } } bool LoadGraph(std::string sGraphFilename) { std::cout << "Load Graph From " << sGraphFilename << std::endl; FILE * fp = fopen(sGraphFilename.c_str(), "rb"); if (fp == NULL) return false; fread(&m_iGraphSize, sizeof(int), 1, fp); fread(&m_iNeighborhoodSize, sizeof(int), 1, fp); m_pNeighborhoodGraph.Initialize(m_iGraphSize, m_iNeighborhoodSize); m_dataUpdateLock.resize(m_iGraphSize); for (int i = 0; i < m_iGraphSize; i++) { fread((m_pNeighborhoodGraph)[i], sizeof(int), m_iNeighborhoodSize, fp); } fclose(fp); std::cout << "Load Graph (" << m_iGraphSize << "," << m_iNeighborhoodSize << ") Finish!"
<< std::endl; return true; } bool SetGraph(char* pGraphMemFile) { m_iGraphSize = *((int*)pGraphMemFile); pGraphMemFile += sizeof(int); m_iNeighborhoodSize = *((int*)pGraphMemFile); pGraphMemFile += sizeof(int); m_pNeighborhoodGraph.Initialize(m_iGraphSize, m_iNeighborhoodSize, (int*)pGraphMemFile); m_dataUpdateLock.resize(m_iGraphSize); return true; } bool SaveGraph(std::string sGraphFilename) const { std::cout << "Save Graph To " << sGraphFilename << std::endl; FILE *fp = fopen(sGraphFilename.c_str(), "wb"); if (fp == NULL) return false; fwrite(&m_iGraphSize, sizeof(int), 1, fp); fwrite(&m_iNeighborhoodSize, sizeof(int), 1, fp); for (int i = 0; i < m_iGraphSize; i++) { fwrite((m_pNeighborhoodGraph)[i], sizeof(int), m_iNeighborhoodSize, fp); } fclose(fp); std::cout << "Save Graph (" << m_iGraphSize << "," << m_iNeighborhoodSize << ") Finish!" << std::endl; return true; } inline void AddBatch(int num) { m_pNeighborhoodGraph.AddBatch(num); m_iGraphSize += num; m_dataUpdateLock.resize(m_iGraphSize); } inline int* operator[](int index) { return m_pNeighborhoodGraph[index]; } inline const int* operator[](int index) const { return m_pNeighborhoodGraph[index]; } inline void SetR(int rows) { m_pNeighborhoodGraph.SetR(rows); m_iGraphSize = rows; m_dataUpdateLock.resize(m_iGraphSize); } inline int R() const { return m_iGraphSize; } static std::shared_ptr<NeighborhoodGraph> CreateInstance(std::string type); protected: // Graph structure int m_iGraphSize; COMMON::Dataset<int> m_pNeighborhoodGraph; COMMON::FineGrainedLock m_dataUpdateLock; // protect one row of the graph public: int m_iTPTNumber, m_iTPTLeafSize, m_iSamples, m_numTopDimensionTPTSplit; int m_iNeighborhoodSize, m_iNeighborhoodScale, m_iCEFScale, m_iRefineIter, m_iCEF, m_iMaxCheckForRefineGraph; }; } } #endif
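// A standalone C sketch of the hyperplane split performed inside
// PartitionByTptree above: each point is projected onto the best weight
// vector found over the top-variance dimensions and moved to the left or
// right of the mean projection. split_by_projection, points, and dim are
// illustrative names, not part of the SPTAG API.
static int split_by_projection(const float* points, int dim, int* indices,
                               int first, int last, const int* dims,
                               const float* weights, int ndims, float mean)
{
    int i = first, j = last;
    while (i <= j) {
        // project point indices[i] onto the split direction
        const float* v = points + (long long)indices[i] * dim;
        float val = 0.0f;
        for (int k = 0; k < ndims; k++) val += weights[k] * v[dims[k]];
        if (val < mean) { i++; } // stays in the left child
        else { int t = indices[i]; indices[i] = indices[j]; indices[j] = t; j--; }
    }
    return i; // first index of the right child; recurse on [first, i-1] and [i, last]
}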
flow.c
#include "flow.h" #include "../../comms.h" #include "../../omp4/shared.h" #include "../flow_interface.h" #include <math.h> #include <stdio.h> #include <stdlib.h> // Solve a single timestep on the given mesh void solve_hydro_2d(Mesh* mesh, int tt, double* pressure, double* density0, double* density_old, double* energy, double* velocity_x, double* velocity_y, double* momentum_x, double* momentum_y, double* Qxx, double* Qyy, double* mass_flux_x, double* mass_flux_y, double* momentum_x_flux_x, double* momentum_x_flux_y, double* momentum_y_flux_x, double* momentum_y_flux_y, double* reduce_array) { if (mesh->rank == MASTER) { printf("Timestep: %.12e\n", mesh->dt); } equation_of_state(mesh->local_nx, mesh->local_ny, pressure, density0, energy); pressure_acceleration(mesh->local_nx, mesh->local_ny, mesh, mesh->dt, momentum_x, momentum_y, velocity_x, velocity_y, pressure, density0, mesh->edgedx, mesh->edgedy, mesh->celldx, mesh->celldy); artificial_viscosity(mesh->local_nx, mesh->local_ny, mesh, mesh->dt, Qxx, Qyy, velocity_x, velocity_y, momentum_x, momentum_y, density0, mesh->edgedx, mesh->edgedy, mesh->celldx, mesh->celldy); shock_heating_and_work(mesh->local_nx, mesh->local_ny, mesh, mesh->dt_h, energy, pressure, velocity_x, velocity_y, density0, Qxx, Qyy, mesh->celldx, mesh->celldy); set_timestep(mesh->local_nx, mesh->local_ny, Qxx, Qyy, density0, energy, mesh, reduce_array, tt == 0, mesh->celldx, mesh->celldy); // Perform advection advect_mass_and_energy(mesh->local_nx, mesh->local_ny, mesh, tt, mesh->dt, mesh->dt_h, density0, energy, density_old, mass_flux_x, mass_flux_y, momentum_x_flux_x, momentum_x_flux_y, velocity_x, velocity_y, mesh->edgedx, mesh->edgedy, mesh->celldx, mesh->celldy); advect_momentum(mesh->local_nx, mesh->local_ny, tt, mesh, mesh->dt_h, mesh->dt, velocity_x, velocity_y, momentum_x_flux_x, momentum_x_flux_y, momentum_y_flux_x, momentum_y_flux_y, momentum_x, momentum_y, density0, mass_flux_x, mass_flux_y, mesh->edgedx, mesh->edgedy, mesh->celldx, mesh->celldy); } // Calculate the pressure from GAMma law equation of state void equation_of_state(const int nx, const int ny, double* pressure, const double* density0, const double* energy) { START_PROFILING(&compute_profile); #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = 0; ii < ny; ++ii) { for (int jj = 0; jj < nx; ++jj) { // Only invoke simple GAMma law at the moment pressure[(ii * nx + jj)] = (GAM - 1.0) * density0[(ii * nx + jj)] * energy[(ii * nx + jj)]; } } STOP_PROFILING(&compute_profile, __func__); } // Calculates the timestep from the current state void set_timestep(const int nx, const int ny, double* Qxx, double* Qyy, const double* density0, const double* energy, Mesh* mesh, double* reduce_array, const int first_step, const double* celldx, const double* celldy) { const int pad = mesh->pad; double local_min_dt = mesh->max_dt; START_PROFILING(&compute_profile); // Check the minimum timestep from the sound speed in the nx and ny directions #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) num_teams(14) \ num_threads(1024) map(tofrom : local_min_dt) reduction(min : local_min_dt) #else #pragma omp target teams distribute parallel for map(tofrom : local_min_dt) \ reduction(min : local_min_dt) #endif for (int ii = pad; ii < ny - pad; ++ii) { for (int jj = pad; jj < nx - pad; ++jj) { // Constrain based on the sound speed within the system const double c_s = sqrt(GAM * (GAM - 1.0) * energy[(ii * nx + 
jj)]); const double thread_min_dt_x = celldx[jj] / sqrt(c_s * c_s + 2.0 * Qxx[(ii * nx + jj)] / density0[(ii * nx + jj)]); const double thread_min_dt_y = celldy[ii] / sqrt(c_s * c_s + 2.0 * Qyy[(ii * nx + jj)] / density0[(ii * nx + jj)]); const double thread_min_dt = min(thread_min_dt_x, thread_min_dt_y); local_min_dt = min(local_min_dt, thread_min_dt); } } STOP_PROFILING(&compute_profile, __func__); double global_min_dt = reduce_all_min(local_min_dt); // Ensure that the timestep does not jump too far from one step to the next const double final_min_dt = min(global_min_dt, C_M * mesh->dt_h); mesh->dt = 0.5 * (C_T * final_min_dt + mesh->dt_h); mesh->dt_h = (first_step) ? mesh->dt : C_T * final_min_dt; } // Calculate change in momentum caused by pressure gradients, and then extract // the velocities using edge centered density approximations void pressure_acceleration(const int nx, const int ny, Mesh* mesh, const double dt, double* momentum_x, double* momentum_y, double* velocity_x, double* velocity_y, const double* pressure, const double* density0, const double* edgedx, const double* edgedy, const double* celldx, const double* celldy) { START_PROFILING(&compute_profile); const int pad = mesh->pad; #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < (ny + 1) - pad; ++ii) { for (int jj = pad; jj < (nx + 1) - pad; ++jj) { // Update the momenta using the pressure gradients momentum_x[(ii * (nx + 1) + jj)] -= dt * (pressure[(ii * nx + jj)] - pressure[(ii * nx + jj) - 1]) / edgedx[jj]; momentum_y[(ii * nx + jj)] -= dt * (pressure[(ii * nx + jj)] - pressure[(ii * nx + jj) - nx]) / edgedy[ii]; // Calculate the zone edge centered density const double density_edge_x = (density0[(ii * nx + jj)] * celldx[jj] * celldy[ii] + density0[(ii * nx + jj) - 1] * celldx[jj - 1] * celldy[ii]) / (2.0 * edgedx[jj] * celldy[ii]); const double density_edge_y = (density0[(ii * nx + jj)] * celldx[jj] * celldy[ii] + density0[(ii * nx + jj) - nx] * celldx[jj] * celldy[ii - 1]) / (2.0 * celldx[jj] * edgedy[ii]); // Find the velocities from the momenta and edge centered mass densities velocity_x[(ii * (nx + 1) + jj)] = (density_edge_x == 0.0) ? 0.0 : momentum_x[(ii * (nx + 1) + jj)] / density_edge_x; velocity_y[(ii * nx + jj)] = (density_edge_y == 0.0) ? 
0.0 : momentum_y[(ii * nx + jj)] / density_edge_y; } } STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx + 1, ny, mesh, velocity_x, INVERT_X, PACK); handle_boundary_2d(nx, ny + 1, mesh, velocity_y, INVERT_Y, PACK); } void artificial_viscosity(const int nx, const int ny, Mesh* mesh, const double dt, double* Qxx, double* Qyy, double* velocity_x, double* velocity_y, double* momentum_x, double* momentum_y, const double* density0, const double* edgedx, const double* edgedy, const double* celldx, const double* celldy) { START_PROFILING(&compute_profile); const int pad = mesh->pad; // Calculate the artificial viscous stresses // PLPC Hydro Paper #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < ny - pad; ++ii) { for (int jj = pad; jj < nx - pad; ++jj) { const double u_i = min(0.0, velocity_x[(ii * (nx + 1) + jj) + 1] - velocity_x[(ii * (nx + 1) + jj)]); const double u_ii = 0.5 * (fabs(min(0.0, (velocity_x[(ii * (nx + 1) + jj) + 2] - velocity_x[(ii * (nx + 1) + jj) + 1])) - min(0.0, (velocity_x[(ii * (nx + 1) + jj) + 1] - velocity_x[(ii * (nx + 1) + jj)]))) + fabs(min(0.0, (velocity_x[(ii * (nx + 1) + jj) + 1] - velocity_x[(ii * (nx + 1) + jj)])) - min(0.0, (velocity_x[(ii * (nx + 1) + jj)] - velocity_x[(ii * (nx + 1) + jj) - 1])))); const double v_i = min(0.0, velocity_y[(ii * nx + jj) + nx] - velocity_y[(ii * nx + jj)]); const double v_ii = 0.5 * (fabs(min(0.0, (velocity_y[(ii * nx + jj) + 2 * nx] - velocity_y[(ii * nx + jj) + nx])) - min(0.0, (velocity_y[(ii * nx + jj) + nx] - velocity_y[(ii * nx + jj)]))) + fabs(min(0.0, (velocity_y[(ii * nx + jj) + nx] - velocity_y[(ii * nx + jj)])) - min(0.0, (velocity_y[(ii * nx + jj)] - velocity_y[(ii * nx + jj) - nx])))); Qxx[(ii * nx + jj)] = -C_Q * density0[(ii * nx + jj)] * u_i * u_ii; Qyy[(ii * nx + jj)] = -C_Q * density0[(ii * nx + jj)] * v_i * v_ii; } } STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx, ny, mesh, Qxx, NO_INVERT, PACK); handle_boundary_2d(nx, ny, mesh, Qyy, NO_INVERT, PACK); START_PROFILING(&compute_profile); // Update the momenta by the artificial viscous stresses #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < (ny + 1) - pad; ++ii) { for (int jj = pad; jj < (nx + 1) - pad; ++jj) { momentum_x[(ii * (nx + 1) + jj)] -= dt * (Qxx[(ii * nx + jj)] - Qxx[(ii * nx + jj) - 1]) / celldx[jj]; momentum_y[(ii * nx + jj)] -= dt * (Qyy[(ii * nx + jj)] - Qyy[(ii * nx + jj) - nx]) / celldy[ii]; // Calculate the zone edge centered density const double density_edge_x = (density0[(ii * nx + jj)] * celldx[jj] * celldy[ii] + density0[(ii * nx + jj) - 1] * celldx[jj - 1] * celldy[ii]) / (2.0 * edgedx[jj] * celldy[ii]); const double density_edge_y = (density0[(ii * nx + jj)] * celldx[jj] * celldy[ii] + density0[(ii * nx + jj) - nx] * celldx[jj] * celldy[ii - 1]) / (2.0 * celldx[jj] * edgedy[ii]); // Find the velocities from the momenta and edge centered mass densities velocity_x[(ii * (nx + 1) + jj)] = (density_edge_x == 0.0) ? 0.0 : momentum_x[(ii * (nx + 1) + jj)] / density_edge_x; velocity_y[(ii * nx + jj)] = (density_edge_y == 0.0) ? 
0.0 : momentum_y[(ii * nx + jj)] / density_edge_y; } } STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx + 1, ny, mesh, velocity_x, INVERT_X, PACK); handle_boundary_2d(nx, ny + 1, mesh, velocity_y, INVERT_Y, PACK); } // Calculates the work done due to forces within the element void shock_heating_and_work(const int nx, const int ny, Mesh* mesh, const double dt_h, double* energy, const double* pressure, const double* velocity_x, const double* velocity_y, const double* density0, const double* Qxx, const double* Qyy, const double* celldx, const double* celldy) { START_PROFILING(&compute_profile); const int pad = mesh->pad; #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < ny - pad; ++ii) { for (int jj = pad; jj < nx - pad; ++jj) { const double div_vel_x = (velocity_x[(ii * (nx + 1) + jj) + 1] - velocity_x[(ii * (nx + 1) + jj)]) / celldx[jj]; const double div_vel_y = (velocity_y[(ii * nx + jj) + nx] - velocity_y[(ii * nx + jj)]) / celldy[ii]; const double div_vel_dt = (div_vel_x + div_vel_y) * dt_h; const double e_q = energy[(ii * nx + jj)] - dt_h * (Qxx[(ii * nx + jj)] * div_vel_x + Qyy[(ii * nx + jj)] * div_vel_y) / density0[(ii * nx + jj)]; /// A working formulation that is second order in time for Pressure!? const double density_c = density0[(ii * nx + jj)] / (1.0 + div_vel_dt); const double e_c = e_q - (pressure[(ii * nx + jj)] * div_vel_dt) / density0[(ii * nx + jj)]; const double work = 0.5 * div_vel_dt * (pressure[(ii * nx + jj)] + (GAM - 1.0) * e_c * density_c) / density0[(ii * nx + jj)]; energy[(ii * nx + jj)] = (density0[(ii * nx + jj)] == 0.0) ? 0.0 : e_q - work; } } STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx, ny, mesh, energy, NO_INVERT, PACK); } // Perform advection with monotonicity improvement void advect_mass_and_energy(const int nx, const int ny, Mesh* mesh, const int tt, const double dt, const double dt_h, double* density0, double* energy, double* density_old, double* mass_flux_x, double* mass_flux_y, double* eF_x, double* eF_y, const double* velocity_x, const double* velocity_y, const double* edgedx, const double* edgedy, const double* celldx, const double* celldy) { START_PROFILING(&compute_profile); #pragma omp target teams distribute parallel for for (int ii = 0; ii < nx * ny; ++ii) { density_old[ii] = density0[ii]; } STOP_PROFILING(&compute_profile, "storing_old_density"); if (tt % 2 == 0) { x_mass_and_energy_flux(nx, ny, 1, mesh, dt, dt_h, density0, density_old, energy, velocity_x, mass_flux_x, eF_x, celldx, edgedx, celldy, edgedy); y_mass_and_energy_flux(nx, ny, 0, mesh, dt, dt_h, density0, density_old, energy, velocity_y, mass_flux_y, eF_y, celldx, edgedx, celldy, edgedy); } else { y_mass_and_energy_flux(nx, ny, 1, mesh, dt, dt_h, density0, density_old, energy, velocity_y, mass_flux_y, eF_y, celldx, edgedx, celldy, edgedy); x_mass_and_energy_flux(nx, ny, 0, mesh, dt, dt_h, density0, density_old, energy, velocity_x, mass_flux_x, eF_x, celldx, edgedx, celldy, edgedy); } } // Calculate the flux in the x direction void x_mass_and_energy_flux(const int nx, const int ny, const int first, Mesh* mesh, const double dt, const double dt_h, double* density0, double* density_old, double* energy, const double* velocity_x, double* mass_flux_x, double* eF_x, const double* celldx, const double* edgedx, const double* celldy, const double* edgedy) { const int pad = mesh->pad; // Compute the mass fluxes along the x edges // In the ghost 
cells flux is left as 0.0 START_PROFILING(&compute_profile); #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < ny - pad; ++ii) { for (int jj = pad; jj < (nx + 1) - pad; ++jj) { // Interpolate to make second order in time const double invdx = 1.0 / edgedx[jj]; const double suc0 = 0.5 * invdx * (velocity_x[(ii * (nx + 1) + jj) + 1] - velocity_x[(ii * (nx + 1) + jj) - 1]); const double sur0 = 2.0 * invdx * (velocity_x[(ii * (nx + 1) + jj)] - velocity_x[(ii * (nx + 1) + jj) - 1]); const double sul0 = 2.0 * invdx * (velocity_x[(ii * (nx + 1) + jj) + 1] - velocity_x[(ii * (nx + 1) + jj)]); const double u_tc = velocity_x[(ii * (nx + 1) + jj)] - 0.5 * velocity_x[(ii * (nx + 1) + jj)] * dt * minmod(suc0, minmod(sur0, sul0)); // Van leer limiter double limiter = 0.0; const double density_diff = (density0[(ii * nx + jj)] - density0[(ii * nx + jj) - 1]); if (density_diff) { const double smoothness = (u_tc >= 0.0) ? (density0[(ii * nx + jj) - 1] - density0[(ii * nx + jj) - 2]) / density_diff : (density0[(ii * nx + jj) + 1] - density0[(ii * nx + jj)]) / density_diff; limiter = (smoothness + fabs(smoothness)) / (1.0 + fabs(smoothness)); } // Calculate the flux const double density_upwind = (u_tc >= 0.0) ? density0[(ii * nx + jj) - 1] : density0[(ii * nx + jj)]; mass_flux_x[(ii * (nx + 1) + jj)] = (u_tc * density_upwind + 0.5 * fabs(u_tc) * (1.0 - fabs((u_tc * dt_h) / celldx[jj])) * limiter * density_diff); // Use MC limiter to get slope of energy const double a_x_0 = 0.5 * invdx * (energy[(ii * nx + jj)] - energy[(ii * nx + jj) - 2]); const double b_x_0 = 2.0 * invdx * (energy[(ii * nx + jj) - 1] - energy[(ii * nx + jj) - 2]); const double c_x_0 = 2.0 * invdx * (energy[(ii * nx + jj)] - energy[(ii * nx + jj) - 1]); const double a_x_1 = 0.5 * invdx * (energy[(ii * nx + jj) + 1] - energy[(ii * nx + jj) - 1]); const double b_x_1 = 2.0 * invdx * (energy[(ii * nx + jj)] - energy[(ii * nx + jj) - 1]); const double c_x_1 = 2.0 * invdx * (energy[(ii * nx + jj) + 1] - energy[(ii * nx + jj)]); // Calculate the interpolated densities const double edge_e_x = (u_tc > 0.0) ? energy[(ii * nx + jj) - 1] + 0.5 * minmod(minmod(a_x_0, b_x_0), c_x_0) * (celldx[jj - 1] - u_tc * dt_h) : energy[(ii * nx + jj)] - 0.5 * minmod(minmod(a_x_1, b_x_1), c_x_1) * (celldx[jj] + u_tc * dt_h); // Update the fluxes to now include the contribution from energy eF_x[(ii * (nx + 1) + jj)] = edgedy[ii] * edge_e_x * mass_flux_x[(ii * (nx + 1) + jj)]; } } STOP_PROFILING(&compute_profile, "advect_mass_and_energy"); handle_boundary_2d(nx + 1, ny, mesh, mass_flux_x, INVERT_X, PACK); // Calculate the new density values START_PROFILING(&compute_profile); #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < ny - pad; ++ii) { for (int jj = pad; jj < nx - pad; ++jj) { density0[(ii * nx + jj)] -= dt_h * (edgedy[ii + 1] * mass_flux_x[(ii * (nx + 1) + jj) + 1] - edgedy[ii] * mass_flux_x[(ii * (nx + 1) + jj)]) / (celldx[jj] * celldy[ii]); const double density_e = (density_old[(ii * nx + jj)] * energy[(ii * nx + jj)] - (dt_h * (eF_x[(ii * (nx + 1) + jj) + 1] - eF_x[(ii * (nx + 1) + jj)])) / (celldx[jj] * celldy[ii])); energy[(ii * nx + jj)] = (first) ? (density_old[(ii * nx + jj)] == 0.0) ? 0.0 : density_e / density_old[(ii * nx + jj)] : (density0[(ii * nx + jj)] == 0.0) ? 
0.0 : density_e / density0[(ii * nx + jj)]; } } STOP_PROFILING(&compute_profile, "advect_mass_and_energy"); handle_boundary_2d(nx, ny, mesh, density0, NO_INVERT, PACK); handle_boundary_2d(nx, ny, mesh, energy, NO_INVERT, PACK); } // Calculate the flux in the y direction void y_mass_and_energy_flux(const int nx, const int ny, const int first, Mesh* mesh, const double dt, const double dt_h, double* density0, double* density_old, double* energy, const double* velocity_y, double* mass_flux_y, double* eF_y, const double* celldx, const double* edgedx, const double* celldy, const double* edgedy) { const int pad = mesh->pad; // Compute the mass flux along the y edges // In the ghost cells flux is left as 0.0 START_PROFILING(&compute_profile); #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < (ny + 1) - pad; ++ii) { for (int jj = pad; jj < nx - pad; ++jj) { // Interpolate the velocity to make second order in time const double invdy = 1.0 / edgedy[ii]; const double svc0 = 0.5 * invdy * (velocity_y[(ii * nx + jj) + nx] - velocity_y[(ii * nx + jj) - nx]); const double svr0 = 2.0 * invdy * (velocity_y[(ii * nx + jj)] - velocity_y[(ii * nx + jj) - nx]); const double svl0 = 2.0 * invdy * (velocity_y[(ii * nx + jj) + nx] - velocity_y[(ii * nx + jj)]); const double v_tc = velocity_y[(ii * nx + jj)] - 0.5 * velocity_y[(ii * nx + jj)] * dt * minmod(svc0, minmod(svr0, svl0)); // Van leer limiter const double density_diff = (density0[(ii * nx + jj)] - density0[(ii * nx + jj) - nx]); double limiter = 0.0; if (density_diff) { const double smoothness = (velocity_y[(ii * nx + jj)] >= 0.0) ? (density0[(ii * nx + jj) - nx] - density0[(ii * nx + jj) - 2 * nx]) / density_diff : (density0[(ii * nx + jj) + nx] - density0[(ii * nx + jj)]) / density_diff; limiter = (smoothness + fabs(smoothness)) / (1.0 + fabs(smoothness)); } // Calculate the flux const double density_upwind = (v_tc >= 0.0) ? density0[(ii * nx + jj) - nx] : density0[(ii * nx + jj)]; mass_flux_y[(ii * nx + jj)] = (v_tc * density_upwind + 0.5 * fabs(v_tc) * (1.0 - fabs((v_tc * dt_h) / celldy[ii])) * limiter * density_diff); // Use MC limiter to get slope of energy const double a_y_0 = 0.5 * invdy * (energy[(ii * nx + jj)] - energy[(ii * nx + jj) - 2 * nx]); const double b_y_0 = 2.0 * invdy * (energy[(ii * nx + jj) - nx] - energy[(ii * nx + jj) - 2 * nx]); const double c_y_0 = 2.0 * invdy * (energy[(ii * nx + jj)] - energy[(ii * nx + jj) - nx]); const double a_y_1 = 0.5 * invdy * (energy[(ii * nx + jj) + nx] - energy[(ii * nx + jj) - nx]); const double b_y_1 = 2.0 * invdy * (energy[(ii * nx + jj)] - energy[(ii * nx + jj) - nx]); const double c_y_1 = 2.0 * invdy * (energy[(ii * nx + jj) + nx] - energy[(ii * nx + jj)]); const double edge_e_y = (v_tc > 0.0) ? 
energy[(ii * nx + jj) - nx] + 0.5 * minmod(minmod(a_y_0, b_y_0), c_y_0) * (celldy[ii - 1] - v_tc * dt_h) : energy[(ii * nx + jj)] - 0.5 * minmod(minmod(a_y_1, b_y_1), c_y_1) * (celldy[ii] + v_tc * dt_h); // Update the fluxes to now include the contribution from energy eF_y[(ii * nx + jj)] = edgedx[jj] * edge_e_y * mass_flux_y[(ii * nx + jj)]; } } STOP_PROFILING(&compute_profile, "advect_mass_and_energy"); handle_boundary_2d(nx, ny + 1, mesh, mass_flux_y, INVERT_Y, PACK); // Calculate the new density values START_PROFILING(&compute_profile); #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < ny - pad; ++ii) { for (int jj = pad; jj < nx - pad; ++jj) { density0[(ii * nx + jj)] -= dt_h * (edgedx[jj + 1] * mass_flux_y[(ii * nx + jj) + nx] - edgedx[jj] * mass_flux_y[(ii * nx + jj)]) / (celldx[jj] * celldy[ii]); const double density_e = (density_old[(ii * nx + jj)] * energy[(ii * nx + jj)] - (dt_h * (eF_y[(ii * nx + jj) + nx] - eF_y[(ii * nx + jj)])) / (celldx[jj] * celldy[ii])); energy[(ii * nx + jj)] = (first) ? (density_old[(ii * nx + jj)] == 0.0) ? 0.0 : density_e / density_old[(ii * nx + jj)] : (density0[(ii * nx + jj)] == 0.0) ? 0.0 : density_e / density0[(ii * nx + jj)]; } } STOP_PROFILING(&compute_profile, "advect_mass_and_energy"); handle_boundary_2d(nx, ny, mesh, density0, NO_INVERT, PACK); handle_boundary_2d(nx, ny, mesh, energy, NO_INVERT, PACK); } // Advect momentum according to the velocity void advect_momentum(const int nx, const int ny, const int tt, Mesh* mesh, const double dt_h, const double dt, double* velocity_x, double* velocity_y, double* momentum_x_flux_x, double* momentum_x_flux_y, double* momentum_y_flux_x, double* momentum_y_flux_y, double* momentum_x, double* momentum_y, const double* density0, const double* mass_flux_x, const double* mass_flux_y, const double* edgedx, const double* edgedy, const double* celldx, const double* celldy) { const int pad = mesh->pad; if (tt % 2) { START_PROFILING(&compute_profile); momentum_x_flux_in_x(nx, ny, mesh, dt_h, velocity_x, momentum_x_flux_x, mass_flux_x, edgedx, edgedy, celldx); STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx, ny, mesh, momentum_x_flux_x, NO_INVERT, PACK); START_PROFILING(&compute_profile); #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < ny - pad; ++ii) { for (int jj = pad; jj < (nx + 1) - pad; ++jj) { momentum_x[(ii * (nx + 1) + jj)] -= dt_h * (momentum_x_flux_x[(ii * nx + jj)] - momentum_x_flux_x[(ii * nx + jj) - 1]) / (edgedx[jj] * celldy[ii]); const double density_edge_x = (density0[(ii * nx + jj)] * celldx[jj] * celldy[ii] + density0[(ii * nx + jj) - 1] * celldx[jj - 1] * celldy[ii]) / (2.0 * edgedx[jj] * celldy[ii]); velocity_x[(ii * (nx + 1) + jj)] = (density_edge_x == 0.0) ? 
0.0 : momentum_x[(ii * (nx + 1) + jj)] / density_edge_x; } } STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx + 1, ny, mesh, velocity_x, INVERT_X, PACK); START_PROFILING(&compute_profile); momentum_x_flux_in_y(nx, ny, mesh, dt_h, velocity_x, velocity_y, momentum_x_flux_y, mass_flux_y, edgedx, edgedy, celldy); STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx + 1, ny + 1, mesh, momentum_x_flux_y, NO_INVERT, PACK); START_PROFILING(&compute_profile); // Calculate the axial momentum #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < ny - pad; ++ii) { #pragma omp simd for (int jj = pad; jj < (nx + 1) - pad; ++jj) { momentum_x[(ii * (nx + 1) + jj)] -= dt_h * (momentum_x_flux_y[(ii * (nx + 1) + jj) + (nx + 1)] - momentum_x_flux_y[(ii * (nx + 1) + jj)]) / (celldx[jj] * edgedy[ii]); } } momentum_y_flux_in_x(nx, ny, mesh, dt_h, velocity_x, velocity_y, momentum_y_flux_x, mass_flux_x, edgedx, celldy, celldx); STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx + 1, ny + 1, mesh, momentum_y_flux_x, NO_INVERT, PACK); START_PROFILING(&compute_profile); #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < (ny + 1) - pad; ++ii) { for (int jj = pad; jj < nx - pad; ++jj) { momentum_y[(ii * nx + jj)] -= dt_h * (momentum_y_flux_x[(ii * (nx + 1) + jj) + 1] - momentum_y_flux_x[(ii * (nx + 1) + jj)]) / (edgedx[jj] * celldy[ii]); const double density_edge_y = (density0[(ii * nx + jj)] * celldx[jj] * celldy[ii] + density0[(ii * nx + jj) - nx] * celldx[jj] * celldy[ii - 1]) / (2.0 * celldx[jj] * edgedy[ii]); velocity_y[(ii * nx + jj)] = (density_edge_y == 0.0) ? 
0.0 : momentum_y[(ii * nx + jj)] / density_edge_y; } } STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx, ny + 1, mesh, velocity_y, INVERT_Y, PACK); START_PROFILING(&compute_profile); momentum_y_flux_in_y(nx, ny, mesh, dt_h, velocity_y, momentum_y_flux_y, mass_flux_y, edgedy, celldx, celldy); STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx, ny, mesh, momentum_y_flux_y, NO_INVERT, PACK); START_PROFILING(&compute_profile); #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < (ny + 1) - pad; ++ii) { for (int jj = pad; jj < nx - pad; ++jj) { momentum_y[(ii * nx + jj)] -= dt_h * (momentum_y_flux_y[(ii * nx + jj)] - momentum_y_flux_y[(ii * nx + jj) - nx]) / (celldx[jj] * edgedy[ii]); } } STOP_PROFILING(&compute_profile, __func__); } else { START_PROFILING(&compute_profile); momentum_x_flux_in_y(nx, ny, mesh, dt_h, velocity_x, velocity_y, momentum_x_flux_y, mass_flux_y, edgedx, edgedy, celldy); STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx + 1, ny + 1, mesh, momentum_x_flux_y, NO_INVERT, PACK); START_PROFILING(&compute_profile); // Calculate the axial momentum #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < ny - pad; ++ii) { for (int jj = pad; jj < (nx + 1) - pad; ++jj) { momentum_x[(ii * (nx + 1) + jj)] -= dt_h * (momentum_x_flux_y[(ii * (nx + 1) + jj) + (nx + 1)] - momentum_x_flux_y[(ii * (nx + 1) + jj)]) / (celldx[jj] * edgedy[ii]); const double density_edge_x = (density0[(ii * nx + jj)] * celldx[jj] * celldy[ii] + density0[(ii * nx + jj) - 1] * celldx[jj - 1] * celldy[ii]) / (2.0 * edgedx[jj] * celldy[ii]); velocity_x[(ii * (nx + 1) + jj)] = (density_edge_x == 0.0) ? 
0.0 : momentum_x[(ii * (nx + 1) + jj)] / density_edge_x; } } STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx + 1, ny, mesh, velocity_x, INVERT_X, PACK); START_PROFILING(&compute_profile); momentum_x_flux_in_x(nx, ny, mesh, dt_h, velocity_x, momentum_x_flux_x, mass_flux_x, edgedx, edgedy, celldx); STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx, ny, mesh, momentum_x_flux_x, NO_INVERT, PACK); START_PROFILING(&compute_profile); #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < ny - pad; ++ii) { for (int jj = pad; jj < (nx + 1) - pad; ++jj) { momentum_x[(ii * (nx + 1) + jj)] -= dt_h * (momentum_x_flux_x[(ii * nx + jj)] - momentum_x_flux_x[(ii * nx + jj) - 1]) / (edgedx[jj] * celldy[ii]); } } momentum_y_flux_in_y(nx, ny, mesh, dt_h, velocity_y, momentum_y_flux_y, mass_flux_y, edgedy, celldx, celldy); STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx, ny, mesh, momentum_y_flux_y, NO_INVERT, PACK); START_PROFILING(&compute_profile); #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < (ny + 1) - pad; ++ii) { for (int jj = pad; jj < nx - pad; ++jj) { momentum_y[(ii * nx + jj)] -= dt_h * (momentum_y_flux_y[(ii * nx + jj)] - momentum_y_flux_y[(ii * nx + jj) - nx]) / (celldx[jj] * edgedy[ii]); const double density_edge_y = (density0[(ii * nx + jj)] * celldx[jj] * celldy[ii] + density0[(ii * nx + jj) - nx] * celldx[jj] * celldy[ii - 1]) / (2.0 * celldx[jj] * edgedy[ii]); velocity_y[(ii * nx + jj)] = (density_edge_y == 0.0) ? 0.0 : momentum_y[(ii * nx + jj)] / density_edge_y; } } STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx, ny + 1, mesh, velocity_y, INVERT_Y, PACK); START_PROFILING(&compute_profile); momentum_y_flux_in_x(nx, ny, mesh, dt_h, velocity_x, velocity_y, momentum_y_flux_x, mass_flux_x, edgedx, celldy, celldx); STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx + 1, ny + 1, mesh, momentum_y_flux_x, NO_INVERT, PACK); START_PROFILING(&compute_profile); #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < (ny + 1) - pad; ++ii) { for (int jj = pad; jj < nx - pad; ++jj) { momentum_y[(ii * nx + jj)] -= dt_h * (momentum_y_flux_x[(ii * (nx + 1) + jj) + 1] - momentum_y_flux_x[(ii * (nx + 1) + jj)]) / (edgedx[jj] * celldy[ii]); } } STOP_PROFILING(&compute_profile, __func__); } } // Calculates the x momentum flux along the x dimension void momentum_x_flux_in_x(const int nx, const int ny, Mesh* mesh, const double dt_h, double* velocity_x, double* momentum_x_flux_x, const double* mass_flux_x, const double* edgedx, const double* edgedy, const double* celldx) { const int pad = mesh->pad; // Calculate the cell centered x momentum fluxes in the x direction #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < ny - pad; ++ii) { for (int jj = pad; jj < nx - pad; ++jj) { // Use MC limiter to get slope of velocity const double invdx = 1.0 / edgedx[jj]; const double a_x_0 = 0.5 * invdx * (velocity_x[(ii * (nx + 1) + jj) + 1] - velocity_x[(ii * (nx + 1) + jj) - 1]); const double b_x_0 = 2.0 * invdx * (velocity_x[(ii * (nx + 1) + jj)] - velocity_x[(ii * (nx + 1) + jj) - 1]); const double c_x_0 = 2.0 * invdx 
* (velocity_x[(ii * (nx + 1) + jj) + 1] - velocity_x[(ii * (nx + 1) + jj)]); const double a_x_1 = 0.5 * invdx * (velocity_x[(ii * (nx + 1) + jj) + 2] - velocity_x[(ii * (nx + 1) + jj)]); const double b_x_1 = 2.0 * invdx * (velocity_x[(ii * (nx + 1) + jj) + 1] - velocity_x[(ii * (nx + 1) + jj)]); const double c_x_1 = 2.0 * invdx * (velocity_x[(ii * (nx + 1) + jj) + 2] - velocity_x[(ii * (nx + 1) + jj) + 1]); // Calculate the interpolated velocities const double u_cell_x = 0.5 * (velocity_x[(ii * (nx + 1) + jj)] + velocity_x[(ii * (nx + 1) + jj) + 1]); const double F_x = edgedy[ii] * 0.5 * (mass_flux_x[(ii * (nx + 1) + jj)] + mass_flux_x[(ii * (nx + 1) + jj) + 1]); const double u_cell_x_interp = (u_cell_x > 0.0) ? velocity_x[(ii * (nx + 1) + jj)] + 0.5 * minmod(minmod(a_x_0, b_x_0), c_x_0) * (celldx[jj - 1] - u_cell_x * dt_h) : velocity_x[(ii * (nx + 1) + jj) + 1] - 0.5 * minmod(minmod(a_x_1, b_x_1), c_x_1) * (celldx[jj] + u_cell_x * dt_h); momentum_x_flux_x[(ii * nx + jj)] = F_x * u_cell_x_interp; } } } // Calculates the x momentum flux in the y dimension void momentum_x_flux_in_y(const int nx, const int ny, Mesh* mesh, const double dt_h, double* velocity_x, double* velocity_y, double* momentum_x_flux_y, const double* mass_flux_y, const double* edgedx, const double* edgedy, const double* celldy) { const int pad = mesh->pad; #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < (ny + 1) - pad; ++ii) { for (int jj = pad; jj < (nx + 1) - pad; ++jj) { // Use MC limiter to get slope of velocity const double invdy = 1.0 / edgedy[ii]; const double a_y_0 = 0.5 * invdy * (velocity_x[(ii * (nx + 1) + jj)] - velocity_x[(ii * (nx + 1) + jj) - 2 * (nx + 1)]); const double b_y_0 = 2.0 * invdy * (velocity_x[(ii * (nx + 1) + jj) - (nx + 1)] - velocity_x[(ii * (nx + 1) + jj) - 2 * (nx + 1)]); const double c_y_0 = 2.0 * invdy * (velocity_x[(ii * (nx + 1) + jj)] - velocity_x[(ii * (nx + 1) + jj) - (nx + 1)]); const double a_y_1 = 0.5 * invdy * (velocity_x[(ii * (nx + 1) + jj) + (nx + 1)] - velocity_x[(ii * (nx + 1) + jj) - (nx + 1)]); const double b_y_1 = 2.0 * invdy * (velocity_x[(ii * (nx + 1) + jj)] - velocity_x[(ii * (nx + 1) + jj) - (nx + 1)]); const double c_y_1 = 2.0 * invdy * (velocity_x[(ii * (nx + 1) + jj) + (nx + 1)] - velocity_x[(ii * (nx + 1) + jj)]); const double v_cell_y = 0.5 * (velocity_y[(ii * nx + jj) - 1] + velocity_y[(ii * nx + jj)]); const double F_y = edgedx[jj] * 0.5 * (mass_flux_y[(ii * nx + jj)] + mass_flux_y[(ii * nx + jj) - 1]); const double u_corner_y = (v_cell_y > 0.0) ?
velocity_x[(ii * (nx + 1) + jj) - (nx + 1)] + 0.5 * minmod(minmod(a_y_0, b_y_0), c_y_0) * (celldy[ii - 1] - v_cell_y * dt_h) : velocity_x[(ii * (nx + 1) + jj)] - 0.5 * minmod(minmod(a_y_1, b_y_1), c_y_1) * (celldy[ii] + v_cell_y * dt_h); momentum_x_flux_y[(ii * (nx + 1) + jj)] = F_y * u_corner_y; } } } // Calculates the y momentum flux in the x dimension void momentum_y_flux_in_x(const int nx, const int ny, Mesh* mesh, const double dt_h, const double* velocity_x, double* velocity_y, double* momentum_y_flux_x, const double* mass_flux_x, const double* edgedx, const double* celldy, const double* celldx) { const int pad = mesh->pad; // Calculate the corner centered y momentum fluxes in the x direction #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < (ny + 1) - pad; ++ii) { for (int jj = pad; jj < (nx + 1) - pad; ++jj) { // Use MC limiter to get slope of velocity const double invdx = 1.0 / edgedx[jj]; const double a_x_0 = 0.5 * invdx * (velocity_y[(ii * nx + jj)] - velocity_y[(ii * nx + jj) - 2]); const double b_x_0 = 2.0 * invdx * (velocity_y[(ii * nx + jj) - 1] - velocity_y[(ii * nx + jj) - 2]); const double c_x_0 = 2.0 * invdx * (velocity_y[(ii * nx + jj)] - velocity_y[(ii * nx + jj) - 1]); const double a_x_1 = 0.5 * invdx * (velocity_y[(ii * nx + jj) + 1] - velocity_y[(ii * nx + jj) - 1]); const double b_x_1 = 2.0 * invdx * (velocity_y[(ii * nx + jj)] - velocity_y[(ii * nx + jj) - 1]); const double c_x_1 = 2.0 * invdx * (velocity_y[(ii * nx + jj) + 1] - velocity_y[(ii * nx + jj)]); // Calculate the interpolated velocities const double F_x = celldy[ii] * 0.5 * (mass_flux_x[(ii * (nx + 1) + jj)] + mass_flux_x[(ii * (nx + 1) + jj) - (nx + 1)]); const double u_cell_x = 0.5 * (velocity_x[(ii * (nx + 1) + jj)] + velocity_x[(ii * (nx + 1) + jj) - (nx + 1)]); const double v_cell_x_interp = (u_cell_x > 0.0) ?
velocity_y[(ii * nx + jj) - 1] + 0.5 * minmod(minmod(a_x_0, b_x_0), c_x_0) * (celldx[jj - 1] - u_cell_x * dt_h) : velocity_y[(ii * nx + jj)] - 0.5 * minmod(minmod(a_x_1, b_x_1), c_x_1) * (celldx[jj] + u_cell_x * dt_h); momentum_y_flux_x[(ii * (nx + 1) + jj)] = F_x * v_cell_x_interp; } } } // Calculates the y momentum flux in the y dimension void momentum_y_flux_in_y(const int nx, const int ny, Mesh* mesh, const double dt_h, double* velocity_y, double* momentum_y_flux_y, const double* mass_flux_y, const double* edgedy, const double* celldx, const double* celldy) { const int pad = mesh->pad; #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) #else #pragma omp target teams distribute parallel for #endif for (int ii = pad; ii < ny - pad; ++ii) { for (int jj = pad; jj < nx - pad; ++jj) { // Use MC limiter to get slope of velocity const double invdy = 1.0 / edgedy[ii]; const double a_y_0 = 0.5 * invdy * (velocity_y[(ii * nx + jj) + nx] - velocity_y[(ii * nx + jj) - nx]); const double b_y_0 = 2.0 * invdy * (velocity_y[(ii * nx + jj)] - velocity_y[(ii * nx + jj) - nx]); const double c_y_0 = 2.0 * invdy * (velocity_y[(ii * nx + jj) + nx] - velocity_y[(ii * nx + jj)]); const double a_y_1 = 0.5 * invdy * (velocity_y[(ii * nx + jj) + 2 * nx] - velocity_y[(ii * nx + jj)]); const double b_y_1 = 2.0 * invdy * (velocity_y[(ii * nx + jj) + nx] - velocity_y[(ii * nx + jj)]); const double c_y_1 = 2.0 * invdy * (velocity_y[(ii * nx + jj) + 2 * nx] - velocity_y[(ii * nx + jj) + nx]); const double F_y = celldx[jj] * 0.5 * (mass_flux_y[(ii * nx + jj)] + mass_flux_y[(ii * nx + jj) + nx]); const double v_cell_y = 0.5 * (velocity_y[(ii * nx + jj)] + velocity_y[(ii * nx + jj) + nx]); const double v_cell_y_interp = (v_cell_y > 0.0) ? velocity_y[(ii * nx + jj)] + 0.5 * minmod(minmod(a_y_0, b_y_0), c_y_0) * (celldy[ii - 1] - v_cell_y * dt_h) : velocity_y[(ii * nx + jj) + nx] - 0.5 * minmod(minmod(a_y_1, b_y_1), c_y_1) * (celldy[ii] + v_cell_y * dt_h); momentum_y_flux_y[(ii * nx + jj)] = F_y * v_cell_y_interp; } } } // Prints some conservation values void print_conservation(const int nx, const int ny, double* density0, double* energy, double* reduce_array, Mesh* mesh) { double mass_tot = 0.0; double energy_tot = 0.0; const int pad = mesh->pad; #ifdef CLANG #pragma omp target teams distribute parallel for collapse(2) num_teams(14) \ num_threads(1024) \ map(tofrom : mass_tot, energy_tot) reduction(+ : mass_tot, energy_tot) #else #pragma omp target teams distribute parallel for map( \ tofrom : mass_tot, energy_tot) reduction(+ : mass_tot, energy_tot) #endif for (int ii = pad; ii < ny - pad; ++ii) { for (int jj = pad; jj < nx - pad; ++jj) { mass_tot += density0[(ii * nx + jj)]; energy_tot += density0[(ii * nx + jj)] * energy[(ii * nx + jj)]; } } double global_mass_tot = reduce_to_master(mass_tot); double global_energy_tot = reduce_to_master(energy_tot); if (mesh->rank == MASTER) { printf("Total mass: %.12e\n", global_mass_tot); printf("Total energy: %.12e\n", global_energy_tot); } }
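/* The limiters above lean on a two-argument minmod helper that is defined
   elsewhere in this app. A minimal sketch, assuming the conventional
   definition (fabs from math.h): it returns the smaller-magnitude argument
   when the two slopes agree in sign and zero otherwise, which is what keeps
   the MC-limited slopes non-oscillatory.

   static inline double minmod(const double a, const double b) {
     return (a * b > 0.0) ? ((fabs(a) < fabs(b)) ? a : b) : 0.0;
   }
*/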
GB_unaryop__identity_int8_uint16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_int8_uint16 // op(A') function: GB_tran__identity_int8_uint16 // C type: int8_t // A type: uint16_t // cast: int8_t cij = (int8_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint16_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ int8_t z = (int8_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_int8_uint16 ( int8_t *restrict Cx, const uint16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_int8_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
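/* For reference, hand-expanding the macros above gives the effective inner
   loop of GB_unop__identity_int8_uint16 (an illustrative sketch only; the
   generated kernel always goes through GB_CAST_OP):

   #pragma omp parallel for num_threads(nthreads) schedule(static)
   for (int64_t p = 0 ; p < anz ; p++)
   {
       uint16_t aij = Ax [p] ;     // GB_GETA: aij = Ax [pA]
       int8_t z = (int8_t) aij ;   // GB_CASTING: typecast uint16_t to int8_t
       Cx [p] = z ;                // GB_OP: identity, cij = aij
   }
*/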
GB_binop__ne_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ne_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__ne_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__ne_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__ne_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_uint32) // A*D function (colscale): GB (_AxD__ne_uint32) // D*A function (rowscale): GB (_DxB__ne_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__ne_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__ne_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_uint32) // C=scalar+B GB (_bind1st__ne_uint32) // C=scalar+B' GB (_bind1st_tran__ne_uint32) // C=A+scalar GB (_bind2nd__ne_uint32) // C=A'+scalar GB (_bind2nd_tran__ne_uint32) // C type: bool // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x != y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_NE || GxB_NO_UINT32 || GxB_NO_NE_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__ne_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ne_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ne_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ne_uint32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ne_uint32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ne_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ne_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ne_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ne_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ne_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ne_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x != bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ne_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij != y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x != aij) ; \ } GrB_Info GB (_bind1st_tran__ne_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) 
{ // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij != y) ; \ } GrB_Info GB (_bind2nd_tran__ne_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
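/* At the user level these kernels back the built-in GrB_NE_UINT32 operator.
   A minimal usage sketch (assumes A and B are already-built GrB_UINT32
   matrices of matching dimensions nrows-by-ncols); whether the hard-coded
   eWiseMult kernel above is actually reached depends on the build flags
   tested in GB_DISABLE:

   GrB_Matrix C ;
   GrB_Matrix_new (&C, GrB_BOOL, nrows, ncols) ;
   GrB_Matrix_eWiseMult_BinaryOp (C, NULL, NULL, GrB_NE_UINT32, A, B, NULL) ;
*/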
main.c
#include <stdio.h> #include <stdlib.h> #include <locale.h> #include <stdbool.h> #include <fcntl.h> #include <stdint.h> #include <x86intrin.h> #include <pthread.h> #include <time.h> #include <omp.h> #include <sys/mman.h> #include <sys/stat.h> #include <papi.h> #include <getopt.h> #define TRACEPOINT_DEFINE #define TRACEPOINT_CREATE_PROBES #include "tp.h" #define PROGNAME "io-test" #define NUM_EVENTS 1 #define NSECS_IN_MSEC 1000000 #define NSECS_IN_SEC 1000000000 #define BYTES_IN_MBYTE 1000000 static const char *const progname = PROGNAME; static const int MY_PAGE_SIZE = 4096; static const int DEFAULT_ITERATIONS = 10000; static const int DEFAULT_CHUNK_SIZE = 2048 * 4096; /* 2048 pages; a const int is not a constant expression in C */ static const int DEFAULT_THREADS = 1; struct vars { char *filename; int iterations; int threads; off_t chunk_size; bool verbose; bool worst_case; bool prefault; }; __attribute__((noreturn)) static void usage(void) { fprintf(stderr, "Usage: %s [OPTIONS] file\n", progname); fprintf(stderr, "\nOptions:\n\n"); fprintf(stderr, " --iterations, -i set number of iterations per page\n"); fprintf(stderr, " --chunk-size, -c set size of chunks\n"); fprintf(stderr, " --threads, -t set number of threads\n"); fprintf(stderr, " --worst-case, -w force worst case performance\n"); fprintf(stderr, " --prefault, -p prefault pages when reading file\n"); fprintf(stderr, " --verbose, -v set verbose output\n"); exit(EXIT_FAILURE); } static void parse_opts(int argc, char **argv, struct vars *vars) { int opt; struct option options[] = { { "help", 0, 0, 'h' }, { "verbose", 0, 0, 'v' }, { "worst-case", 0, 0, 'w' }, { "prefault", 0, 0, 'p' }, { "iterations", 1, 0, 'i' }, { "chunk-size", 1, 0, 'c' }, { "threads", 1, 0, 't' }, { 0, 0, 0, 0 }, }; int idx; while ((opt = getopt_long(argc, argv, "hvwpi:t:c:", options, &idx)) != -1) { switch (opt) { case 'i': vars->iterations = atoi(optarg); break; case 't': vars->threads = atoi(optarg); break; case 'c': vars->chunk_size = atoi(optarg); break; case 'v': vars->verbose = true; break; case 'w': vars->worst_case = true; break; case 'p': vars->prefault = true; break; case 'h': usage(); break; default: usage(); break; } } // Non-option arg for filename if (optind >= argc) { fprintf(stderr, "File name missing.\n"); usage(); } else { vars->filename = argv[optind]; } // Default values if (vars->iterations == 0) { vars->iterations = DEFAULT_ITERATIONS; if (vars->verbose) { printf("using default iterations: %d\n", vars->iterations); } } if (vars->threads == 0) { vars->threads = DEFAULT_THREADS; if (vars->verbose) { printf("using default threads: %d\n", vars->threads); } } if (vars->chunk_size == 0 && !vars->worst_case) { vars->chunk_size = DEFAULT_CHUNK_SIZE; if (vars->verbose) { printf("using default chunk size: %ld\n", vars->chunk_size); } } } off_t filesize(int fd) { struct stat stats; int ret = -1; if (fstat(fd, &stats) == 0) { ret = stats.st_size; } return ret; } struct timespec time_diff(struct timespec start, struct timespec end) { struct timespec ret; if ((end.tv_nsec - start.tv_nsec) < 0) { ret.tv_sec = end.tv_sec - start.tv_sec - 1; ret.tv_nsec = NSECS_IN_SEC + end.tv_nsec - start.tv_nsec; } else { ret.tv_sec = end.tv_sec - start.tv_sec; ret.tv_nsec = end.tv_nsec - start.tv_nsec; } return ret; } int main(int argc, char **argv) { tracepoint(tracekit, begin); int fd; off_t length; int pages; struct timespec start, end; int advice, mmap_flags; volatile uint64_t sum = 0; struct vars *vars = calloc(1, sizeof(struct vars)); parse_opts(argc, argv, vars); setlocale(LC_NUMERIC, ""); fd =
open(vars->filename, O_RDONLY); if (fd == -1) { fprintf(stderr, "Error: cannot open file %s\n", vars->filename); exit(EXIT_FAILURE); } length = filesize(fd); if (length == -1) { fprintf(stderr, "Error: cannot get file size.\n"); exit(EXIT_FAILURE); } if (vars->chunk_size == -1 || vars->worst_case) { vars->chunk_size = length; } if (vars->chunk_size % MY_PAGE_SIZE != 0) { vars->chunk_size += (MY_PAGE_SIZE - (vars->chunk_size % MY_PAGE_SIZE)); printf("Growing chunk size to nearest page multiple: %'jd\n", (intmax_t)vars->chunk_size); } if (vars->worst_case) { advice = MADV_RANDOM; } else { advice = MADV_SEQUENTIAL; } mmap_flags = MAP_PRIVATE; if (vars->prefault) { mmap_flags |= MAP_POPULATE; } pages = length / MY_PAGE_SIZE; if (vars->verbose) { printf("pages=%d\n", pages); } // Initialize PAPI int events[NUM_EVENTS] = {PAPI_TOT_INS}; PAPI_library_init(PAPI_VER_CURRENT); PAPI_thread_init(pthread_self); omp_set_num_threads(vars->threads); clock_gettime(CLOCK_MONOTONIC, &start); #ifndef NO_OMP #pragma omp parallel reduction(+:sum) #endif { int i, j; long long int values[NUM_EVENTS]; uint8_t *buf; off_t to_read; int pages = 0; int iterations = vars->iterations; off_t offset = 0; PAPI_start_counters(events, NUM_EVENTS); while (offset < length) { off_t remaining = length - offset; to_read = remaining > vars->chunk_size ? vars->chunk_size : remaining; buf = mmap(NULL, to_read, PROT_READ, mmap_flags, fd, offset); if (buf == MAP_FAILED) { perror("mmap"); exit(EXIT_FAILURE); } offset += to_read; madvise(buf, to_read, advice); #ifndef NO_OMP #pragma omp for #endif for (i = 0; i < to_read; i+= MY_PAGE_SIZE) { // Use only one byte per page sum += buf[i]; for (j = 0; j < iterations; j++) { sum++; } pages++; } munmap(buf, to_read); } PAPI_read_counters(values, NUM_EVENTS); if (vars->verbose) { printf("Thread %d pages:%'d instr/page:%'lld total instr:%'lld\n", omp_get_thread_num(), pages, values[0]/pages, values[0]); printf("Thread %d sum:%'lu\n", omp_get_thread_num(), sum); } } clock_gettime(CLOCK_MONOTONIC, &end); printf("sum=%'lu\n", sum); struct timespec diff = time_diff(start, end); double time = (double)diff.tv_sec + ((double)diff.tv_nsec / (double)NSECS_IN_SEC); printf("Time (s): %ld.%03ld\n", diff.tv_sec, diff.tv_nsec / NSECS_IN_MSEC); printf("Bandwidth (MB/s): %f\n", ((double)length/time)/(double)BYTES_IN_MBYTE); tracepoint(tracekit, end); return 0; }
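/* Example invocation (the file name and sizes are hypothetical). This maps
   the input in 8 MB page-aligned chunks, touches one byte per page while
   spinning for the requested iterations, and prints per-thread PAPI
   instruction counts plus the aggregate bandwidth:

   ./io-test --threads 4 --chunk-size 8388608 --iterations 1000 --verbose /tmp/input.dat
*/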
cloud.c
#include <string> #include <iostream> #include <algorithm> #include <utility> #include <tfhe/tfhe.h> #include <tfhe/tfhe_io.h> #include <stdio.h> #include <time.h> #include <vector> #include <cassert> #include <sys/time.h> #include <omp.h> #include <fstream> using namespace std; ifstream read; #define T_FILE "averagestandard.txt" void add(LweSample *sum, LweSample *carryover, const LweSample *x, const LweSample *y, const LweSample *c, const int32_t nb_bits, const TFheGateBootstrappingCloudKeySet *keyset) { const LweParams *in_out_params = keyset->params->in_out_params; LweSample *carry = new_LweSample_array(1, in_out_params); LweSample *axc = new_LweSample_array(1, in_out_params); LweSample *bxc = new_LweSample_array(1, in_out_params); bootsCOPY(carry, c, keyset); for(int32_t i = 0; i < nb_bits; i++) { #pragma omp parallel sections num_threads(2) { #pragma omp section bootsXOR(axc, x + i, carry, keyset); #pragma omp section bootsXOR(bxc, y + i, carry, keyset); } #pragma omp parallel sections num_threads(2) { #pragma omp section bootsXOR(sum + i, x + i, bxc, keyset); #pragma omp section bootsAND(axc, axc, bxc, keyset); } bootsXOR(carry, carry, axc, keyset); } bootsCOPY(carryover, carry, keyset); delete_LweSample_array(1, carry); delete_LweSample_array(1, axc); delete_LweSample_array(1, bxc); } void zero(LweSample* result, const TFheGateBootstrappingCloudKeySet* keyset, const size_t size) { for(int i = 0; i < size; i++){ bootsCONSTANT(result + i, 0, keyset);} } void NOT(LweSample* result, const LweSample* x, const TFheGateBootstrappingCloudKeySet* keyset, const size_t size) { for(int i = 0; i < size; i++){ bootsNOT(result + i, x + i, keyset);} } void split(LweSample *finalresult, LweSample *finalresult2, LweSample *finalresult3, LweSample *a, LweSample *b, LweSample *c, LweSample *d,LweSample *e, const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset) { const LweParams *in_out_params = keyset->params->in_out_params; LweSample *sum = new_LweSample_array(32, in_out_params); LweSample *sum2 = new_LweSample_array(32, in_out_params); LweSample *sum3 = new_LweSample_array(32, in_out_params); LweSample *carryover = new_LweSample_array(32, in_out_params); LweSample *carryover2 = new_LweSample_array(32, in_out_params); LweSample *carryover3 = new_LweSample_array(32, in_out_params); for (int32_t i = 0; i < nb_bits; ++i) { bootsCONSTANT(sum + i, 0, keyset); bootsCONSTANT(sum2 + i, 0, keyset); bootsCONSTANT(sum3 + i, 0, keyset); bootsCONSTANT(carryover + i, 0, keyset); bootsCONSTANT(carryover2 + i, 0, keyset); bootsCONSTANT(carryover3 + i, 0, keyset); } //adding the 2nd result with the carry add(sum, carryover, e, b, carry, nb_bits, keyset); add(sum2, carryover2, d, a, carryover, nb_bits, keyset); add(sum3, carryover3, c, carryover2,carry,nb_bits, keyset); for (int32_t i = 0; i < nb_bits; ++i) { bootsCOPY(finalresult + i, sum3 + i, keyset); } for (int32_t i = 0; i < nb_bits; ++i) { bootsCOPY(finalresult2 + i, sum2 + i, keyset); } for (int32_t i = 0; i < nb_bits; ++i) { bootsCOPY(finalresult3 + i, sum + i, keyset); } delete_LweSample_array(32, sum); delete_LweSample_array(32, sum2); delete_LweSample_array(32, sum3); delete_LweSample_array(32, carryover); delete_LweSample_array(32, carryover2); delete_LweSample_array(32, carryover3); } void mul32(LweSample *result, LweSample *result2, LweSample *a, LweSample *b,const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset) { const LweParams *in_out_params = keyset->params->in_out_params; //sum of 
the output LweSample *sum3c1 = new_LweSample_array(32, in_out_params); LweSample *sum3c2 = new_LweSample_array(32, in_out_params); LweSample *tmp = new_LweSample_array(32, in_out_params); LweSample *tmp2 = new_LweSample_array(32, in_out_params); LweSample *tmp3c1 = new_LweSample_array(32, in_out_params); LweSample *tmp3c2 = new_LweSample_array(32, in_out_params); LweSample *carry1 = new_LweSample_array(32, in_out_params); LweSample *carry2 = new_LweSample_array(32, in_out_params); //set all these to 0 for (int32_t i = 0; i < nb_bits; ++i) { bootsCONSTANT(sum3c1 + i, 0, keyset); bootsCONSTANT(sum3c2 + i, 0, keyset); bootsCONSTANT(tmp + i, 0, keyset); bootsCONSTANT(tmp2 + i, 0, keyset); bootsCONSTANT(tmp3c1 + i, 0, keyset); bootsCONSTANT(tmp3c2 + i, 0, keyset); bootsCONSTANT(carry1 + i, 0, keyset); bootsCONSTANT(carry2 + i, 0, keyset); } //multiply all the bits together with the other bits int round = 0; for (int32_t i = 0; i < nb_bits; ++i) { for (int32_t k = 0; k < nb_bits; ++k) { //this is basically multiplying as it is an AND gate //a(ciphertext1) should be the least significant bit bootsAND(tmp + k, a + k, b + i, keyset); } if (round > 0) { for (int32_t i = 0; i < round; ++i) { //put the required number of 0s in front bootsCONSTANT(tmp3c1 + i, 0, keyset); } } //copy all the bits that fit into an int32 with the 0s inside for (int32_t i = 0; i < 32 - round; ++i) { //shifted by round because of the leading 0s; tmp is the least significant bit bootsCOPY(tmp3c1 + i + round, tmp + i, keyset); } //the rest of the bits that couldn't fit inside for (int32_t i = 0; i < round; ++i) { bootsCOPY(tmp3c2 + i, tmp + i + 32 - round, keyset); } add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset); add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset); round++; } for (int32_t i = 0; i < 32; ++i) { bootsCOPY(result + i, sum3c2 + i, keyset); bootsCOPY(result2 + i, sum3c1 + i, keyset); } delete_LweSample_array(32, sum3c1); delete_LweSample_array(32, sum3c2); delete_LweSample_array(32, tmp); delete_LweSample_array(32, tmp2); delete_LweSample_array(32, tmp3c1); delete_LweSample_array(32, tmp3c2); delete_LweSample_array(32, carry1); delete_LweSample_array(32, carry2); } void mul64(LweSample *result, LweSample *result2,LweSample *result3, LweSample *a, LweSample *b,LweSample *c,const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset) { const LweParams *in_out_params = keyset->params->in_out_params; //sum of the output LweSample *sum3c1 = new_LweSample_array(32, in_out_params); LweSample *sum3c2 = new_LweSample_array(32, in_out_params); LweSample *sum3c3 = new_LweSample_array(32, in_out_params); LweSample *tmp = new_LweSample_array(32, in_out_params); LweSample *tmp2 = new_LweSample_array(32, in_out_params); LweSample *tmp3c1 = new_LweSample_array(32, in_out_params); LweSample *tmp3c2 = new_LweSample_array(32, in_out_params); LweSample *tmp3c3 = new_LweSample_array(32, in_out_params); LweSample *tmp3c4 = new_LweSample_array(32, in_out_params); LweSample *carry1 = new_LweSample_array(32, in_out_params); LweSample *carry2 = new_LweSample_array(32, in_out_params); LweSample *carry3 = new_LweSample_array(32, in_out_params); LweSample *carry4 = new_LweSample_array(32, in_out_params); //set all these to 0 for (int32_t i = 0; i < nb_bits; ++i) { bootsCONSTANT(sum3c1 + i, 0, keyset);
bootsCONSTANT(sum3c2 + i, 0, keyset); bootsCONSTANT(sum3c3 + i, 0, keyset); bootsCONSTANT(tmp + i, 0, keyset); bootsCONSTANT(tmp2 + i, 0, keyset); bootsCONSTANT(tmp3c1 + i, 0, keyset); bootsCONSTANT(tmp3c2 + i, 0, keyset); bootsCONSTANT(tmp3c3 + i, 0, keyset); bootsCONSTANT(tmp3c4 + i, 0, keyset); bootsCONSTANT(carry1 + i, 0, keyset); bootsCONSTANT(carry2 + i, 0, keyset); bootsCONSTANT(carry3 + i, 0, keyset); bootsCONSTANT(carry4 + i, 0, keyset); } //multiply all the bits together with the other bits int round = 0; int counter1 = 0; int counter2 = 0; for (int32_t i = 0; i < nb_bits; ++i) { for (int32_t k = 0; k < nb_bits; ++k) { //this is basically multiplying as it is an AND gate //a(ciphertext1) should be the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsAND(tmp + k, a + k, c + i, keyset); #pragma omp section bootsAND(tmp2 + k, b + k, c + i, keyset); } } counter1 = 32 - round; counter2 = 32 - counter1; if (round > 0) { for (int32_t i = 0; i < round; ++i) { //put the required number of 0s in front bootsCONSTANT(tmp3c1 + i, 0, keyset); } } //copy all the bits that fit into an int32 with the 0s inside //tmp to tmp3c1 for (int32_t i = 0; i < counter1; ++i) { //shifted by round because of the leading 0s; tmp is the least significant bit bootsCOPY(tmp3c1 + i + round, tmp + i, keyset); } //remaining of tmp to tmp3c2 for (int32_t i = 0; i < counter2; ++i) { bootsCOPY(tmp3c2 + i, tmp + i + counter1, keyset); } //some of tmp2 to remaining of tmp3c2 //runs counter1 times for (int32_t i = 0; i < counter1; ++i) { bootsCOPY(tmp3c2 + i + counter2, tmp2 + i, keyset); } //the rest of tmp2 to tmp3c3 //runs counter2 times for (int32_t i = 0; i < counter2; ++i) { bootsCOPY(tmp3c3 + i, tmp2 + i + counter1, keyset); } add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset); add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset); add(sum3c3, carry3, sum3c3, tmp3c3, carry2, 32, keyset); round++; } for (int32_t i = 0; i < 32; ++i) { bootsCOPY(result + i, sum3c3 + i, keyset); bootsCOPY(result2 + i, sum3c2 + i, keyset); bootsCOPY(result3 + i, sum3c1 + i, keyset); } delete_LweSample_array(32, sum3c1); delete_LweSample_array(32, sum3c2); delete_LweSample_array(32, sum3c3); delete_LweSample_array(32, tmp); delete_LweSample_array(32, tmp2); delete_LweSample_array(32, tmp3c1); delete_LweSample_array(32, tmp3c2); delete_LweSample_array(32, tmp3c3); delete_LweSample_array(32, tmp3c4); delete_LweSample_array(32, carry1); delete_LweSample_array(32, carry2); delete_LweSample_array(32, carry3); delete_LweSample_array(32, carry4); } void mul128(LweSample *result, LweSample *result2,LweSample *result3,LweSample *result4,LweSample *result5, LweSample *a, LweSample *b,LweSample *c,LweSample *d, LweSample *e,const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset) { const LweParams *in_out_params = keyset->params->in_out_params; //sum of the output LweSample *sum3c1 = new_LweSample_array(32, in_out_params); LweSample *sum3c2 = new_LweSample_array(32, in_out_params); LweSample *sum3c3 =
new_LweSample_array(32, in_out_params); LweSample *sum3c4 = new_LweSample_array(32, in_out_params); LweSample *sum3c5 = new_LweSample_array(32, in_out_params); LweSample *tmp = new_LweSample_array(32, in_out_params); LweSample *tmp2 = new_LweSample_array(32, in_out_params); LweSample *tmp3 = new_LweSample_array(32, in_out_params); LweSample *tmp4 = new_LweSample_array(32, in_out_params); LweSample *tmp3c1 = new_LweSample_array(32, in_out_params); LweSample *tmp3c2 = new_LweSample_array(32, in_out_params); LweSample *tmp3c3 = new_LweSample_array(32, in_out_params); LweSample *tmp3c4 = new_LweSample_array(32, in_out_params); LweSample *tmp3c5 = new_LweSample_array(32, in_out_params); LweSample *carry1 = new_LweSample_array(32, in_out_params); LweSample *carry2 = new_LweSample_array(32, in_out_params); LweSample *carry3 = new_LweSample_array(32, in_out_params); LweSample *carry4 = new_LweSample_array(32, in_out_params); LweSample *carry5 = new_LweSample_array(32, in_out_params); //set all these to 0 for (int32_t i = 0; i < nb_bits; ++i) { bootsCONSTANT(sum3c1 + i, 0, keyset); bootsCONSTANT(sum3c2 + i, 0, keyset); bootsCONSTANT(sum3c3 + i, 0, keyset); bootsCONSTANT(sum3c4 + i, 0, keyset); bootsCONSTANT(sum3c5 + i, 0, keyset); bootsCONSTANT(tmp + i, 0, keyset); bootsCONSTANT(tmp2 + i, 0, keyset); bootsCONSTANT(tmp3 + i, 0, keyset); bootsCONSTANT(tmp4 + i, 0, keyset); bootsCONSTANT(tmp3c1 + i, 0, keyset); bootsCONSTANT(tmp3c2 + i, 0, keyset); bootsCONSTANT(tmp3c3 + i, 0, keyset); bootsCONSTANT(tmp3c4 + i, 0, keyset); bootsCONSTANT(tmp3c5 + i, 0, keyset); bootsCONSTANT(carry1 + i, 0, keyset); bootsCONSTANT(carry2 + i, 0, keyset); bootsCONSTANT(carry3 + i, 0, keyset); bootsCONSTANT(carry4 + i, 0, keyset); bootsCONSTANT(carry5 + i, 0, keyset); } //multiply all the bits together with the other bits.. 
int round = 0; int counter1 = 0; int counter2 = 0; for (int32_t i = 0; i < nb_bits; ++i) { for (int32_t k = 0; k < nb_bits; ++k) { //this is basically multiplying as it is an AND gate //a(ciphertext1) should be the least significant bit #pragma omp parallel sections num_threads(4) { #pragma omp section bootsAND(tmp + k, a + k, e + i, keyset); #pragma omp section bootsAND(tmp2 + k, b + k, e + i, keyset); #pragma omp section bootsAND(tmp3 + k, c + k, e + i, keyset); #pragma omp section bootsAND(tmp4 + k, d + k, e + i, keyset); } } counter1 = 32 - round; counter2 = 32 - counter1; if (round > 0) { for (int32_t i = 0; i < round; ++i) { //put the required number of 0s in front bootsCONSTANT(tmp3c1 + i, 0, keyset); } } //copy all the bits that fit into an int32 with the 0s inside //tmp to tmp3c1 for (int32_t i = 0; i < counter1; ++i) { //shifted by round because of the leading 0s; tmp is the least significant bit bootsCOPY(tmp3c1 + i + round, tmp + i, keyset); } //remaining of tmp to tmp3c2 for (int32_t i = 0; i < counter2; ++i) { bootsCOPY(tmp3c2 + i, tmp + i + counter1, keyset); } //some of tmp2 to remaining of tmp3c2 for (int32_t i = 0; i < counter1; ++i) { bootsCOPY(tmp3c2 + i + counter2, tmp2 + i, keyset); } //remaining tmp2 to tmp3c3 for (int32_t i = 0; i < counter2; ++i) { bootsCOPY(tmp3c3 + i, tmp2 + i + counter1, keyset); } //some of tmp3 to remaining tmp3c3 for (int32_t i = 0; i < counter1; ++i) { bootsCOPY(tmp3c3 + i + counter2, tmp3 + i, keyset); } //rest of tmp3 to tmp3c4 for (int32_t i = 0; i < counter2; ++i) { bootsCOPY(tmp3c4 + i, tmp3 + i + counter1, keyset); } //some of tmp4 to remaining tmp3c4 for (int32_t i = 0; i < counter1; ++i) { bootsCOPY(tmp3c4 + i + counter2, tmp4 + i, keyset); } //rest of tmp4 to tmp3c5 for (int32_t i = 0; i < counter2; ++i) { bootsCOPY(tmp3c5 + i, tmp4 + i + counter1, keyset); } add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset); add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset); add(sum3c3, carry3, sum3c3, tmp3c3, carry2, 32, keyset); add(sum3c4, carry4, sum3c4, tmp3c4, carry3, 32, keyset); add(sum3c5, carry5, sum3c5, tmp3c5, carry4, 32, keyset); round++; } for (int32_t i = 0; i < 32; ++i) { bootsCOPY(result + i, sum3c5 + i, keyset); bootsCOPY(result2 + i, sum3c4 + i, keyset); bootsCOPY(result3 + i, sum3c3 + i, keyset); bootsCOPY(result4 + i, sum3c2 + i, keyset); bootsCOPY(result5 + i, sum3c1 + i, keyset); } delete_LweSample_array(32, sum3c1); delete_LweSample_array(32, sum3c2); delete_LweSample_array(32, sum3c3); delete_LweSample_array(32, sum3c4); delete_LweSample_array(32, sum3c5); delete_LweSample_array(32, tmp); delete_LweSample_array(32, tmp2); delete_LweSample_array(32, tmp3); delete_LweSample_array(32, tmp4); delete_LweSample_array(32, tmp3c1); delete_LweSample_array(32, tmp3c2);
delete_LweSample_array(32, tmp3c3); delete_LweSample_array(32, tmp3c4); delete_LweSample_array(32, tmp3c5); delete_LweSample_array(32, carry1); delete_LweSample_array(32, carry2); delete_LweSample_array(32, carry3); delete_LweSample_array(32, carry4); delete_LweSample_array(32, carry5); } int main() { // dragonfly_cipher_cloud should have already appended 2 cipherstreams into cloud.data printf("Reading the key...\n"); // reads the cloud key from file FILE* cloud_key = fopen("cloud.key", "rb"); TFheGateBootstrappingCloudKeySet* bk = new_tfheGateBootstrappingCloudKeySet_fromFile(cloud_key); fclose(cloud_key); FILE* nbit_key = fopen("nbit.key","rb"); TFheGateBootstrappingSecretKeySet* nbitkey = new_tfheGateBootstrappingSecretKeySet_fromFile(nbit_key); fclose(nbit_key); // if necessary, the params are inside the key const TFheGateBootstrappingParameterSet* params = bk->params; // if necessary, the params are inside the key const TFheGateBootstrappingParameterSet* nbitparams = nbitkey->params; // Create ciphertext blocks for negative1, bit1, negative2, bit2 and values LweSample* ciphertextbit = new_gate_bootstrapping_ciphertext_array(32, nbitparams); LweSample* ciphertextnegative1 = new_gate_bootstrapping_ciphertext_array(32, nbitparams); LweSample* ciphertextbit1 = new_gate_bootstrapping_ciphertext_array(32, nbitparams); LweSample* ciphertextnegative2 = new_gate_bootstrapping_ciphertext_array(32, nbitparams); LweSample* ciphertextbit2 = new_gate_bootstrapping_ciphertext_array(32, nbitparams); LweSample* ciphertext1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext9 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext10 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext11 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext12 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext13 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext14 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext15 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext16 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertextcarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertextcarry2 = new_gate_bootstrapping_ciphertext_array(32, params); printf("Reading input 1...\n"); // reads ciphertexts from cloud.data FILE* cloud_data = fopen("cloud.data", "rb"); for (int i = 0; i<32; i++) // line0 import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextnegative1[i], nbitparams); for (int i = 0; i<32; i++) // line1 import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextbit1[i], nbitparams); // Decrypts bit size1 int32_t int_bit1 = 0; for (int i=0; i<32; i++) { int ai = bootsSymDecrypt(&ciphertextbit1[i],nbitkey)>0; int_bit1 |= (ai<<i); } for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, 
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext1[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext2[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext3[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext4[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext5[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext6[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext7[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext8[i], params);
    for (int i = 0; i < 32; i++) // line10
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextcarry1[i], params);

    printf("Reading input 2...\n");
    for (int i = 0; i < 32; i++) // line11
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextnegative2[i], nbitparams);
    for (int i = 0; i < 32; i++) // line12
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextbit2[i], nbitparams);

    // Decrypts bit size2
    int32_t int_bit2 = 0;
    for (int i = 0; i < 32; i++) {
        int ai = bootsSymDecrypt(&ciphertextbit2[i], nbitkey) > 0;
        int_bit2 |= (ai << i);
    }

    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext9[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext10[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext11[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext12[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext13[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext14[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext15[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext16[i], params);
    for (int i = 0; i < 32; i++) // line21
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextcarry2[i], params);

    printf("Reading operation code...\n");
    // Get the operation code from file (1 = add, 2 = subtract, 4 = multiply)
    int32_t int_op;
    std::ifstream read("operator.txt"); // stream declared here; assumes <fstream> is included earlier in the file
    read >> int_op;

    // Homomorphic encryption to add negative1 and negative2 ciphertexts
    LweSample* ciphertextnegative = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
    LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
    // add(ciphertextnegative, carry1, ciphertextnegative1, ciphertextnegative2, ciphertextcarry1, 32, nbitcloudkey); // NOTE

    // Decrypts Negative1
    int32_t int_negative1 = 0;
    for (int i = 0; i < 32; i++) {
        int ai = bootsSymDecrypt(&ciphertextnegative1[i], nbitkey) > 0;
        int_negative1 |= (ai << i);
    }
    std::cout << int_negative1 << " => negative1" << "\n";
    // convert first value's negativity code from 2 to 1
    if (int_negative1 == 2) { int_negative1 = 1; }

    // Decrypts Negative2
    int32_t int_negative2 = 0;
    for (int i = 0; i < 32; i++) {
        int ai = bootsSymDecrypt(&ciphertextnegative2[i], nbitkey) > 0;
        int_negative2 |= (ai << i);
    }
    std::cout << int_negative2 << " => negative2" << "\n";

    // Add Negatives.
    // If both v1 & v2 are positive,  int_negative = 0
    // If only v1 is negative,        int_negative = 1
    // If only v2 is negative,        int_negative = 2
    // If both v1 & v2 are negative,  int_negative = 3
    int32_t int_negative = int_negative1 + int_negative2;
    // std::cout << int_negative << " -> negatives" << "\n";

    // export the negative and bit data for the verifier
    FILE* answer_data = fopen("answer.data", "wb");

    // Write the negative code to answer.data
    int32_t ciphernegative = 0;
    if (int_negative == 1) { ciphernegative = 1; }
    if (int_negative == 2) { ciphernegative = 2; }
    if (int_negative == 3) { ciphernegative = 4; }
    for (int i = 0; i < 32; i++) {
        bootsSymEncrypt(&ciphertextnegative[i], (ciphernegative >> i) & 1, nbitkey);
    }
    for (int i = 0; i < 32; i++)
        export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextnegative[i], nbitparams);
    std::cout << ciphernegative << " => total negatives" << "\n";
    delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative);

    // Compare bit sizes; a multiplication may need up to twice the bits of its inputs
    int32_t int_bit = 0;
    if (int_op == 4) {
        if (int_bit1 >= int_bit2) { int_bit = (int_bit1 * 2); }
        else                      { int_bit = (int_bit2 * 2); }
        for (int i = 0; i < 32; i++) {
            bootsSymEncrypt(&ciphertextbit[i], (int_bit >> i) & 1, nbitkey);
        }
        for (int i = 0; i < 32; i++)
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit[i], nbitparams);
        std::cout << int_bit << " written to answer.data" << "\n";
        if (int_bit1 >= int_bit2) { int_bit = int_bit1; }
        else                      { int_bit = int_bit2; }
    }
    else if (int_bit1 >= int_bit2) {
        int_bit = int_bit1;
        for (int i = 0; i < 32; i++)
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit1[i], nbitparams);
        std::cout << int_bit << " written to answer.data" << "\n";
    }
    else {
        int_bit = int_bit2;
        for (int i = 0; i < 32; i++)
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit2[i], nbitparams);
        std::cout << int_bit << " written to answer.data" << "\n";
    }
    fclose(cloud_data);
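    /* Added note (inferred from the exports above and below): answer.data
       starts with a fixed header of two 32-ciphertext blocks -- the combined
       negative code, then the result bit size -- and every arithmetic branch
       below appends exactly nine more 32-ciphertext blocks (eight result
       words plus one carry word), so the verifier can always read the same
       number of blocks back regardless of the operand width. */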
    // If trying to multiply a 256-bit number
    if ((int_op == 4) && (int_bit >= 256)) {
        std::cout << "Cannot multiply 256 bit number!" << "\n";
        fclose(answer_data);
        return 126;
    }

    // Addition
    // if (the operation is add AND (both numbers are positive OR both numbers are negative))
    // OR (the operation is subtract AND either number is negative)
    // i.e. A+B, [(-A)+(-B)], A-(-B), (-A)-(B)
    if ((int_op == 1 && (int_negative != 1 && int_negative != 2)) ||
        (int_op == 2 && (int_negative == 1 || int_negative == 2))) {
        if (int_op == 1) {
            std::cout << int_bit << " bit Addition computation" << "\n";
        } else {
            std::cout << int_bit << " bit Subtraction computation" << "\n";
        }

        // 32 Bit Addition
        if (int_bit == 32) {
            // Ciphertexts to hold the result and carry
            LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);

            struct timeval start, end;
            double get_time;
            gettimeofday(&start, NULL);
            printf("Doing the homomorphic computation...\n");

            // Adding component
            add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);

            // Timings
            gettimeofday(&end, NULL);
            get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
            printf("Computation Time: %lf[sec]\n", get_time);

            // export the result ciphertexts to a file (for the cloud); the unused
            // result slots 2-8 are padded with the carry block so the answer always
            // contains eight 32-bit words plus a carry
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params); // result1
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 2
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 3
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 4
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 5
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 6
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 7
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 8
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // carry
            fclose(answer_data);
            printf("writing the answer to file...\n");

            // Clean up
            delete_gate_bootstrapping_ciphertext_array(32, result);
            delete_gate_bootstrapping_ciphertext_array(32, carry1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
        }

        // 64 Bit Addition
        if (int_bit == 64) {
            // Ciphertexts to hold the results and carries
            LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);

            struct timeval start, end;
            double get_time;
            gettimeofday(&start, NULL);
            printf("Doing the homomorphic computation...\n");

            // Adding component
            add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);
            add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk);
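            /* Added note: multi-word addition here is a ripple of 32-bit adds --
               each add() consumes the carry word produced by the previous one,
               exactly like schoolbook column addition in base 2^32. In plaintext
               the same chain looks like:

                   uint32_t c = cin;
                   for (int w = 0; w < words; w++) {
                       uint64_t s = (uint64_t)a[w] + b[w] + c;
                       r[w] = (uint32_t)s;
                       c = (uint32_t)(s >> 32);
                   }

               with every a[w], b[w], r[w] and c being a 32-ciphertext block,
               and each add() call playing the role of one loop iteration. */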
            gettimeofday(&end, NULL);
            get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
            printf("Computation Time: %lf[sec]\n", get_time);
            printf("writing the answer to file...\n");

            // export the result ciphertexts to a file (for the cloud)
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params);  // result1
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); // result2
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 3
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 4
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 5
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 6
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 7
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 8
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // carry
            fclose(answer_data);

            // Clean up
            delete_gate_bootstrapping_ciphertext_array(32, result);
            delete_gate_bootstrapping_ciphertext_array(32, result2);
            delete_gate_bootstrapping_ciphertext_array(32, carry1);
            delete_gate_bootstrapping_ciphertext_array(32, carry2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
        }

        // 128 Bit Addition
        if (int_bit == 128) {
            // Ciphertexts to hold the results and carries
            LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);

            struct timeval start, end;
            double get_time;
            gettimeofday(&start, NULL);
            printf("Doing the homomorphic computation...\n");

            // Adding component
            add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);
            add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk);
            add(result3, carry3, ciphertext3, ciphertext11, carry2, 32, bk);
            add(result4, carry4, ciphertext4, ciphertext12, carry3, 32, bk);

            // Timing
            gettimeofday(&end, NULL);
            get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
            printf("Computation Time: %lf[sec]\n", get_time);
            printf("writing the answer to file...\n");
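            /* Added note: once encrypted, the padding words below are
               indistinguishable from real data; the verifier is expected to
               use the bit size exported in the answer.data header to decide
               how many of the eight result words are significant. */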
            // export the result ciphertexts to a file (for the cloud)
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params);  // result1
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); // result2
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params); // result3
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params); // result4
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 5
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 6
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 7
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 8
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // carry
            fclose(answer_data);

            // Clean up
            delete_gate_bootstrapping_ciphertext_array(32, result);
            delete_gate_bootstrapping_ciphertext_array(32, result2);
            delete_gate_bootstrapping_ciphertext_array(32, result3);
            delete_gate_bootstrapping_ciphertext_array(32, result4);
            delete_gate_bootstrapping_ciphertext_array(32, carry1);
            delete_gate_bootstrapping_ciphertext_array(32, carry2);
            delete_gate_bootstrapping_ciphertext_array(32, carry3);
            delete_gate_bootstrapping_ciphertext_array(32, carry4);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
        }

        // 256 Bit Addition
        if (int_bit == 256) {
            // do some operations on the ciphertexts: here, we will compute the
            // addition of the two
            LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params);

            // Timing
            struct timeval start, end;
            double get_time;
            gettimeofday(&start, NULL);

            add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);
            add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk);
            add(result3, carry3, ciphertext3, ciphertext11, carry2, 32, bk);
            add(result4, carry4, ciphertext4, ciphertext12, carry3, 32, bk);
            add(result5, carry5, ciphertext5, ciphertext13, carry4, 32, bk);
            add(result6, carry6, ciphertext6, ciphertext14, carry5, 32, bk);
            add(result7, carry7, ciphertext7, ciphertext15, carry6, 32, bk);
            add(result8, carry8, ciphertext8, ciphertext16, carry7, 32, bk);

            gettimeofday(&end, NULL);
            get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
            printf("Computation Time: %lf[sec]\n", get_time);
            printf("writing the answer to file...\n");

            // export the result ciphertexts to a file (for the cloud)
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params);
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params);
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params);
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params);
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params);
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // carry
            fclose(answer_data);

            // clean up all pointers
            delete_gate_bootstrapping_ciphertext_array(32, result);
            delete_gate_bootstrapping_ciphertext_array(32, result2);
            delete_gate_bootstrapping_ciphertext_array(32, result3);
            delete_gate_bootstrapping_ciphertext_array(32, result4);
            delete_gate_bootstrapping_ciphertext_array(32, result5);
            delete_gate_bootstrapping_ciphertext_array(32, result6);
            delete_gate_bootstrapping_ciphertext_array(32, result7);
            delete_gate_bootstrapping_ciphertext_array(32, result8);
            delete_gate_bootstrapping_ciphertext_array(32, carry1);
            delete_gate_bootstrapping_ciphertext_array(32, carry2);
            delete_gate_bootstrapping_ciphertext_array(32, carry3);
            delete_gate_bootstrapping_ciphertext_array(32, carry4);
            delete_gate_bootstrapping_ciphertext_array(32, carry5);
            delete_gate_bootstrapping_ciphertext_array(32, carry6);
            delete_gate_bootstrapping_ciphertext_array(32, carry7);
            delete_gate_bootstrapping_ciphertext_array(32, carry8);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext13);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext14);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext15);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext16);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
        }
    }
    // Subtraction
    // If the operation is subtract OR (the operation is addition AND either one of
    // the values is negative): A-B, A+(-B), (-A)+B
    else if (int_op == 2 || (int_op == 1 && (int_negative == 1 || int_negative == 2))) {
        // Normal subtraction with no negative numbers, A-B, OR addition with the
        // 2nd number negative, A+(-B)
        if ((int_op == 2 && int_negative == 0) || (int_op == 1 && int_negative == 2)) {
            if (int_op == 2) {
                std::cout << int_bit << " bit Subtraction computation" << "\n";
            } else {
                std::cout << int_bit << " bit Addition computation with 2nd value negative" << "\n";
            }

            // 32 Bit Subtraction
            if (int_bit == 32) {
                printf("Doing the homomorphic computation...\n");
                LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);

                struct timeval start, end;
                double get_time;
                gettimeofday(&start, NULL);
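                /* Added note: the branch below computes a - b as a + (~b + 1),
                   i.e. the two's complement negation of b. Plaintext analogue
                   for one 32-bit word:

                       uint32_t neg_b = ~b + 1u;   // two's complement of b
                       uint32_t diff  = a + neg_b; // == a - b (mod 2^32)

                   Homomorphically, NOT() inverts every bit of b, the first
                   add() adds the constant 1, and the result is then added to
                   a with the usual carry chain. */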
                // Subtraction Process
                // Step 1: invert the 32-bit chunks of the second input value
                NOT(inverse1, ciphertext9, bk, 32);
                // initialize temp and the temp carry to 0
                zero(temp, bk, 32);
                zero(tempcarry1, bk, 32);
                // set temp to 1 for the two's complement
                bootsCONSTANT(temp, 1, bk);
                // Add 1 to the inverted value
                add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);

                LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                // Do the addition: add the first value to the negated second value, a + (-b)
                add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);

                gettimeofday(&end, NULL);
                get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
                printf("Computation Time: %lf[sec]\n", get_time);
                printf("writing the answer to file...\n");

                // export the result ciphertexts to a file (for the cloud)
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); // result1
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 2
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 3
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 4
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 5
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 6
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 7
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 8
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // carry
                fclose(answer_data);

                delete_gate_bootstrapping_ciphertext_array(32, temp);
                delete_gate_bootstrapping_ciphertext_array(32, inverse1);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
                delete_gate_bootstrapping_ciphertext_array(32, carry1);
                delete_gate_bootstrapping_ciphertext_array(32, result1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
            }

            // 64 Bit Subtraction
            if (int_bit == 64) {
                LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);

                struct timeval start, end;
                double get_time;
                printf("Doing the homomorphic computation...\n");
                gettimeofday(&start, NULL);

                // Subtraction Process
                // Step 1: invert the 32-bit chunks of the second input value
                NOT(inverse1, ciphertext9, bk, 32);
                NOT(inverse2, ciphertext10, bk, 32);
                // initialize temp and the temp carries to 0
                zero(temp, bk, 32);
                zero(tempcarry1, bk, 32);
                zero(tempcarry2, bk, 32);
                // set temp to 1 for the two's complement
                bootsCONSTANT(temp, 1, bk);
                // Add 1 to the inverted value
                add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
                // Add the rest of the inverted value, propagating the carry
                add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);

                LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                // Do the addition: add the first value to the negated second value, a + (-b)
                add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);
                add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk);

                gettimeofday(&end, NULL);
                get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
                printf("Computation Time: %lf[sec]\n", get_time);
                printf("writing the answer to file...\n");

                // export the result ciphertexts to a file (for the cloud)
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); // result1
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); // result2
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 3
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 4
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 5
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 6
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 7
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 8
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // carry
                fclose(answer_data);

                delete_gate_bootstrapping_ciphertext_array(32, temp);
                delete_gate_bootstrapping_ciphertext_array(32, inverse1);
                delete_gate_bootstrapping_ciphertext_array(32, inverse2);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
                delete_gate_bootstrapping_ciphertext_array(32, carry1);
                delete_gate_bootstrapping_ciphertext_array(32, carry2);
                delete_gate_bootstrapping_ciphertext_array(32, result1);
                delete_gate_bootstrapping_ciphertext_array(32, result2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
            }

            // 128 Bit Subtraction
            if (int_bit == 128) {
                // reads the 2x32 ciphertexts from the cloud file
                printf("Doing the homomorphic computation...\n");
                // do some operations on the ciphertexts: here, we will compute
                // the difference of the two
                LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);

                struct timeval start, end;
                double get_time;
                gettimeofday(&start, NULL);
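                /* Added note: only the lowest word receives the +1 of the
                   two's complement; each higher word adds its inverted bits to
                   a zeroed block (tempcarryN) plus the carry rippling up from
                   the word below (twoscarryN-1), which is what propagates the
                   +1 through all 128 bits. */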
                // Subtraction Process
                // Step 1: invert the 32-bit chunks of the second input value
                NOT(inverse1, ciphertext9, bk, 32);
                NOT(inverse2, ciphertext10, bk, 32);
                NOT(inverse3, ciphertext11, bk, 32);
                NOT(inverse4, ciphertext12, bk, 32);
                // initialize temp and the temp carries to 0
                zero(temp, bk, 32);
                zero(tempcarry1, bk, 32);
                zero(tempcarry2, bk, 32);
                zero(tempcarry3, bk, 32);
                zero(tempcarry4, bk, 32);
                // set temp to 1 for the two's complement
                bootsCONSTANT(temp, 1, bk);
                // Add 1 to the inverted value
                add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
                // Add the rest of the inverted value, propagating the carry
                add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
                add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
                add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);

                LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
                // Do the addition: add the first value to the negated second value, a + (-b)
                add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);
                add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk);
                add(result3, carry3, ciphertext3, twosresult3, carry2, 32, bk);
                add(result4, carry4, ciphertext4, twosresult4, carry3, 32, bk);

                gettimeofday(&end, NULL);
                get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
                printf("Computation Time: %lf[sec]\n", get_time);
                printf("writing the answer to file...\n");

                // export the result ciphertexts to a file (for the cloud)
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); // result1
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); // result2
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params); // result3
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params); // result4
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 5
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 6
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 7
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 8
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // carry
                fclose(answer_data);

                // clean up all pointers
                delete_gate_bootstrapping_ciphertext_array(32, temp);
                delete_gate_bootstrapping_ciphertext_array(32, inverse1);
                delete_gate_bootstrapping_ciphertext_array(32, inverse2);
                delete_gate_bootstrapping_ciphertext_array(32, inverse3);
                delete_gate_bootstrapping_ciphertext_array(32, inverse4);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
                delete_gate_bootstrapping_ciphertext_array(32, carry1);
                delete_gate_bootstrapping_ciphertext_array(32, carry2);
                delete_gate_bootstrapping_ciphertext_array(32, carry3);
                delete_gate_bootstrapping_ciphertext_array(32, carry4);
                delete_gate_bootstrapping_ciphertext_array(32, result1);
                delete_gate_bootstrapping_ciphertext_array(32, result2);
                delete_gate_bootstrapping_ciphertext_array(32, result3);
                delete_gate_bootstrapping_ciphertext_array(32, result4);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
            }

            // 256 Bit Subtraction
            if (int_bit == 256) {
                // reads the 2x32 ciphertexts from the cloud file
                printf("Doing the homomorphic computation...\n");
                // do some operations on the ciphertexts: here, we will compute
                // the difference of the two
                LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse5 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse6 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse7 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse8 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry8 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult5 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult6 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult7 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult8 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry8 = new_gate_bootstrapping_ciphertext_array(32, params);

                struct timeval start, end;
                double get_time;
                gettimeofday(&start, NULL);

                // Subtraction Process
                // Step 1: invert the 32-bit chunks of the second input value
                NOT(inverse1, ciphertext9, bk, 32);
                NOT(inverse2, ciphertext10, bk, 32);
                NOT(inverse3, ciphertext11, bk, 32);
                NOT(inverse4, ciphertext12, bk, 32);
                NOT(inverse5, ciphertext13, bk, 32);
                NOT(inverse6, ciphertext14, bk, 32);
                NOT(inverse7, ciphertext15, bk, 32);
                NOT(inverse8, ciphertext16, bk, 32);
                // initialize temp and the temp carries to 0
                zero(temp, bk, 32);
                zero(tempcarry1, bk, 32);
                zero(tempcarry2, bk, 32);
                zero(tempcarry3, bk, 32);
                zero(tempcarry4, bk, 32);
                zero(tempcarry5, bk, 32);
                zero(tempcarry6, bk, 32);
                zero(tempcarry7, bk, 32);
                zero(tempcarry8, bk, 32);
                // set temp to 1 for the two's complement
                bootsCONSTANT(temp, 1, bk);
                // Add 1 to the inverted value
                add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
                // Add the rest of the inverted value, propagating the carry
                add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
                add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
                add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);
                add(twosresult5, twoscarry5, inverse5, tempcarry5, twoscarry4, 32, bk);
                add(twosresult6, twoscarry6, inverse6, tempcarry6, twoscarry5, 32, bk);
                add(twosresult7, twoscarry7, inverse7, tempcarry7, twoscarry6, 32, bk);
                add(twosresult8, twoscarry8, inverse8, tempcarry8, twoscarry7, 32, bk);

                LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params);
                // Do the addition: add the first value to the negated second value, a + (-b)
                add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);
                add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk);
                add(result3, carry3, ciphertext3, twosresult3, carry2, 32, bk);
                add(result4, carry4, ciphertext4, twosresult4, carry3, 32, bk);
                add(result5, carry5, ciphertext5, twosresult5, carry4, 32, bk);
                add(result6, carry6, ciphertext6, twosresult6, carry5, 32, bk);
                add(result7, carry7, ciphertext7, twosresult7, carry6, 32, bk);
                add(result8, carry8, ciphertext8, twosresult8, carry7, 32, bk);

                gettimeofday(&end, NULL);
                get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
                printf("Computation Time: %lf[sec]\n", get_time);
                printf("Writing the answer to file...\n");

                // export the result ciphertexts to a file (for the cloud)
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); // result1
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); // result2
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params); // result3
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params); // result4
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params); // result5
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params); // result6
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params); // result7
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params); // result8
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // carry
                fclose(answer_data);

                // clean up all pointers
                delete_gate_bootstrapping_ciphertext_array(32, temp);
                delete_gate_bootstrapping_ciphertext_array(32, inverse1);
                delete_gate_bootstrapping_ciphertext_array(32, inverse2);
                delete_gate_bootstrapping_ciphertext_array(32, inverse3);
                delete_gate_bootstrapping_ciphertext_array(32, inverse4);
                delete_gate_bootstrapping_ciphertext_array(32, inverse5);
                delete_gate_bootstrapping_ciphertext_array(32, inverse6);
                delete_gate_bootstrapping_ciphertext_array(32, inverse7);
                delete_gate_bootstrapping_ciphertext_array(32, inverse8);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry5);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry6);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry7);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry8);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult5);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult6);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult7);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult8);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry5);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry6);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry7);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry8);
                delete_gate_bootstrapping_ciphertext_array(32, carry1);
                delete_gate_bootstrapping_ciphertext_array(32, carry2);
                delete_gate_bootstrapping_ciphertext_array(32, carry3);
                delete_gate_bootstrapping_ciphertext_array(32, carry4);
                delete_gate_bootstrapping_ciphertext_array(32, carry5);
                delete_gate_bootstrapping_ciphertext_array(32, carry6);
                delete_gate_bootstrapping_ciphertext_array(32, carry7);
                delete_gate_bootstrapping_ciphertext_array(32, carry8);
                delete_gate_bootstrapping_ciphertext_array(32, result1);
                delete_gate_bootstrapping_ciphertext_array(32, result2);
                delete_gate_bootstrapping_ciphertext_array(32, result3);
                delete_gate_bootstrapping_ciphertext_array(32, result4);
                delete_gate_bootstrapping_ciphertext_array(32, result5);
                delete_gate_bootstrapping_ciphertext_array(32, result6);
                delete_gate_bootstrapping_ciphertext_array(32, result7);
                delete_gate_bootstrapping_ciphertext_array(32, result8);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext13);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext14);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext15);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext16);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
            }
        }
        // Addition (for subtraction) with value 1 being a negative number, (-A)+B
        else {
            if (int_op == 2) {
                std::cout << int_bit << " bit Subtraction computation" << "\n";
            } else {
                std::cout << int_bit << " bit Addition computation with 1st value negative" << "\n";
            }

            if (int_bit == 32) {
                LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);

                struct timeval start, end;
                double get_time;
                printf("Doing the homomorphic computation...\n");
                gettimeofday(&start, NULL);
                // Subtraction Process
                // Step 1: invert the 32-bit chunks of the first input value
                NOT(inverse1, ciphertext1, bk, 32);
                // initialize temp and the temp carry to 0
                zero(temp, bk, 32);
                zero(tempcarry1, bk, 32);
                // set temp to 1 for the two's complement
                bootsCONSTANT(temp, 1, bk);
                // Add 1 to the inverted value
                add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);

                LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                // Do the addition: add the second value to the negated first value, b + (-a)
                add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);

                gettimeofday(&end, NULL);
                get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
                printf("Computation Time: %lf[sec]\n", get_time);
                printf("writing the answer to file...\n");

                // export the result ciphertexts to a file (for the cloud)
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); // result1
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 2
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 3
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 4
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 5
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 6
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 7
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 8
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // carry
                fclose(answer_data);

                delete_gate_bootstrapping_ciphertext_array(32, temp);
                delete_gate_bootstrapping_ciphertext_array(32, inverse1);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
                delete_gate_bootstrapping_ciphertext_array(32, carry1);
                delete_gate_bootstrapping_ciphertext_array(32, result1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
            }
            else if (int_bit == 64) {
                LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);

                struct timeval start, end;
                double get_time;
                printf("Doing the homomorphic computation...\n");
                gettimeofday(&start, NULL);
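                /* Added note: with the first value negative, the operands are
                   swapped relative to the A-B branch -- the code negates A and
                   computes B + (~A + 1), i.e. B - A, so the difference comes
                   out of the same two's complement machinery. */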
                // Subtraction Process
                // Step 1: invert the 32-bit chunks of the first input value
                NOT(inverse1, ciphertext1, bk, 32);
                NOT(inverse2, ciphertext2, bk, 32);
                // initialize temp and the temp carries to 0
                zero(temp, bk, 32);
                zero(tempcarry1, bk, 32);
                zero(tempcarry2, bk, 32);
                // set temp to 1 for the two's complement
                bootsCONSTANT(temp, 1, bk);
                // Add 1 to the inverted value
                add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
                // Add the rest of the inverted value, propagating the carry
                add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);

                LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                // Do the addition: add the negated first value to the second value, (-a) + b
                add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
                add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk);

                gettimeofday(&end, NULL);
                get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
                printf("Computation Time: %lf[sec]\n", get_time);
                printf("writing the answer to file...\n");

                // export the result ciphertexts to a file (for the cloud)
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); // result1
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); // result2
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 3
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 4
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 5
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 6
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 7
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // 8
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); // carry
                fclose(answer_data);

                delete_gate_bootstrapping_ciphertext_array(32, temp);
                delete_gate_bootstrapping_ciphertext_array(32, inverse1);
                delete_gate_bootstrapping_ciphertext_array(32, inverse2);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
                delete_gate_bootstrapping_ciphertext_array(32, carry1);
                delete_gate_bootstrapping_ciphertext_array(32, carry2);
                delete_gate_bootstrapping_ciphertext_array(32, result1);
                delete_gate_bootstrapping_ciphertext_array(32, result2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
            }
            else if (int_bit == 128) {
                printf("Doing the homomorphic computation...\n");
                // do some operations on the ciphertexts: here, we will compute
                // the difference of the two
                LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);

                struct timeval start, end;
                double get_time;
                gettimeofday(&start, NULL);

                // Subtraction Process
                // Step 1: invert the 32-bit chunks of the first input value
                NOT(inverse1, ciphertext1, bk, 32);
                NOT(inverse2, ciphertext2, bk, 32);
                NOT(inverse3, ciphertext3, bk, 32);
                NOT(inverse4, ciphertext4, bk, 32);
                // initialize temp and the temp carries to 0
                zero(temp, bk, 32);
                zero(tempcarry1, bk, 32);
                zero(tempcarry2, bk, 32);
                zero(tempcarry3, bk, 32);
                zero(tempcarry4, bk, 32);
                // set temp to 1 for the two's complement
                bootsCONSTANT(temp, 1, bk);
                // Add 1 to the inverted value
                add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
                // Add the rest of the inverted value, propagating the carry
                add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
                add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
                add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);

                LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
                // Do the addition: add the negated first value to the second value, (-a) + b
                add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
                add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk);
                add(result3, carry3, ciphertext11, twosresult3, carry2, 32, bk);
                add(result4, carry4, ciphertext12, twosresult4, carry3, 32, bk);

                gettimeofday(&end, NULL);
start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); printf("writing the answer to file...\n"); //export the 32 ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); for (int i=0; i<32; i++) // result2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); for (int i=0; i<32; i++) // result3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params); for (int i=0; i<32; i++) // result4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); //clean up all pointers delete_gate_bootstrapping_ciphertext_array(32, temp); delete_gate_bootstrapping_ciphertext_array(32, inverse1); delete_gate_bootstrapping_ciphertext_array(32, inverse2); delete_gate_bootstrapping_ciphertext_array(32, inverse3); delete_gate_bootstrapping_ciphertext_array(32, inverse4); delete_gate_bootstrapping_ciphertext_array(32, tempcarry1); delete_gate_bootstrapping_ciphertext_array(32, tempcarry2); delete_gate_bootstrapping_ciphertext_array(32, tempcarry3); delete_gate_bootstrapping_ciphertext_array(32, tempcarry4); delete_gate_bootstrapping_ciphertext_array(32, twosresult1); delete_gate_bootstrapping_ciphertext_array(32, twosresult2); delete_gate_bootstrapping_ciphertext_array(32, twosresult3); delete_gate_bootstrapping_ciphertext_array(32, twosresult4); delete_gate_bootstrapping_ciphertext_array(32, twoscarry1); delete_gate_bootstrapping_ciphertext_array(32, twoscarry2); delete_gate_bootstrapping_ciphertext_array(32, twoscarry3); delete_gate_bootstrapping_ciphertext_array(32, twoscarry4); delete_gate_bootstrapping_ciphertext_array(32, carry1); delete_gate_bootstrapping_ciphertext_array(32, carry2); delete_gate_bootstrapping_ciphertext_array(32, carry3); delete_gate_bootstrapping_ciphertext_array(32, carry4); delete_gate_bootstrapping_ciphertext_array(32, result1); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, result3); delete_gate_bootstrapping_ciphertext_array(32, result4); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext3); delete_gate_bootstrapping_ciphertext_array(32, ciphertext4); delete_gate_bootstrapping_ciphertext_array(32, ciphertext5); delete_gate_bootstrapping_ciphertext_array(32, ciphertext6); delete_gate_bootstrapping_ciphertext_array(32, ciphertext7); delete_gate_bootstrapping_ciphertext_array(32, ciphertext8); 
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); } else if (int_bit == 256){ printf("Doing the homomorphic computation...\n"); //do some operations on the ciphertexts: here, we will compute the //difference of the two LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry8 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); //Subtraction Process //Step 1. 
Invert the 32-bit chunks in the first input value NOT(inverse1, ciphertext1, bk, 32); NOT(inverse2, ciphertext2, bk, 32); NOT(inverse3, ciphertext3, bk, 32); NOT(inverse4, ciphertext4, bk, 32); NOT(inverse5, ciphertext5, bk, 32); NOT(inverse6, ciphertext6, bk, 32); NOT(inverse7, ciphertext7, bk, 32); NOT(inverse8, ciphertext8, bk, 32); //initialize temp and the temp carries to 0 zero(temp, bk, 32); zero(tempcarry1, bk, 32); zero(tempcarry2, bk, 32); zero(tempcarry3, bk, 32); zero(tempcarry4, bk, 32); zero(tempcarry5, bk, 32); zero(tempcarry6, bk, 32); zero(tempcarry7, bk, 32); zero(tempcarry8, bk, 32); //Assign temp a value of 1 for the two's complement bootsCONSTANT(temp, 1, bk); //Add 1 to the lowest inverted chunk add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk); //Ripple the carry through the remaining inverted chunks add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk); add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk); add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk); add(twosresult5, twoscarry5, inverse5, tempcarry5, twoscarry4, 32, bk); add(twosresult6, twoscarry6, inverse6, tempcarry6, twoscarry5, 32, bk); add(twosresult7, twoscarry7, inverse7, tempcarry7, twoscarry6, 32, bk); add(twosresult8, twoscarry8, inverse8, tempcarry8, twoscarry7, 32, bk); LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params); //Do the addition: this adds the two's complement of the first value to the second value, i.e. (-a) + b add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk); add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk); add(result3, carry3, ciphertext11, twosresult3, carry2, 32, bk); add(result4, carry4, ciphertext12, twosresult4, carry3, 32, bk); add(result5, carry5, ciphertext13, twosresult5, carry4, 32, bk); add(result6, carry6, ciphertext14, twosresult6, carry5, 32, bk); add(result7, carry7, ciphertext15, twosresult7, carry6, 32, bk); add(result8, carry8, ciphertext16, twosresult8, carry7, 32, bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); printf("Writing the answer to file...\n"); //export the 32-bit ciphertext arrays to a file (for the cloud) for (int i=0; i<32; i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); for (int i=0; i<32; i++) // result2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); for (int i=0; i<32; i++) // result3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params); for (int i=0; i<32; i++) // result4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params); for (int i=0; i<32; i++) // result5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params); for (int i=0; i<32; i++) // result6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params); for (int i=0; i<32; i++) // result7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params); for (int i=0; i<32; i++) // result8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); //clean up all pointers delete_gate_bootstrapping_ciphertext_array(32, temp); delete_gate_bootstrapping_ciphertext_array(32, inverse1); delete_gate_bootstrapping_ciphertext_array(32, inverse2); delete_gate_bootstrapping_ciphertext_array(32, inverse3); delete_gate_bootstrapping_ciphertext_array(32, inverse4); delete_gate_bootstrapping_ciphertext_array(32, inverse5); delete_gate_bootstrapping_ciphertext_array(32, inverse6); delete_gate_bootstrapping_ciphertext_array(32, inverse7); delete_gate_bootstrapping_ciphertext_array(32, inverse8); delete_gate_bootstrapping_ciphertext_array(32, tempcarry1); delete_gate_bootstrapping_ciphertext_array(32, tempcarry2); delete_gate_bootstrapping_ciphertext_array(32, tempcarry3); delete_gate_bootstrapping_ciphertext_array(32, tempcarry4); delete_gate_bootstrapping_ciphertext_array(32, tempcarry5); delete_gate_bootstrapping_ciphertext_array(32, tempcarry6); delete_gate_bootstrapping_ciphertext_array(32, tempcarry7); delete_gate_bootstrapping_ciphertext_array(32, tempcarry8); delete_gate_bootstrapping_ciphertext_array(32, twosresult1); delete_gate_bootstrapping_ciphertext_array(32, twosresult2); delete_gate_bootstrapping_ciphertext_array(32, twosresult3); delete_gate_bootstrapping_ciphertext_array(32, twosresult4); delete_gate_bootstrapping_ciphertext_array(32, twosresult5); delete_gate_bootstrapping_ciphertext_array(32, twosresult6); delete_gate_bootstrapping_ciphertext_array(32, twosresult7); delete_gate_bootstrapping_ciphertext_array(32, twosresult8); delete_gate_bootstrapping_ciphertext_array(32, twoscarry1); delete_gate_bootstrapping_ciphertext_array(32, twoscarry2); delete_gate_bootstrapping_ciphertext_array(32, twoscarry3); delete_gate_bootstrapping_ciphertext_array(32, twoscarry4); delete_gate_bootstrapping_ciphertext_array(32, twoscarry5); delete_gate_bootstrapping_ciphertext_array(32, twoscarry6); delete_gate_bootstrapping_ciphertext_array(32, twoscarry7); delete_gate_bootstrapping_ciphertext_array(32, twoscarry8); delete_gate_bootstrapping_ciphertext_array(32, carry1); delete_gate_bootstrapping_ciphertext_array(32, carry2); delete_gate_bootstrapping_ciphertext_array(32, carry3); delete_gate_bootstrapping_ciphertext_array(32, carry4); delete_gate_bootstrapping_ciphertext_array(32, carry5); delete_gate_bootstrapping_ciphertext_array(32, carry6); delete_gate_bootstrapping_ciphertext_array(32, carry7); delete_gate_bootstrapping_ciphertext_array(32, carry8); delete_gate_bootstrapping_ciphertext_array(32, result1); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, result3); delete_gate_bootstrapping_ciphertext_array(32, result4); delete_gate_bootstrapping_ciphertext_array(32, result5); delete_gate_bootstrapping_ciphertext_array(32, result6); delete_gate_bootstrapping_ciphertext_array(32, result7); delete_gate_bootstrapping_ciphertext_array(32, result8); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32,
ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext3); delete_gate_bootstrapping_ciphertext_array(32, ciphertext4); delete_gate_bootstrapping_ciphertext_array(32, ciphertext5); delete_gate_bootstrapping_ciphertext_array(32, ciphertext6); delete_gate_bootstrapping_ciphertext_array(32, ciphertext7); delete_gate_bootstrapping_ciphertext_array(32, ciphertext8); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertext10); delete_gate_bootstrapping_ciphertext_array(32, ciphertext11); delete_gate_bootstrapping_ciphertext_array(32, ciphertext12); delete_gate_bootstrapping_ciphertext_array(32, ciphertext13); delete_gate_bootstrapping_ciphertext_array(32, ciphertext14); delete_gate_bootstrapping_ciphertext_array(32, ciphertext15); delete_gate_bootstrapping_ciphertext_array(32, ciphertext16); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); } } } // If Multiplication else if (int_op == 4){ std::cout << int_bit << " bit Multiplication computation" << "\n"; if (int_bit == 128){ printf("Doing the homomorphic computation...\n"); // do some operations on the ciphertexts: here, we will compute the // product of the two LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result9 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result10 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result11 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result12 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result13 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result14 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result15 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result16 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result17 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result18 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result19 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result20 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* 
sum9 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum10 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum11 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum12 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum13 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum14 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum15 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover9 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover10 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover11 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover12 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover13 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover14 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover15 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); //result1 mul128(result1, result2, result3, result4, result5, ciphertext1, ciphertext2,ciphertext3,ciphertext4,ciphertext9,ciphertextcarry1, 32, bk); //result2 mul128(result6, result7, result8, result9, result10, ciphertext1, ciphertext2, ciphertext3, ciphertext4, ciphertext10,ciphertextcarry1, 32, bk); //result3 mul128(result11, result12, result13, result14, result15, ciphertext1, ciphertext2,ciphertext3,ciphertext4,ciphertext11,ciphertextcarry1, 32, bk); //result4 mul128(result16,result17, result18,result19,result20, ciphertext1, ciphertext2,ciphertext3,ciphertext4,ciphertext12,ciphertextcarry1, 32, bk); add(sum1, carryover1, result10, result4, ciphertextcarry1, 32, bk); add(sum2, carryover2, result9, result3,carryover1,32, bk); add(sum3, carryover3, result8, result2,carryover2,32, bk); add(sum4, carryover4, result7, result1,carryover3,32, bk); add(sum5, carryover5, result6, ciphertextcarry1,carryover4,32, bk); add(sum6, carryover6, sum2, result15,carryover5,32, bk); add(sum7, carryover7, sum3, result14,carryover6,32, bk); add(sum8, carryover8, sum4, result13,carryover7,32, bk); add(sum9, carryover9, sum5, result12,carryover8,32, bk); add(sum10, carryover10, result11, ciphertextcarry1,carryover9,32, bk); add(sum11, carryover11, sum7, result20,carryover10,32, bk); add(sum12, carryover12, sum8, result19,carryover11,32, bk); add(sum13, carryover13, sum9, result18,carryover12,32, bk); add(sum14, carryover14, sum10, result17,carryover13,32, bk); add(sum15, carryover15, result16 , ciphertextcarry1,carryover14,32, bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); // write computation time to file FILE *t_file; t_file = fopen(T_FILE, "a"); fprintf(t_file, "%lf\n", get_time); fclose(t_file); 
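/* Editor's note on the accumulation above (an interpretation of the code, not original documentation): each mul128(...) call appears to multiply the full 128-bit operand held in ciphertext1..4 by one 32-bit chunk of the other operand (ciphertext9..12), yielding a 160-bit partial product in five 32-bit result chunks. The add(...) cascade then performs schoolbook long multiplication: each successive partial product is offset by one 32-bit limb and folded into the running total, with carryover1..carryover15 rippling the carries between limbs. The exported answer (result5, sum1, sum6, sum11..sum15) is the low portion of the full product. */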
printf("writing the answer to file...\n"); //export the 32 ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params); for (int i=0; i<32; i++) // result2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum1[i], params); for (int i=0; i<32; i++) // result3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum6[i], params); for (int i=0; i<32; i++) // result4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum11[i], params); for (int i=0; i<32; i++) // result5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum12[i], params); for (int i=0; i<32; i++) // result6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum13[i], params); for (int i=0; i<32; i++) // result7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum14[i], params); for (int i=0; i<32; i++) // result8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum15[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); // clean up all pointers delete_gate_bootstrapping_ciphertext_array(32, result1); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, result3); delete_gate_bootstrapping_ciphertext_array(32, result4); delete_gate_bootstrapping_ciphertext_array(32, result5); delete_gate_bootstrapping_ciphertext_array(32, result6); delete_gate_bootstrapping_ciphertext_array(32, result7); delete_gate_bootstrapping_ciphertext_array(32, result8); delete_gate_bootstrapping_ciphertext_array(32, result9); delete_gate_bootstrapping_ciphertext_array(32, result10); delete_gate_bootstrapping_ciphertext_array(32, result11); delete_gate_bootstrapping_ciphertext_array(32, result12); delete_gate_bootstrapping_ciphertext_array(32, result13); delete_gate_bootstrapping_ciphertext_array(32, result14); delete_gate_bootstrapping_ciphertext_array(32, result15); delete_gate_bootstrapping_ciphertext_array(32, result16); delete_gate_bootstrapping_ciphertext_array(32, result17); delete_gate_bootstrapping_ciphertext_array(32, result18); delete_gate_bootstrapping_ciphertext_array(32, result19); delete_gate_bootstrapping_ciphertext_array(32, result20); delete_gate_bootstrapping_ciphertext_array(32, sum1); delete_gate_bootstrapping_ciphertext_array(32, sum2); delete_gate_bootstrapping_ciphertext_array(32, sum3); delete_gate_bootstrapping_ciphertext_array(32, sum4); delete_gate_bootstrapping_ciphertext_array(32, sum5); delete_gate_bootstrapping_ciphertext_array(32, sum6); delete_gate_bootstrapping_ciphertext_array(32, sum7); delete_gate_bootstrapping_ciphertext_array(32, sum8); delete_gate_bootstrapping_ciphertext_array(32, sum9); delete_gate_bootstrapping_ciphertext_array(32, sum10); delete_gate_bootstrapping_ciphertext_array(32, sum11); delete_gate_bootstrapping_ciphertext_array(32, sum12); delete_gate_bootstrapping_ciphertext_array(32, sum13); delete_gate_bootstrapping_ciphertext_array(32, sum14); delete_gate_bootstrapping_ciphertext_array(32, sum15); delete_gate_bootstrapping_ciphertext_array(32, carryover1); delete_gate_bootstrapping_ciphertext_array(32, carryover2); delete_gate_bootstrapping_ciphertext_array(32, carryover3); delete_gate_bootstrapping_ciphertext_array(32, carryover4); delete_gate_bootstrapping_ciphertext_array(32, carryover5); delete_gate_bootstrapping_ciphertext_array(32, carryover6); delete_gate_bootstrapping_ciphertext_array(32, 
carryover7); delete_gate_bootstrapping_ciphertext_array(32, carryover8); delete_gate_bootstrapping_ciphertext_array(32, carryover9); delete_gate_bootstrapping_ciphertext_array(32, carryover10); delete_gate_bootstrapping_ciphertext_array(32, carryover11); delete_gate_bootstrapping_ciphertext_array(32, carryover12); delete_gate_bootstrapping_ciphertext_array(32, carryover13); delete_gate_bootstrapping_ciphertext_array(32, carryover14); delete_gate_bootstrapping_ciphertext_array(32, carryover15); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext3); delete_gate_bootstrapping_ciphertext_array(32, ciphertext4); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertext10); delete_gate_bootstrapping_ciphertext_array(32, ciphertext11); delete_gate_bootstrapping_ciphertext_array(32, ciphertext12); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); } else if (int_bit == 64){ printf("Doing the homomorphic computation...\n"); // do some operations on the ciphertexts: here, we will compute the // product of the two LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* finalresult = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* finalresult2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* finalresult3 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); //result1 mul64(result1,result2, result3, ciphertext1, ciphertext2,ciphertext9,ciphertextcarry1, 32, bk); //result2 mul64(result4,result5, result6, ciphertext1, ciphertext2,ciphertext10,ciphertextcarry1, 32, bk); split(finalresult,finalresult2, finalresult3, result1, result2,result4,result5,result6,ciphertextcarry1,32,bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); // write computation time to file FILE *t_file; t_file = fopen(T_FILE, "a"); fprintf(t_file, "%lf\n", get_time); fclose(t_file); printf("writing the answer to file...\n"); //export the 32 ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params); for (int i=0; i<32; i++) // result2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult3[i], params); for (int i=0; i<32; i++) // result3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult2[i], params); for (int i=0; i<32; i++) // result4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, 
&ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); // clean up all pointers delete_gate_bootstrapping_ciphertext_array(32, result1); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, result3); delete_gate_bootstrapping_ciphertext_array(32, result4); delete_gate_bootstrapping_ciphertext_array(32, result5); delete_gate_bootstrapping_ciphertext_array(32, result6); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertext10); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_ciphertext_array(32, finalresult); delete_gate_bootstrapping_ciphertext_array(32, finalresult2); delete_gate_bootstrapping_ciphertext_array(32, finalresult3); delete_gate_bootstrapping_cloud_keyset(bk); } else if (int_bit == 32){ printf("Doing the homomorphic computation...\n"); // do some operations on the ciphertexts: here, we will compute the // product of the two LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); //result1 mul32(result1,result2,ciphertext1, ciphertext9,ciphertextcarry1, 32, bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); // write computation time to file FILE *t_file; t_file = fopen(T_FILE, "a"); fprintf(t_file, "%lf\n", get_time); fclose(t_file); printf("writing the answer to file...\n"); //export the 32 ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); for (int i=0; i<32; i++) // result2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); for (int i=0; i<32; i++) // 3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // carry 
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); // clean up all pointers delete_gate_bootstrapping_ciphertext_array(32, result1); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); } } }
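/*
 * Editor's sketch (not part of the original program): the chunked
 * two's-complement subtraction used in the branches above computes
 * b - a as b + (~a + 1) over 32-bit limbs. A plaintext reference of
 * that limb arithmetic, with hypothetical helper names, looks like this:
 */
#include <stdint.h>
/* out = b - a over n 32-bit limbs, least-significant limb first */
static void limb_sub_ref(uint32_t* out, const uint32_t* b, const uint32_t* a, int n)
{
    uint64_t carry = 1; /* the "+1" of the two's complement, like bootsCONSTANT(temp, 1, bk) */
    for (int i = 0; i < n; i++) {
        /* invert one limb and add with carry, like the NOT(...) and add(...) chunk chain */
        uint64_t t = (uint64_t)b[i] + (uint32_t)~a[i] + carry;
        out[i] = (uint32_t)t; /* low 32 bits, the resultN chunk */
        carry = t >> 32;      /* ripple carry, the carryN chunk */
    }
}
/* The homomorphic version first materializes ~a + 1 (twosresultN/twoscarryN)
 * and only then adds b; fusing the two passes, as done here, is equivalent
 * modulo 2^(32*n). */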
sections_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized void xxx(int argc) { int x; // expected-note {{initialize the variable 'x' to silence this warning}} #pragma omp sections { argc = x; // expected-warning {{variable 'x' is uninitialized when used here}} } } void foo(); // expected-error@+1 {{unexpected OpenMP directive '#pragma omp sections'}} #pragma omp sections // expected-error@+1 {{unexpected OpenMP directive '#pragma omp sections'}} #pragma omp sections foo void test_no_clause() { int i; #pragma omp sections { foo(); } // expected-error@+2 {{the statement for '#pragma omp sections' must be a compound statement}} #pragma omp sections ++i; #pragma omp sections { foo(); foo(); // expected-error {{statement in 'omp sections' directive must be enclosed into a section region}} } } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp parallel #pragma omp sections { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } #pragma omp section if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L3; else if (i == 8) { L3: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; goto L3; // expected-error {{use of undeclared label 'L3'}} } void test_invalid_clause() { int i; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp sections' are ignored}} #pragma omp sections foo bar { foo(); // expected-error@+1 {{unexpected OpenMP clause 'nowait' in directive '#pragma omp section'}} #pragma omp section nowait ; } } void test_non_identifiers() { int i, x; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp sections' are ignored}} #pragma omp sections; { foo(); } #pragma omp parallel // expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp sections'}} // expected-warning@+1 {{extra tokens at the end of '#pragma omp sections' are ignored}} #pragma omp sections linear(x); { foo(); } #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp sections' are ignored}} #pragma omp sections private(x); { foo(); } #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp sections' are ignored}} #pragma omp sections, private(x); { foo(); } } void test_private() { int i; #pragma omp parallel // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp sections private( { foo(); } #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp sections private(, { foo(); } #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp sections private(, ) { foo(); } #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp sections private() { foo(); } #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp sections private(int) { foo(); } #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp sections private(0) { foo(); } int x, y, z; #pragma omp parallel #pragma omp 
sections private(x) { foo(); } #pragma omp parallel #pragma omp sections private(x, y) { foo(); } #pragma omp parallel #pragma omp sections private(x, y, z) { foo(); } } void test_lastprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp sections lastprivate( { foo(); } #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp sections lastprivate(, { foo(); } #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp sections lastprivate(, ) { foo(); } #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp sections lastprivate() { foo(); } #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp sections lastprivate(int) { foo(); } #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp sections lastprivate(0) { foo(); } int x, y, z; #pragma omp parallel #pragma omp sections lastprivate(x) { foo(); } #pragma omp parallel #pragma omp sections lastprivate(x, y) { foo(); } #pragma omp parallel #pragma omp sections lastprivate(x, y, z) { foo(); } } void test_firstprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp sections firstprivate( { foo(); } #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp sections firstprivate(, { foo(); } #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp sections firstprivate(, ) { foo(); } #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp sections firstprivate() { foo(); } #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp sections firstprivate(int) { foo(); } #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp sections firstprivate(0) { foo(); } int x, y, z; #pragma omp parallel #pragma omp sections lastprivate(x) firstprivate(x) { foo(); } #pragma omp parallel #pragma omp sections lastprivate(x, y) firstprivate(x, y) { foo(); } #pragma omp parallel #pragma omp sections lastprivate(x, y, z) firstprivate(x, y, z) { foo(); } } void test_nowait() { #pragma omp parallel #pragma omp sections nowait nowait // expected-error {{directive '#pragma omp sections' cannot contain more than one 'nowait' clause}} { ; } }
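// Editor's sketch (not one of the original -verify cases): a well-formed
// sections construct for contrast -- every statement sits in its own section
// region, the clauses are spelled correctly, and 'nowait' appears exactly
// once on the sections directive itself, so no diagnostics are expected.
void test_well_formed() {
  int x = 0, y = 0;
#pragma omp parallel
#pragma omp sections lastprivate(x) firstprivate(y) nowait
  {
#pragma omp section
    x = y + 1;
#pragma omp section
    y = 2;
  }
}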
pooling_hcl_arm_int8.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: [email protected] */ #include "pooling_param.h" #include "graph/tensor.h" #include "graph/node.h" #include "graph/graph.h" #include "module/module.h" #include "operator/op.h" #include "utility/float.h" #include "utility/sys_port.h" #include "utility/log.h" #include "device/cpu/cpu_node.h" #include "device/cpu/cpu_graph.h" #include "device/cpu/cpu_module.h" #include <assert.h> #include <math.h> #include <string.h> #include <arm_neon.h> #define POOL_GENERIC 0 #define POOL_K2S2 1 #define POOL_K3S2 2 #define POOL_K3S1 3 static inline int8_t arm_max_int8(int8_t a, int8_t b) { if (a > b) return a; else return b; } static inline int8_t arm_min_int8(int8_t a, int8_t b) { if (a > b) return b; else return a; } typedef void (*pooling_kernel_int8_t)(const void* input, void* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe, float in_scale, float out_scale); static void pad_0_align_2D_int8(int8_t* dst, int8_t* src, int m, int n, int m_align, int n_align, int pad_h, int pad_w) { int i; if (n >= n_align && m >= m_align) { memcpy(dst, src, m * n * sizeof(int8_t)); return; } for (i = 0; i < m; ++i) { memcpy(dst + (i + pad_h) * n_align + pad_w, src + i * n, n * sizeof(int8_t)); } } // pad 0 on the right and bottom sides in 3D static void pad_0_align_3D_int8(int8_t* dst, int8_t* src, int m, int n, int m_align, int n_align, int c, int pad_h, int pad_w) { int i; if (n >= n_align && m >= m_align) { memcpy(dst, src, c * m * n * sizeof(int8_t)); return; } for (i = 0; i < c; ++i) { pad_0_align_2D_int8(dst + i * m_align * n_align, src + i * m * n, m, n, m_align, n_align, pad_h, pad_w); } } static void delete_0_2D_int8(int8_t* dst, int8_t* src, int m_align, int n_align, int m, int n, int pad_h, int pad_w) { int i; if (n >= n_align && m >= m_align) { memcpy(dst, src, m * n * sizeof(int8_t)); return; } for (i = 0; i < m; ++i) { memcpy(dst + i * n, src + (i + pad_h) * n_align + pad_w, n * sizeof(int8_t)); } } // remove the 0 padding from the right and bottom sides in 3D static void delete_0_3D_int8(int8_t* dst, int8_t* src, int m_align, int n_align, int m, int n, int c, int pad_h, int pad_w) { int i; if (n >= n_align && m >= m_align) { memcpy(dst, src, c * m * n * sizeof(int8_t)); return; } for (i = 0; i < c; ++i) { delete_0_2D_int8(dst + i * m * n, src + i * m_align * n_align, m_align, n_align, m, n, pad_h, pad_w); } } static void avg_2x2s2_int8(const int8_t* input, int8_t* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe, float in_scale, float out_scale) { int in_hw = inw * inh; int out_hw = outh * outw; if (pad_w1 > 0) { outw--; } if
(pad_h1 > 0) { outh--; } int block_w = outw >> 3; int remain_w = inw - outw * 2; int index = 0; for (int c = 0; c < inc; c++) { index = 0; const int8_t* line0 = input + c * in_hw; const int8_t* line1 = line0 + inw; int8_t* out_ptr = output + c * out_hw; for (int i = 0; i < outh; i++) { for (int j = 0; j < block_w; j++) { int8x8_t p00 = vld1_s8(line0); int8x8_t p10 = vld1_s8(line1); int16x8_t sum0 = vaddl_s8(p00, p10); int8x8_t p01 = vld1_s8(line0 + 8); int8x8_t p11 = vld1_s8(line1 + 8); int16x8_t sum1 = vaddl_s8(p01, p11); #ifdef __aarch64__ /* pairwise add */ sum0 = vpaddq_s16(sum0, sum1); for (int n = 0; n < 8; n++) { out_ptr[n] = (int8_t)round(sum0[n] / 4.0); } #else /* pairwise add */ int32x4_t suml0 = vpaddlq_s16(sum0); int32x4_t suml1 = vpaddlq_s16(sum1); for (int n = 0; n < 4; n++) { out_ptr[n] = (int8_t)round(suml0[n] / 4.0); out_ptr[n + 4] = (int8_t)round(suml1[n] / 4.0); } #endif line0 += 16; line1 += 16; out_ptr = out_ptr + 8; index = index + 8; } index = block_w * 8; if (outw - index >= 4) { int8x8_t p00 = vld1_s8(line0); int8x8_t p10 = vld1_s8(line1); int16x8_t sum0 = vaddl_s8(p00, p10); #ifdef __aarch64__ /* pairwise add */ int16x8_t sum1 = {0}; sum0 = vpaddq_s16(sum0, sum1); for (int n = 0; n < 4; n++) { out_ptr[n] = (int8_t)round(sum0[n] / 4.0); } #else /* pairwise add */ int32x4_t suml0 = vpaddlq_s16(sum0); for (int n = 0; n < 4; n++) { out_ptr[n] = (int8_t)round(suml0[n] / 4.0); } #endif line0 += 8; line1 += 8; out_ptr = out_ptr + 4; index = index + 4; } for (; index < outw; index++) { *out_ptr = (int8_t)round((line0[0] + line0[1] + line1[0] + line1[1]) / 4.0); out_ptr++; line0 += 2; line1 += 2; } if (pad_w1 > 0) { *out_ptr = (int8_t)round((line0[0] + line1[0]) / 2.0); out_ptr++; } line0 += remain_w + inw; line1 += remain_w + inw; } if (pad_h1) { /* padded bottom row: each window holds one input row, so average over 2 samples */ index = 0; for (int j = 0; j < block_w; j++) { int8x8_t p00 = vld1_s8(line0); int8x8_t p01 = vld1_s8(line0 + 8); int8x8_t p02 = {0}; /* pairwise add */ int16x8_t sum0 = vaddl_s8(p00, p02); int16x8_t sum1 = vaddl_s8(p01, p02); #ifdef __aarch64__ sum0 = vpaddq_s16(sum0, sum1); for (int n = 0; n < 8; n++) { out_ptr[n] = (int8_t)round(sum0[n] / 2.0); } #else int32x4_t suml0 = vpaddlq_s16(sum0); int32x4_t suml1 = vpaddlq_s16(sum1); for (int n = 0; n < 4; n++) { out_ptr[n] = (int8_t)round(suml0[n] / 2.0); out_ptr[n + 4] = (int8_t)round(suml1[n] / 2.0); } #endif line0 += 16; out_ptr = out_ptr + 8; index = index + 8; } index = block_w * 8; if (outw - index >= 4) { int8x8_t p00 = vld1_s8(line0); int8x8_t p01 = {0}; int16x8_t sum0 = vaddl_s8(p00, p01); #ifdef __aarch64__ /* pairwise add */ int16x8_t sum1 = {0}; sum0 = vpaddq_s16(sum0, sum1); for (int n = 0; n < 4; n++) { out_ptr[n] = (int8_t)round(sum0[n] / 2.0); } #else /* pairwise add */ int32x4_t suml0 = vpaddlq_s16(sum0); for (int n = 0; n < 4; n++) { out_ptr[n] = (int8_t)round(suml0[n] / 2.0); } #endif line0 += 8; out_ptr = out_ptr + 4; index = index + 4; } for (; index < outw; index++) { int sum0 = line0[0] + line0[1]; *out_ptr = (int8_t)round(sum0 / 2.0); out_ptr++; line0 += 2; line1 += 2; } if (pad_w1 > 0) { *out_ptr = line0[0]; out_ptr++; } } } } static void max_2x2s2_int8(const int8_t* input, int8_t* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe, float in_scale, float out_scale) { int in_hw = inw * inh; int out_hw = outh * outw; if (pad_w1 > 0) { outw--; } if (pad_h1 > 0) { outh--; } #ifdef __aarch64__ int block_w = outw >> 4; #else int block_w = outw >> 3; #endif int remain_w = inw - outw * 2; int index = 0; for
(int c = 0; c < inc; c++) { const int8_t* line0 = input + c * in_hw; const int8_t* line1 = line0 + inw; int8_t* out_ptr = output + c * out_hw; for (int i = 0; i < outh; i++) { for (int j = 0; j < block_w; j++) { #ifdef __aarch64__ int8x16_t p00 = vld1q_s8(line0); int8x16_t p10 = vld1q_s8(line1); int8x16_t max0 = vmaxq_s8(p00, p10); int8x16_t p01 = vld1q_s8(line0 + 16); int8x16_t p11 = vld1q_s8(line1 + 16); int8x16_t max1 = vmaxq_s8(p01, p11); /* pairwise max */ int8x16_t _max = vpmaxq_s8(max0, max1); vst1q_s8(out_ptr, _max); line0 += 32; line1 += 32; out_ptr += 16; } index = block_w * 16; #else int8x8_t p00 = vld1_s8(line0); int8x8_t p10 = vld1_s8(line1); int8x8_t max0 = vmax_s8(p00, p10); int8x8_t p01 = vld1_s8(line0 + 8); int8x8_t p11 = vld1_s8(line1 + 8); int8x8_t max1 = vmax_s8(p01, p11); /* pairwise max */ int8x8_t _max = vpmax_s8(max0, max1); vst1_s8(out_ptr, _max); line0 += 16; line1 += 16; out_ptr += 8; } index = block_w * 8; #endif if (outw - index >= 8) { int8x8_t p00 = vld1_s8(line0); int8x8_t p10 = vld1_s8(line1); int8x8_t max0 = vmax_s8(p00, p10); int8x8_t p01 = vld1_s8(line0 + 8); int8x8_t p11 = vld1_s8(line1 + 8); int8x8_t max1 = vmax_s8(p01, p11); /* pairwise max */ int8x8_t _max = vpmax_s8(max0, max1); vst1_s8(out_ptr, _max); line0 += 16; line1 += 16; out_ptr = out_ptr + 8; index = index + 8; } if (outw - index >= 4) { int8x8_t p00 = vld1_s8(line0); int8x8_t p10 = vld1_s8(line1); int8x8_t max0 = vmax_s8(p00, p10); /* pairwise max */ int8x8_t max1 = {0}; int8x8_t _max = vpmax_s8(max0, max1); out_ptr[0] = _max[0]; out_ptr[1] = _max[1]; out_ptr[2] = _max[2]; out_ptr[3] = _max[3]; line0 += 8; line1 += 8; out_ptr = out_ptr + 4; index = index + 4; } for (; index < outw; index++) { int8_t max0 = arm_max_int8(line0[0], line0[1]); int8_t max1 = arm_max_int8(line1[0], line1[1]); *out_ptr = arm_max_int8(max0, max1); out_ptr++; line0 += 2; line1 += 2; } if (pad_w1 > 0) { *out_ptr = arm_max_int8(line0[0], line1[0]); out_ptr++; } line0 += remain_w + inw; line1 += remain_w + inw; } if (pad_h1 > 0) { for (int j = 0; j < block_w; j++) { #ifdef __aarch64__ int8x16_t p00 = vld1q_s8(line0); int8x16_t p01 = vld1q_s8(line0 + 16); /* pairwise max */ int8x16_t _max = vpmaxq_s8(p00, p01); vst1q_s8(out_ptr, _max); line0 += 32; out_ptr += 16; } index = block_w * 16; #else int8x8_t p00 = vld1_s8(line0); int8x8_t p01 = vld1_s8(line0 + 8); /* pairwise max */ int8x8_t _max = vpmax_s8(p00, p01); vst1_s8(out_ptr, _max); line0 += 16; out_ptr += 8; } index = block_w * 8; #endif if (outw - index >= 8) { int8x8_t p00 = vld1_s8(line0); int8x8_t p01 = vld1_s8(line0 + 8); /* pairwise max */ int8x8_t _max = vpmax_s8(p00, p01); vst1_s8(out_ptr, _max); line0 += 16; out_ptr = out_ptr + 8; index = index + 8; } if (outw - index >= 4) { int8x8_t p00 = vld1_s8(line0); /* pairwise max */ int8x8_t p01 = {0}; int8x8_t _max = vpmax_s8(p00, p01); out_ptr[0] = _max[0]; out_ptr[1] = _max[1]; out_ptr[2] = _max[2]; out_ptr[3] = _max[3]; line0 += 8; out_ptr = out_ptr + 4; index = index + 4; } for (; index < outw; index++) { *out_ptr = arm_max_int8(line0[0], line0[1]); out_ptr++; line0 += 2; } if (pad_w1 > 0) { *out_ptr = line0[0]; /* bottom-right corner: only one valid sample */ out_ptr++; } } } } static void avg_3x3s2_int8(const int8_t* input, int8_t* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe, float in_scale, float out_scale) { int in_hw = inw * inh; int out_hw = outh * outw; if (pad_w1 > 0) { outw--; } if (pad_h1 > 0) { outh--; } int
block_w = outw >> 3; int remain_w = inw - outw * 2; int index = 0; for (int c = 0; c < inc; c++) { const int8_t* line0 = input + c * in_hw; const int8_t* line1 = line0 + inw; const int8_t* line2 = line1 + inw; int8_t* out_ptr = output + c * out_hw; for (int i = 0; i < outh; i++) { index = 0; for (int j = 0; j < block_w; j++) { int8x8x2_t p00 = vld2_s8(line0); int8x8x2_t p10 = vld2_s8(line1); int8x8x2_t p20 = vld2_s8(line2); int8x8x2_t p00_new = vld2_s8(line0 + 16); int16x8_t sum0 = vaddl_s8(p00.val[0], p00.val[1]); int8x8_t p01 = vext_s8(p00.val[0], p00_new.val[0], 1); sum0 = vaddw_s8(sum0, p01); int8x8x2_t p10_new = vld2_s8(line1 + 16); sum0 = vaddw_s8(sum0, p10.val[0]); sum0 = vaddw_s8(sum0, p10.val[1]); int8x8_t p11 = vext_s8(p10.val[0], p10_new.val[0], 1); sum0 = vaddw_s8(sum0, p11); int8x8x2_t p20_new = vld2_s8(line2 + 16); sum0 = vaddw_s8(sum0, p20.val[0]); sum0 = vaddw_s8(sum0, p20.val[1]); int8x8_t p21 = vext_s8(p20.val[0], p20_new.val[0], 1); sum0 = vaddw_s8(sum0, p21); for (int n = 0; n < 8; n++) { out_ptr[n] = (int8_t)round(sum0[n] / 9.0); } p00 = p00_new; p10 = p10_new; p20 = p20_new; line0 += 16; line1 += 16; line2 += 16; out_ptr += 8; index = index + 8; } for (; index < outw; index++) { int sum = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]); *out_ptr = (int8_t)round(sum / 9.0); out_ptr++; line0 += 2; line1 += 2; line2 += 2; } if (pad_w1 == 1) { /* two valid columns remain across all three rows */ int sum = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]); *out_ptr = (int8_t)round(sum / 6.0); out_ptr++; } else if (pad_w1 == 2) { /* one valid column remains across all three rows */ int sum = (line0[0] + line1[0] + line2[0]); *out_ptr = (int8_t)round(sum / 3.0); out_ptr++; } line0 += remain_w + inw; line1 += remain_w + inw; line2 += remain_w + inw; } if (pad_h1 == 1) { index = 0; for (int j = 0; j < block_w; j++) { int8x8x2_t p00 = vld2_s8(line0); int8x8x2_t p10 = vld2_s8(line1); int8x8x2_t p00_new = vld2_s8(line0 + 16); int16x8_t sum0 = vaddl_s8(p00.val[0], p00.val[1]); int8x8_t p01 = vext_s8(p00.val[0], p00_new.val[0], 1); sum0 = vaddw_s8(sum0, p01); int8x8x2_t p10_new = vld2_s8(line1 + 16); sum0 = vaddw_s8(sum0, p10.val[0]); sum0 = vaddw_s8(sum0, p10.val[1]); int8x8_t p11 = vext_s8(p10.val[0], p10_new.val[0], 1); sum0 = vaddw_s8(sum0, p11); for (int n = 0; n < 8; n++) { out_ptr[n] = (int8_t)round(sum0[n] / 6.0); } p00 = p00_new; p10 = p10_new; line0 += 16; line1 += 16; out_ptr += 8; index = index + 8; } for (; index < outw; index++) { int sum = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]); *out_ptr = (int8_t)round(sum / 6.0); out_ptr++; line0 += 2; line1 += 2; } if (pad_w1 == 1) { int sum = (line0[0] + line0[1] + line1[0] + line1[1]); *out_ptr = (int8_t)round(sum / 4.0); out_ptr++; } else if (pad_w1 == 2) { int sum = (line0[0] + line1[0]); *out_ptr = (int8_t)round(sum / 2.0); out_ptr++; } } else if (pad_h1 == 2) { index = 0; for (int j = 0; j < block_w; j++) { int8x8x2_t p00 = vld2_s8(line0); int8x8x2_t p00_new = vld2_s8(line0 + 16); int16x8_t sum0 = vaddl_s8(p00.val[0], p00.val[1]); int8x8_t p01 = vext_s8(p00.val[0], p00_new.val[0], 1); sum0 = vaddw_s8(sum0, p01); for (int n = 0; n < 8; n++) { out_ptr[n] = (int8_t)round(sum0[n] / 3.0); } p00 = p00_new; line0 += 16; out_ptr += 8; index = index + 8; } for (; index < outw; index++) { *out_ptr = (int8_t)round((line0[0] + line0[1] + line0[2]) / 3.0); out_ptr++; line0 += 2; } if (pad_w1 == 1) { *out_ptr = (int8_t)round((line0[0] + line0[1]) / 2.0); out_ptr++; } else if (pad_w1 == 2) { *out_ptr = line0[0]; out_ptr++; } }
} } static void max_3x3s2_int8(const int8_t* input, int8_t* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe, float in_scale, float out_scale) { int in_hw = inw * inh; int out_hw = outh * outw; if (pad_w1 > 0) { outw--; } if (pad_h1 > 0) { outh--; } int block_w = outw >> 4; int remain_w = inw - outw * 2; int index = 0; for (int c = 0; c < inc; c++) { const int8_t* line0 = input + c * in_hw; const int8_t* line1 = line0 + inw; const int8_t* line2 = line1 + inw; int8_t* out_ptr = output + c * out_hw; for (int i = 0; i < outh; i++) { int8x16x2_t p00 = vld2q_s8(line0); int8x16x2_t p10 = vld2q_s8(line1); int8x16x2_t p20 = vld2q_s8(line2); for (int j = 0; j < block_w; j++) { /* p00 = [1,2,3,4,5,6,7,8...] p00.val[0]=[1,3,5,7...] max0 = [2,4,6,8...] p00_new = [9,10,11,12,13,14,15,16...] p01 = [3,5,7,9...] max0=max(max0,p01)=[3,5,7,9] */ int8x16x2_t p00_new = vld2q_s8(line0 + 32); int8x16_t max0 = vmaxq_s8(p00.val[0], p00.val[1]); int8x16_t p01 = vextq_s8(p00.val[0], p00_new.val[0], 1); max0 = vmaxq_s8(max0, p01); int8x16x2_t p10_new = vld2q_s8(line1 + 32); int8x16_t max1 = vmaxq_s8(p10.val[0], p10.val[1]); int8x16_t p11 = vextq_s8(p10.val[0], p10_new.val[0], 1); max1 = vmaxq_s8(max1, p11); int8x16x2_t p20_new = vld2q_s8(line2 + 32); int8x16_t max2 = vmaxq_s8(p20.val[0], p20.val[1]); int8x16_t p21 = vextq_s8(p20.val[0], p20_new.val[0], 1); max2 = vmaxq_s8(max2, p21); max0 = vmaxq_s8(vmaxq_s8(max0, max1), max2); vst1q_s8(out_ptr, max0); p00 = p00_new; p10 = p10_new; p20 = p20_new; line0 += 32; line1 += 32; line2 += 32; out_ptr += 16; } index = block_w * 16; if (outw - index > 8) { int8x8x2_t p00 = vld2_s8(line0); int8x8x2_t p10 = vld2_s8(line1); int8x8x2_t p20 = vld2_s8(line2); int8x8x2_t p00_new = vld2_s8(line0 + 16); int8x8_t max0 = vmax_s8(p00.val[0], p00.val[1]); int8x8_t p01 = vext_s8(p00.val[0], p00_new.val[0], 1); max0 = vmax_s8(max0, p01); int8x8x2_t p10_new = vld2_s8(line1 + 16); int8x8_t max1 = vmax_s8(p10.val[0], p10.val[1]); int8x8_t p11 = vext_s8(p10.val[0], p10_new.val[0], 1); max1 = vmax_s8(max1, p11); int8x8x2_t p20_new = vld2_s8(line2 + 16); int8x8_t max2 = vmax_s8(p20.val[0], p20.val[1]); int8x8_t p21 = vext_s8(p20.val[0], p20_new.val[0], 1); max2 = vmax_s8(max2, p21); max0 = vmax_s8(vmax_s8(max0, max1), max2); vst1_s8(out_ptr, max0); p00 = p00_new; p10 = p10_new; p20 = p20_new; line0 += 16; line1 += 16; line2 += 16; out_ptr += 8; index = index + 8; } for (; index < outw; index++) { int8_t max0 = arm_max_int8(arm_max_int8(line0[0], line0[1]), line0[2]); int8_t max1 = arm_max_int8(arm_max_int8(line1[0], line1[1]), line1[2]); int8_t max2 = arm_max_int8(arm_max_int8(line2[0], line2[1]), line2[2]); *out_ptr = arm_max_int8(arm_max_int8(max0, max1), max2); out_ptr++; line0 += 2; line1 += 2; line2 += 2; } if (pad_w1 == 1) { int8_t max0 = arm_max_int8(arm_max_int8(line0[0], line0[1]), arm_max_int8(line1[0], line1[1])); *out_ptr = arm_max_int8(arm_max_int8(line2[0], line2[1]), max0); out_ptr++; } line0 += remain_w + inw; line1 += remain_w + inw; line2 += remain_w + inw; } if (pad_h1 == 1) { int8x16x2_t p00 = vld2q_s8(line0); int8x16x2_t p10 = vld2q_s8(line1); for (int j = 0; j < block_w; j++) { int8x16x2_t p00_new = vld2q_s8(line0 + 32); int8x16_t max0 = vmaxq_s8(p00.val[0], p00.val[1]); int8x16_t p01 = vextq_s8(p00.val[0], p00_new.val[0], 1); max0 = vmaxq_s8(max0, p01); int8x16x2_t p10_new = vld2q_s8(line1 + 32); int8x16_t max1 = vmaxq_s8(p10.val[0], p10.val[1]); int8x16_t p11 = 
vextq_s8(p10.val[0], p10_new.val[0], 1); max1 = vmaxq_s8(max1, p11); max0 = vmaxq_s8(max0, max1); vst1q_s8(out_ptr, max0); p00 = p00_new; p10 = p10_new; line0 += 32; line1 += 32; out_ptr += 16; } index = block_w * 16; if (outw - index > 8) { int8x8x2_t p00 = vld2_s8(line0); int8x8x2_t p10 = vld2_s8(line1); int8x8x2_t p00_new = vld2_s8(line0 + 16); int8x8_t max0 = vmax_s8(p00.val[0], p00.val[1]); int8x8_t p01 = vext_s8(p00.val[0], p00_new.val[0], 1); max0 = vmax_s8(max0, p01); int8x8x2_t p10_new = vld2_s8(line1 + 16); int8x8_t max1 = vmax_s8(p10.val[0], p10.val[1]); int8x8_t p11 = vext_s8(p10.val[0], p10_new.val[0], 1); max1 = vmax_s8(max1, p11); max0 = vmax_s8(max0, max1); vst1_s8(out_ptr, max0); p00 = p00_new; p10 = p10_new; line0 += 16; line1 += 16; out_ptr += 8; index = index + 8; } for (; index < outw; index++) { int8_t max0 = arm_max_int8(arm_max_int8(line0[0], line0[1]), line0[2]); int8_t max1 = arm_max_int8(arm_max_int8(line1[0], line1[1]), line1[2]); *out_ptr = arm_max_int8(max0, max1); out_ptr++; line0 += 2; line1 += 2; } if (pad_w1 == 1) { *out_ptr = arm_max_int8(arm_max_int8(line0[0], line0[1]), arm_max_int8(line1[0], line1[1])); out_ptr++; } } } } static void avg_global_int8(const int8_t* input, int8_t* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe, float in_scale, float out_scale) { int in_hw = inw * inh; int block = in_hw >> 4; for (int c = 0; c < inc; c++) { int index = 0; const int8_t* line0 = input + c * in_hw; int8_t* out_ptr = output + c; int sum = 0; for (int j = 0; j < block; j++) { int8x8_t p00 = vld1_s8(line0); int8x8_t p01 = vld1_s8(line0 + 8); int16x8_t pls = vaddl_s8(p00, p01); int32x4_t tmp = vpaddlq_s16(pls); sum += vgetq_lane_s32(tmp, 0) + vgetq_lane_s32(tmp, 1) + vgetq_lane_s32(tmp, 2) + vgetq_lane_s32(tmp, 3); line0 += 16; } index = block * 16; for (int j = index; j < in_hw; j++) { sum += line0[0]; line0++; } float sum_fp32 = sum * in_scale; sum_fp32 = sum_fp32 / in_hw; int tmp = (int)round(sum_fp32 / out_scale); if (tmp > 127) tmp = 127; else if (tmp < -127) tmp = -127; *out_ptr = (int8_t)tmp; //round(sum / in_hw); } } static void max_global_int8(const int8_t* input, int8_t* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe, float in_scale, float out_scale) { int in_hw = inw * inh; int block = in_hw >> 5; for (int c = 0; c < inc; c++) { int index = 0; const int8_t* line0 = input + c * in_hw; int8_t* out_ptr = output + c; int8x16_t p00 = vld1q_s8(line0); int8x16_t res = p00; for (int j = 0; j < block; j++) { int8x16_t p00 = vld1q_s8(line0); int8x16_t p01 = vld1q_s8(line0 + 16); int8x16_t max0 = vmaxq_s8(p00, p01); res = vmaxq_s8(res, max0); line0 += 32; } int8_t max_ = 0; if (block > 0) { max_ = res[0]; #ifdef __aarch64__ for (int n = 1; n < 16; n++) { max_ = arm_max_int8(max_, res[n]); } #else max_ = arm_max_int8(max_, vgetq_lane_s8(res, 0)); max_ = arm_max_int8(max_, vgetq_lane_s8(res, 1)); max_ = arm_max_int8(max_, vgetq_lane_s8(res, 2)); max_ = arm_max_int8(max_, vgetq_lane_s8(res, 3)); max_ = arm_max_int8(max_, vgetq_lane_s8(res, 4)); max_ = arm_max_int8(max_, vgetq_lane_s8(res, 5)); max_ = arm_max_int8(max_, vgetq_lane_s8(res, 6)); max_ = arm_max_int8(max_, vgetq_lane_s8(res, 7)); max_ = arm_max_int8(max_, vgetq_lane_s8(res, 8)); max_ = arm_max_int8(max_, vgetq_lane_s8(res, 9)); max_ = arm_max_int8(max_, vgetq_lane_s8(res, 10)); max_ = 
arm_max_int8(max_, vgetq_lane_s8(res, 11)); max_ = arm_max_int8(max_, vgetq_lane_s8(res, 12)); max_ = arm_max_int8(max_, vgetq_lane_s8(res, 13)); max_ = arm_max_int8(max_, vgetq_lane_s8(res, 14)); max_ = arm_max_int8(max_, vgetq_lane_s8(res, 15)); #endif } else { max_ = line0[0]; } index = block * 32; for (int j = index; j < in_hw; j++) { max_ = arm_max_int8(max_, line0[0]); line0++; } *out_ptr = max_; } } int pooling_kernel_int8_perf_prerun(struct tensor* input, struct tensor* out, struct pool_param* param) { int pool_size = POOL_GENERIC; /* global pooling */ if (param->global) { if (param->pool_method == POOL_AVG) param->funct = (pooling_kernel_int8_t)avg_global_int8; else if (param->pool_method == POOL_MAX) param->funct = (pooling_kernel_int8_t)max_global_int8; assert(param->funct != NULL); return 0; } /* general pooling */ if (param->stride_h == 2 && param->stride_w == 2) { if (param->kernel_h == 2 && param->kernel_w == 2) pool_size = POOL_K2S2; else if (param->kernel_h == 3 && param->kernel_w == 3) pool_size = POOL_K3S2; } /* general max pooling, k2s2, k2k2p1, k3s1p1, k3s2, k3s2p1 */ if (param->pool_method == POOL_MAX) { if ((param->pad_h0 == param->pad_w0) && (param->pad_h1 == param->pad_w1)) { if (pool_size == POOL_K2S2) param->funct = (pooling_kernel_int8_t)max_2x2s2_int8; else if (pool_size == POOL_K3S2) param->funct = (pooling_kernel_int8_t)max_3x3s2_int8; } } /* general avg pooling, k2s2, k2s2p1, k3s2, k3s2p1 */ else if (param->pool_method == POOL_AVG) { if ((param->pad_h0 == param->pad_w0) && (param->pad_h1 == param->pad_w1)) { if (pool_size == POOL_K2S2) param->funct = (pooling_kernel_int8_t)avg_2x2s2_int8; else if (pool_size == POOL_K3S2) param->funct = (pooling_kernel_int8_t)avg_3x3s2_int8; } } if (param->funct == NULL) { TLOG_ERR("perf pooling func not be find\n"); return -1; } return 0; } int pooling_kernel_int8_perf_run(struct tensor* input, struct tensor* output, struct pool_param* param, int num_thread) { int is_caffe = param->caffe_flavor; pooling_kernel_int8_t kernel = (pooling_kernel_int8_t)(param->funct); int batch = input->dims[0]; int c = input->dims[1]; int in_h = input->dims[2]; int in_w = input->dims[3]; int out_h = output->dims[2]; int out_w = output->dims[3]; int pad_h0 = param->pad_h0; int pad_h1 = param->pad_h1; int pad_w0 = param->pad_w0; int pad_w1 = param->pad_w1; int in_h_origin = in_h; int in_w_origin = in_w; int in_h_pad = in_h + pad_h0; int in_w_pad = in_w + pad_w0; int img_size = c * in_h * in_w; int feature_size = c * out_h * out_w; float input_scale = input->scale; float output_scale = output->scale; if (param->input_pad != NULL) { param->pad_h0 = 0; param->pad_w0 = 0; in_h += 1; in_w += 1; } for (int n = 0; n < batch; n++) { void* input_frame = input->data + n * img_size * input->elem_size; void* output_frame = output->data + n * feature_size * output->elem_size; if (param->input_pad != NULL) { pad_0_align_3D_int8((int8_t*)param->input_pad + n * c * in_h_pad * in_w_pad, (int8_t*)input_frame, in_h_origin, in_w_origin, in_h_pad, in_w_pad, c, pad_h0, pad_w0); } #pragma omp parallel for num_threads(num_thread) for (int ch = 0; ch < c; ch++) { void* cur_input = NULL; if (param->input_pad != NULL) { cur_input = param->input_pad + ch * in_h_pad * in_w_pad * input->elem_size; } else { cur_input = input_frame + ch * in_h * in_w * input->elem_size; } void* cur_output = output_frame + ch * out_h * out_w * output->elem_size; kernel(cur_input, cur_output, 1, in_h, in_w, out_h, out_w, param->kernel_h, param->kernel_w, param->stride_h, param->stride_w, 
param->pad_h0, param->pad_w0, param->pad_h1, param->pad_w1, is_caffe, input_scale, output_scale); } } return 0; }
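/* Illustrative sketch (not part of the original kernel file): a plain scalar
 * reference for the quantized global average pooling above. It shows the
 * requantization arithmetic the NEON path implements -- accumulate the raw
 * int8 values, dequantize with in_scale, average over the spatial extent,
 * then requantize with out_scale and clamp to the symmetric int8 range. */
#include <math.h>
#include <stdint.h>

static int8_t avg_global_int8_ref(const int8_t* in, int in_hw,
                                  float in_scale, float out_scale)
{
    int sum = 0;
    for (int j = 0; j < in_hw; j++)            /* plain accumulation, no SIMD */
        sum += in[j];
    float avg_fp32 = (sum * in_scale) / in_hw; /* dequantized spatial mean */
    int q = (int)round(avg_fp32 / out_scale);  /* requantize to output scale */
    if (q > 127) q = 127;                      /* symmetric clamp, as above */
    else if (q < -127) q = -127;
    return (int8_t)q;
}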
fill_nr_s8.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <[email protected]> */ #include <stdlib.h> #include <string.h> #include <math.h> //#include <omp.h> #include "config.h" #include "cint.h" #include "cvhf.h" #include "nr_direct.h" #include "optimizer.h" #define MAX(I,J) ((I) > (J) ? (I) : (J)) int GTOmax_shell_dim(int *ao_loc, int *shls, int ncenter); int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter, int *atm, int natm, int *bas, int nbas, double *env); void int2e_optimizer(CINTOpt **opt, int *atm, int natm, int *bas, int nbas, double *env); /* * 8-fold symmetry, k>=l, k>=i>=j, */ static void fillnr_s8(int (*intor)(), int (*fprescreen)(), double *eri, int ish, int jsh, CVHFOpt *vhfopt, IntorEnvs *envs) { const int *atm = envs->atm; const int *bas = envs->bas; const double *env = envs->env; const int natm = envs->natm; const int nbas = envs->nbas; const int *ao_loc = envs->ao_loc; const CINTOpt *cintopt = envs->cintopt; const int nao = ao_loc[nbas]; const size_t nao2 = nao * nao; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; double *cache = eri + di * dj * nao2; int dims[4] = {nao, nao, dj, di}; int ksh, lsh, ij, k, l; int shls[4]; double *peri; shls[2] = jsh; shls[3] = ish; for (ksh = 0; ksh <= ish; ksh++) { for (lsh = 0; lsh <= ksh; lsh++) { shls[0] = lsh; shls[1] = ksh; peri = eri + ao_loc[ksh] * nao + ao_loc[lsh]; if ((*fprescreen)(shls, vhfopt, atm, bas, env)) { (*intor)(peri, dims, shls, atm, natm, bas, nbas, env, cintopt, cache); } else { for (ij = 0; ij < di*dj; ij++) { for (k = 0; k < ao_loc[ksh+1]-ao_loc[ksh]; k++) { for (l = 0; l < ao_loc[lsh+1]-ao_loc[lsh]; l++) { peri[k*nao+l] = 0; } } peri += nao2; } } } } } static void store_ij(int (*intor)(), double *eri, double *buf, int ish, int jsh, CVHFOpt *vhfopt, IntorEnvs *envs) { const int nbas = envs->nbas; const int *ao_loc = envs->ao_loc; const int nao = ao_loc[nbas]; const size_t nao2 = nao * nao; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; int i, j, k, l, i0, j0, kl; size_t ij0; double *peri, *pbuf; fillnr_s8(intor, vhfopt->fprescreen, buf, ish, jsh, vhfopt, envs); for (i0 = ao_loc[ish], i = 0; i < di; i++, i0++) { for (j0 = ao_loc[jsh], j = 0; j < dj; j++, j0++) { if (i0 >= j0) { ij0 = i0*(i0+1)/2 + j0; peri = eri + ij0*(ij0+1)/2; pbuf = buf + nao2 * (i*dj+j); for (kl = 0, k = 0; k < i0; k++) { for (l = 0; l <= k; l++, kl++) { peri[kl] = pbuf[k*nao+l]; } } // k == i0 for (l = 0; l <= j0; l++, kl++) { peri[kl] = pbuf[k*nao+l]; } } } } } void GTO2e_cart_or_sph(int (*intor)(), CINTOpt *cintopt, double *eri, int *ao_loc, int *atm, int natm, int *bas, int nbas, double *env) { const int nao = ao_loc[nbas]; IntorEnvs envs = {natm, nbas, atm, bas, env, NULL, ao_loc, NULL, cintopt, 1}; CVHFOpt *vhfopt; CVHFnr_optimizer(&vhfopt, intor, cintopt, ao_loc, atm, natm, bas, nbas, env); vhfopt->fprescreen = CVHFnr_schwarz_cond; int shls_slice[] = {0, nbas}; const int di = GTOmax_shell_dim(ao_loc, 
shls_slice, 1); const int cache_size = GTOmax_cache_size(intor, shls_slice, 1, atm, natm, bas, nbas, env); #pragma omp parallel { int i, j, ij; double *buf = malloc(sizeof(double) * (di*di*nao*nao + cache_size)); #pragma omp for nowait schedule(dynamic, 2) for (ij = 0; ij < nbas*(nbas+1)/2; ij++) { i = (int)(sqrt(2*ij+.25) - .5 + 1e-7); j = ij - (i*(i+1)/2); store_ij(intor, eri, buf, i, j, vhfopt, &envs); } free(buf); } CVHFdel_optimizer(&vhfopt); }
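/* Illustrative sketch (standalone, not from the PySCF source): the
 * triangular-packing index arithmetic used above. For the 8-fold symmetric
 * ERI store, a pair (i0, j0) with i0 >= j0 is packed as
 * ij0 = i0*(i0+1)/2 + j0, and the inverse map ij -> (i, j) is recovered
 * with the square-root formula from the OpenMP loop of GTO2e_cart_or_sph. */
#include <assert.h>
#include <math.h>

static long pack_tril(long i, long j)        /* requires i >= j */
{
    return i * (i + 1) / 2 + j;
}

static void unpack_tril(long ij, long *i, long *j)
{
    /* same formula as in the parallel loop; 1e-7 guards against
       floating-point rounding just below an exact integer root */
    *i = (long)(sqrt(2 * ij + .25) - .5 + 1e-7);
    *j = ij - (*i) * (*i + 1) / 2;
}

/* quick self-check of the round trip for small indices */
static void check_tril(void)
{
    for (long i = 0; i < 100; i++)
        for (long j = 0; j <= i; j++) {
            long ii, jj;
            unpack_tril(pack_tril(i, j), &ii, &jj);
            assert(ii == i && jj == j);
        }
}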
residualbased_linear_strategy.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // // #if !defined(KRATOS_RESIDUALBASED_LINEAR_STRATEGY ) #define KRATOS_RESIDUALBASED_LINEAR_STRATEGY // System includes // External includes // Project includes #include "includes/define.h" #include "solving_strategies/strategies/solving_strategy.h" #include "utilities/builtin_timer.h" //default builder and solver #include "solving_strategies/builder_and_solvers/builder_and_solver.h" #include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedLinearStrategy * @ingroup KratosCore * @brief This is a very simple strategy to solve linearly the problem * @details As a linear strategy the check on the convergence is not done and just one non linear iteration will be performed * @author Riccardo Rossi */ template<class TSparseSpace, class TDenseSpace, //= DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class ResidualBasedLinearStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions */ ///@{ /** Counted pointer of ClassName */ KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedLinearStrategy); typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TDataType TDataType; typedef TSparseSpace SparseSpaceType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; ///@} ///@name Life Cycle ///@{ /** * Default constructor * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param CalculateReactionFlag The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param CalculateNormDxFlag The flag sets if the norm of Dx is computed * @param MoveMeshFlag The flag that allows to move the mesh */ ResidualBasedLinearStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, bool CalculateReactionFlag = false, bool ReformDofSetAtEachStep = false, bool CalculateNormDxFlag = false, bool MoveMeshFlag = false ) : SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, MoveMeshFlag) { KRATOS_TRY mCalculateReactionsFlag = CalculateReactionFlag; mReformDofSetAtEachStep = ReformDofSetAtEachStep; mCalculateNormDxFlag = CalculateNormDxFlag; // Saving the scheme mpScheme = pScheme; // Saving the linear solver mpLinearSolver = pNewLinearSolver; // Setting up the default builder and solver mpBuilderAndSolver = typename TBuilderAndSolverType::Pointer ( new 
ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (mpLinearSolver) ); // Set flag to start correcty the calculations mSolutionStepIsInitialized = false; mInitializeWasPerformed = false; // Tells to the builder and solver if the reactions have to be Calculated or not GetBuilderAndSolver()->SetCalculateReactionsFlag(mCalculateReactionsFlag); // Tells to the Builder And Solver if the system matrix and vectors need to //be reshaped at each step or not GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep); // Set EchoLevel to the default value (only time is displayed) this->SetEchoLevel(1); // By default the matrices are rebuilt at each solution step BaseType::SetRebuildLevel(1); KRATOS_CATCH("") } /** * Constructor specifying the builder and solver * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewBuilderAndSolver The builder and solver employed * @param CalculateReactionFlag The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param CalculateNormDxFlag The flag sets if the norm of Dx is computed * @param MoveMeshFlag The flag that allows to move the mesh */ ResidualBasedLinearStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, bool CalculateReactionFlag = false, bool ReformDofSetAtEachStep = false, bool CalculateNormDxFlag = false, bool MoveMeshFlag = false ) : SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, MoveMeshFlag) { KRATOS_TRY mCalculateReactionsFlag = CalculateReactionFlag; mReformDofSetAtEachStep = ReformDofSetAtEachStep; mCalculateNormDxFlag = CalculateNormDxFlag; // Saving the scheme mpScheme = pScheme; // Saving the linear solver mpLinearSolver = pNewLinearSolver; // Setting up the builder and solver mpBuilderAndSolver = pNewBuilderAndSolver; // Set flag to start correcty the calculations mSolutionStepIsInitialized = false; mInitializeWasPerformed = false; // Tells to the builder and solver if the reactions have to be Calculated or not GetBuilderAndSolver()->SetCalculateReactionsFlag(mCalculateReactionsFlag); // Tells to the Builder And Solver if the system matrix and vectors need to //be reshaped at each step or not GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep); //set EchoLevel to the default value (only time is displayed) this->SetEchoLevel(1); // By default the matrices are rebuilt at each solution step BaseType::SetRebuildLevel(1); KRATOS_CATCH("") } /** * @brief Destructor. * @details In trilinos third party library, the linear solver's preconditioner should be freed before the system matrix. We control the deallocation order with Clear(). */ ~ResidualBasedLinearStrategy() override { // If the linear solver has not been deallocated, clean it before // deallocating mpA. This prevents a memory error with the the ML // solver (which holds a reference to it). auto p_linear_solver = GetBuilderAndSolver()->GetLinearSystemSolver(); if (p_linear_solver != nullptr) p_linear_solver->Clear(); // Deallocating system vectors to avoid errors in MPI. Clear calls // TrilinosSpace::Clear for the vectors, which preserves the Map of // current vectors, performing MPI calls in the process. 
Due to the // way Python garbage collection works, this may happen after // MPI_Finalize has already been called and is an error. Resetting // the pointers here prevents Clear from operating with the // (now deallocated) vectors. mpA.reset(); mpDx.reset(); mpb.reset(); this->Clear(); } /** * @brief Set method for the time scheme * @param pScheme The pointer to the time scheme considered */ void SetScheme(typename TSchemeType::Pointer pScheme) { mpScheme = pScheme; }; /** * @brief Get method for the time scheme * @return mpScheme: The pointer to the time scheme considered */ typename TSchemeType::Pointer GetScheme() { return mpScheme; }; /** * @brief Set method for the builder and solver * @param pNewBuilderAndSolver The pointer to the builder and solver considered */ void SetBuilderAndSolver(typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver) { mpBuilderAndSolver = pNewBuilderAndSolver; }; /** * @brief Get method for the builder and solver * @return mpBuilderAndSolver: The pointer to the builder and solver considered */ typename TBuilderAndSolverType::Pointer GetBuilderAndSolver() { return mpBuilderAndSolver; }; /** * @brief This method sets the flag mCalculateReactionsFlag * @param CalculateReactionsFlag The flag that tells if the reactions are computed */ void SetCalculateReactionsFlag(bool CalculateReactionsFlag) { mCalculateReactionsFlag = CalculateReactionsFlag; GetBuilderAndSolver()->SetCalculateReactionsFlag(mCalculateReactionsFlag); } /** * @brief This method returns the flag mCalculateReactionsFlag * @return The flag that tells if the reactions are computed */ bool GetCalculateReactionsFlag() { return mCalculateReactionsFlag; } /** * @brief This method sets the flag mReformDofSetAtEachStep * @param Flag The flag that tells if each time step the system is rebuilt */ void SetReformDofSetAtEachStepFlag(bool Flag) { mReformDofSetAtEachStep = Flag; GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep); } /** * @brief This method returns the flag mReformDofSetAtEachStep * @return The flag that tells if each time step the system is rebuilt */ bool GetReformDofSetAtEachStepFlag() { return mReformDofSetAtEachStep; } /** * @brief It sets the level of echo for the solving strategy * @param Level The level to set * @details The different levels of echo are: * - 0: Mute... no echo at all * - 1: Printing time and basic informations * - 2: Printing linear solver data * - 3: Print of debug informations: Echo of stiffness matrix, Dx, b... */ void SetEchoLevel(int Level) override { BaseType::SetEchoLevel(Level); GetBuilderAndSolver()->SetEchoLevel(Level); } //********************************************************************************* /**OPERATIONS ACCESSIBLE FROM THE INPUT:*/ /** * @brief Operation to predict the solution ... 
if it is not called a trivial predictor is used in which the values of the solution step of interest are assumed equal to the old values */ void Predict() override { KRATOS_TRY const DataCommunicator &r_comm = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator(); //OPERATIONS THAT SHOULD BE DONE ONCE - internal check to avoid repetitions //if the operations needed were already performed this does nothing if(mInitializeWasPerformed == false) Initialize(); //initialize solution step if (mSolutionStepIsInitialized == false) InitializeSolutionStep(); TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; DofsArrayType& r_dof_set = GetBuilderAndSolver()->GetDofSet(); this->GetScheme()->Predict(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb); auto& r_constraints_array = BaseType::GetModelPart().MasterSlaveConstraints(); const int local_number_of_constraints = r_constraints_array.size(); const int global_number_of_constraints = r_comm.SumAll(local_number_of_constraints); if(global_number_of_constraints != 0) { const auto& rProcessInfo = BaseType::GetModelPart().GetProcessInfo(); auto it_begin = BaseType::GetModelPart().MasterSlaveConstraints().begin(); #pragma omp parallel for firstprivate(it_begin) for(int i=0; i<static_cast<int>(local_number_of_constraints); ++i) (it_begin+i)->ResetSlaveDofs(rProcessInfo); #pragma omp parallel for firstprivate(it_begin) for(int i=0; i<static_cast<int>(local_number_of_constraints); ++i) (it_begin+i)->Apply(rProcessInfo); //the following is needed since we need to eventually compute time derivatives after applying //Master slave relations TSparseSpace::SetToZero(rDx); this->GetScheme()->Update(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb); } if (BaseType::MoveMeshFlag() == true) BaseType::MoveMesh(); KRATOS_CATCH("") } /** * @brief Initialization of member variables and prior operations */ void Initialize() override { KRATOS_TRY if (mInitializeWasPerformed == false) { //pointers needed in the solution typename TSchemeType::Pointer p_scheme = GetScheme(); //Initialize The Scheme - OPERATIONS TO BE DONE ONCE if (p_scheme->SchemeIsInitialized() == false) p_scheme->Initialize(BaseType::GetModelPart()); //Initialize The Elements - OPERATIONS TO BE DONE ONCE if (p_scheme->ElementsAreInitialized() == false) p_scheme->InitializeElements(BaseType::GetModelPart()); //Initialize The Conditions - OPERATIONS TO BE DONE ONCE if (p_scheme->ConditionsAreInitialized() == false) p_scheme->InitializeConditions(BaseType::GetModelPart()); mInitializeWasPerformed = true; } KRATOS_CATCH("") } /** * @brief The problem of interest is solved * @details a double containing norm(Dx) is returned if CalculateNormDxFlag == true, else 0 is returned * @return norm(Dx) */ double Solve() override { BaseType::Solve(); //calculate if needed the norm of Dx double norm_dx = 0.00; if (mCalculateNormDxFlag == true) norm_dx = TSparseSpace::TwoNorm(*mpDx); return norm_dx; } /** * @brief Clears the internal storage * @note NULL could be changed to nullptr in the future (c++11) */ void Clear() override { KRATOS_TRY; // If the preconditioner is saved between solves, it // should be cleared here. 
GetBuilderAndSolver()->GetLinearSystemSolver()->Clear(); if (mpA != NULL) SparseSpaceType::Clear(mpA); if (mpDx != NULL) SparseSpaceType::Clear(mpDx); if (mpb != NULL) SparseSpaceType::Clear(mpb); // Setting to zero the internal flag to ensure that the dof sets are recalculated GetBuilderAndSolver()->SetDofSetIsInitializedFlag(false); GetBuilderAndSolver()->Clear(); GetScheme()->Clear(); mInitializeWasPerformed = false; mSolutionStepIsInitialized = false; KRATOS_CATCH(""); } /** * @brief This operations should be called before printing the results when non trivial results (e.g. stresses) need to be calculated given the solution of the step *@details This operations should be called only when needed, before printing as it can involve a non negligible cost */ void CalculateOutputData() override { TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; GetScheme()->CalculateOutputData(BaseType::GetModelPart(), GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb); } /** * @brief Performs all the required operations that should be done (for each step) before solving the solution step. * @details A member variable should be used as a flag to make sure this function is called only once per step. * @todo Boost dependencies should be replaced by std equivalent */ void InitializeSolutionStep() override { KRATOS_TRY if (mSolutionStepIsInitialized == false) { //pointers needed in the solution typename TSchemeType::Pointer p_scheme = GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver(); const int rank = BaseType::GetModelPart().GetCommunicator().MyPID(); //set up the system, operation performed just once unless it is required //to reform the dof set at each iteration BuiltinTimer system_construction_time; if (p_builder_and_solver->GetDofSetIsInitializedFlag() == false || mReformDofSetAtEachStep == true) { //setting up the list of the DOFs to be solved BuiltinTimer setup_dofs_time; p_builder_and_solver->SetUpDofSet(p_scheme, BaseType::GetModelPart()); KRATOS_INFO_IF("Setup Dofs Time", BaseType::GetEchoLevel() > 0 && rank == 0) << setup_dofs_time.ElapsedSeconds() << std::endl; //shaping correctly the system BuiltinTimer setup_system_time; p_builder_and_solver->SetUpSystem(BaseType::GetModelPart()); KRATOS_INFO_IF("Setup System Time", BaseType::GetEchoLevel() > 0 && rank == 0) << setup_system_time.ElapsedSeconds() << std::endl; //setting up the Vectors involved to the correct size BuiltinTimer system_matrix_resize_time; p_builder_and_solver->ResizeAndInitializeVectors(p_scheme, mpA, mpDx, mpb, BaseType::GetModelPart()); KRATOS_INFO_IF("System Matrix Resize Time", BaseType::GetEchoLevel() > 0 && rank == 0) << system_matrix_resize_time.ElapsedSeconds() << std::endl; } KRATOS_INFO_IF("System Construction Time", BaseType::GetEchoLevel() > 0 && rank == 0) << system_construction_time.ElapsedSeconds() << std::endl; TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; //initial operations ... things that are constant over the Solution Step p_builder_and_solver->InitializeSolutionStep(BaseType::GetModelPart(), rA, rDx, rb); //initial operations ... things that are constant over the Solution Step p_scheme->InitializeSolutionStep(BaseType::GetModelPart(), rA, rDx, rb); mSolutionStepIsInitialized = true; } KRATOS_CATCH("") } /** * @brief Performs all the required operations that should be done (for each step) after solving the solution step. 
* @details A member variable should be used as a flag to make sure this function is called only once per step. */ void FinalizeSolutionStep() override { KRATOS_TRY; typename TSchemeType::Pointer p_scheme = GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver(); TSystemMatrixType &rA = *mpA; TSystemVectorType &rDx = *mpDx; TSystemVectorType &rb = *mpb; //Finalisation of the solution step, //operations to be done after achieving convergence, for example the //Final Residual Vector (mb) has to be saved in there //to avoid error accumulation p_scheme->FinalizeSolutionStep(BaseType::GetModelPart(), rA, rDx, rb); p_builder_and_solver->FinalizeSolutionStep(BaseType::GetModelPart(), rA, rDx, rb); //Cleaning memory after the solution p_scheme->Clean(); //reset flags for next step mSolutionStepIsInitialized = false; //deallocate the systemvectors if needed if (mReformDofSetAtEachStep == true) { SparseSpaceType::Clear(mpA); SparseSpaceType::Clear(mpDx); SparseSpaceType::Clear(mpb); this->Clear(); } KRATOS_CATCH(""); } /** * @brief Solves the current step. This function returns true if a solution has been found, false otherwise. */ bool SolveSolutionStep() override { //pointers needed in the solution typename TSchemeType::Pointer p_scheme = GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver(); TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; p_scheme->InitializeNonLinIteration(BaseType::GetModelPart(), rA, rDx, rb); if (BaseType::mRebuildLevel > 0 || BaseType::mStiffnessMatrixIsBuilt == false) { TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); // passing smart pointers instead of references here // to prevent dangling pointer to system matrix when // reusing ml preconditioners in the trilinos tpl p_builder_and_solver->BuildAndSolve(p_scheme, BaseType::GetModelPart(), rA, rDx, rb); BaseType::mStiffnessMatrixIsBuilt = true; } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, BaseType::GetModelPart(), rA, rDx, rb); } // Debugging info EchoInfo(); //update results DofsArrayType& r_dof_set = p_builder_and_solver->GetDofSet(); p_scheme->Update(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb); //move the mesh if needed if (BaseType::MoveMeshFlag() == true) BaseType::MoveMesh(); p_scheme->FinalizeNonLinIteration(BaseType::GetModelPart(), rA, rDx, rb); // Calculate reactions if required if (mCalculateReactionsFlag == true) p_builder_and_solver->CalculateReactions(p_scheme, BaseType::GetModelPart(), rA, rDx, rb); return true; } /** * @brief This method returns the LHS matrix * @return The LHS matrix */ TSystemMatrixType& GetSystemMatrix() { TSystemMatrixType& mA = *mpA; return mA; } /** * @brief This method returns the RHS vector * @return The RHS vector */ TSystemVectorType& GetSystemVector() { TSystemVectorType& mb = *mpb; return mb; } /** * @brief This method returns the solution vector * @return The Dx vector */ TSystemVectorType& GetSolutionVector() { TSystemVectorType& mDx = *mpDx; return mDx; } /** * @brief This method returns the residual norm * @return The residual norm */ double GetResidualNorm() override { if (TSparseSpace::Size(*mpb) != 0) return TSparseSpace::TwoNorm(*mpb); else return 0.0; } /** * @brief Function to perform expensive checks. * @details It is designed to be called ONCE to verify that the input is correct. 
*/ int Check() override { KRATOS_TRY BaseType::Check(); GetBuilderAndSolver()->Check(BaseType::GetModelPart()); GetScheme()->Check(BaseType::GetModelPart()); return 0; KRATOS_CATCH("") } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualBasedLinearStrategy"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ typename TLinearSolver::Pointer mpLinearSolver; /// The pointer to the linear solver considered typename TSchemeType::Pointer mpScheme; /// The pointer to the time scheme employed typename TBuilderAndSolverType::Pointer mpBuilderAndSolver; /// The pointer to the builder and solver employed TSystemVectorPointerType mpDx; /// The incremement in the solution TSystemVectorPointerType mpb; /// The RHS vector of the system of equations TSystemMatrixPointerType mpA; /// The LHS matrix of the system of equations /** * @brief Flag telling if it is needed to reform the DofSet at each solution step or if it is possible to form it just once * @details Default = false - true : Reforme at each time step - false : Form just one (more efficient) */ bool mReformDofSetAtEachStep; bool mCalculateNormDxFlag; /// Calculates if required the norm of the correction term Dx /** * @brief Flag telling if it is needed or not to compute the reactions * @details default = true */ bool mCalculateReactionsFlag; bool mSolutionStepIsInitialized; /// Flag to set as initialized the solution step bool mInitializeWasPerformed; /// Flag to set as initialized the strategy ///@} ///@name Private Operators*/ ///@{ /** * @brief This method returns the components of the system of equations depending of the echo level */ virtual void EchoInfo() { TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; if (BaseType::GetEchoLevel() == 3) //if it is needed to print the debug info { KRATOS_INFO("LHS") << "SystemMatrix = " << rA << std::endl; KRATOS_INFO("Dx") << "Solution obtained = " << rDx << std::endl; KRATOS_INFO("RHS") << "RHS = " << rb << std::endl; } if (this->GetEchoLevel() == 4) //print to matrix market file { std::stringstream matrix_market_name; matrix_market_name << "A_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << ".mm"; TSparseSpace::WriteMatrixMarketMatrix((char*) (matrix_market_name.str()).c_str(), rA, false); std::stringstream matrix_market_vectname; matrix_market_vectname << "b_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << ".mm.rhs"; TSparseSpace::WriteMatrixMarketVector((char*) (matrix_market_vectname.str()).c_str(), rb); } } ///@} ///@name Private Operations*/ ///@{ ///@} ///@name Private Access */ ///@{ ///@} ///@name Private Inquiry */ ///@{ ///@} ///@name Un accessible methods */ ///@{ /** Copy constructor. 
*/ ResidualBasedLinearStrategy(const ResidualBasedLinearStrategy& Other); ///@} }; /* Class ResidualBasedLinearStrategy */ ///@} ///@name Type Definitions */ ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUALBASED_LINEAR_STRATEGY defined */
e2.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main()
{
    int i, n;
    float a[100], b[100], sum;

    /* Some initializations */
    n = 100;
    for (i = 0; i < n; i++)
        a[i] = b[i] = i * 1.0;
    sum = 0.0;

    #pragma omp parallel shared(sum, a, b) private(i)
    {
        printf("Hello from thread: %d\n", omp_get_thread_num());
        /* nowait drops the barrier after the worksharing loop; the reduction
           into sum is still guaranteed complete at the implicit barrier that
           ends the parallel region, before the printf below runs. */
        #pragma omp for schedule(guided) reduction(+:sum) nowait
        for (i = 0; i < n; i++)
            sum = sum + (a[i] * b[i]);
    }

    printf("Sum = %f\n", sum);
    return 0;
}
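/* Illustrative variant (not in the original file): the same dot product
 * written with the combined parallel-for form. Listing sum in the reduction
 * clause gives each thread a private partial sum that is combined when the
 * construct ends, so no explicit parallel region block is needed. */
#include <stdio.h>

int main(void)
{
    int i, n = 100;
    float a[100], b[100], sum = 0.0f;
    for (i = 0; i < n; i++)
        a[i] = b[i] = i * 1.0f;
    #pragma omp parallel for reduction(+:sum)
    for (i = 0; i < n; i++)
        sum += a[i] * b[i];
    printf("Sum = %f\n", sum);
    return 0;
}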
LAGraph_BF_full1.c
//------------------------------------------------------------------------------ // LAGraph_BF_full1.c: Bellman-Ford single-source shortest paths, returns tree, // while diagonal of input matrix A needs not to be explicit 0 //------------------------------------------------------------------------------ // LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved. // SPDX-License-Identifier: BSD-2-Clause // // See additional acknowledgments in the LICENSE file, // or contact [email protected] for the full terms. //------------------------------------------------------------------------------ // LAGraph_BF_full1: Bellman-Ford single source shortest paths, returning both // the path lengths and the shortest-path tree. contributed by Jinhao Chen and // Tim Davis, Texas A&M. // LAGraph_BF_full1 performs a Bellman-Ford to find out shortest path, parent // nodes along the path and the hops (number of edges) in the path from given // source vertex s in the range of [0, n) on graph given as matrix A with size // n*n. The sparse matrix A has entry A(i, j) if there is an edge from vertex i // to vertex j with weight w, then A(i, j) = w. // LAGraph_BF_full1 returns GrB_SUCCESS if it succeeds. In this case, there // are no negative-weight cycles in the graph, and d, pi, and h are returned. // The vector d has d(k) as the shortest distance from s to k. pi(k) = p+1, // where p is the parent node of k-th node in the shortest path. In particular, // pi(s) = 0. h(k) = hop(s, k), the number of edges from s to k in the shortest // path. // If the graph has a negative-weight cycle, GrB_NO_VALUE is returned, and the // GrB_Vectors d(k), pi(k) and h(k) (i.e., *pd_output, *ppi_output and // *ph_output respectively) will be NULL when negative-weight cycle detected. // Otherwise, other errors such as GrB_OUT_OF_MEMORY, GrB_INVALID_OBJECT, and // so on, can be returned, if these errors are found by the underlying // GrB_* functions. //------------------------------------------------------------------------------ #define LAGraph_FREE_WORK \ { \ GrB_free(&d); \ GrB_free(&dmasked); \ GrB_free(&dless); \ GrB_free(&Atmp); \ GrB_free(&BF_Tuple3); \ GrB_free(&BF_lMIN_Tuple3); \ GrB_free(&BF_PLUSrhs_Tuple3); \ GrB_free(&BF_LT_Tuple3); \ GrB_free(&BF_lMIN_Tuple3_Monoid); \ GrB_free(&BF_lMIN_PLUSrhs_Tuple3); \ LAGraph_Free ((void**)&I); \ LAGraph_Free ((void**)&J); \ LAGraph_Free ((void**)&w); \ LAGraph_Free ((void**)&W); \ LAGraph_Free ((void**)&h); \ LAGraph_Free ((void**)&pi); \ } #define LAGraph_FREE_ALL \ { \ LAGraph_FREE_WORK \ GrB_free (pd_output); \ GrB_free (ppi_output); \ GrB_free (ph_output); \ } #include <LAGraph.h> #include <LAGraphX.h> #include <LG_internal.h> // from src/utility typedef void (*LAGraph_binary_function) (void *, const void *, const void *) ; //------------------------------------------------------------------------------ // data type for each entry of the adjacent matrix A and "distance" vector d; // <INFINITY,INFINITY,INFINITY> corresponds to nonexistence of a path, and // the value <0, 0, NULL> corresponds to a path from a vertex to itself //------------------------------------------------------------------------------ typedef struct { double w; // w corresponds to a path weight. GrB_Index h; // h corresponds to a path size or number of hops. GrB_Index pi;// pi corresponds to the penultimate vertex along a path. // vertex indexed as 1, 2, 3, ... 
, V, and pi = 0 (as nil) // for u=v, and pi = UINT64_MAX (as inf) for (u,v) not in E } BF1_Tuple3_struct; //------------------------------------------------------------------------------ // 2 binary functions, z=f(x,y), where Tuple3xTuple3 -> Tuple3 //------------------------------------------------------------------------------ void BF1_lMIN ( BF1_Tuple3_struct *z, const BF1_Tuple3_struct *x, const BF1_Tuple3_struct *y ) { if (x->w < y->w || (x->w == y->w && x->h < y->h) || (x->w == y->w && x->h == y->h && x->pi < y->pi)) { if (z != x) { *z = *x; } } else { *z = *y; } } void BF1_PLUSrhs ( BF1_Tuple3_struct *z, const BF1_Tuple3_struct *x, const BF1_Tuple3_struct *y ) { z->w = x->w + y->w; z->h = x->h + y->h; z->pi = (x->pi != UINT64_MAX && y->pi != 0) ? y->pi : x->pi ; } void BF1_Identity ( BF1_Tuple3_struct *z, const BF1_Tuple3_struct *x ) { *z = *x; } void BF1_LT ( bool *z, const BF1_Tuple3_struct *x, const BF1_Tuple3_struct *y ) { (*z) = (x->w < y->w || (x->w == y->w && x->h < y->h) || (x->w == y->w && x->h == y->h && x->pi < y->pi)) ; } // Given a n-by-n adjacency matrix A and a source vertex s. // If there is no negative-weight cycle reachable from s, return the distances // of shortest paths from s and parents along the paths as vector d. Otherwise, // returns d=NULL if there is a negtive-weight cycle. // pd_output is pointer to a GrB_Vector, where the i-th entry is d(s,i), the // sum of edges length in the shortest path // ppi_output is pointer to a GrB_Vector, where the i-th entry is pi(i), the // parent of i-th vertex in the shortest path // ph_output is pointer to a GrB_Vector, where the i-th entry is h(s,i), the // number of edges from s to i in the shortest path // A has weights on corresponding entries of edges // s is given index for source vertex GrB_Info LAGraph_BF_full1 ( GrB_Vector *pd_output, //the pointer to the vector of distance GrB_Vector *ppi_output, //the pointer to the vector of parent GrB_Vector *ph_output, //the pointer to the vector of hops const GrB_Matrix A, //matrix for the graph const GrB_Index s //given index of the source ) { GrB_Info info; char *msg = NULL ; // tmp vector to store distance vector after n (i.e., V) loops GrB_Vector d = NULL, dmasked = NULL, dless = NULL; GrB_Matrix Atmp = NULL; GrB_Type BF_Tuple3; GrB_BinaryOp BF_lMIN_Tuple3; GrB_BinaryOp BF_PLUSrhs_Tuple3; GrB_UnaryOp BF_Identity_Tuple3; GrB_BinaryOp BF_LT_Tuple3; GrB_Monoid BF_lMIN_Tuple3_Monoid; GrB_Semiring BF_lMIN_PLUSrhs_Tuple3; GrB_Index nrows, ncols, n, nz; // n = # of row/col, nz = # of nnz in graph GrB_Index *I = NULL, *J = NULL; // for col/row indices of entries from A GrB_Index *h = NULL, *pi = NULL; double *w = NULL; BF1_Tuple3_struct *W = NULL; LG_CHECK (A == NULL || pd_output == NULL || ppi_output == NULL || ph_output == NULL, -1001, "inputs are NULL") ; *pd_output = NULL; *ppi_output = NULL; *ph_output = NULL; GrB_TRY (GrB_Matrix_nrows (&nrows, A)) ; GrB_TRY (GrB_Matrix_ncols (&ncols, A)) ; GrB_TRY (GrB_Matrix_nvals (&nz, A)); LG_CHECK (nrows != ncols, -1002, "A must be square") ; n = nrows; LG_CHECK (s >= n || s < 0, -1003, "invalid source node") ; //-------------------------------------------------------------------------- // create all GrB_Type GrB_BinaryOp GrB_Monoid and GrB_Semiring //-------------------------------------------------------------------------- // GrB_Type GrB_TRY (GrB_Type_new(&BF_Tuple3, sizeof(BF1_Tuple3_struct))); // GrB_BinaryOp GrB_TRY (GrB_UnaryOp_new(&BF_Identity_Tuple3, (void*) (&BF1_Identity), BF_Tuple3, BF_Tuple3)); GrB_TRY 
(GrB_BinaryOp_new(&BF_LT_Tuple3, (LAGraph_binary_function) (&BF1_LT), GrB_BOOL, BF_Tuple3, BF_Tuple3)); GrB_TRY (GrB_BinaryOp_new(&BF_lMIN_Tuple3, (LAGraph_binary_function) (&BF1_lMIN), BF_Tuple3, BF_Tuple3, BF_Tuple3)); GrB_TRY (GrB_BinaryOp_new(&BF_PLUSrhs_Tuple3, (LAGraph_binary_function)(&BF1_PLUSrhs), BF_Tuple3, BF_Tuple3, BF_Tuple3)); // GrB_Monoid BF1_Tuple3_struct BF_identity = (BF1_Tuple3_struct) { .w = INFINITY, .h = UINT64_MAX, .pi = UINT64_MAX }; LAGRAPH_OK(GrB_Monoid_new_UDT(&BF_lMIN_Tuple3_Monoid, BF_lMIN_Tuple3, &BF_identity)); //GrB_Semiring GrB_TRY (GrB_Semiring_new(&BF_lMIN_PLUSrhs_Tuple3, BF_lMIN_Tuple3_Monoid, BF_PLUSrhs_Tuple3)); //-------------------------------------------------------------------------- // allocate arrays used for tuplets //-------------------------------------------------------------------------- I = LAGraph_Malloc (nz, sizeof(GrB_Index)) ; J = LAGraph_Malloc (nz, sizeof(GrB_Index)) ; w = LAGraph_Malloc (nz, sizeof(double)) ; W = LAGraph_Malloc (nz, sizeof(BF1_Tuple3_struct)) ; LG_CHECK (I == NULL || J == NULL || w == NULL || W == NULL, -1004, "out of memory") ; //-------------------------------------------------------------------------- // create matrix Atmp based on A, while its entries become BF_Tuple3 type //-------------------------------------------------------------------------- LAGRAPH_OK(GrB_Matrix_extractTuples_FP64(I, J, w, &nz, A)); int nthreads; LAGRAPH_OK(LAGraph_GetNumThreads (&nthreads, NULL)) ; printf ("nthreads %d\n", nthreads) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (GrB_Index k = 0; k < nz; k++) { W[k] = (BF1_Tuple3_struct) { .w = w[k], .h = 1, .pi = I[k] + 1 }; } GrB_TRY (GrB_Matrix_new(&Atmp, BF_Tuple3, n, n)); LAGRAPH_OK(GrB_Matrix_build_UDT(Atmp, I, J, W, nz, BF_lMIN_Tuple3)); LAGraph_Free ((void**)&I); LAGraph_Free ((void**)&J); LAGraph_Free ((void**)&W); LAGraph_Free ((void**)&w); //-------------------------------------------------------------------------- // create and initialize "distance" vector d, dmasked and dless //-------------------------------------------------------------------------- GrB_TRY (GrB_Vector_new(&d, BF_Tuple3, n)); // make d dense LAGRAPH_OK(GrB_Vector_assign_UDT(d, NULL, NULL, (void*)&BF_identity, GrB_ALL, n, NULL)); // initial distance from s to itself BF1_Tuple3_struct d0 = (BF1_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 }; LAGRAPH_OK(GrB_Vector_setElement_UDT(d, &d0, s)); // creat dmasked as a sparse vector with only one entry at s GrB_TRY (GrB_Vector_new(&dmasked, BF_Tuple3, n)); LAGRAPH_OK(GrB_Vector_setElement_UDT(dmasked, &d0, s)); // create dless GrB_TRY (GrB_Vector_new(&dless, GrB_BOOL, n)); //-------------------------------------------------------------------------- // start the Bellman Ford process //-------------------------------------------------------------------------- bool any_dless= true; // if there is any newly found shortest path int64_t iter = 0; // number of iterations // terminate when no new path is found or more than V-1 loops while (any_dless && iter < n - 1) { // execute semiring on d and A, and save the result to dtmp GrB_TRY (GrB_vxm(dmasked, GrB_NULL, GrB_NULL, BF_lMIN_PLUSrhs_Tuple3, dmasked, Atmp, GrB_NULL)); // dless = d .< dtmp //GrB_TRY (GrB_Vector_clear(dless)); GrB_TRY (GrB_eWiseMult(dless, NULL, NULL, BF_LT_Tuple3, dmasked, d, NULL)); // if there is no entry with smaller distance then all shortest paths // are found GrB_TRY (GrB_reduce (&any_dless, NULL, GrB_LOR_MONOID_BOOL, dless, NULL)) ; if(any_dless) { // update all entries with 
smaller distances GrB_TRY (GrB_apply(d, dless, NULL, BF_Identity_Tuple3, dmasked, NULL)); // only use entries that were just updated GrB_TRY (GrB_Vector_clear(dmasked)); GrB_TRY (GrB_apply(dmasked, dless, NULL, BF_Identity_Tuple3, d, NULL)); //try: //GrB_TRY (GrB_assign(dmasked, dless, NULL, d, GrB_ALL, n, GrB_DESC_R); } iter ++; } // check for negative-weight cycle only when there was a new path in the // last loop, otherwise, there can't be a negative-weight cycle. if (any_dless) { // execute semiring again to check for negative-weight cycle GrB_TRY (GrB_vxm(dmasked, GrB_NULL, GrB_NULL, BF_lMIN_PLUSrhs_Tuple3, dmasked, Atmp, GrB_NULL)); // dless = d .< dtmp //GrB_TRY (GrB_Vector_clear(dless)); GrB_TRY (GrB_eWiseMult(dless, NULL, NULL, BF_LT_Tuple3, dmasked, d, NULL)); // if there is no entry with smaller distance then all shortest paths // are found GrB_TRY (GrB_reduce (&any_dless, NULL, GrB_LOR_MONOID_BOOL, dless, NULL)) ; if(any_dless) { // printf("A negative-weight cycle found. \n"); LAGraph_FREE_ALL; return (GrB_NO_VALUE) ; } } //-------------------------------------------------------------------------- // extract tuple from "distance" vector d and create GrB_Vectors for output //-------------------------------------------------------------------------- I = LAGraph_Malloc (n, sizeof(GrB_Index)) ; W = LAGraph_Malloc (n, sizeof(BF1_Tuple3_struct)) ; w = LAGraph_Malloc (n, sizeof(double)) ; h = LAGraph_Malloc (n, sizeof(GrB_Index)) ; pi = LAGraph_Malloc (n, sizeof(GrB_Index)) ; LG_CHECK (I == NULL || W == NULL || w == NULL || h == NULL || pi == NULL, -1004, "out of memory") ; LAGRAPH_OK(GrB_Vector_extractTuples_UDT (I, (void *) W, &n, d)); for (GrB_Index k = 0; k < n; k++) { w [k] = W[k].w ; h [k] = W[k].h ; pi[k] = W[k].pi; } GrB_TRY (GrB_Vector_new(pd_output, GrB_FP64, n)); GrB_TRY (GrB_Vector_new(ppi_output, GrB_UINT64, n)); GrB_TRY (GrB_Vector_new(ph_output, GrB_UINT64, n)); GrB_TRY (GrB_Vector_build (*pd_output , I, w , n, GrB_MIN_FP64 )); GrB_TRY (GrB_Vector_build (*ppi_output, I, pi, n, GrB_MIN_UINT64)); GrB_TRY (GrB_Vector_build (*ph_output , I, h , n, GrB_MIN_UINT64)); LAGraph_FREE_WORK; return (GrB_SUCCESS) ; }
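/* Illustrative sketch (standalone, mirrors the operators defined above):
 * one relaxation step of the <weight, hops, parent> semiring. The semiring
 * "addition" is the lexicographic minimum BF1_lMIN and the "multiplication"
 * is BF1_PLUSrhs, which adds weights and hop counts and keeps the
 * right-hand parent when one exists. */
#include <stdint.h>
#include <stdio.h>

typedef struct { double w; uint64_t h; uint64_t pi; } Tuple3;

static Tuple3 lmin(Tuple3 x, Tuple3 y)       /* semiring "add" */
{
    if (x.w < y.w || (x.w == y.w && x.h < y.h) ||
        (x.w == y.w && x.h == y.h && x.pi < y.pi))
        return x;
    return y;
}

static Tuple3 plus_rhs(Tuple3 x, Tuple3 y)   /* semiring "multiply" */
{
    Tuple3 z;
    z.w = x.w + y.w;
    z.h = x.h + y.h;
    z.pi = (x.pi != UINT64_MAX && y.pi != 0) ? y.pi : x.pi;
    return z;
}

int main(void)
{
    Tuple3 d_u     = { 2.0, 1, 1 };    /* current distance to u */
    Tuple3 edge_uv = { 1.5, 1, 42 };   /* edge u->v, parent id 42 */
    Tuple3 d_v     = { 5.0, 2, 7 };    /* old distance to v */
    Tuple3 cand = plus_rhs(d_u, edge_uv);  /* candidate path via u: <3.5,2,42> */
    Tuple3 upd  = lmin(d_v, cand);         /* keep the shorter path */
    printf("d(v): w=%g h=%llu pi=%llu\n", upd.w,
           (unsigned long long)upd.h, (unsigned long long)upd.pi);
    return 0;
}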
omp_in_parallel.c
// RUN: %libomp-compile-and-run
#include <stdio.h>
#include "omp_testsuite.h"

/*
 * Checks that false is returned when called from serial region
 * and true is returned when called within parallel region.
 */
int test_omp_in_parallel()
{
  int serial;
  int isparallel;

  serial = 1;
  isparallel = 0;
  serial = omp_in_parallel();

  #pragma omp parallel
  {
    #pragma omp single
    {
      isparallel = omp_in_parallel();
    }
  }
  return (!(serial) && isparallel);
}

int main()
{
  int i;
  int num_failed = 0;

  // the test requires more than 1 thread to pass
  omp_set_dynamic(0); // disable dynamic adjustment of threads
  if (omp_get_max_threads() == 1)
    omp_set_num_threads(2); // set 2 threads if no HW resources available

  for (i = 0; i < REPETITIONS; i++) {
    if (!test_omp_in_parallel()) {
      num_failed++;
    }
  }
  return num_failed;
}
scan-5.c
int foo (int *a, int *b)
{
  int r = 0;
  #pragma omp parallel for reduction (inscan, +:r) default(none) firstprivate (a, b)
  for (int i = 0; i < 64; i++)
    {
      r += a[i];
      #pragma omp scan inclusive (r)
      b[i] = r;
    }
  return r;
}
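/* Illustrative sketch (not part of the test): the sequential semantics the
 * inscan reduction above must reproduce. After the loop, b holds the
 * inclusive prefix sums of a and the return value is the total. */
int foo_serial_ref (int *a, int *b)
{
  int r = 0;
  for (int i = 0; i < 64; i++)
    {
      r += a[i];
      b[i] = r;   /* inclusive: b[i] includes a[i] */
    }
  return r;
}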
loop_alignment_par.c
#include <omp.h>

void compute(unsigned long **a, unsigned long **b, unsigned long **c,
             unsigned long **d, int N)
{
    int i, j;
    #pragma omp parallel for
    for (i = 1; i < N; i++) {
        a[i][1] = 3*b[i][1];                    // S1
        c[i][0] = a[i][1] * d[i][1];            // S3
        #pragma omp parallel for
        for (j = 2; j < N; j++) {
            b[i][j] = c[i][j-1]*c[i][j-1];      // S2
            a[i][j] = 3*b[i][j];                // S1
            c[i][j-1] = a[i][j] * d[i][j];      // S3
        }
        b[i][N] = c[i][N-1]*c[i][N-1];          // S2
    }
}
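/* Minimal usage sketch (the array shapes are an assumption, not part of the
 * source): each row needs N+1 columns because the epilogue statement
 * b[i][N] = c[i][N-1]*c[i][N-1] writes column N. */
#include <stdlib.h>

int main(void)
{
    const int N = 8;
    unsigned long **m[4];
    for (int k = 0; k < 4; k++) {
        m[k] = malloc((size_t)N * sizeof *m[k]);
        for (int i = 0; i < N; i++)
            m[k][i] = calloc((size_t)N + 1, sizeof **m[k]); /* cols 0..N */
    }
    compute(m[0], m[1], m[2], m[3], N);
    for (int k = 0; k < 4; k++) {
        for (int i = 0; i < N; i++) free(m[k][i]);
        free(m[k]);
    }
    return 0;
}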
Mat_dh.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision: 2.11 $ ***********************************************************************EHEADER*/ #include "_hypre_Euclid.h" /* #include "Mat_dh.h" */ /* #include "getRow_dh.h" */ /* #include "SubdomainGraph_dh.h" */ /* #include "TimeLog_dh.h" */ /* #include "Mem_dh.h" */ /* #include "Numbering_dh.h" */ /* #include "Parser_dh.h" */ /* #include "mat_dh_private.h" */ /* #include "io_dh.h" */ /* #include "Hash_i_dh.h" */ static void setup_matvec_sends_private(Mat_dh mat, HYPRE_Int *inlist); static void setup_matvec_receives_private(Mat_dh mat, HYPRE_Int *beg_rows, HYPRE_Int *end_rows, HYPRE_Int reqlen, HYPRE_Int *reqind, HYPRE_Int *outlist); #if 0 partial (?) implementation below; not used anyplace, I think; for future expansion? [mar 21, 2K+1] static void Mat_dhAllocate_getRow_private(Mat_dh A); #endif static bool commsOnly = false; /* experimental, for matvec functions */ #undef __FUNC__ #define __FUNC__ "Mat_dhCreate" void Mat_dhCreate(Mat_dh *mat) { START_FUNC_DH struct _mat_dh* tmp = (struct _mat_dh*)MALLOC_DH(sizeof(struct _mat_dh)); CHECK_V_ERROR; *mat = tmp; commsOnly = Parser_dhHasSwitch(parser_dh, "-commsOnly"); if (myid_dh == 0 && commsOnly == true) { /* hypre_printf("\n@@@ commsOnly == true for matvecs! @@@\n"); */ fflush(stdout); } tmp->m = 0; tmp->n = 0; tmp->beg_row = 0; tmp->bs = 1; tmp->rp = NULL; tmp->len = NULL; tmp->cval = NULL; tmp->aval = NULL; tmp->diag = NULL; tmp->fill = NULL; tmp->owner = true; tmp->len_private = 0; tmp->rowCheckedOut = -1; tmp->cval_private = NULL; tmp->aval_private = NULL; tmp->row_perm = NULL; tmp->num_recv = 0; tmp->num_send = 0; tmp->recv_req = NULL; tmp->send_req = NULL; tmp->status = NULL; tmp->recvbuf = NULL; tmp->sendbuf = NULL; tmp->sendind = NULL; tmp->sendlen = 0; tmp->recvlen = 0; tmp->numb = NULL; tmp->matvecIsSetup = false; Mat_dhZeroTiming(tmp); CHECK_V_ERROR; tmp->matvec_timing = true; tmp->debug = Parser_dhHasSwitch(parser_dh, "-debug_Mat"); END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhDestroy" void Mat_dhDestroy(Mat_dh mat) { START_FUNC_DH HYPRE_Int i; if (mat->owner) { if (mat->rp != NULL) { FREE_DH(mat->rp); CHECK_V_ERROR; } if (mat->len != NULL) { FREE_DH(mat->len); CHECK_V_ERROR; } if (mat->cval != NULL) { FREE_DH(mat->cval); CHECK_V_ERROR; } if (mat->aval != NULL) { FREE_DH(mat->aval); CHECK_V_ERROR; } if (mat->diag != NULL) { FREE_DH(mat->diag); CHECK_V_ERROR; } if (mat->fill != NULL) { FREE_DH(mat->fill); CHECK_V_ERROR; } if (mat->cval_private != NULL) { FREE_DH(mat->cval_private); CHECK_V_ERROR; } if (mat->aval_private != NULL) { FREE_DH(mat->aval_private); CHECK_V_ERROR; } if (mat->row_perm != NULL) { FREE_DH(mat->row_perm); CHECK_V_ERROR; } } for (i=0; i<mat->num_recv; i++) hypre_MPI_Request_free(&mat->recv_req[i]); for (i=0; i<mat->num_send; i++) hypre_MPI_Request_free(&mat->send_req[i]); if (mat->recv_req != NULL) { FREE_DH(mat->recv_req); CHECK_V_ERROR; } if (mat->send_req != NULL) { FREE_DH(mat->send_req); CHECK_V_ERROR; } if (mat->status != NULL) { FREE_DH(mat->status); CHECK_V_ERROR; } if (mat->recvbuf != NULL) { FREE_DH(mat->recvbuf); 
CHECK_V_ERROR; } if (mat->sendbuf != NULL) { FREE_DH(mat->sendbuf); CHECK_V_ERROR; } if (mat->sendind != NULL) { FREE_DH(mat->sendind); CHECK_V_ERROR; } if (mat->matvecIsSetup) { Mat_dhMatVecSetdown(mat); CHECK_V_ERROR; } if (mat->numb != NULL) { Numbering_dhDestroy(mat->numb); CHECK_V_ERROR; } FREE_DH(mat); CHECK_V_ERROR; END_FUNC_DH } /* this should put the cval array back the way it was! */ #undef __FUNC__ #define __FUNC__ "Mat_dhMatVecSetDown" void Mat_dhMatVecSetdown(Mat_dh mat) { START_FUNC_DH if (ignoreMe) SET_V_ERROR("not implemented"); END_FUNC_DH } /* adopted from Edmond Chow's ParaSails */ #undef __FUNC__ #define __FUNC__ "Mat_dhMatVecSetup" void Mat_dhMatVecSetup(Mat_dh mat) { START_FUNC_DH if (np_dh == 1) { goto DO_NOTHING; } else { HYPRE_Int *outlist, *inlist; HYPRE_Int ierr, i, row, *rp = mat->rp, *cval = mat->cval; Numbering_dh numb; HYPRE_Int m = mat->m; HYPRE_Int firstLocal = mat->beg_row; HYPRE_Int lastLocal = firstLocal+m; HYPRE_Int *beg_rows, *end_rows; mat->recv_req = (hypre_MPI_Request *)MALLOC_DH(np_dh * sizeof(hypre_MPI_Request)); CHECK_V_ERROR; mat->send_req = (hypre_MPI_Request *)MALLOC_DH(np_dh * sizeof(hypre_MPI_Request)); CHECK_V_ERROR; mat->status = (hypre_MPI_Status *)MALLOC_DH(np_dh * sizeof(hypre_MPI_Status)); CHECK_V_ERROR; beg_rows = (HYPRE_Int*)MALLOC_DH(np_dh*sizeof(HYPRE_Int)); CHECK_V_ERROR; end_rows = (HYPRE_Int*)MALLOC_DH(np_dh*sizeof(HYPRE_Int)); CHECK_V_ERROR; if (np_dh == 1) { /* this is for debugging purposes in some of the drivers */ beg_rows[0] = 0; end_rows[0] = m; } else { ierr = hypre_MPI_Allgather(&firstLocal, 1, HYPRE_MPI_INT, beg_rows, 1, HYPRE_MPI_INT, comm_dh); CHECK_MPI_V_ERROR(ierr); ierr = hypre_MPI_Allgather(&lastLocal, 1, HYPRE_MPI_INT, end_rows, 1, HYPRE_MPI_INT, comm_dh); CHECK_MPI_V_ERROR(ierr); } outlist = (HYPRE_Int *)MALLOC_DH(np_dh*sizeof(HYPRE_Int)); CHECK_V_ERROR; inlist = (HYPRE_Int *)MALLOC_DH(np_dh*sizeof(HYPRE_Int)); CHECK_V_ERROR; for (i=0; i<np_dh; ++i) { outlist[i] = 0; inlist[i] = 0; } /* Create Numbering object */ Numbering_dhCreate(&(mat->numb)); CHECK_V_ERROR; numb = mat->numb; Numbering_dhSetup(numb, mat); CHECK_V_ERROR; setup_matvec_receives_private(mat, beg_rows, end_rows, numb->num_ext, numb->idx_ext, outlist); CHECK_V_ERROR; if (np_dh == 1) { /* this is for debugging purposes in some of the drivers */ inlist[0] = outlist[0]; } else { ierr = hypre_MPI_Alltoall(outlist, 1, HYPRE_MPI_INT, inlist, 1, HYPRE_MPI_INT, comm_dh); CHECK_MPI_V_ERROR(ierr); } setup_matvec_sends_private(mat, inlist); CHECK_V_ERROR; /* Convert to local indices */ for (row=0; row<m; row++) { HYPRE_Int len = rp[row+1]-rp[row]; HYPRE_Int *ind = cval+rp[row]; Numbering_dhGlobalToLocal(numb, len, ind, ind); CHECK_V_ERROR; } FREE_DH(outlist); CHECK_V_ERROR; FREE_DH(inlist); CHECK_V_ERROR; FREE_DH(beg_rows); CHECK_V_ERROR; FREE_DH(end_rows); CHECK_V_ERROR; } DO_NOTHING: ; END_FUNC_DH } /* adopted from Edmond Chow's ParaSails */ #undef __FUNC__ #define __FUNC__ "setup_matvec_receives_private" void setup_matvec_receives_private(Mat_dh mat, HYPRE_Int *beg_rows, HYPRE_Int *end_rows, HYPRE_Int reqlen, HYPRE_Int *reqind, HYPRE_Int *outlist) { START_FUNC_DH HYPRE_Int ierr, i, j, this_pe; hypre_MPI_Request request; HYPRE_Int m = mat->m; mat->num_recv = 0; /* Allocate recvbuf */ /* recvbuf has numlocal entries saved for local part of x, used in matvec */ mat->recvbuf = (double*)MALLOC_DH((reqlen+m) * sizeof(double)); for (i=0; i<reqlen; i=j) { /* j is set below */ /* The processor that owns the row with index reqind[i] */ this_pe = 
mat_find_owner(beg_rows, end_rows, reqind[i]); CHECK_V_ERROR; /* Figure out other rows we need from this_pe */ for (j=i+1; j<reqlen; j++) { /* if row is on different pe */ if (reqind[j] < beg_rows[this_pe] || reqind[j] > end_rows[this_pe]) break; } /* Request rows in reqind[i..j-1] */ ierr = hypre_MPI_Isend(&reqind[i], j-i, HYPRE_MPI_INT, this_pe, 444, comm_dh, &request); CHECK_MPI_V_ERROR(ierr); ierr = hypre_MPI_Request_free(&request); CHECK_MPI_V_ERROR(ierr); /* Count of number of number of indices needed from this_pe */ outlist[this_pe] = j-i; ierr = hypre_MPI_Recv_init(&mat->recvbuf[i+m], j-i, hypre_MPI_DOUBLE, this_pe, 555, comm_dh, &mat->recv_req[mat->num_recv]); CHECK_MPI_V_ERROR(ierr); mat->num_recv++; mat->recvlen += j-i; /* only used for statistical reporting */ } END_FUNC_DH } /* adopted from Edmond Chow's ParaSails */ #undef __FUNC__ #define __FUNC__ "setup_matvec_sends_private" void setup_matvec_sends_private(Mat_dh mat, HYPRE_Int *inlist) { START_FUNC_DH HYPRE_Int ierr, i, j, sendlen, first = mat->beg_row; hypre_MPI_Request *requests; hypre_MPI_Status *statuses; requests = (hypre_MPI_Request *) MALLOC_DH(np_dh * sizeof(hypre_MPI_Request)); CHECK_V_ERROR; statuses = (hypre_MPI_Status *) MALLOC_DH(np_dh * sizeof(hypre_MPI_Status)); CHECK_V_ERROR; /* Determine size of and allocate sendbuf and sendind */ sendlen = 0; for (i=0; i<np_dh; i++) sendlen += inlist[i]; mat->sendlen = sendlen; mat->sendbuf = (double *)MALLOC_DH(sendlen * sizeof(double)); CHECK_V_ERROR; mat->sendind = (HYPRE_Int *)MALLOC_DH(sendlen * sizeof(HYPRE_Int)); CHECK_V_ERROR; j = 0; mat->num_send = 0; for (i=0; i<np_dh; i++) { if (inlist[i] != 0) { /* Post receive for the actual indices */ ierr = hypre_MPI_Irecv(&mat->sendind[j], inlist[i], HYPRE_MPI_INT, i, 444, comm_dh, &requests[mat->num_send]); CHECK_MPI_V_ERROR(ierr); /* Set up the send */ ierr = hypre_MPI_Send_init(&mat->sendbuf[j], inlist[i], hypre_MPI_DOUBLE, i, 555, comm_dh, &mat->send_req[mat->num_send]); CHECK_MPI_V_ERROR(ierr); mat->num_send++; j += inlist[i]; } } /* total bytes to be sent during matvec */ mat->time[MATVEC_WORDS] = j; ierr = hypre_MPI_Waitall(mat->num_send, requests, statuses); CHECK_MPI_V_ERROR(ierr); /* convert global indices to local indices */ /* these are all indices on this processor */ for (i=0; i<mat->sendlen; i++) mat->sendind[i] -= first; FREE_DH(requests); FREE_DH(statuses); END_FUNC_DH } /* unthreaded MPI version */ #undef __FUNC__ #define __FUNC__ "Mat_dhMatVec" void Mat_dhMatVec(Mat_dh mat, double *x, double *b) { START_FUNC_DH if (np_dh == 1) { Mat_dhMatVec_uni(mat, x, b); CHECK_V_ERROR; } else { HYPRE_Int ierr, i, row, m = mat->m; HYPRE_Int *rp = mat->rp, *cval = mat->cval; double *aval = mat->aval; HYPRE_Int *sendind = mat->sendind; HYPRE_Int sendlen = mat->sendlen; double *sendbuf = mat->sendbuf; double *recvbuf = mat->recvbuf; double t1 = 0, t2 = 0, t3 = 0, t4 = 0; bool timeFlag = mat->matvec_timing; if (timeFlag) t1 = hypre_MPI_Wtime(); /* Put components of x into the right outgoing buffers */ if (! 
commsOnly) { for (i=0; i<sendlen; i++) sendbuf[i] = x[sendind[i]]; } if (timeFlag) { t2 = hypre_MPI_Wtime(); mat->time[MATVEC_TIME] += (t2 - t1); } ierr = hypre_MPI_Startall(mat->num_recv, mat->recv_req); CHECK_MPI_V_ERROR(ierr); ierr = hypre_MPI_Startall(mat->num_send, mat->send_req); CHECK_MPI_V_ERROR(ierr); ierr = hypre_MPI_Waitall(mat->num_recv, mat->recv_req, mat->status); CHECK_MPI_V_ERROR(ierr); ierr = hypre_MPI_Waitall(mat->num_send, mat->send_req, mat->status); CHECK_MPI_V_ERROR(ierr); if (timeFlag) { t3 = hypre_MPI_Wtime(); mat->time[MATVEC_MPI_TIME] += (t3 - t2); } /* Copy local part of x into top part of recvbuf */ if (! commsOnly) { for (i=0; i<m; i++) recvbuf[i] = x[i]; /* do the multiply */ for (row=0; row<m; row++) { HYPRE_Int len = rp[row+1] - rp[row]; HYPRE_Int * ind = cval+rp[row]; double * val = aval+rp[row]; double temp = 0.0; for (i=0; i<len; i++) { temp += (val[i] * recvbuf[ind[i]]); } b[row] = temp; } } /* if (! commsOnly) */ if (timeFlag) { t4 = hypre_MPI_Wtime(); mat->time[MATVEC_TOTAL_TIME] += (t4 - t1); mat->time[MATVEC_TIME] += (t4 - t3); } } END_FUNC_DH } /* OpenMP/MPI version */ #undef __FUNC__ #define __FUNC__ "Mat_dhMatVec_omp" void Mat_dhMatVec_omp(Mat_dh mat, double *x, double *b) { START_FUNC_DH HYPRE_Int ierr, i, row, m = mat->m; HYPRE_Int *rp = mat->rp, *cval = mat->cval; double *aval = mat->aval; HYPRE_Int *sendind = mat->sendind; HYPRE_Int sendlen = mat->sendlen; double *sendbuf = mat->sendbuf; double *recvbuf = mat->recvbuf; double t1 = 0, t2 = 0, t3 = 0, t4 = 0, tx = 0; double *val, temp; HYPRE_Int len, *ind; bool timeFlag = mat->matvec_timing; if (timeFlag) t1 = hypre_MPI_Wtime(); /* Put components of x into the right outgoing buffers */ #ifdef USING_OPENMP_DH #pragma omp parallel for schedule(runtime) private(i) #endif for (i=0; i<sendlen; i++) sendbuf[i] = x[sendind[i]]; if (timeFlag) { t2 = hypre_MPI_Wtime(); mat->time[MATVEC_TIME] += (t2 - t1); } ierr = hypre_MPI_Startall(mat->num_recv, mat->recv_req); CHECK_MPI_V_ERROR(ierr); ierr = hypre_MPI_Startall(mat->num_send, mat->send_req); CHECK_MPI_V_ERROR(ierr); ierr = hypre_MPI_Waitall(mat->num_recv, mat->recv_req, mat->status); CHECK_MPI_V_ERROR(ierr); ierr = hypre_MPI_Waitall(mat->num_send, mat->send_req, mat->status); CHECK_MPI_V_ERROR(ierr); if (timeFlag) { t3 = hypre_MPI_Wtime(); mat->time[MATVEC_MPI_TIME] += (t3 - t2); } /* Copy local part of x into top part of recvbuf */ #ifdef USING_OPENMP_DH #pragma omp parallel for schedule(runtime) private(i) #endif for (i=0; i<m; i++) recvbuf[i] = x[i]; if (timeFlag) { tx = hypre_MPI_Wtime(); mat->time[MATVEC_MPI_TIME2] += (tx - t1); } /* do the multiply */ #ifdef USING_OPENMP_DH #pragma omp parallel for schedule(runtime) private(row,i,len,ind,val,temp) #endif for (row=0; row<m; row++) { len = rp[row+1] - rp[row]; ind = cval+rp[row]; val = aval+rp[row]; temp = 0.0; for (i=0; i<len; i++) { temp += (val[i] * recvbuf[ind[i]]); } b[row] = temp; } if (timeFlag) { t4 = hypre_MPI_Wtime(); mat->time[MATVEC_TOTAL_TIME] += (t4 - t1); mat->time[MATVEC_TIME] += (t4 - t3); } END_FUNC_DH } /* OpenMP/single primary task version */ #undef __FUNC__ #define __FUNC__ "Mat_dhMatVec_uni_omp" void Mat_dhMatVec_uni_omp(Mat_dh mat, double *x, double *b) { START_FUNC_DH HYPRE_Int i, row, m = mat->m; HYPRE_Int *rp = mat->rp, *cval = mat->cval; double *aval = mat->aval; double t1 = 0, t2 = 0; bool timeFlag = mat->matvec_timing; if (timeFlag) { t1 = hypre_MPI_Wtime(); } /* do the multiply */ #ifdef USING_OPENMP_DH #pragma omp parallel for schedule(runtime) private(row,i) #endif 
for (row=0; row<m; row++) { HYPRE_Int len = rp[row+1] - rp[row]; HYPRE_Int * ind = cval+rp[row]; double * val = aval+rp[row]; double temp = 0.0; for (i=0; i<len; i++) { temp += (val[i] * x[ind[i]]); } b[row] = temp; } if (timeFlag) { t2 = hypre_MPI_Wtime(); mat->time[MATVEC_TIME] += (t2 - t1); mat->time[MATVEC_TOTAL_TIME] += (t2 - t1); } END_FUNC_DH } /* unthreaded, single-task version */ #undef __FUNC__ #define __FUNC__ "Mat_dhMatVec_uni" void Mat_dhMatVec_uni(Mat_dh mat, double *x, double *b) { START_FUNC_DH HYPRE_Int i, row, m = mat->m; HYPRE_Int *rp = mat->rp, *cval = mat->cval; double *aval = mat->aval; double t1 = 0, t2 = 0; bool timeFlag = mat->matvec_timing; if (timeFlag) t1 = hypre_MPI_Wtime(); for (row=0; row<m; row++) { HYPRE_Int len = rp[row+1] - rp[row]; HYPRE_Int * ind = cval+rp[row]; double * val = aval+rp[row]; double temp = 0.0; for (i=0; i<len; i++) { temp += (val[i] * x[ind[i]]); } b[row] = temp; } if (timeFlag) { t2 = hypre_MPI_Wtime(); mat->time[MATVEC_TIME] += (t2 - t1); mat->time[MATVEC_TOTAL_TIME] += (t2 - t1); } END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhReadNz" HYPRE_Int Mat_dhReadNz(Mat_dh mat) { START_FUNC_DH HYPRE_Int ierr, retval = mat->rp[mat->m]; HYPRE_Int nz = retval; ierr = hypre_MPI_Allreduce(&nz, &retval, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm_dh); CHECK_MPI_ERROR(ierr); END_FUNC_VAL(retval) } #if 0 #undef __FUNC__ #define __FUNC__ "Mat_dhAllocate_getRow_private" void Mat_dhAllocate_getRow_private(Mat_dh A) { START_FUNC_DH HYPRE_Int i, *rp = A->rp, len = 0; HYPRE_Int m = A->m; /* find longest row in matrix */ for (i=0; i<m; ++i) len = MAX(len, rp[i+1]-rp[i]); len *= A->bs; /* free any previously allocated private storage */ if (len > A->len_private) { if (A->cval_private != NULL) { FREE_DH(A->cval_private); CHECK_V_ERROR; } if (A->aval_private != NULL) { FREE_DH(A->aval_private); CHECK_V_ERROR; } } /* allocate private storage */ A->cval_private = (HYPRE_Int*)MALLOC_DH(len*sizeof(HYPRE_Int)); CHECK_V_ERROR; A->aval_private = (double*)MALLOC_DH(len*sizeof(double)); CHECK_V_ERROR; A->len_private = len; END_FUNC_DH } #endif #undef __FUNC__ #define __FUNC__ "Mat_dhZeroTiming" void Mat_dhZeroTiming(Mat_dh mat) { START_FUNC_DH HYPRE_Int i; for (i=0; i<MAT_DH_BINS; ++i) { mat->time[i] = 0; mat->time_max[i] = 0; mat->time_min[i] = 0; } END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhReduceTiming" void Mat_dhReduceTiming(Mat_dh mat) { START_FUNC_DH if (mat->time[MATVEC_MPI_TIME]) { mat->time[MATVEC_RATIO] = mat->time[MATVEC_TIME] / mat->time[MATVEC_MPI_TIME]; } hypre_MPI_Allreduce(mat->time, mat->time_min, MAT_DH_BINS, hypre_MPI_DOUBLE, hypre_MPI_MIN, comm_dh); hypre_MPI_Allreduce(mat->time, mat->time_max, MAT_DH_BINS, hypre_MPI_DOUBLE, hypre_MPI_MAX, comm_dh); END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhPermute" void Mat_dhPermute(Mat_dh A, HYPRE_Int *n2o, Mat_dh *Bout) { START_FUNC_DH Mat_dh B; HYPRE_Int i, j, *RP = A->rp, *CVAL = A->cval; HYPRE_Int *o2n, *rp, *cval, m = A->m, nz = RP[m]; double *aval, *AVAL = A->aval; Mat_dhCreate(&B); CHECK_V_ERROR; B->m = B->n = m; *Bout = B; /* form inverse permutation */ o2n = (HYPRE_Int*)MALLOC_DH(m*sizeof(HYPRE_Int)); CHECK_V_ERROR; for (i=0; i<m; ++i) o2n[n2o[i]] = i; /* allocate storage for permuted matrix */ rp = B->rp = (HYPRE_Int*)MALLOC_DH((m+1)*sizeof(HYPRE_Int)); CHECK_V_ERROR; cval = B->cval = (HYPRE_Int*)MALLOC_DH(nz*sizeof(HYPRE_Int)); CHECK_V_ERROR; aval = B->aval = (double*)MALLOC_DH(nz*sizeof(double)); CHECK_V_ERROR; /* form new rp array */ rp[0] = 0; for (i=0; i<m; ++i) { HYPRE_Int 
oldRow = n2o[i]; rp[i+1] = RP[oldRow+1]-RP[oldRow]; } for (i=1; i<=m; ++i) rp[i] = rp[i] + rp[i-1]; for (i=0; i<m; ++i) { HYPRE_Int oldRow = n2o[i]; HYPRE_Int idx = rp[i]; for (j=RP[oldRow]; j<RP[oldRow+1]; ++j) { cval[idx] = o2n[CVAL[j]]; aval[idx] = AVAL[j]; ++idx; } } FREE_DH(o2n); CHECK_V_ERROR; END_FUNC_DH } /*---------------------------------------------------------------------- * Print methods *----------------------------------------------------------------------*/ /* seq or mpi */ #undef __FUNC__ #define __FUNC__ "Mat_dhPrintGraph" void Mat_dhPrintGraph(Mat_dh A, SubdomainGraph_dh sg, FILE *fp) { START_FUNC_DH HYPRE_Int pe, id = myid_dh; HYPRE_Int ierr; if (sg != NULL) { id = sg->o2n_sub[id]; } for (pe=0; pe<np_dh; ++pe) { ierr = hypre_MPI_Barrier(comm_dh); CHECK_MPI_V_ERROR(ierr); if (id == pe) { if (sg == NULL) { mat_dh_print_graph_private(A->m, A->beg_row, A->rp, A->cval, A->aval, NULL, NULL, NULL, fp); CHECK_V_ERROR; } else { HYPRE_Int beg_row = sg->beg_rowP[myid_dh]; mat_dh_print_graph_private(A->m, beg_row, A->rp, A->cval, A->aval, sg->n2o_row, sg->o2n_col, sg->o2n_ext, fp); CHECK_V_ERROR; } } } END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhPrintRows" void Mat_dhPrintRows(Mat_dh A, SubdomainGraph_dh sg, FILE *fp) { START_FUNC_DH bool noValues; HYPRE_Int m = A->m, *rp = A->rp, *cval = A->cval; double *aval = A->aval; noValues = (Parser_dhHasSwitch(parser_dh, "-noValues")); if (noValues) aval = NULL; /*---------------------------------------------------------------- * case 1: print local portion of unpermuted matrix *----------------------------------------------------------------*/ if (sg == NULL) { HYPRE_Int i, j; HYPRE_Int beg_row = A->beg_row; hypre_fprintf(fp, "\n----- A, unpermuted ------------------------------------\n"); for (i=0; i<m; ++i) { hypre_fprintf(fp, "%i :: ", 1+i+beg_row); for (j=rp[i]; j<rp[i+1]; ++j) { if (noValues) { hypre_fprintf(fp, "%i ", 1+cval[j]); } else { hypre_fprintf(fp, "%i,%g ; ", 1+cval[j], aval[j]); } } hypre_fprintf(fp, "\n"); } } /*---------------------------------------------------------------- * case 2: single mpi task, with multiple subdomains *----------------------------------------------------------------*/ else if (np_dh == 1) { HYPRE_Int i, k, idx = 1; HYPRE_Int oldRow; for (i=0; i<sg->blocks; ++i) { HYPRE_Int oldBlock = sg->n2o_sub[i]; /* here, 'beg_row' and 'end_row' refer to rows in the original ordering of A. 
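The block's first row in the permuted ordering is sg->beg_rowP[oldBlock],
       which is printed below as "new beg_row".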
*/ HYPRE_Int beg_row = sg->beg_row[oldBlock]; HYPRE_Int end_row = beg_row + sg->row_count[oldBlock]; hypre_fprintf(fp, "\n"); hypre_fprintf(fp, "\n----- A, permuted, single mpi task ------------------\n"); hypre_fprintf(fp, "---- new subdomain: %i; old subdomain: %i\n", i, oldBlock); hypre_fprintf(fp, " old beg_row: %i; new beg_row: %i\n", sg->beg_row[oldBlock], sg->beg_rowP[oldBlock]); hypre_fprintf(fp, " local rows in this block: %i\n", sg->row_count[oldBlock]); hypre_fprintf(fp, " bdry rows in this block: %i\n", sg->bdry_count[oldBlock]); hypre_fprintf(fp, " 1st bdry row= %i \n", 1+end_row-sg->bdry_count[oldBlock]); for (oldRow=beg_row; oldRow<end_row; ++oldRow) { HYPRE_Int len = 0, *cval; double *aval; hypre_fprintf(fp, "%3i (old= %3i) :: ", idx, 1+oldRow); ++idx; Mat_dhGetRow(A, oldRow, &len, &cval, &aval); CHECK_V_ERROR; for (k=0; k<len; ++k) { if (noValues) { hypre_fprintf(fp, "%i ", 1+sg->o2n_col[cval[k]]); } else { hypre_fprintf(fp, "%i,%g ; ", 1+sg->o2n_col[cval[k]], aval[k]); } } hypre_fprintf(fp, "\n"); Mat_dhRestoreRow(A, oldRow, &len, &cval, &aval); CHECK_V_ERROR; } } } /*---------------------------------------------------------------- * case 3: multiple mpi tasks, one subdomain per task *----------------------------------------------------------------*/ else { Hash_i_dh hash = sg->o2n_ext; HYPRE_Int *o2n_col = sg->o2n_col, *n2o_row = sg->n2o_row; HYPRE_Int beg_row = sg->beg_row[myid_dh]; HYPRE_Int beg_rowP = sg->beg_rowP[myid_dh]; HYPRE_Int i, j; for (i=0; i<m; ++i) { HYPRE_Int row = n2o_row[i]; hypre_fprintf(fp, "%3i (old= %3i) :: ", 1+i+beg_rowP, 1+row+beg_row); for (j=rp[row]; j<rp[row+1]; ++j) { HYPRE_Int col = cval[j]; /* find permuted (old-to-new) value for the column */ /* case i: column is locally owned */ if (col >= beg_row && col < beg_row+m) { col = o2n_col[col-beg_row] + beg_rowP; } /* case ii: column is external */ else { HYPRE_Int tmp = col; tmp = Hash_i_dhLookup(hash, col); CHECK_V_ERROR; if (tmp == -1) { hypre_sprintf(msgBuf_dh, "nonlocal column= %i not in hash table", 1+col); SET_V_ERROR(msgBuf_dh); } else { col = tmp; } } if (noValues) { hypre_fprintf(fp, "%i ", 1+col); } else { hypre_fprintf(fp, "%i,%g ; ", 1+col, aval[j]); } } hypre_fprintf(fp, "\n"); } } END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhPrintTriples" void Mat_dhPrintTriples(Mat_dh A, SubdomainGraph_dh sg, char *filename) { START_FUNC_DH HYPRE_Int m = A->m, *rp = A->rp, *cval = A->cval; double *aval = A->aval; bool noValues; bool matlab; FILE *fp; noValues = (Parser_dhHasSwitch(parser_dh, "-noValues")); if (noValues) aval = NULL; matlab = (Parser_dhHasSwitch(parser_dh, "-matlab")); /*---------------------------------------------------------------- * case 1: unpermuted matrix, single or multiple mpi tasks *----------------------------------------------------------------*/ if (sg == NULL) { HYPRE_Int i, j, pe; HYPRE_Int beg_row = A->beg_row; double val; for (pe=0; pe<np_dh; ++pe) { hypre_MPI_Barrier(comm_dh); if (pe == myid_dh) { if (pe == 0) { fp=openFile_dh(filename, "w"); CHECK_V_ERROR; } else { fp=openFile_dh(filename, "a"); CHECK_V_ERROR; } for (i=0; i<m; ++i) { for (j=rp[i]; j<rp[i+1]; ++j) { if (noValues) { hypre_fprintf(fp, "%i %i\n", 1+i+beg_row, 1+cval[j]); } else { val = aval[j]; if (val == 0.0 && matlab) val = _MATLAB_ZERO_; hypre_fprintf(fp, TRIPLES_FORMAT, 1+i+beg_row, 1+cval[j], val); } } } closeFile_dh(fp); CHECK_V_ERROR; } } } /*---------------------------------------------------------------- * case 2: single mpi task, with multiple subdomains 
*----------------------------------------------------------------*/ else if (np_dh == 1) { HYPRE_Int i, j, k, idx = 1; fp=openFile_dh(filename, "w"); CHECK_V_ERROR; for (i=0; i<sg->blocks; ++i) { HYPRE_Int oldBlock = sg->n2o_sub[i]; HYPRE_Int beg_row = sg->beg_rowP[oldBlock]; HYPRE_Int end_row = beg_row + sg->row_count[oldBlock]; for (j=beg_row; j<end_row; ++j) { HYPRE_Int len = 0, *cval; double *aval; HYPRE_Int oldRow = sg->n2o_row[j]; Mat_dhGetRow(A, oldRow, &len, &cval, &aval); CHECK_V_ERROR; if (noValues) { for (k=0; k<len; ++k) { hypre_fprintf(fp, "%i %i\n", idx, 1+sg->o2n_col[cval[k]]); } ++idx; } else { for (k=0; k<len; ++k) { double val = aval[k]; if (val == 0.0 && matlab) val = _MATLAB_ZERO_; hypre_fprintf(fp, TRIPLES_FORMAT, idx, 1+sg->o2n_col[cval[k]], val); } ++idx; } Mat_dhRestoreRow(A, oldRow, &len, &cval, &aval); CHECK_V_ERROR; } } } /*---------------------------------------------------------------- * case 3: multiple mpi tasks, one subdomain per task *----------------------------------------------------------------*/ else { Hash_i_dh hash = sg->o2n_ext; HYPRE_Int *o2n_col = sg->o2n_col, *n2o_row = sg->n2o_row; HYPRE_Int beg_row = sg->beg_row[myid_dh]; HYPRE_Int beg_rowP = sg->beg_rowP[myid_dh]; HYPRE_Int i, j, pe; HYPRE_Int id = sg->o2n_sub[myid_dh]; for (pe=0; pe<np_dh; ++pe) { hypre_MPI_Barrier(comm_dh); if (id == pe) { if (pe == 0) { fp=openFile_dh(filename, "w"); CHECK_V_ERROR; } else { fp=openFile_dh(filename, "a"); CHECK_V_ERROR; } for (i=0; i<m; ++i) { HYPRE_Int row = n2o_row[i]; for (j=rp[row]; j<rp[row+1]; ++j) { HYPRE_Int col = cval[j]; double val = 0.0; if (aval != NULL) val = aval[j]; if (val == 0.0 && matlab) val = _MATLAB_ZERO_; /* find permuted (old-to-new) value for the column */ /* case i: column is locally owned */ if (col >= beg_row && col < beg_row+m) { col = o2n_col[col-beg_row] + beg_rowP; } /* case ii: column is external */ else { HYPRE_Int tmp = col; tmp = Hash_i_dhLookup(hash, col); CHECK_V_ERROR; if (tmp == -1) { hypre_sprintf(msgBuf_dh, "nonlocal column= %i not in hash table", 1+col); SET_V_ERROR(msgBuf_dh); } else { col = tmp; } } if (noValues) { hypre_fprintf(fp, "%i %i\n", 1+i+beg_rowP, 1+col); } else { hypre_fprintf(fp, TRIPLES_FORMAT, 1+i+beg_rowP, 1+col, val); } } } closeFile_dh(fp); CHECK_V_ERROR; } } } END_FUNC_DH } /* seq only */ #undef __FUNC__ #define __FUNC__ "Mat_dhPrintCSR" void Mat_dhPrintCSR(Mat_dh A, SubdomainGraph_dh sg, char *filename) { START_FUNC_DH FILE *fp; if (np_dh > 1) { SET_V_ERROR("only implemented for a single mpi task"); } if (sg != NULL) { SET_V_ERROR("not implemented for reordered matrix (SubdomainGraph_dh should be NULL)"); } fp=openFile_dh(filename, "w"); CHECK_V_ERROR; /* sg is always NULL here (enforced above), so print the matrix as stored */ mat_dh_print_csr_private(A->m, A->rp, A->cval, A->aval, fp); CHECK_V_ERROR; closeFile_dh(fp); CHECK_V_ERROR; END_FUNC_DH } /* seq */ /* no reordering */ #undef __FUNC__ #define __FUNC__ "Mat_dhPrintBIN" void Mat_dhPrintBIN(Mat_dh A, SubdomainGraph_dh sg, char *filename) { START_FUNC_DH if (np_dh > 1) { SET_V_ERROR("only implemented for a single MPI task"); } /* if (n2o != NULL || o2n != NULL || hash != NULL) { */ if (sg != NULL) { SET_V_ERROR("not implemented for reordering; ensure sg=NULL"); } io_dh_print_ebin_mat_private(A->m, A->beg_row, A->rp, A->cval, A->aval, NULL, NULL, NULL, filename); CHECK_V_ERROR; END_FUNC_DH } /*---------------------------------------------------------------------- * Read methods
*----------------------------------------------------------------------*/ /* seq only */ #undef __FUNC__ #define __FUNC__ "Mat_dhReadCSR" void Mat_dhReadCSR(Mat_dh *mat, char *filename) { START_FUNC_DH Mat_dh A; FILE *fp; if (np_dh > 1) { SET_V_ERROR("only implemented for a single MPI task"); } fp=openFile_dh(filename, "r"); CHECK_V_ERROR; Mat_dhCreate(&A); CHECK_V_ERROR; mat_dh_read_csr_private(&A->m, &A->rp, &A->cval, &A->aval, fp); CHECK_V_ERROR; A->n = A->m; *mat = A; closeFile_dh(fp); CHECK_V_ERROR; END_FUNC_DH } /* seq only */ #undef __FUNC__ #define __FUNC__ "Mat_dhReadTriples" void Mat_dhReadTriples(Mat_dh *mat, HYPRE_Int ignore, char *filename) { START_FUNC_DH FILE *fp = NULL; Mat_dh A = NULL; if (np_dh > 1) { SET_V_ERROR("only implemented for a single MPI task"); } fp=openFile_dh(filename, "r"); CHECK_V_ERROR; Mat_dhCreate(&A); CHECK_V_ERROR; mat_dh_read_triples_private(ignore, &A->m, &A->rp, &A->cval, &A->aval, fp); CHECK_V_ERROR; A->n = A->m; *mat = A; closeFile_dh(fp); CHECK_V_ERROR; END_FUNC_DH } /* here we pass the private function a filename, instead of an open file; the reason is that Euclid's binary format is more complicated. As with the other "Read" methods, this is only for a single mpi task. */ #undef __FUNC__ #define __FUNC__ "Mat_dhReadBIN" void Mat_dhReadBIN(Mat_dh *mat, char *filename) { START_FUNC_DH Mat_dh A; if (np_dh > 1) { SET_V_ERROR("only implemented for a single MPI task"); } Mat_dhCreate(&A); CHECK_V_ERROR; io_dh_read_ebin_mat_private(&A->m, &A->rp, &A->cval, &A->aval, filename); CHECK_V_ERROR; A->n = A->m; *mat = A; END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhTranspose" void Mat_dhTranspose(Mat_dh A, Mat_dh *Bout) { START_FUNC_DH Mat_dh B; if (np_dh > 1) { SET_V_ERROR("only for sequential"); } Mat_dhCreate(&B); CHECK_V_ERROR; *Bout = B; B->m = B->n = A->m; mat_dh_transpose_private(A->m, A->rp, &B->rp, A->cval, &B->cval, A->aval, &B->aval); CHECK_V_ERROR; END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhMakeStructurallySymmetric" void Mat_dhMakeStructurallySymmetric(Mat_dh A) { START_FUNC_DH if (np_dh > 1) { SET_V_ERROR("only for sequential"); } make_symmetric_private(A->m, &A->rp, &A->cval, &A->aval); CHECK_V_ERROR; END_FUNC_DH } void insert_diags_private(Mat_dh A, HYPRE_Int ct); /* inserts diagonal if not explicitly present; sets diagonal value in row i to sum of absolute values of all elts in row i.
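   Since the new diagonal value a_ii = sum_j |a_ij| is at least as large as
   the sum of the off-diagonal magnitudes in the row, the modified matrix is
   (weakly) diagonally dominant, and every row with at least one nonzero
   entry gets a safe, nonzero pivot.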
*/ #undef __FUNC__ #define __FUNC__ "Mat_dhFixDiags" void Mat_dhFixDiags(Mat_dh A) { START_FUNC_DH HYPRE_Int i, j; HYPRE_Int *rp = A->rp, *cval = A->cval, m = A->m; HYPRE_Int ct = 0; /* number of missing diagonals */ double *aval = A->aval; /* determine if any diagonals are missing */ for (i=0; i<m; ++i) { bool flag = true; for (j=rp[i]; j<rp[i+1]; ++j) { HYPRE_Int col = cval[j]; if (col == i) { flag = false; break; } } if (flag) ++ct; } /* insert any missing diagonal elements */ if (ct) { hypre_printf("\nMat_dhFixDiags:: %i diags not explicitly present; inserting!\n", ct); insert_diags_private(A, ct); CHECK_V_ERROR; rp = A->rp; cval = A->cval; aval = A->aval; } /* set the value of all diagonal elements */ for (i=0; i<m; ++i) { double sum = 0.0; for (j=rp[i]; j<rp[i+1]; ++j) { sum += fabs(aval[j]); } for (j=rp[i]; j<rp[i+1]; ++j) { if (cval[j] == i) { aval[j] = sum; } } } END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "insert_diags_private" void insert_diags_private(Mat_dh A, HYPRE_Int ct) { START_FUNC_DH HYPRE_Int *RP = A->rp, *CVAL = A->cval; HYPRE_Int *rp, *cval, m = A->m; double *aval, *AVAL = A->aval; HYPRE_Int nz = RP[m] + ct; HYPRE_Int i, j, idx = 0; rp = A->rp = (HYPRE_Int*)MALLOC_DH((m+1)*sizeof(HYPRE_Int)); CHECK_V_ERROR; cval = A->cval = (HYPRE_Int*)MALLOC_DH(nz*sizeof(HYPRE_Int)); CHECK_V_ERROR; aval = A->aval = (double*)MALLOC_DH(nz*sizeof(double)); CHECK_V_ERROR; rp[0] = 0; for (i=0; i<m; ++i) { bool flag = true; for (j=RP[i]; j<RP[i+1]; ++j) { cval[idx] = CVAL[j]; aval[idx] = AVAL[j]; ++idx; if (CVAL[j] == i) flag = false; } if (flag) { cval[idx] = i; aval[idx] = 0.0; ++idx; } rp[i+1] = idx; } FREE_DH(RP); CHECK_V_ERROR; FREE_DH(CVAL); CHECK_V_ERROR; FREE_DH(AVAL); CHECK_V_ERROR; END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhPrintDiags" void Mat_dhPrintDiags(Mat_dh A, FILE *fp) { START_FUNC_DH HYPRE_Int i, j, m = A->m; HYPRE_Int *rp = A->rp, *cval = A->cval; double *aval = A->aval; hypre_fprintf(fp, "=================== diagonal elements ====================\n"); for (i=0; i<m; ++i) { bool flag = true; for (j=rp[i]; j<rp[i+1]; ++j) { if (cval[j] == i) { hypre_fprintf(fp, "%i %g\n", i+1, aval[j]); flag = false; break; } } if (flag) { hypre_fprintf(fp, "%i ---------- missing\n", i+1); } } END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhGetRow" void Mat_dhGetRow(Mat_dh B, HYPRE_Int globalRow, HYPRE_Int *len, HYPRE_Int **ind, double **val) { START_FUNC_DH HYPRE_Int row = globalRow - B->beg_row; if (row >= B->m) { hypre_sprintf(msgBuf_dh, "requested globalRow= %i, which is local row= %i, but only have %i rows!", globalRow, row, B->m); SET_V_ERROR(msgBuf_dh); } *len = B->rp[row+1] - B->rp[row]; if (ind != NULL) *ind = B->cval + B->rp[row]; if (val != NULL) *val = B->aval + B->rp[row]; END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhRestoreRow" void Mat_dhRestoreRow(Mat_dh B, HYPRE_Int row, HYPRE_Int *len, HYPRE_Int **ind, double **val) { START_FUNC_DH END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhRowPermute" void Mat_dhRowPermute(Mat_dh mat) { START_FUNC_DH if (ignoreMe) SET_V_ERROR("turned off; compilation problem on blue"); #if 0 HYPRE_Int i, j, m = mat->m, nz = mat->rp[m]; HYPRE_Int *o2n, *cval; HYPRE_Int algo = 1; double *r1, *c1; bool debug = mat->debug; bool isNatural; Mat_dh B; #if 0 * = 1 : Compute a row permutation of the matrix so that the * permuted matrix has as many entries on its diagonal as * possible. The values on the diagonal are of arbitrary size. * HSL subroutine MC21A/AD is used for this.
* = 2 : Compute a row permutation of the matrix so that the smallest * value on the diagonal of the permuted matrix is maximized. * = 3 : Compute a row permutation of the matrix so that the smallest * value on the diagonal of the permuted matrix is maximized. * The algorithm differs from the one used for JOB = 2 and may * have quite a different performance. * = 4 : Compute a row permutation of the matrix so that the sum * of the diagonal entries of the permuted matrix is maximized. * = 5 : Compute a row permutation of the matrix so that the product * of the diagonal entries of the permuted matrix is maximized * and vectors to scale the matrix so that the nonzero diagonal * entries of the permuted matrix are one in absolute value and * all the off-diagonal entries are less than or equal to one in * absolute value. #endif Parser_dhReadInt(parser_dh, "-rowPermute", &algo); CHECK_V_ERROR; if (algo < 1) algo = 1; if (algo > 5) algo = 1; hypre_sprintf(msgBuf_dh, "calling row permutation with algo= %i", algo); SET_INFO(msgBuf_dh); r1 = (double*)MALLOC_DH(m*sizeof(double)); CHECK_V_ERROR; c1 = (double*)MALLOC_DH(m*sizeof(double)); CHECK_V_ERROR; if (mat->row_perm == NULL) { mat->row_perm = o2n = (HYPRE_Int*)MALLOC_DH(m*sizeof(HYPRE_Int)); CHECK_V_ERROR; } else { o2n = mat->row_perm; } Mat_dhTranspose(mat, &B); CHECK_V_ERROR; /* get row permutation and scaling vectors */ dldperm(algo, m, nz, B->rp, B->cval, B->aval, o2n, r1, c1); /* permute column indices, then turn the matrix rightside up */ cval = B->cval; for (i=0; i<nz; ++i) cval[i] = o2n[cval[i]]; /* debug block */ if (debug && logFile != NULL) { hypre_fprintf(logFile, "\n-------- row permutation vector --------\n"); for (i=0; i<m; ++i) hypre_fprintf(logFile, "%i ", 1+o2n[i]); hypre_fprintf(logFile, "\n"); if (myid_dh == 0) { hypre_printf("\n-------- row permutation vector --------\n"); for (i=0; i<m; ++i) hypre_printf("%i ", 1+o2n[i]); hypre_printf("\n"); } } /* check to see if permutation is non-natural */ isNatural = true; for (i=0; i<m; ++i) { if (o2n[i] != i) { isNatural = false; break; } } if (isNatural) { hypre_printf("@@@ [%i] Mat_dhRowPermute :: got natural ordering!\n", myid_dh); } else { HYPRE_Int *rp = B->rp, *cval = B->cval; double *aval = B->aval; if (algo == 5) { hypre_printf("@@@ [%i] Mat_dhRowPermute :: scaling matrix rows and columns!\n", myid_dh); /* scale matrix */ for (i=0; i<m; i++) { r1[i] = exp(r1[i]); c1[i] = exp(c1[i]); } for (i=0; i<m; i++) for (j=rp[i]; j<rp[i+1]; j++) aval[j] *= r1[cval[j]] * c1[i]; } mat_dh_transpose_reuse_private(B->m, B->rp, B->cval, B->aval, mat->rp, mat->cval, mat->aval); CHECK_V_ERROR; } Mat_dhDestroy(B); CHECK_V_ERROR; FREE_DH(r1); CHECK_V_ERROR; FREE_DH(c1); CHECK_V_ERROR; #endif END_FUNC_DH } /*==============================================================================*/ #undef __FUNC__ #define __FUNC__ "Mat_dhPartition" void build_adj_lists_private(Mat_dh mat, HYPRE_Int **rpOUT, HYPRE_Int **cvalOUT) { START_FUNC_DH HYPRE_Int m = mat->m; HYPRE_Int *RP = mat->rp, *CVAL = mat->cval; HYPRE_Int nz = RP[m]; HYPRE_Int i, j, *rp, *cval, idx = 0; rp = *rpOUT = (HYPRE_Int *)MALLOC_DH((m+1)*sizeof(HYPRE_Int)); CHECK_V_ERROR; cval = *cvalOUT = (HYPRE_Int *)MALLOC_DH(nz*sizeof(HYPRE_Int)); CHECK_V_ERROR; rp[0] = 0; /* assume symmetry for now! 
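   (structurally symmetric, that is: whenever column j appears in row i,
    column i must also appear in row j, since METIS expects the adjacency
    lists of an undirected graph)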
*/ for (i=0; i<m; ++i) { for (j=RP[i]; j<RP[i+1]; ++j) { HYPRE_Int col = CVAL[j]; if (col != i) { cval[idx++] = col; } } rp[i+1] = idx; } END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhPartition" void Mat_dhPartition(Mat_dh mat, HYPRE_Int blocks, HYPRE_Int **beg_rowOUT, HYPRE_Int **row_countOUT, HYPRE_Int **n2oOUT, HYPRE_Int **o2nOUT) { START_FUNC_DH #ifndef HAVE_METIS_DH if (ignoreMe) SET_V_ERROR("not compiled for metis!"); #else HYPRE_Int *beg_row, *row_count, *n2o, *o2n, bk, new, *part; HYPRE_Int m = mat->m; HYPRE_Int i, cutEdgeCount; HYPRE_Int zero = 0; /* wgtflag/numflag; metis expects integer arguments here */ HYPRE_Int metisOpts[5] = {0, 0, 0, 0, 0}; HYPRE_Int *rp, *cval; /* allocate storage for returned arrays */ beg_row = *beg_rowOUT = (HYPRE_Int *)MALLOC_DH(blocks*sizeof(HYPRE_Int)); CHECK_V_ERROR; row_count = *row_countOUT = (HYPRE_Int *)MALLOC_DH(blocks*sizeof(HYPRE_Int)); CHECK_V_ERROR; *n2oOUT = n2o = (HYPRE_Int *)MALLOC_DH(m*sizeof(HYPRE_Int)); CHECK_V_ERROR; *o2nOUT = o2n = (HYPRE_Int *)MALLOC_DH(m*sizeof(HYPRE_Int)); CHECK_V_ERROR; #if 0 ============================================================= Metis arguments: n - number of nodes rp[], cval[] NULL, NULL, 0 /*no edge or vertex weights*/ 0 /*use zero-based numbering*/ blocksIN, options[5] = 0 :: 0/1 use defaults; use options 1..4 1 :: edgecutOUT, part[] ============================================================= #endif /* form the graph representation that metis wants */ build_adj_lists_private(mat, &rp, &cval); CHECK_V_ERROR; part = (HYPRE_Int *)MALLOC_DH(m*sizeof(HYPRE_Int)); CHECK_V_ERROR; /* get partition vector from metis */ METIS_PartGraphKway(&m, rp, cval, NULL, NULL, &zero, &zero, &blocks, metisOpts, &cutEdgeCount, part); FREE_DH(rp); CHECK_V_ERROR; FREE_DH(cval); CHECK_V_ERROR; if (mat->debug) { printf_dh("\nmetis partitioning vector; blocks= %i\n", blocks); for (i=0; i<m; ++i) printf_dh(" %i %i\n", i+1, part[i]); } /* compute beg_row, row_count arrays from partition vector */ for (i=0; i<blocks; ++i) row_count[i] = 0; for (i=0; i<m; ++i) { bk = part[i]; /* block to which row i belongs */ row_count[bk] += 1; } beg_row[0] = 0; for (i=1; i<blocks; ++i) beg_row[i] = beg_row[i-1] + row_count[i-1]; if (mat->debug) { printf_dh("\nrow_counts: "); for (i=0; i<blocks; ++i) printf_dh(" %i", row_count[i]); printf_dh("\nbeg_row: "); for (i=0; i<blocks; ++i) printf_dh(" %i", beg_row[i]+1); printf_dh("\n"); } /* compute permutation vector */ { HYPRE_Int *tmp = (HYPRE_Int*)MALLOC_DH(blocks*sizeof(HYPRE_Int)); CHECK_V_ERROR; memcpy(tmp, beg_row, blocks*sizeof(HYPRE_Int)); for (i=0; i<m; ++i) { bk = part[i]; /* block to which row i belongs */ new = tmp[bk]; tmp[bk] += 1; o2n[i] = new; n2o[new] = i; } FREE_DH(tmp); } FREE_DH(part); CHECK_V_ERROR; #endif END_FUNC_DH }
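/* A small worked example of the o2n/n2o construction above (illustrative
   only; the values are hypothetical). Suppose m = 4, blocks = 2, and metis
   returns part[] = {1, 0, 1, 0}. Then row_count = {2, 2}, beg_row = {0, 2},
   and walking the rows in order while handing out slots from a running
   copy of beg_row gives

       i = 0: bk = 1, o2n[0] = 2        i = 1: bk = 0, o2n[1] = 0
       i = 2: bk = 1, o2n[2] = 3        i = 3: bk = 0, o2n[3] = 1

   so o2n = {2, 0, 3, 1} and its inverse n2o = {1, 3, 0, 2}: block 0's rows
   (1 and 3) come first in the new ordering, followed by block 1's rows. */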
GB_unaryop__lnot_int64_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_int64_int8 // op(A') function: GB_tran__lnot_int64_int8 // C type: int64_t // A type: int8_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int8_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ int64_t z = (int64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_int64_int8 ( int64_t *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_int64_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
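//------------------------------------------------------------------------------
// Note: for this operator, GB_CAST_OP (p,p) above expands to the statement
//
//      {
//          int8_t  aij = Ax [p] ;          // GB_GETA: load the input entry
//          int64_t x = (int64_t) aij ;     // GB_CASTING: cast int8_t -> int64_t
//          Cx [p] = !(x != 0) ;            // GB_OP: logical NOT
//      }
//
// so Cx [p] is 1 when Ax [p] is zero, and 0 otherwise.
//------------------------------------------------------------------------------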