mpi_utils.c
#include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <math.h> #include "mpi.h" #include "data_structures.h" extern void copyv(int *a, int * b, int n); #define SRC_BUF(i,j,k) (src_buf[((k)*(src_size[1])+(j))*(src_size[0])+(i)]) #define DST_BUF(i,j,k) (dst_buf[((k)*(dst_size[1])+(j))*(dst_size[0])+(i)]) void sub_array_copy(const real_t * restrict src_buf, real_t * restrict dst_buf, int *src_size, int *dst_size, int *cpy_size, int *src_offs, int *dst_offs){ int *ds = dst_size; int i,j,k; #pragma omp parallel { #pragma omp for private(k,j,i) schedule(static) for(i=0; i<cpy_size[2]; i++){ for(j=0; j<cpy_size[1]; j++){ for(k=0; k<cpy_size[0]; k++){ DST_BUF(k+dst_offs[0],j+dst_offs[1],i+dst_offs[2]) = SRC_BUF(k+src_offs[0], j+src_offs[1], i+src_offs[2]); } } } } } void sub_array_copy_tg(const real_t * restrict src_buf, real_t * restrict dst_buf, int *src_size, int *dst_size, int *cpy_size, int *src_offs, int *dst_offs, int tgs){ int *ds = dst_size; int i,j,k; #pragma omp parallel num_threads(tgs) { #pragma omp for private(k,j,i) schedule(static) for(i=0; i<cpy_size[2]; i++){ for(j=0; j<cpy_size[1]; j++){ for(k=0; k<cpy_size[0]; k++){ DST_BUF(k+dst_offs[0],j+dst_offs[1],i+dst_offs[2]) = SRC_BUF(k+src_offs[0], j+src_offs[1], i+src_offs[2]); } } } } } void mpi_topology_init(Parameters *p) { int old_rank; old_rank = p->mpi_rank; // if(p->is_diamond_ts == 1){ // diamond methods // p->t.is_periodic[2] = 1; // diamonds across the z-direction perform periodic communication // } if(p->target_ts == 2){ // intra-diamond methods p->t.is_periodic[1] = 1; // diamonds across the y-direction perform periodic communication } ierr = MPI_Cart_create(MPI_COMM_WORLD, 3, p->t.shape, p->t.is_periodic, 1, &(p->t.cart_comm)); CHKERR(ierr); // MPI_Errhandler_set(p->t.cart_comm, MPI_ERRORS_RETURN); MPI_Comm_rank (p->t.cart_comm, &(p->mpi_rank)); ierr = MPI_Cart_coords(p->t.cart_comm, p->mpi_rank, 3, p->t.rank_coords); CHKERR(ierr); ierr = MPI_Cart_shift(p->t.cart_comm, 0, 1, &(p->t.left), &(p->t.right)); CHKERR(ierr); // in X direction ierr = MPI_Cart_shift(p->t.cart_comm, 1, 1, &(p->t.down), &(p->t.up)); CHKERR(ierr); // in Y direction ierr = MPI_Cart_shift(p->t.cart_comm, 2, 1, &(p->t.back), &(p->t.front)); CHKERR(ierr); // in Z direction if(p->debug ==1){ MPI_Barrier(MPI_COMM_WORLD); if(p->mpi_rank == 0) { printf("\n******************************************************\n"); fflush(stdout); printf("DEBUG topology initialization information BEGIN\n"); fflush(stdout); printf("******************************************************\n"); fflush(stdout); sleep(1); } int j; for(j=0; j<p->mpi_size; j++){ if(j == old_rank){ printf("Rank %03d -> %03d\n", old_rank, p->mpi_rank); fflush(stdout); } MPI_Barrier(MPI_COMM_WORLD); } sleep(1); MPI_Barrier(MPI_COMM_WORLD); for(j=0; j<p->mpi_size; j++){ if(j == p->mpi_rank){ printf("Rank %03d topology (npx, npy, npz):(%02d,%02d,%02d) | left:%03d right:%03d | down:%03d up:%03d | back:%03d front:%03d\n", p->mpi_rank, p->t.rank_coords[0], p->t.rank_coords[1], p->t.rank_coords[2], p->t.left, p->t.right, p->t.down, p->t.up, p->t.back, p->t.front); fflush(stdout); } MPI_Barrier(MPI_COMM_WORLD); } sleep(1); if(p->mpi_rank == 0) { printf("******************************************************\n"); fflush(stdout); printf("DEBUG topology initialization information END\n"); fflush(stdout); printf("******************************************************\n"); fflush(stdout); } } } void standard_mpi_halo_init(Parameters *p){ if (p->t.shape[0] > 1) { p->h[0].shape[0]= p->stencil.r; 
p->h[0].shape[1]= p->ldomain_shape[1]-2*p->stencil.r; p->h[0].shape[2]= p->ldomain_shape[2]-2*p->stencil.r; // Y and Z direction have fixed Halo beginning across X p->h[0].recv_b[0]= 0; p->h[0].recv_e[0]= p->lstencil_shape[0]+p->stencil.r; p->h[0].recv_b[1]= p->stencil.r; p->h[0].recv_e[1]= p->stencil.r; p->h[0].recv_b[2]= p->stencil.r; p->h[0].recv_e[2]= p->stencil.r; p->h[0].send_b[0]= p->stencil.r; p->h[0].send_e[0]= p->lstencil_shape[0]; p->h[0].send_b[1]= p->stencil.r; p->h[0].send_e[1]= p->stencil.r; p->h[0].send_b[2]= p->stencil.r; p->h[0].send_e[2]= p->stencil.r; ierr = MPI_Type_create_subarray(3, p->ldomain_shape, p->h[0].shape, p->h[0].recv_b, MPI_ORDER_FORTRAN, MPI_real_t, &(p->h[0].recv_hb)); CHKERR(ierr); ierr = MPI_Type_create_subarray(3, p->ldomain_shape, p->h[0].shape, p->h[0].recv_e , MPI_ORDER_FORTRAN, MPI_real_t, &(p->h[0].recv_he)); CHKERR(ierr); ierr = MPI_Type_commit(&(p->h[0].recv_hb)); CHKERR(ierr); ierr = MPI_Type_commit(&(p->h[0].recv_he)); CHKERR(ierr); ierr = MPI_Type_create_subarray(3, p->ldomain_shape, p->h[0].shape, p->h[0].send_b, MPI_ORDER_FORTRAN, MPI_real_t, &(p->h[0].send_hb)); CHKERR(ierr); ierr = MPI_Type_create_subarray(3, p->ldomain_shape, p->h[0].shape, p->h[0].send_e , MPI_ORDER_FORTRAN, MPI_real_t, &(p->h[0].send_he)); CHKERR(ierr); ierr = MPI_Type_commit(&(p->h[0].send_hb)); CHKERR(ierr); ierr = MPI_Type_commit(&(p->h[0].send_he)); CHKERR(ierr); p->h[0].size = p->h[0].shape[0] * p->h[0].shape[1] * p->h[0].shape[2]; } if (p->t.shape[1] > 1) { p->h[1].shape[0]= p->lstencil_shape[0]; p->h[1].shape[1]= p->stencil.r; p->h[1].shape[2]= p->ldomain_shape[2]-2*p->stencil.r; // X and Z direction have fixed Halo beginning across Y p->h[1].recv_b[0]= p->stencil.r; p->h[1].recv_e[0]= p->stencil.r; p->h[1].recv_b[1]= 0; p->h[1].recv_e[1]= p->ldomain_shape[1]-p->stencil.r; p->h[1].recv_b[2]= p->stencil.r; p->h[1].recv_e[2]= p->stencil.r; p->h[1].send_b[0]= p->stencil.r; p->h[1].send_e[0]= p->stencil.r; p->h[1].send_b[1]= p->stencil.r; p->h[1].send_e[1]= p->ldomain_shape[1]-2*p->stencil.r; p->h[1].send_b[2]= p->stencil.r; p->h[1].send_e[2]= p->stencil.r; ierr = MPI_Type_create_subarray(3, p->ldomain_shape, p->h[1].shape, p->h[1].recv_b, MPI_ORDER_FORTRAN, MPI_real_t, &(p->h[1].recv_hb)); CHKERR(ierr); ierr = MPI_Type_create_subarray(3, p->ldomain_shape, p->h[1].shape, p->h[1].recv_e , MPI_ORDER_FORTRAN, MPI_real_t, &(p->h[1].recv_he)); CHKERR(ierr); ierr = MPI_Type_commit(&(p->h[1].recv_hb)); CHKERR(ierr); ierr = MPI_Type_commit(&(p->h[1].recv_he)); CHKERR(ierr); ierr = MPI_Type_create_subarray(3, p->ldomain_shape, p->h[1].shape, p->h[1].send_b, MPI_ORDER_FORTRAN, MPI_real_t, &(p->h[1].send_hb)); CHKERR(ierr); ierr = MPI_Type_create_subarray(3, p->ldomain_shape, p->h[1].shape, p->h[1].send_e , MPI_ORDER_FORTRAN, MPI_real_t, &(p->h[1].send_he)); CHKERR(ierr); ierr = MPI_Type_commit(&(p->h[1].send_hb)); CHKERR(ierr); ierr = MPI_Type_commit(&(p->h[1].send_he)); CHKERR(ierr); p->h[1].size = p->h[1].shape[0] * p->h[1].shape[1] * p->h[1].shape[2]; } int z_halo_size, xy_plain; if (p->t.shape[2] > 1) { if (p->h[2].is_contiguous==1){ p->h[2].shape[0]= p->ldomain_shape[0]; p->h[2].shape[1]= p->ldomain_shape[1]; } else{ p->h[2].shape[0]= p->lstencil_shape[0]; p->h[2].shape[1]= p->lstencil_shape[1]; } p->h[2].shape[2]= p->stencil.r; p->h[2].recv_b[0]= p->stencil.r; p->h[2].recv_b[1]= p->stencil.r; xy_plain = p->ldomain_shape[0] * p->ldomain_shape[1]; p->h[2].recv_b[2] = 0; p->h[2].send_b[2] = xy_plain * p->stencil.r; p->h[2].send_e[2] = xy_plain * 
(p->ldomain_shape[2]-2*p->stencil.r); p->h[2].recv_e[2] = xy_plain * (p->ldomain_shape[2]-p->stencil.r); if(p->h[2].is_contiguous == 0){ ierr = MPI_Type_create_subarray(3, p->ldomain_shape, p->h[2].shape, p->h[2].recv_b, MPI_ORDER_FORTRAN, MPI_real_t, &(p->h[2].halo)); CHKERR(ierr); } else{ // contiguous type is required z_halo_size = p->h[2].shape[0] * p->h[2].shape[1] * p->h[2].shape[2]; ierr = MPI_Type_contiguous(z_halo_size, MPI_real_t, &(p->h[2].halo)); CHKERR(ierr); } ierr = MPI_Type_commit(&(p->h[2].halo)); CHKERR(ierr); p->h[2].size = p->h[2].shape[0] * p->h[2].shape[1] * p->h[2].shape[2]; } } void intra_diamond_mpi_halo_init(Parameters *p) { if(p->t.shape[1] == 1) return; // Halo data types for the buffer u p->hu[1].shape[0]= p->lstencil_shape[0]; p->hu[1].shape[1]= p->stencil.r * (p->t_dim+1); p->hu[1].shape[2]= p->lstencil_shape[2]; p->hu[1].size = p->hu[1].shape[0] * p->hu[1].shape[1] * p->hu[1].shape[2]; // X and Z directions have fixed Halo beginning across Y p->hu[1].recv_b[0]= p->stencil.r; p->hu[1].recv_b[1]= p->stencil.r; // NOTE: shifted by halo size p->hu[1].recv_b[2]= p->stencil.r; p->hu[1].recv_e[0]= p->stencil.r; p->hu[1].recv_e[1]= p->ldomain_shape[1] - p->stencil.r*(p->t_dim +2); p->hu[1].recv_e[2]= p->stencil.r; p->hu[1].send_b[0]= p->stencil.r; p->hu[1].send_b[1]= p->stencil.r; p->hu[1].send_b[2]= p->stencil.r; p->hu[1].send_e[0]= p->stencil.r; p->hu[1].send_e[1]= p->ldomain_shape[1] - p->stencil.r*(p->t_dim +2); p->hu[1].send_e[2]= p->stencil.r; if(p->debug ==1){ MPI_Barrier(MPI_COMM_WORLD); if(p->mpi_rank == 0) { printf("\n******************************************************\n"); fflush(stdout); printf("DEBUG u halo information BEGIN\n"); fflush(stdout); printf("******************************************************\n"); fflush(stdout); sleep(1); } int j; for(j=0; j<p->mpi_size; j++){ if(j == p->mpi_rank){ printf("[%02d]:top(%02d,%02d,%02d)\n", p->mpi_rank, p->t.rank_coords[0], p->t.rank_coords[1], p->t.rank_coords[2]); fflush(stdout); printf(" halo shape:(%03d,%03d,%03d)\n", p->hu[1].shape[0], p->hu[1].shape[1], p->hu[1].shape[2]); fflush(stdout); printf(" Recv begin:(%03d,%03d,%03d)\n", p->hu[1].recv_b[0], p->hu[1].recv_b[1], p->hu[1].recv_b[2]); fflush(stdout); printf(" Recv end :(%03d,%03d,%03d)\n", p->hu[1].recv_e[0], p->hu[1].recv_e[1], p->hu[1].recv_e[2]); fflush(stdout); printf(" Send begin:(%03d,%03d,%03d)\n", p->hu[1].send_b[0], p->hu[1].send_b[1], p->hu[1].send_b[2]); fflush(stdout); printf(" Send end :(%03d,%03d,%03d)\n", p->hu[1].send_e[0], p->hu[1].send_e[1], p->hu[1].send_e[2]); fflush(stdout); printf("\n"); fflush(stdout); } MPI_Barrier(MPI_COMM_WORLD); } if(p->mpi_rank == 0) { printf("******************************************************\n"); fflush(stdout); printf("DEBUG u halo information END\n"); fflush(stdout); printf("******************************************************\n\n"); fflush(stdout); } } ierr = MPI_Type_create_subarray(3, p->ldomain_shape, p->hu[1].shape, p->hu[1].recv_b, MPI_ORDER_FORTRAN, MPI_real_t, &(p->hu[1].recv_hb)); CHKERR(ierr); ierr = MPI_Type_create_subarray(3, p->ldomain_shape, p->hu[1].shape, p->hu[1].recv_e , MPI_ORDER_FORTRAN, MPI_real_t, &(p->hu[1].recv_he)); CHKERR(ierr); ierr = MPI_Type_commit(&(p->hu[1].recv_hb)); CHKERR(ierr); ierr = MPI_Type_commit(&(p->hu[1].recv_he)); CHKERR(ierr); ierr = MPI_Type_create_subarray(3, p->ldomain_shape, p->hu[1].shape, p->hu[1].send_b, MPI_ORDER_FORTRAN, MPI_real_t, &(p->hu[1].send_hb)); CHKERR(ierr); ierr = MPI_Type_create_subarray(3, p->ldomain_shape, p->hu[1].shape, 
p->hu[1].send_e , MPI_ORDER_FORTRAN, MPI_real_t, &(p->hu[1].send_he)); CHKERR(ierr); ierr = MPI_Type_commit(&(p->hu[1].send_hb)); CHKERR(ierr); ierr = MPI_Type_commit(&(p->hu[1].send_he)); CHKERR(ierr); // Halo data types for the buffer v copyv(p->hu[1].shape , p->hv[1].shape, 3); copyv(p->hu[1].recv_b, p->hv[1].recv_b, 3); p->hv[1].recv_b[1]= 0; copyv(p->hu[1].recv_e, p->hv[1].recv_e, 3); p->hv[1].recv_e[1]= p->ldomain_shape[1] - p->stencil.r*(p->t_dim + 1); copyv(p->hu[1].send_b, p->hv[1].send_b, 3); p->hv[1].send_b[1]= 2*p->stencil.r; copyv(p->hu[1].send_e, p->hv[1].send_e, 3); p->hv[1].send_e[1]= p->ldomain_shape[1] - p->stencil.r*(p->t_dim +3); p->hv[1].size = p->hu[1].size; if(p->debug ==1){ MPI_Barrier(MPI_COMM_WORLD); if(p->mpi_rank == 0) { printf("\n******************************************************\n"); fflush(stdout); printf("DEBUG v halo information BEGIN\n"); fflush(stdout); printf("******************************************************\n"); fflush(stdout); sleep(1); } int j; for(j=0; j<p->mpi_size; j++){ if(j == p->mpi_rank){ printf("[%02d]:top(%02d,%02d,%02d)\n", p->mpi_rank, p->t.rank_coords[0], p->t.rank_coords[1], p->t.rank_coords[2]); fflush(stdout); printf(" halo shape:(%03d,%03d,%03d)\n", p->hv[1].shape[0], p->hv[1].shape[1], p->hv[1].shape[2]); fflush(stdout); printf(" Recv begin:(%03d,%03d,%03d)\n", p->hv[1].recv_b[0], p->hv[1].recv_b[1], p->hv[1].recv_b[2]); fflush(stdout); printf(" Recv end :(%03d,%03d,%03d)\n", p->hv[1].recv_e[0], p->hv[1].recv_e[1], p->hv[1].recv_e[2]); fflush(stdout); printf(" Send begin:(%03d,%03d,%03d)\n", p->hv[1].send_b[0], p->hv[1].send_b[1], p->hv[1].send_b[2]); fflush(stdout); printf(" Send end :(%03d,%03d,%03d)\n", p->hv[1].send_e[0], p->hv[1].send_e[1], p->hv[1].send_e[2]); fflush(stdout); printf("\n"); fflush(stdout); } MPI_Barrier(MPI_COMM_WORLD); } if(p->mpi_rank == 0) { printf("******************************************************\n"); fflush(stdout); printf("DEBUG v halo information END\n"); fflush(stdout); printf("******************************************************\n\n"); fflush(stdout); } } ierr = MPI_Type_create_subarray(3, p->ldomain_shape, p->hv[1].shape, p->hv[1].recv_b, MPI_ORDER_FORTRAN, MPI_real_t, &(p->hv[1].recv_hb)); CHKERR(ierr); ierr = MPI_Type_create_subarray(3, p->ldomain_shape, p->hv[1].shape, p->hv[1].recv_e , MPI_ORDER_FORTRAN, MPI_real_t, &(p->hv[1].recv_he)); CHKERR(ierr); ierr = MPI_Type_commit(&(p->hv[1].recv_hb)); CHKERR(ierr); ierr = MPI_Type_commit(&(p->hv[1].recv_he)); CHKERR(ierr); ierr = MPI_Type_create_subarray(3, p->ldomain_shape, p->hv[1].shape, p->hv[1].send_b, MPI_ORDER_FORTRAN, MPI_real_t, &(p->hv[1].send_hb)); CHKERR(ierr); ierr = MPI_Type_create_subarray(3, p->ldomain_shape, p->hv[1].shape, p->hv[1].send_e , MPI_ORDER_FORTRAN, MPI_real_t, &(p->hv[1].send_he)); CHKERR(ierr); ierr = MPI_Type_commit(&(p->hv[1].send_hb)); CHKERR(ierr); ierr = MPI_Type_commit(&(p->hv[1].send_he)); CHKERR(ierr); } void mpi_halo_init(Parameters *p) { switch(p->target_ts){ case 0: //standard methods case 1: standard_mpi_halo_init(p); break; case 2: // intra-diamond methods intra_diamond_mpi_halo_init(p); break; } } void standard_mpi_halo_finalize(Parameters *p){ int i; for(i=0; i<2; i++){ if (p->t.shape[i] > 1) { ierr = MPI_Type_free(&(p->h[i].recv_hb)); CHKERR(ierr); ierr = MPI_Type_free(&(p->h[i].recv_he)); CHKERR(ierr); ierr = MPI_Type_free(&(p->h[i].send_hb)); CHKERR(ierr); ierr = MPI_Type_free(&(p->h[i].send_he)); CHKERR(ierr); } } if (p->t.shape[2] > 1) ierr = MPI_Type_free(&(p->h[i].halo)); CHKERR(ierr); 
} void intra_diamond_mpi_halo_finalize(Parameters *p){ if(p->t.shape[1] > 1){ ierr = MPI_Type_free(&(p->hu[1].recv_hb)); CHKERR(ierr); ierr = MPI_Type_free(&(p->hu[1].recv_he)); CHKERR(ierr); ierr = MPI_Type_free(&(p->hu[1].send_hb)); CHKERR(ierr); ierr = MPI_Type_free(&(p->hu[1].send_he)); CHKERR(ierr); ierr = MPI_Type_free(&(p->hv[1].recv_hb)); CHKERR(ierr); ierr = MPI_Type_free(&(p->hv[1].recv_he)); CHKERR(ierr); ierr = MPI_Type_free(&(p->hv[1].send_hb)); CHKERR(ierr); ierr = MPI_Type_free(&(p->hv[1].send_he)); CHKERR(ierr); } } void mpi_halo_finalize(Parameters *p){ switch(p->target_ts){ case 0: //standard methods case 1: standard_mpi_halo_finalize(p); break; case 2: // intra-diamond methods intra_diamond_mpi_halo_finalize(p); break; } }
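/* Usage sketch for sub_array_copy (an illustration, not part of the original
 * file): real_t is assumed to be double here, whereas in the real build it
 * comes from data_structures.h. Sizes and offsets are {x,y,z} with x fastest,
 * matching the SRC_BUF/DST_BUF macros above. The call copies the 4x4x4 block
 * at offset (2,2,2) of an 8x8x8 buffer to offset (1,1,1) of a 6x6x6 buffer. */
#include <stdlib.h>
typedef double real_t;
void sub_array_copy(const real_t *src_buf, real_t *dst_buf, int *src_size,
                    int *dst_size, int *cpy_size, int *src_offs, int *dst_offs);

int sub_array_copy_demo(void)
{
    int src_size[3] = {8, 8, 8}, dst_size[3] = {6, 6, 6};
    int cpy_size[3] = {4, 4, 4};
    int src_offs[3] = {2, 2, 2}, dst_offs[3] = {1, 1, 1};
    real_t *src = calloc(8 * 8 * 8, sizeof(real_t));
    real_t *dst = calloc(6 * 6 * 6, sizeof(real_t));
    sub_array_copy(src, dst, src_size, dst_size, cpy_size, src_offs, dst_offs);
    free(src);
    free(dst);
    return 0;
}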
omp_loop1.c
/* vim: set ts=4 sw=4: */
/* Filename    : omp_loop1.c
 * Description : simple OpenMP model
 * Author      : SunYoung Kim <[email protected]>
 * Notes       :
 */
#include <stdio.h>

int main()
{
    int i;

#pragma omp parallel
#pragma omp for
    for (i = 0; i < 8; i++) {
        printf("[%d] Hello OpenMP\n", i);
    } /* implicit barrier */

    return 0;
}
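/* The separate parallel/for pair above is commonly written as the combined
 * construct; this minimal variant (a sketch) produces the same behavior,
 * dividing the eight iterations among the team in one directive: */
#include <stdio.h>

int main()
{
    #pragma omp parallel for
    for (int i = 0; i < 8; i++) {
        printf("[%d] Hello OpenMP\n", i);
    }
    return 0;
}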
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 24; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,12);t1++) { lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24)); ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-1,2)),ceild(24*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(12*t1+Ny+21,24)),floord(24*t2+Ny+20,24)),floord(24*t1-24*t2+Nz+Ny+19,24));t3++) { for (t4=max(max(max(0,ceild(3*t1-127,128)),ceild(24*t2-Nz-508,512)),ceild(24*t3-Ny-508,512));t4<=min(min(min(min(floord(Nt+Nx-4,512),floord(12*t1+Nx+21,512)),floord(24*t2+Nx+20,512)),floord(24*t3+Nx+20,512)),floord(24*t1-24*t2+Nz+Nx+19,512));t4++) { for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),24*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),24*t3+22),512*t4+510),24*t1-24*t2+Nz+21);t5++) { for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) { lbv=max(512*t4,t5+1); ubv=min(512*t4+511,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ 
(-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
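/* Untiled reference (a sketch, not part of the original file) for the CLooG
 * nest above: it performs the same order-1 variable-coefficient 7-point
 * update, writing plane (t+1)%2 from plane t%2, but serially and without the
 * diamond tiling, which makes the stencil itself readable and gives a result
 * to validate the tiled version against. */
void stencil_3d7pt_var_ref(double ****A, double ****coef,
                           int Nt, int Nz, int Ny, int Nx)
{
    for (int t = 0; t < Nt - 1; t++)
        for (int i = 1; i < Nz - 1; i++)
            for (int j = 1; j < Ny - 1; j++)
                for (int k = 1; k < Nx - 1; k++)
                    A[(t + 1) % 2][i][j][k] =
                          coef[0][i][j][k] * A[t % 2][i][j][k]
                        + coef[1][i][j][k] * A[t % 2][i - 1][j][k]
                        + coef[2][i][j][k] * A[t % 2][i][j - 1][k]
                        + coef[3][i][j][k] * A[t % 2][i][j][k - 1]
                        + coef[4][i][j][k] * A[t % 2][i + 1][j][k]
                        + coef[5][i][j][k] * A[t % 2][i][j + 1][k]
                        + coef[6][i][j][k] * A[t % 2][i][j][k + 1];
}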
piecewise_linear_model.h
// This file is part of PGM-index <https://github.com/gvinciguerra/PGM-index>. // Copyright (c) 2018 Giorgio Vinciguerra. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include <cmath> #include <limits> #include <vector> #include <stdexcept> #include <type_traits> #ifdef _OPENMP #include <omp.h> #else #warning Compilation with -fopenmp is recommended typedef int omp_int_t; inline omp_int_t omp_get_max_threads() { return 1; } #endif template<typename T> using LargeSigned = typename std::conditional_t<std::is_floating_point_v<T>, long double, std::conditional_t<(sizeof(T) < 8), int64_t, __int128>>; template<typename X, typename Y> class OptimalPiecewiseLinearModel { private: using SX = LargeSigned<X>; using SY = LargeSigned<Y>; struct Slope { SX dx{}; SY dy{}; bool operator<(const Slope &p) const { return dy * p.dx < dx * p.dy; } bool operator>(const Slope &p) const { return dy * p.dx > dx * p.dy; } bool operator==(const Slope &p) const { return dy * p.dx == dx * p.dy; } bool operator!=(const Slope &p) const { return dy * p.dx != dx * p.dy; } explicit operator long double() const { return dy / (long double) dx; } }; struct StoredPoint { X x; Y y; }; struct Point { X x{}; SY y{}; Slope operator-(const Point &p) const { return {SX(x) - p.x, y - p.y}; } }; template<bool Upper> struct Hull : private std::vector<StoredPoint> { const SY epsilon; explicit Hull(SY epsilon) : std::vector<StoredPoint>(), epsilon(Upper ? 
epsilon : -epsilon) {} Point operator[](size_t i) const { auto &p = std::vector<StoredPoint>::operator[](i); return {p.x, SY(p.y) + epsilon}; } void clear() { std::vector<StoredPoint>::clear(); } void resize(size_t n) { std::vector<StoredPoint>::resize(n); } void reserve(size_t n) { std::vector<StoredPoint>::reserve(n); } size_t size() const { return std::vector<StoredPoint>::size(); } void push(X x, Y y) { std::vector<StoredPoint>::emplace_back(StoredPoint{x, y}); }; }; const Y epsilon; Hull<false> lower; Hull<true> upper; X first_x = 0; X last_x = 0; size_t lower_start = 0; size_t upper_start = 0; size_t points_in_hull = 0; Point rectangle[4]; auto cross(const Point &O, const Point &A, const Point &B) const { auto OA = A - O; auto OB = B - O; return (OA.dx * OB.dy) - (OA.dy * OB.dx); } public: class CanonicalSegment; explicit OptimalPiecewiseLinearModel(Y epsilon) : epsilon(epsilon), lower(epsilon), upper(epsilon) { if (epsilon < 0) throw std::invalid_argument("epsilon cannot be negative"); upper.reserve(1u << 16); lower.reserve(1u << 16); } bool add_point(const X &x, const Y &y) { if (points_in_hull > 0 && x <= last_x) throw std::logic_error("Points must be increasing by x."); last_x = x; Point p1{x, SY(y) + epsilon}; Point p2{x, SY(y) - epsilon}; if (points_in_hull == 0) { first_x = x; rectangle[0] = p1; rectangle[1] = p2; upper.clear(); lower.clear(); upper.push(x, y); lower.push(x, y); upper_start = lower_start = 0; ++points_in_hull; return true; } if (points_in_hull == 1) { rectangle[2] = p2; rectangle[3] = p1; upper.push(x, y); lower.push(x, y); ++points_in_hull; return true; } auto slope1 = rectangle[2] - rectangle[0]; auto slope2 = rectangle[3] - rectangle[1]; bool outside_line1 = p1 - rectangle[2] < slope1; bool outside_line2 = p2 - rectangle[3] > slope2; if (outside_line1 || outside_line2) { points_in_hull = 0; return false; } if (p1 - rectangle[1] < slope2) { // Find extreme slope auto min = lower[lower_start] - p1; auto min_i = lower_start; for (auto i = lower_start + 1; i < lower.size(); i++) { auto val = (lower[i] - p1); if (val > min) break; min = val; min_i = i; } rectangle[1] = lower[min_i]; rectangle[3] = p1; lower_start = min_i; // Hull update auto end = upper.size(); for (; end >= upper_start + 2 && cross(upper[end - 2], upper[end - 1], p1) <= 0; --end) continue; upper.resize(end); upper.push(x, y); } if (p2 - rectangle[0] > slope1) { // Find extreme slope auto max = upper[upper_start] - p2; auto max_i = upper_start; for (auto i = upper_start + 1; i < upper.size(); i++) { auto val = (upper[i] - p2); if (val < max) break; max = val; max_i = i; } rectangle[0] = upper[max_i]; rectangle[2] = p2; upper_start = max_i; // Hull update auto end = lower.size(); for (; end >= lower_start + 2 && cross(lower[end - 2], lower[end - 1], p2) >= 0; --end) continue; lower.resize(end); lower.push(x, y); } ++points_in_hull; return true; } CanonicalSegment get_segment(size_t number) { if (points_in_hull == 1) return CanonicalSegment(rectangle[0], rectangle[1], first_x); return CanonicalSegment(rectangle, first_x, number); } void reset() { points_in_hull = 0; lower.clear(); upper.clear(); } }; template<typename X, typename Y> class OptimalPiecewiseLinearModel<X, Y>::CanonicalSegment { friend class OptimalPiecewiseLinearModel; Point rectangle[4]; X first; size_t number; CanonicalSegment(const Point &p0, const Point &p1, X first) : rectangle{p0, p1, p0, p1}, first(first), number(1) {}; CanonicalSegment(const Point (&rectangle)[4], X first, size_t number) : rectangle{rectangle[0], 
rectangle[1], rectangle[2], rectangle[3]}, first(first), number(number) {}; bool one_point() const { return rectangle[0].x == rectangle[2].x && rectangle[0].y == rectangle[2].y && rectangle[1].x == rectangle[3].x && rectangle[1].y == rectangle[3].y; } public: CanonicalSegment() = default; X get_first_x() const { return first; } size_t get_number() const{ return number; } std::pair<long double, long double> get_intersection() const { auto &p0 = rectangle[0]; auto &p1 = rectangle[1]; auto &p2 = rectangle[2]; auto &p3 = rectangle[3]; auto slope1 = p2 - p0; auto slope2 = p3 - p1; if (one_point() || slope1 == slope2) return {p0.x, p0.y}; auto p0p1 = p1 - p0; auto a = slope1.dx * slope2.dy - slope1.dy * slope2.dx; auto b = (p0p1.dx * slope2.dy - p0p1.dy * slope2.dx) / static_cast<long double>(a); auto i_x = p0.x + b * slope1.dx; auto i_y = p0.y + b * slope1.dy; return {i_x, i_y}; } std::pair<long double, long double> get_floating_point_segment(const X &origin) const { if (one_point()) return {0, (rectangle[0].y + rectangle[1].y) / 2}; auto[i_x, i_y] = get_intersection(); auto[min_slope, max_slope] = get_slope_range(); auto slope = (min_slope + max_slope) / 2.; auto intercept = i_y - (i_x - origin) * slope; return {slope, intercept}; } std::pair<long double, long double> get_slope_range() const { if (one_point()) return {0, 1}; auto min_slope = static_cast<long double>(rectangle[2] - rectangle[0]); auto max_slope = static_cast<long double>(rectangle[3] - rectangle[1]); return {min_slope, max_slope}; } }; template<typename Fin, typename Fout> size_t make_segmentation(size_t n, size_t epsilon, Fin in, Fout out) { if (n == 0) return 0; using X = typename std::invoke_result_t<Fin, size_t>::first_type; using Y = typename std::invoke_result_t<Fin, size_t>::second_type; size_t c = 0; size_t start = 0; auto p = in(0); OptimalPiecewiseLinearModel<X, Y> opt(epsilon); opt.add_point(p.first, p.second); for (size_t i = 1; i < n; ++i) { auto next_p = in(i); if (i != start && next_p.first == p.first) continue; p = next_p; if (!opt.add_point(p.first, p.second)) { out(opt.get_segment(i-start)); start = i; --i; ++c; } } out(opt.get_segment(n-start)); return ++c; } template<typename Fin, typename Fout> size_t make_segmentation_par(size_t n, size_t epsilon, Fin in, Fout out) { auto parallelism = std::min<size_t>(omp_get_max_threads(), 20); auto chunk_size = n / parallelism; auto c = 0ull; if (parallelism == 1 || n < 1ull << 15) return make_segmentation(n, epsilon, in, out); using X = typename std::invoke_result_t<Fin, size_t>::first_type; using Y = typename std::invoke_result_t<Fin, size_t>::second_type; using canonical_segment = typename OptimalPiecewiseLinearModel<X, Y>::CanonicalSegment; std::vector<std::vector<canonical_segment>> results(parallelism); #pragma omp parallel for reduction(+:c) num_threads(parallelism) for (auto i = 0ull; i < parallelism; ++i) { auto first = i * chunk_size; auto last = i == parallelism - 1 ? n : first + chunk_size; if (first > 0) { for (; first < last; ++first) if (in(first).first != in(first - 1).first) break; if (first == last) continue; } auto in_fun = [in, first](auto j) { return in(first + j); }; auto out_fun = [&results, i](auto cs) { results[i].emplace_back(cs); }; results[i].reserve(chunk_size / (epsilon > 0 ? 
epsilon * epsilon : 16)); c += make_segmentation(last - first, epsilon, in_fun, out_fun); } for (auto &v : results) for (auto &cs : v) out(cs); return c; } template<typename RandomIt> auto make_segmentation(RandomIt first, RandomIt last, size_t epsilon) { using key_type = typename RandomIt::value_type; using canonical_segment = typename OptimalPiecewiseLinearModel<key_type, size_t>::CanonicalSegment; using pair_type = typename std::pair<key_type, size_t>; size_t n = std::distance(first, last); std::vector<canonical_segment> out; out.reserve(epsilon > 0 ? n / (epsilon * epsilon) : n / 16); auto in_fun = [first](auto i) { return pair_type(first[i], i); }; auto out_fun = [&out](auto cs) { out.push_back(cs); }; make_segmentation(n, epsilon, in_fun, out_fun); return out; }
GB_unop__identity_bool_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_bool_fc64 // op(A') function: GB_unop_tran__identity_bool_fc64 // C type: bool // A type: GxB_FC64_t // cast: bool cij = (creal (aij) != 0) || (cimag (aij) != 0) // unaryop: cij = aij #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ bool z = (creal (aij) != 0) || (cimag (aij) != 0) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ bool z = (creal (aij) != 0) || (cimag (aij) != 0) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_bool_fc64 ( bool *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; bool z = (creal (aij) != 0) || (cimag (aij) != 0) ; Cx [p] = z ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_bool_fc64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
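/* Standalone illustration (a sketch, not part of the generated file) of the
 * typecast this kernel hard-codes. GxB_FC64_t corresponds to C99 double
 * complex in SuiteSparse:GraphBLAS; the names below are local to this sketch. */
#include <complex.h>
#include <stdbool.h>

static bool fc64_to_bool(double complex aij)
{
    /* bool cij = (creal (aij) != 0) || (cimag (aij) != 0), as in GB_CAST */
    return (creal(aij) != 0) || (cimag(aij) != 0);
}

/* GB_unop_apply__identity_bool_fc64 is essentially this loop plus the
 * OpenMP scheduling shown above: */
static void apply_identity_bool_fc64_sketch(bool *Cx, const double complex *Ax,
                                            long anz)
{
    for (long p = 0; p < anz; p++)
        Cx[p] = fc64_to_bool(Ax[p]);
}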
test.c
#include <stdlib.h> #include <stdio.h> #pragma omp requires unified_shared_memory #include "../utilities/check.h" #include "../utilities/utilities.h" #define N 100 #define TEST_BROKEN 0 /* disable tests regardless of value below */ #define TEST1 1 #define TEST2 1 #define TEST3 1 #define TEST4 1 #define TEST5 1 #define TEST6 1 #define TEST7 1 #define TEST8 1 #define TEST9 1 #define TEST10 1 #define TEST11 1 #define TEST12 1 #define TEST13 1 #define TEST14 1 #define TEST15 1 #define TEST16 1 #define TEST17 1 #define TEST18 1 #define TEST19 1 #define TEST20 1 #define TEST21 1 #define TEST22 1 #define TEST23 1 #define TEST24 1 #define TEST25 1 #define TEST26 1 #define TEST27 1 #define TEST28 1 #define TEST29 1 #define TEST30 1 #define TEST31 1 #define TEST32 1 #define TEST33 1 #define TEST34 1 #define TEST35 1 #define TEST36 1 #define TEST37 1 int main () { int a[N], b[N], c[N]; #if TEST1 check_offloading(); #endif long cpuExec = 0; int fail, ch; #pragma omp target map(tofrom: cpuExec) { cpuExec = omp_is_initial_device(); } #if TEST2 // Test: no clauses fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 2\n"); else printf("Succeeded 2\n"); #endif #if TEST3 // Test: private, firstprivate, lastprivate, linear fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } int q = -5; int p = -3; int r = 0; int l = 10; if (!cpuExec) { #pragma omp target teams num_teams(1) thread_limit(1024) map(tofrom: a, r) map(to: b,c) #pragma omp parallel #pragma omp for simd private(q) firstprivate(p) lastprivate(r) linear(l:2) for (int i = 0 ; i < N ; i++) { q = i + 5; p += i + 2; a[i] += p*b[i] + c[i]*q +l; r = i; } for (int i = 0 ; i < N ; i++) { int expected = (-1 + (-3 + i + 2)*i + (2*i)*(i + 5) + 10+(2*i)); if (a[i] != expected) { printf("Error at %d: device = %d, host = %d\n", i, a[i], expected); fail = 1; } } if (r != N-1) { printf("Error for lastprivate: device = %d, host = %d\n", r, N-1); fail = 1; } } if (fail) printf ("Failed 3\n"); else printf("Succeeded 3\n"); #endif #if TEST4 // Test: schedule static no chunk fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(static) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 4\n"); else printf("Succeeded 4\n"); #endif #if TEST5 // Test: schedule static no chunk, monotonic fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target teams num_teams(1) thread_limit(1024) map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(monotonic: static) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 5\n"); else printf("Succeeded 5\n"); #endif #if TEST6 // Test: schedule static no chunk, simd fail = 
0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(simd: static) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 6\n"); else printf("Succeeded 6\n"); #endif #if TEST7 // Test: schedule static chunk fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } ch = 10; if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(static, ch) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 7\n"); else printf("Succeeded 7\n"); #endif #if TEST8 // Test: schedule static chunk, monotonic fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } ch = 10; if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(monotonic: static, ch) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 8\n"); else printf("Succeeded 8\n"); #endif #if TEST9 // Test: schedule static chunk, simd fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } ch = 10; if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(simd: static, ch) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 9\n"); else printf("Succeeded 9\n"); #endif #if TEST10 // Test: schedule dyanmic no chunk fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(dynamic) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 10\n"); else printf("Succeeded 10\n"); #endif #if TEST11 && TEST_BROKEN // hangs // Test: schedule dyanmic no chunk, monotonic fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(monotonic: dynamic) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 11\n"); else printf("Succeeded 11\n"); #endif #if TEST12 && TEST_BROKEN // hangs // Test: schedule dyanmic no chunk, nonmonotonic fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(nonmonotonic: dynamic) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) 
{ printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 12\n"); else printf("Succeeded 12\n"); #endif #if TEST13 // Test: schedule dyanmic no chunk, simd fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(simd: dynamic) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 13\n"); else printf("Succeeded 13\n"); #endif #if TEST14 // Test: schedule dynamic chunk fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } ch = 10; if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(dynamic, ch) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 14\n"); else printf("Succeeded 14\n"); #endif #if TEST15 && TEST_BROKEN // Test: schedule dynamic chunk, monotonic fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } ch = 10; if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(monotonic: dynamic, ch) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 15\n"); else printf("Succeeded 15\n"); #endif #if TEST16 && TEST_BROKEN // Test: schedule dynamic chunk, nonmonotonic fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } ch = 10; if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(nonmonotonic: dynamic, ch) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 16\n"); else printf("Succeeded 16\n"); #endif #if TEST17 // Test: schedule dynamic chunk, simd fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } ch = 10; if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(simd: dynamic, ch) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 17\n"); else printf("Succeeded 17\n"); #endif #if TEST18 // Test: schedule guided no chunk fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(guided) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 18\n"); else printf("Succeeded 18\n"); #endif #if TEST19 && TEST_BROKEN // Test: schedule guided no chunk, monotonic fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } 
if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(monotonic: guided) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 19\n"); else printf("Succeeded 19\n"); #endif #if TEST20 && TEST_BROKEN // Test: schedule guided no chunk, nonmonotonic fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(nonmonotonic: guided) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 20\n"); else printf("Succeeded 20\n"); #endif #if TEST21 // Test: schedule guided no chunk, simd fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(simd: guided) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 21\n"); else printf("Succeeded 21\n"); #endif #if TEST22 // Test: schedule guided chunk fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } ch = 10; if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(guided, ch) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 22\n"); else printf("Succeeded 22\n"); #endif #if TEST23 && TEST_BROKEN // Test: schedule guided chunk, monotonic fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } ch = 10; if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(monotonic: guided, ch) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 23\n"); else printf("Succeeded 23\n"); #endif #if TEST24 && TEST_BROKEN // Test: schedule guided chunk, nonmonotonic fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } ch = 10; if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(nonmonotonic: guided, ch) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 24\n"); else printf("Succeeded 24\n"); #endif #if TEST25 // Test: schedule guided chunk, simd fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } ch = 10; if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(simd: guided, ch) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, 
host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 25\n"); else printf("Succeeded 25\n"); #endif #if TEST26 // Test: schedule auto fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(auto) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 26\n"); else printf("Succeeded 26\n"); #endif #if TEST27 // Test: schedule auto, monotonic fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(monotonic: auto) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 27\n"); else printf("Succeeded 27\n"); #endif #if TEST28 // Test: schedule auto, simd fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(simd: auto) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 28\n"); else printf("Succeeded 28\n"); #endif #if TEST29 // Test: schedule runtime fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(runtime) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 29\n"); else printf("Succeeded 29\n"); #endif #if TEST30 && TEST_BROKEN // Test: schedule runtime, monotonic fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(monotonic: runtime) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 30\n"); else printf("Succeeded 30\n"); #endif #if TEST31 // Test: schedule runtime, simd fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd schedule(simd: runtime) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 31\n"); else printf("Succeeded 31\n"); #endif #if TEST32 // Test: collapse fail = 0; int ma[N][N], mb[N][N], mc[N][N]; for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) { ma[i][j] = -1; mb[i][j] = i; mc[i][j] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: ma) map(to: mb,mc) #pragma omp parallel #pragma omp for simd collapse(2) for (int i = 
0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) ma[i][j] += mb[i][j] + mc[i][j]; for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) if (ma[i][j] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, ma[i][j], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 32\n"); else printf("Succeeded 32\n"); #endif #if TEST33 // Test: ordered fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd ordered for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 33\n"); else printf("Succeeded 33\n"); #endif #if TEST34 // Test: nowait fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd nowait for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 34\n"); else printf("Succeeded 34\n"); #endif #if TEST35 // Test: safelen fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd safelen(16) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 35\n"); else printf("Succeeded 35\n"); #endif #if TEST36 // Test: simdlen fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd simdlen(16) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 36\n"); else printf("Succeeded 36\n"); #endif #if TEST37 // Test: aligned fail = 0; for (int i = 0 ; i < N ; i++) { a[i] = -1; b[i] = i; c[i] = 2*i; } if (!cpuExec) { #pragma omp target map(tofrom: a) map(to: b,c) #pragma omp parallel #pragma omp for simd aligned(a,b,c:8) for (int i = 0 ; i < N ; i++) a[i] += b[i] + c[i]; for (int i = 0 ; i < N ; i++) if (a[i] != (-1 + i + 2*i)) { printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i)); fail = 1; } } if (fail) printf ("Failed 37\n"); else printf("Succeeded 37\n"); #endif return 0; }
vectors.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "timer.h"

int g_matrix_n, g_matrix_m, g_num_threads;

typedef struct matrix_t {
    int** values;
    int* row_size;
} matrix_t;

void print_matrix(matrix_t*);

/* Sum each row of the provided matrix, using row_size to determine how many
   items are in a row */
int* sum_rows(matrix_t* matrix) {
    int *sum_vector = malloc(sizeof(int) * g_matrix_n);
    if (!sum_vector) {
        fprintf(stderr, "Failed to malloc sum_vector\n");
        return NULL;
    }

    #pragma omp parallel for schedule(static) num_threads(g_num_threads)
    for (int i = 0; i < g_matrix_n; i++) {
        sum_vector[i] = 0;
    }

    #pragma omp parallel for schedule(static) num_threads(g_num_threads)
    for (int n = 0; n < g_matrix_n; n++) {
        for (int m = 0; m < matrix->row_size[n]; m++) {
            sum_vector[n] += matrix->values[n][m];
        }
    }
    return sum_vector;
}

/* Create a matrix, either triangular or non-triangular, and fill it with the
   appropriate values */
matrix_t* matrix(int is_triangular) {
    /* Malloc matrix struct */
    matrix_t* matrix = malloc(sizeof(matrix_t));
    if (!matrix) {
        fprintf(stderr, "Failed to malloc struct matrix\n");
        return NULL;
    }

    /* Malloc matrix rows (indexed 0..g_matrix_n-1) */
    matrix->values = malloc(sizeof(int*) * g_matrix_n);
    if (!matrix->values) {
        fprintf(stderr, "Failed to malloc matrix\n");
        return NULL;
    }

    /* Malloc matrix row sizes */
    matrix->row_size = malloc(sizeof(int) * g_matrix_n);
    if (!matrix->row_size) {
        fprintf(stderr, "Failed to malloc row size\n");
        return NULL;
    }

    /* Malloc matrix columns */
    #pragma omp parallel for schedule(static) num_threads(g_num_threads)
    for (int i = 0; i < g_matrix_n; i++) {
        matrix->row_size[i] = g_matrix_n - (i * is_triangular);
        matrix->values[i] = malloc(sizeof(int) * matrix->row_size[i]);
        if (!matrix->values[i]) {
            fprintf(stderr, "Failed to malloc matrix[%d]\n", i);
        }
    }

    /* Matrix[n][m]: n = vertical, m = horizontal; e.g. Matrix[2][3] is the
       2nd row (from the top), 3rd value. n is the vertical size, m the
       horizontal size. */
    #pragma omp parallel for schedule(static) num_threads(g_num_threads)
    for (int n = 0; n < g_matrix_n; n++) {
        for (int m = 0; m < matrix->row_size[n]; m++) {
            matrix->values[n][m] = n + (m + (g_matrix_m - matrix->row_size[n]));
        }
    }
    return matrix;
}

matrix_t* init_matrix() {
    /* Make a normal, non-triangular matrix */
    return matrix(0);
}

matrix_t* init_matrix_triangular() {
    /* Make a triangular matrix */
    return matrix(1);
}

/* Print a matrix, padding entries so the columns line up */
void print_matrix(matrix_t* matrix) {
    for (int n = 0; n < g_matrix_n; n++) {
        for (int m = 0; m < matrix->row_size[n]; m++) {
            printf("%d ", matrix->values[n][m]);
            if (matrix->values[n][m] < 10) {
                printf("    ");
            } else if (matrix->values[n][m] < 100) {
                printf("   ");
            } else if (matrix->values[n][m] < 1000) {
                printf("  ");
            } else if (matrix->values[n][m] < 10000) {
                printf(" ");
            }
        }
        printf("\n");
    }
    return;
}

int main(int argc, char* argv[]) {
    double time;
    int* sum_vector;

    if (argc < 3) {
        fprintf(stderr, "Usage: %s <matrix_size> <num_threads>\n", argv[0]);
        return EXIT_FAILURE;
    }

    /* We allow only square matrices */
    g_matrix_n = g_matrix_m = atoi(argv[1]);
    g_num_threads = atoi(argv[2]);

    matrix_t* matrix;
    matrix = init_matrix_triangular();
    if (!matrix) {
        return EXIT_FAILURE;
    }

    timer_start();
    sum_vector = sum_rows(matrix);
    if (!sum_vector) {
        return EXIT_FAILURE;
    }
    time = timer_end();

    printf("%d, %d, %lf\n", g_matrix_n, g_num_threads, time);
    /* print_matrix(matrix); */

    /* Free the matrix and the result vector */
    for (int i = 0; i < g_matrix_n; i++) {
        free(matrix->values[i]);
    }
    free(matrix->values);
    free(matrix->row_size);
    free(matrix);
    free(sum_vector);
    return EXIT_SUCCESS;
}
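/* Alternative to sum_rows above (a sketch, not part of the original file):
 * with a triangular matrix, row n holds g_matrix_n - n elements, so the block
 * partition of schedule(static) gives the first threads far more work than
 * the last. A chunk size of 1 interleaves long and short rows across the
 * team; the results are identical. */
int* sum_rows_balanced(matrix_t* matrix) {
    int *sum_vector = malloc(sizeof(int) * g_matrix_n);
    if (!sum_vector) {
        fprintf(stderr, "Failed to malloc sum_vector\n");
        return NULL;
    }
    #pragma omp parallel for schedule(static, 1) num_threads(g_num_threads)
    for (int n = 0; n < g_matrix_n; n++) {
        int sum = 0;
        for (int m = 0; m < matrix->row_size[n]; m++) {
            sum += matrix->values[n][m];
        }
        sum_vector[n] = sum;
    }
    return sum_vector;
}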
3d25pt.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-2, 3D 25 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"

#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif

/* Subtract the `struct timeval' values X and Y, storing the result in RESULT.
 * Return 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }
  /* Compute the time remaining to wait. tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* all four sizes are required; they were read from argv without any
   * fallback before, leaving them uninitialized when arguments were missing */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+8;
  Ny = atoi(argv[2])+8;
  Nz = atoi(argv[3])+8;
  Nt = atoi(argv[4]);

  double ****A = (double ****) malloc(sizeof(double***)*2);
  /* roc2 was malloc'd twice before; the first, wrongly sized allocation leaked */
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for (i = 0; i < Nz; i++) {
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for (j = 0; j < Ny; j++) {
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 8;
  tile_size[3] = 128;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
  #pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
    #pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  const double coef0 = -0.28472;
  const double coef1 =  0.16000;
  const double coef2 = -0.02000;
  const double coef3 =  0.00254;
  const double coef4 = -0.00018;

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2

/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=Nt-1;t1++) { lbp=ceild(t1+1,2); ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1,2),ceild(8*t2-Nz+5,8));t3<=min(floord(4*Nt+Ny-9,8),floord(4*t1+Ny-1,8));t3++) { for (t4=max(max(ceild(t1-30,32),ceild(8*t2-Nz-115,128)),ceild(8*t3-Ny-115,128));t4<=min(min(floord(4*Nt+Nx-9,128),floord(4*t1+Nx-1,128)),floord(8*t3+Nx-5,128));t4++) { for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),t1);t5<=min(min(min(2*t3,Nt-1),t1+1),32*t4+30);t5++) { for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(128*t4,4*t5+4); ubv=min(128*t4+127,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 
2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
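/*
 * For reference, an untiled version of the same radius-4 star update that
 * the CLooG-generated nest above computes.  A sketch, assuming the
 * A[2][Nz][Ny][Nx] double buffer and roc2 array allocated in main(), with
 * the same coef0..coef4; the function name and the plain omp-parallel
 * schedule are illustrative, not part of the generated benchmark.
 */
void stencil_25pt_naive(int Nt, int Nz, int Ny, int Nx,
                        double ****A, double ***roc2,
                        double coef0, double coef1, double coef2,
                        double coef3, double coef4)
{
  for (int t = 0; t < Nt; t++) {
    #pragma omp parallel for
    for (int i = 4; i < Nz - 4; i++) {
      for (int j = 4; j < Ny - 4; j++) {
        for (int k = 4; k < Nx - 4; k++) {
          /* second-order-in-time wave update over a 25-point star */
          A[(t+1)%2][i][j][k] = 2.0 * A[t%2][i][j][k] - A[(t+1)%2][i][j][k]
            + roc2[i][j][k] * (
                coef0 *  A[t%2][i][j][k]
              + coef1 * (A[t%2][i-1][j][k] + A[t%2][i+1][j][k]
                       + A[t%2][i][j-1][k] + A[t%2][i][j+1][k]
                       + A[t%2][i][j][k-1] + A[t%2][i][j][k+1])
              + coef2 * (A[t%2][i-2][j][k] + A[t%2][i+2][j][k]
                       + A[t%2][i][j-2][k] + A[t%2][i][j+2][k]
                       + A[t%2][i][j][k-2] + A[t%2][i][j][k+2])
              + coef3 * (A[t%2][i-3][j][k] + A[t%2][i+3][j][k]
                       + A[t%2][i][j-3][k] + A[t%2][i][j+3][k]
                       + A[t%2][i][j][k-3] + A[t%2][i][j][k+3])
              + coef4 * (A[t%2][i-4][j][k] + A[t%2][i+4][j][k]
                       + A[t%2][i][j-4][k] + A[t%2][i][j+4][k]
                       + A[t%2][i][j][k-4] + A[t%2][i][j][k+4]));
        }
      }
    }
  }
}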
vednnConvolutionBackwardFilter.c
#include "vednnConvolutionBackwardFilter.h"
#include "vednn-def.h"
#include <stdint.h>
#include <stdio.h>

static inline vednnError_t
vednnConvolutionBackwardFilter_wrapper(
    vednnConvBackwardFilter_t pFunc,
    VEDNN_CONVBKF_ARGS )
{
#ifndef VEDNN_USE_OPENMP
  return pFunc( VEDNN_CONVBKF_ARGS_LIST );
#else // VEDNN_USE_OPENMP
#ifndef VEDNN_OMP_GROUP_PARALLEL
  if ( __vednn_omp_num_threads == 1 ) {
    int64_t gOutChannel      = pParamGradOut->channel;
    int64_t group            = pParamConv->group;
    int64_t gOutChannelGroup = gOutChannel / group;
    return pFunc(VEDNN_CONVBKF_ARGS_LIST, 0, gOutChannelGroup);
  }
  else {
    vednnError_t rc = VEDNN_SUCCESS ;
#pragma omp parallel reduction(|:rc)
    {
      int64_t nthreads = omp_get_num_threads() ;
      int64_t threadid = omp_get_thread_num() ;

      int64_t gOutChannel      = pParamGradOut->channel;
      int64_t group            = pParamConv->group;
      int64_t gOutChannelGroup = gOutChannel / group;

      /* split the output channels of a group evenly over the threads;
       * the first `remain` threads take one extra channel */
      int64_t nOChannel     = gOutChannelGroup / nthreads ;
      int64_t remain        = gOutChannelGroup % nthreads ;
      int64_t beginOChannel = nOChannel * threadid + ( threadid < remain ? threadid : remain ) ;
      int64_t myOChannel    = nOChannel + ( threadid < remain ? 1 : 0 ) ;

      if( myOChannel == 0 ) {
        rc |= VEDNN_SUCCESS ;
      }
      else {
        rc |= pFunc(VEDNN_CONVBKF_ARGS_LIST, beginOChannel, myOChannel );
      }
    }
    return rc ;
  }
#else // VEDNN_OMP_GROUP_PARALLEL
  if ( __vednn_omp_num_threads == 1 ) {
    int64_t gOutChannel      = pParamGradOut->channel;
    int64_t group            = pParamConv->group;
    int64_t gOutChannelGroup = gOutChannel / group;
    return pFunc(VEDNN_CONVBKF_ARGS_LIST, 0, gOutChannelGroup, 0, group);
  }
  else {
    vednnError_t rc = VEDNN_SUCCESS ;
#pragma omp parallel reduction(|:rc)
    {
      int64_t nthreads = omp_get_num_threads() ;
      int64_t threadid = omp_get_thread_num() ;

      int64_t gOutChannel      = pParamGradOut->channel;
      int64_t group            = pParamConv->group;
      int64_t gOutChannelGroup = gOutChannel / group;

      if( gOutChannelGroup >= group ) {
        /* more channels per group than groups: parallelize over channels */
        int64_t nOChannel     = gOutChannelGroup / nthreads ;
        int64_t remain        = gOutChannelGroup % nthreads ;
        int64_t beginOChannel = nOChannel * threadid + ( threadid < remain ? threadid : remain ) ;
        int64_t myOChannel    = nOChannel + ( threadid < remain ? 1 : 0 ) ;

        if( myOChannel == 0 ) {
          rc |= VEDNN_SUCCESS ;
        }
        else {
          rc |= pFunc(VEDNN_CONVBKF_ARGS_LIST, beginOChannel, myOChannel, 0, group);
        }
      }
      else {
        /* otherwise parallelize over the groups themselves */
        int64_t nGroup     = group / nthreads ;
        int64_t remain     = group % nthreads ;
        int64_t beginGroup = nGroup * threadid + ( threadid < remain ? threadid : remain ) ;
        int64_t myGroup    = nGroup + ( threadid < remain ? 1 : 0 ) ;

        if( myGroup == 0 ) {
          rc |= VEDNN_SUCCESS ;
        }
        else {
          rc |= pFunc(VEDNN_CONVBKF_ARGS_LIST, 0, gOutChannelGroup, beginGroup, myGroup);
        }
      }
    }
    return rc ;
  }
#endif // VEDNN_OMP_GROUP_PARALLEL
#endif // VEDNN_USE_OPENMP
}

/* ----------------------------------------------------------------------- */
vednnError_t vednnConvolutionBackwardFilter(
    const vednnTensorParam_t      *pParamIn,         const void *pDataIn,
    const vednnTensorParam_t      *pParamGradOut,    const void *pDataGradOut,
    const vednnFilterParam_t      *pParamGradKernel, void *pDataGradKernel,
    const vednnConvolutionParam_t *pParamConv,
    vednnConvolutionAlgorithm_t   algo )
{
  switch( pParamGradKernel->layout ) {
  case VEDNN_FILTER_LAYOUT_NCHW :
    break ;
  case VEDNN_FILTER_LAYOUT_HWCN :
    if( pParamConv->group > 1 ) {
      fprintf(stderr, "[VEDNN ERROR] VEDNN does not support grouped convolution with filter_hwcn\n") ;
      return VEDNN_ERROR_INVALID_PARAM ;
    }
    break ;
  default :
    fprintf(stderr, "[VEDNN ERROR] Unknown Filter Layout %d\n", pParamGradKernel->layout) ;
    return VEDNN_ERROR_INVALID_PARAM ;
  }

  if (algo == VEDNN_CONV_ALGORITHM_DIRECT) {
#define OMPWRAP( IMPL ) WRAP_RET(vednnConvolutionBackwardFilter_direct_##IMPL, \
    vednnConvolutionBackwardFilter_wrapper, VEDNN_CONVBKF_ARGS_LIST )
#define DIL(N)  (pParamConv->dilationHeight == (N) && pParamConv->dilationWidth == (N))
#define PAD(N)  (pParamConv->padHeight == (N) && pParamConv->padWidth == (N))
#define STR(N)  (pParamConv->strideHeight == (N) && pParamConv->strideWidth == (N))
#define KER(N)  (pParamGradKernel->width == (N) && pParamGradKernel->height == (N))
#define IWU(N)  (pParamIn->width <= (N))
#define OWU(N)  (pParamGradOut->width <= (N))
#define OHWU(N) (pParamGradOut->width * pParamGradOut->height <= (N))
    if ( pParamGradOut->height * pParamGradOut->width <= 16
         || ( pParamGradOut->height * pParamGradOut->width < 64
              && pParamGradOut->height * pParamGradOut->width < pParamIn->channel / pParamConv->group ) )
    {
      OMPWRAP(vecC);
    }
    else if (STR(1) && DIL(1)
             && pParamIn->height == pParamGradOut->height
             && pParamIn->width == pParamGradOut->width ) // d1s1pS
    {
      if (KER(3)) {
        if (OHWU(256))     OMPWRAP(dil1_str1_padsame_ker3_ohwU256);
        else if (OWU(128)) OMPWRAP(dil1_str1_padsame_ker3_owU128);
        else               OMPWRAP(dil1_str1_padsame_ker3);
      }
      else if (KER(1)) {
        OMPWRAP(dil1_str1_padsame_ker1);
      }
      else if (KER(5)) {
        if (OWU(128)) OMPWRAP(dil1_str1_padsame_ker5_owU128);
        else          OMPWRAP(dil1_str1_padsame_ker5);
      }
      else if (KER(2)) {
        if (OWU(128)) OMPWRAP(dil1_str1_padsame_ker2_owU128);
        else          OMPWRAP(dil1_str1_padsame_ker2);
      }
      OMPWRAP(dil1_str1_padsame);
    }
    else if (DIL(1) && PAD(0)
             && pParamGradOut->height == (pParamIn->height - pParamGradKernel->height) / pParamConv->strideHeight + 1
             && pParamGradOut->width == (pParamIn->width - pParamGradKernel->width) / pParamConv->strideWidth + 1 )
    { // d1p0 and oh,ow correct for whatever stride
      if (KER(3) && STR(1) && IWU(256)
          && (pParamIn->width & 0x01) == 0 && (pParamGradOut->width & 0x01) == 0
          && (((uint64_t)pDataIn) & 0x07) == 0 && (((uint64_t)pDataGradOut) & 0x07) == 0 ) {
        OMPWRAP(dil1_str1_pad0_ker3_ow2X_iw2XU256_igoaligned);
      }
      else if (KER(3) && OWU(128)) {
        if (STR(1)) OMPWRAP(dil1_str1_pad0_ker3_owU128);
        else        OMPWRAP(dil1_pad0_ker3_owU128);
      }
      else if (KER(1)) {
        if (OHWU(64))       OMPWRAP(dil1_pad0_ker1_ohwU64);
        else if (OHWU(128)) OMPWRAP(dil1_pad0_ker1_ohwU128);
        else if (OWU(32))   OMPWRAP(dil1_pad0_ker1_owU32);
        else                OMPWRAP(dil1_pad0_ker1);
      }
      else if (KER(4) && OWU(128) && STR(1))
        OMPWRAP(dil1_str1_pad0_ker4_owU128);
      else if (OWU(32))
        OMPWRAP(dil1_pad0_owU32);
      OMPWRAP(dil1_pad0);
    }
    else if (OWU(128)) {
      if (KER(3)) {
        if (STR(2) && DIL(1) && PAD(1)) OMPWRAP(dil1_str2_pad1_ker3_owU128) ;
        else                            OMPWRAP(ker3_owU128) ;
      }
      else if (KER(4) && STR(2) && DIL(1) && PAD(1))
        OMPWRAP(dil1_str2_pad1_ker4_owU128) ;
      OMPWRAP(owU128);
    }
    OMPWRAP(default);
  }
  else {
    return VEDNN_ERROR_INVALID_PARAM ;
  }
#undef OHWU
#undef OWU
#undef IWU
#undef KER
#undef STR
#undef PAD
#undef DIL
#undef OMPWRAP
}
// vim: et sw=2 ts=2
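/*
 * The wrapper above splits `total` work items over `nthreads` so that the
 * first (total % nthreads) threads receive one extra item.  The arithmetic
 * in isolation, as a small checkable sketch (the function names are
 * illustrative, not part of vednn):
 */
#include <assert.h>
#include <stdint.h>

static void partition(int64_t total, int64_t nthreads, int64_t threadid,
                      int64_t *begin, int64_t *count)
{
  int64_t base   = total / nthreads;
  int64_t remain = total % nthreads;
  *begin = base * threadid + (threadid < remain ? threadid : remain);
  *count = base + (threadid < remain ? 1 : 0);
}

/* every item is covered exactly once, in consecutive thread-local ranges */
static void partition_check(int64_t total, int64_t nthreads)
{
  int64_t next = 0;
  for (int64_t t = 0; t < nthreads; t++) {
    int64_t begin, count;
    partition(total, nthreads, t, &begin, &count);
    assert(begin == next);
    next = begin + count;
  }
  assert(next == total);
}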
opencl_rar_fmt_plug.c
/* RAR 3.x cracker patch for JtR. Hacked together during * April of 2011 by Dhiru Kholia <dhiru.kholia at gmail.com> for GSoC. * magnum added -p mode support, using code based on libclamav * and OMP, AES-NI and OpenCL support. * * This software is Copyright (c) 2011, Dhiru Kholia <dhiru.kholia at gmail.com> * and Copyright (c) 2012, magnum and it is hereby released to the general public * under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * This code is based on the work of Alexander L. Roshal (C) * * The unRAR sources may be used in any software to handle RAR * archives without limitations free of charge, but cannot be used * to re-create the RAR compression algorithm, which is proprietary. * Distribution of modified unRAR sources in separate form or as a * part of other software is permitted, provided that it is clearly * stated in the documentation and source comments that the code may * not be used to develop a RAR (WinRAR) compatible archiver. * * Huge thanks to Marc Bevand <m.bevand (at) gmail.com> for releasing unrarhp * (http://www.zorinaq.com/unrarhp/) and documenting the RAR encryption scheme. * This patch is made possible by unrarhp's documentation. * * http://anrieff.net/ucbench/technical_qna.html is another useful reference * for RAR encryption scheme. * * Thanks also to Pavel Semjanov for crucial help with Huffman table checks. * * For type = 0 for files encrypted with "rar -hp ..." option * archive_name:$RAR3$*type*hex(salt)*hex(partial-file-contents):type::::archive_name * * For type = 1 for files encrypted with "rar -p ..." option * archive_name:$RAR3$*type*hex(salt)*hex(crc)*PACK_SIZE*UNP_SIZE*archive_name*offset-for-ciphertext*method:type::file_name * * or (inlined binary) * * archive_name:$RAR3$*type*hex(salt)*hex(crc)*PACK_SIZE*UNP_SIZE*1*hex(full encrypted file)*method:type::file_name * */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_ocl_rar; #elif FMT_REGISTERS_H john_register_one(&fmt_ocl_rar); #else #define STEP 0 #define SEED 256 #include <string.h> #include <assert.h> #include <errno.h> #include <openssl/engine.h> #include <openssl/evp.h> #include <openssl/ssl.h> #include "arch.h" #include "sha.h" #if AC_BUILT #include "autoconfig.h" #endif #if _MSC_VER || __MINGW32__ || __MINGW64__ || __CYGWIN__ || HAVE_WINDOWS_H #include "win32_memmap.h" #ifndef __CYGWIN__ #include "mmap-windows.c" #elif defined HAVE_MMAP #include <sys/mman.h> #endif #elif defined(HAVE_MMAP) #include <sys/mman.h> #endif #ifdef _OPENMP #include <omp.h> #include <pthread.h> #define OMP_SCALE 32 static pthread_mutex_t *lockarray; #endif #include "crc32.h" #include "misc.h" #include "common.h" #include "formats.h" #include "dyna_salt.h" #include "memory.h" #include "params.h" #include "options.h" #include "unicode.h" #include "johnswap.h" #include "unrar.h" #include "common-opencl.h" #include "config.h" #include "jumbo.h" #define FORMAT_LABEL "rar-opencl" #define FORMAT_NAME "RAR3" #define ALGORITHM_NAME "SHA1 OpenCL AES" #ifdef DEBUG #define BENCHMARK_COMMENT " (length 1-16)" #else #define BENCHMARK_COMMENT " (length 4)" #endif #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 22 /* Max. 
currently supported is 22 */ #define UNICODE_LENGTH (2 * PLAINTEXT_LENGTH) #define BINARY_SIZE 0 #define BINARY_ALIGN MEM_ALIGN_NONE #define SALT_SIZE sizeof(rarfile*) #define SALT_ALIGN sizeof(rarfile*) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define OCL_CONFIG "rar" static const char * warn[] = { "key xfer: " , ", len xfer: " , ", init: " , ", loop: " , ", final: ", ", key xfer: ", ", iv xfer: " }; static int split_events[] = { 3, -1, -1 }; static int crypt_all(int *pcount, struct db_salt *_salt); static int crypt_all_benchmark(int *pcount, struct db_salt *_salt); //This file contains auto-tuning routine(s). Has to be included after formats definitions. #include "opencl-autotune.h" #include "memdbg.h" #define ITERATIONS 0x40000 #define HASH_LOOPS 0x04000 // Fixed, do not change #define MIN(a, b) (((a) > (b)) ? (b) : (a)) #define MAX(a, b) (((a) > (b)) ? (a) : (b)) static int omp_t = 1; static unsigned char *saved_salt; static unsigned char *saved_key; static int new_keys; static int (*cracked); static unpack_data_t (*unpack_data); static unsigned int *saved_len; static unsigned char *aes_key; static unsigned char *aes_iv; typedef struct { dyna_salt dsalt; /* must be first. allows dyna_salt to work */ /* place all items we are NOT going to use for salt comparison, first */ unsigned char *blob; /* data from this point on, is part of the salt for compare reasons */ unsigned char salt[8]; int type; /* 0 = -hp, 1 = -p */ /* for rar -p mode only: */ union { unsigned int w; unsigned char c[4]; } crc; unsigned long long pack_size; unsigned long long unp_size; int method; unsigned char blob_hash[20]; // holds an sha1, but could be 'any' hash. // raw_data should be word aligned, and 'ok' unsigned char raw_data[1]; } rarfile; static rarfile *cur_file; /* Determines when to use CPU instead (eg. 
Single mode, few keys in a call) */ #define CPU_GPU_RATIO 32 static cl_mem cl_saved_key, cl_saved_len, cl_salt, cl_OutputBuf, cl_round, cl_aes_key, cl_aes_iv; static cl_mem pinned_saved_key, pinned_saved_len, pinned_salt, pinned_aes_key, pinned_aes_iv; static cl_kernel RarInit, RarFinal; /* cRARk use 4-char passwords for CPU benchmark */ static struct fmt_tests cpu_tests[] = { {"$RAR3$*0*b109105f5fe0b899*d4f96690b1a8fe1f120b0290a85a2121", "test"}, {"$RAR3$*0*42ff7e92f24fb2f8*9d8516c8c847f1b941a0feef064aaf0d", "1234"}, {"$RAR3$*0*56ce6de6ddee17fb*4c957e533e00b0e18dfad6accc490ad9", "john"}, /* -p mode tests, -m0 and -m3 (in that order) */ {"$RAR3$*1*c47c5bef0bbd1e98*965f1453*48*47*1*c5e987f81d316d9dcfdb6a1b27105ce63fca2c594da5aa2f6fdf2f65f50f0d66314f8a09da875ae19d6c15636b65c815*30", "test"}, {"$RAR3$*1*b4eee1a48dc95d12*965f1453*64*47*1*0fe529478798c0960dd88a38a05451f9559e15f0cf20b4cac58260b0e5b56699d5871bdcc35bee099cc131eb35b9a116adaedf5ecc26b1c09cadf5185b3092e6*33", "test"}, #ifdef DEBUG /* Various lengths, these should be in self-test but not benchmark */ /* from CMIYC 2012 */ {"$RAR3$*1*0f263dd52eead558*834015cd*384*693*1*e28e9648f51b59e32f573b302f0e94aadf1050678b90c38dd4e750c7dd281d439ab4cccec5f1bd1ac40b6a1ead60c75625666307171e0fe2639d2397d5f68b97a2a1f733289eac0038b52ec6c3593ff07298fce09118c255b2747a02c2fa3175ab81166ebff2f1f104b9f6284a66f598764bd01f093562b5eeb9471d977bf3d33901acfd9643afe460e1d10b90e0e9bc8b77dc9ac40d40c2d211df9b0ecbcaea72c9d8f15859d59b3c85149b5bb5f56f0218cbbd9f28790777c39e3e499bc207289727afb2b2e02541b726e9ac028f4f05a4d7930efbff97d1ffd786c4a195bbed74997469802159f3b0ae05b703238da264087b6c2729d9023f67c42c5cbe40b6c67eebbfc4658dfb99bfcb523f62133113735e862c1430adf59c837305446e8e34fac00620b99f574fabeb2cd34dc72752014cbf4bd64d35f17cef6d40747c81b12d8c0cd4472089889a53f4d810b212fb314bf58c3dd36796de0feeefaf26be20c6a2fd00517152c58d0b1a95775ef6a1374c608f55f416b78b8c81761f1d*33:1::to-submit-challenges.txt", "wachtwoord"}, {"$RAR3$*1*9759543e04fe3a22*834015cd*384*693*1*cdd2e2478e5153a581c47a201490f5d9b69e01584ae488a2a40203da9ba8c5271ed8edc8f91a7bd262bb5e5de07ecbe9e2003d054a314d16caf2ea1de9f54303abdee1ed044396f7e29c40c38e638f626442efd9f511b4743758cd4a6025c5af81d1252475964937d80bfd50d10c171e7e4041a66c02a74b2b451ae83b6807990fb0652a8cdab530c5a0c497575a6e6cbe2db2035217fe849d2e0b8693b70f3f97b757229b4e89c8273197602c23cc04ff5f24abf3d3c7eb686fc3eddce1bfe710cc0b6e8bd012928127da38c38dd8f056095982afacb4578f6280d51c6739739e033674a9413ca88053f8264c5137d4ac018125c041a3489daaf175ef75e9282d245b92948c1bbcf1c5f25b7028f6d207d87fe9598c2c7ccd1553e842a91ab8ca9261a51b14601a756070388d08039466dfa36f0b4c7ea7dd9ff25c9d98687203c58f9ec8757cafe4d2ed785d5a9e6d5ea838e4cc246a9e6d3c30979dcce56b380b05f9103e6443b35357550b50229c47f845a93a48602790096828d9d6bef0*33:1::to-submit-challenges.txt", "Sleepingbaby210"}, 
{"$RAR3$*1*79e17c26407a7d52*834015cd*384*693*1*6844a189e732e9390b5a958b623589d5423fa432d756fd00940ac31e245214983507a035d4e0ee09469491551759a66c12150fe6c5d05f334fb0d8302a96d48ef4da04954222e0705507aaa84f8b137f284dbec344eee9cea6b2c4f63540c64df3ee8be3013466d238c5999e9a98eb6375ec5462869bba43401ec95077d0c593352339902c24a3324178e08fe694d11bfec646c652ffeafbdda929052c370ffd89168c83194fedf7c50fc7d9a1fbe64332063d267a181eb07b5d70a5854067db9b66c12703fde62728d3680cf3fdb9933a0f02bfc94f3a682ad5e7c428d7ed44d5ff554a8a445dea28b81e3a2631870e17f3f3c0c0204136802c0701590cc3e4c0ccd9f15e8be245ce9caa6969fab9e8443ac9ad9e73e7446811aee971808350c38c16c0d3372c7f44174666d770e3dd321e8b08fb2dc5e8a6a5b2a1720bad66e54abc194faabc5f24225dd8fee137ba5d4c2ed48c6462618e6333300a5b8dfc75c65608925e786eb0988f7b3a5ab106a55168d1001adc47ce95bba77b38c35b*33:1::to-submit-challenges.txt", "P-i-r-A-T-E"}, {"$RAR3$*1*e1df79fd9ee1dadf*771a163b*64*39*1*edc483d67b94ab22a0a9b8375a461e06fa1108fa72970e16d962092c311970d26eb92a033a42f53027bdc0bb47231a12ed968c8d530a9486a90cbbc00040569b*33", "333"}, {"$RAR3$*1*c83c00534d4af2db*771a163b*64*39*1*05244526d6b32cb9c524a15c79d19bba685f7fc3007a9171c65fc826481f2dce70be6148f2c3497f0d549aa4e864f73d4e4f697fdb66ff528ed1503d9712a414*33", "11eleven111"}, {"$RAR3$*0*c203c4d80a8a09dc*49bbecccc08b5d893f308bce7ad36c0f", "sator"}, {"$RAR3$*0*672fca155cb74ac3*8d534cd5f47a58f6493012cf76d2a68b", "arepo"}, {"$RAR3$*0*c203c4d80a8a09dc*c3055efe7ca6587127fd541a5b88e0e4", "tenet"}, {"$RAR3$*0*672fca155cb74ac3*c760267628f94060cca57be5896003c8", "opera"}, {"$RAR3$*0*c203c4d80a8a09dc*1f406154556d4c895a8be207fd2b5d0c", "rotas"}, {"$RAR3$*0*345f5f573a077ad7*638e388817cc7851e313406fd77730b9", "Boustrophedon"}, {"$RAR3$*0*c9dea41b149b53b4*fcbdb66122d8ebdb32532c22ca7ab9ec", "password"}, {"$RAR3$*0*7ce241baa2bd521b*f2b26d76424efa351c728b321671d074", "@"}, {"$RAR3$*0*ea0ea55ce549c8ab*cf89099c620fcc244bdcbae55a616e76", "ow"}, {"$RAR3$*0*ea0ea55ce549c8ab*6a35a76b1ce9ddc4229b9166d60dc113", "aes"}, {"$RAR3$*0*ea0ea55ce549c8ab*1830771da109f53e2d6e626be16c2666", "sha1"}, {"$RAR3$*0*7e52d3eba9bad316*ee8e1edd435cfa9b8ab861d958a4d588", "fiver"}, {"$RAR3$*0*7e52d3eba9bad316*01987735ab0be7b6538470bd5f5fbf80", "magnum"}, {"$RAR3$*0*7e52d3eba9bad316*f2fe986ed266c6617c48d04a429cf2e3", "7777777"}, {"$RAR3$*0*7e52d3eba9bad316*f0ad6e7fdff9f82fff2aa990105fde21", "password"}, {"$RAR3$*0*7ce241baa2bd521b*3eb0017fa8843017952c53a3ac8332b6", "nine9nine"}, {"$RAR3$*0*7ce241baa2bd521b*ccbf0c3f8e059274606f33cc388b8a2f", "10tenten10"}, {"$RAR3$*0*5fa43f823a60da63*af2630863e12046e42c4501c915636c9", "eleven11111"}, {"$RAR3$*0*5fa43f823a60da63*88c0840d0bd98844173d35f867558ec2", "twelve121212"}, {"$RAR3$*0*4768100a172fa2b6*48edcb5283ee2e4f0e8edb25d0d85eaa", "subconsciousness"}, #endif {NULL} }; /* cRARk use 5-char passwords for GPU benchmark */ static struct fmt_tests gpu_tests[] = { {"$RAR3$*0*c203c4d80a8a09dc*49bbecccc08b5d893f308bce7ad36c0f", "sator"}, {"$RAR3$*0*672fca155cb74ac3*8d534cd5f47a58f6493012cf76d2a68b", "arepo"}, {"$RAR3$*0*c203c4d80a8a09dc*c3055efe7ca6587127fd541a5b88e0e4", "tenet"}, {"$RAR3$*0*672fca155cb74ac3*c760267628f94060cca57be5896003c8", "opera"}, {"$RAR3$*0*c203c4d80a8a09dc*1f406154556d4c895a8be207fd2b5d0c", "rotas"}, /* -p mode tests, -m0 and -m3 (in that order) */ {"$RAR3$*1*c47c5bef0bbd1e98*965f1453*48*47*1*c5e987f81d316d9dcfdb6a1b27105ce63fca2c594da5aa2f6fdf2f65f50f0d66314f8a09da875ae19d6c15636b65c815*30", "test"}, 
{"$RAR3$*1*b4eee1a48dc95d12*965f1453*64*47*1*0fe529478798c0960dd88a38a05451f9559e15f0cf20b4cac58260b0e5b56699d5871bdcc35bee099cc131eb35b9a116adaedf5ecc26b1c09cadf5185b3092e6*33", "test"}, #ifdef DEBUG {"$RAR3$*0*af24c0c95e9cafc7*e7f207f30dec96a5ad6f917a69d0209e", "magnum"}, {"$RAR3$*0*2653b9204daa2a8e*39b11a475f486206e2ec6070698d9bbc", "123456"}, {"$RAR3$*0*63f1649f16c2b687*8a89f6453297bcdb66bd756fa10ddd98", "abc123"}, /* -p mode tests, -m0 and -m3 (in that order) */ {"$RAR3$*1*575b083d78672e85*965f1453*48*47*1*cd3d8756438f43ab70e668792e28053f0ad7449af1c66863e3e55332bfa304b2c082b9f23b36cd4a8ebc0b743618c5b2*30", "magnum"}, {"$RAR3$*1*6f5954680c87535a*965f1453*64*47*1*c9bb398b9a5d54f035fd22be54bc6dc75822f55833f30eb4fb8cc0b8218e41e6d01824e3467475b90b994a5ddb7fe19366d293c9ee305316c2a60c3a7eb3ce5a*33", "magnum"}, /* Various lengths, these should be in self-test but not benchmark */ /* from CMIYC 2012 */ {"$RAR3$*1*0f263dd52eead558*834015cd*384*693*1*e28e9648f51b59e32f573b302f0e94aadf1050678b90c38dd4e750c7dd281d439ab4cccec5f1bd1ac40b6a1ead60c75625666307171e0fe2639d2397d5f68b97a2a1f733289eac0038b52ec6c3593ff07298fce09118c255b2747a02c2fa3175ab81166ebff2f1f104b9f6284a66f598764bd01f093562b5eeb9471d977bf3d33901acfd9643afe460e1d10b90e0e9bc8b77dc9ac40d40c2d211df9b0ecbcaea72c9d8f15859d59b3c85149b5bb5f56f0218cbbd9f28790777c39e3e499bc207289727afb2b2e02541b726e9ac028f4f05a4d7930efbff97d1ffd786c4a195bbed74997469802159f3b0ae05b703238da264087b6c2729d9023f67c42c5cbe40b6c67eebbfc4658dfb99bfcb523f62133113735e862c1430adf59c837305446e8e34fac00620b99f574fabeb2cd34dc72752014cbf4bd64d35f17cef6d40747c81b12d8c0cd4472089889a53f4d810b212fb314bf58c3dd36796de0feeefaf26be20c6a2fd00517152c58d0b1a95775ef6a1374c608f55f416b78b8c81761f1d*33:1::to-submit-challenges.txt", "wachtwoord"}, {"$RAR3$*1*9759543e04fe3a22*834015cd*384*693*1*cdd2e2478e5153a581c47a201490f5d9b69e01584ae488a2a40203da9ba8c5271ed8edc8f91a7bd262bb5e5de07ecbe9e2003d054a314d16caf2ea1de9f54303abdee1ed044396f7e29c40c38e638f626442efd9f511b4743758cd4a6025c5af81d1252475964937d80bfd50d10c171e7e4041a66c02a74b2b451ae83b6807990fb0652a8cdab530c5a0c497575a6e6cbe2db2035217fe849d2e0b8693b70f3f97b757229b4e89c8273197602c23cc04ff5f24abf3d3c7eb686fc3eddce1bfe710cc0b6e8bd012928127da38c38dd8f056095982afacb4578f6280d51c6739739e033674a9413ca88053f8264c5137d4ac018125c041a3489daaf175ef75e9282d245b92948c1bbcf1c5f25b7028f6d207d87fe9598c2c7ccd1553e842a91ab8ca9261a51b14601a756070388d08039466dfa36f0b4c7ea7dd9ff25c9d98687203c58f9ec8757cafe4d2ed785d5a9e6d5ea838e4cc246a9e6d3c30979dcce56b380b05f9103e6443b35357550b50229c47f845a93a48602790096828d9d6bef0*33:1::to-submit-challenges.txt", "Sleepingbaby210"}, {"$RAR3$*1*79e17c26407a7d52*834015cd*384*693*1*6844a189e732e9390b5a958b623589d5423fa432d756fd00940ac31e245214983507a035d4e0ee09469491551759a66c12150fe6c5d05f334fb0d8302a96d48ef4da04954222e0705507aaa84f8b137f284dbec344eee9cea6b2c4f63540c64df3ee8be3013466d238c5999e9a98eb6375ec5462869bba43401ec95077d0c593352339902c24a3324178e08fe694d11bfec646c652ffeafbdda929052c370ffd89168c83194fedf7c50fc7d9a1fbe64332063d267a181eb07b5d70a5854067db9b66c12703fde62728d3680cf3fdb9933a0f02bfc94f3a682ad5e7c428d7ed44d5ff554a8a445dea28b81e3a2631870e17f3f3c0c0204136802c0701590cc3e4c0ccd9f15e8be245ce9caa6969fab9e8443ac9ad9e73e7446811aee971808350c38c16c0d3372c7f44174666d770e3dd321e8b08fb2dc5e8a6a5b2a1720bad66e54abc194faabc5f24225dd8fee137ba5d4c2ed48c6462618e6333300a5b8dfc75c65608925e786eb0988f7b3a5ab106a55168d1001adc47ce95bba77b38c35b*33:1::to-submit-challenges.txt", "P-i-r-A-T-E"}, 
{"$RAR3$*1*e1df79fd9ee1dadf*771a163b*64*39*1*edc483d67b94ab22a0a9b8375a461e06fa1108fa72970e16d962092c311970d26eb92a033a42f53027bdc0bb47231a12ed968c8d530a9486a90cbbc00040569b*33", "333"}, {"$RAR3$*1*c83c00534d4af2db*771a163b*64*39*1*05244526d6b32cb9c524a15c79d19bba685f7fc3007a9171c65fc826481f2dce70be6148f2c3497f0d549aa4e864f73d4e4f697fdb66ff528ed1503d9712a414*33", "11eleven111"}, {"$RAR3$*0*345f5f573a077ad7*638e388817cc7851e313406fd77730b9", "Boustrophedon"}, {"$RAR3$*0*c9dea41b149b53b4*fcbdb66122d8ebdb32532c22ca7ab9ec", "password"}, {"$RAR3$*0*7ce241baa2bd521b*f2b26d76424efa351c728b321671d074", "@"}, {"$RAR3$*0*ea0ea55ce549c8ab*cf89099c620fcc244bdcbae55a616e76", "ow"}, {"$RAR3$*0*ea0ea55ce549c8ab*6a35a76b1ce9ddc4229b9166d60dc113", "aes"}, {"$RAR3$*0*ea0ea55ce549c8ab*1830771da109f53e2d6e626be16c2666", "sha1"}, {"$RAR3$*0*7e52d3eba9bad316*ee8e1edd435cfa9b8ab861d958a4d588", "fiver"}, {"$RAR3$*0*7e52d3eba9bad316*01987735ab0be7b6538470bd5f5fbf80", "magnum"}, {"$RAR3$*0*7e52d3eba9bad316*f2fe986ed266c6617c48d04a429cf2e3", "7777777"}, {"$RAR3$*0*7e52d3eba9bad316*f0ad6e7fdff9f82fff2aa990105fde21", "password"}, {"$RAR3$*0*7ce241baa2bd521b*3eb0017fa8843017952c53a3ac8332b6", "nine9nine"}, {"$RAR3$*0*7ce241baa2bd521b*ccbf0c3f8e059274606f33cc388b8a2f", "10tenten10"}, {"$RAR3$*0*5fa43f823a60da63*af2630863e12046e42c4501c915636c9", "eleven11111"}, {"$RAR3$*0*5fa43f823a60da63*88c0840d0bd98844173d35f867558ec2", "twelve121212"}, {"$RAR3$*0*4768100a172fa2b6*48edcb5283ee2e4f0e8edb25d0d85eaa", "subconsciousness"}, #endif {NULL} }; #if defined (_OPENMP) static void lock_callback(int mode, int type, const char *file, int line) { (void)file; (void)line; if (mode & CRYPTO_LOCK) pthread_mutex_lock(&(lockarray[type])); else pthread_mutex_unlock(&(lockarray[type])); } static unsigned long thread_id(void) { return omp_get_thread_num(); } static void init_locks(void) { int i; lockarray = (pthread_mutex_t*) OPENSSL_malloc(CRYPTO_num_locks() * sizeof(pthread_mutex_t)); for (i = 0; i < CRYPTO_num_locks(); i++) pthread_mutex_init(&(lockarray[i]), NULL); CRYPTO_set_id_callback(thread_id); CRYPTO_set_locking_callback(lock_callback); } #endif /* _OPENMP */ /* Use AES-NI if available. This is not supported with low-level calls, we have to use EVP) */ static void init_aesni(void) { ENGINE *e; const char *engine_id = "aesni"; ENGINE_load_builtin_engines(); e = ENGINE_by_id(engine_id); if (!e) { //fprintf(stderr, "AES-NI engine not available\n"); return; } if (!ENGINE_init(e)) { fprintf(stderr, "AES-NI engine could not init\n"); ENGINE_free(e); return; } if (!ENGINE_set_default(e, ENGINE_METHOD_ALL & ~ENGINE_METHOD_RAND)) { /* This should only happen when 'e' can't initialise, but the * previous statement suggests it did. 
*/ fprintf(stderr, "AES-NI engine initialized but then failed\n"); abort(); } ENGINE_finish(e); ENGINE_free(e); } #ifndef __APPLE__ /* Apple segfaults on this :) */ static void openssl_cleanup(void) { ENGINE_cleanup(); ERR_free_strings(); CRYPTO_cleanup_all_ex_data(); EVP_cleanup(); } #endif static void create_clobj(size_t gws, struct fmt_main *self) { int i; int bench_len = strlen(self->params.tests[0].plaintext) * 2; pinned_saved_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, UNICODE_LENGTH * gws, NULL , &ret_code); HANDLE_CLERROR(ret_code, "Error allocating page-locked memory"); cl_saved_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, UNICODE_LENGTH * gws, NULL , &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); saved_key = (unsigned char*)clEnqueueMapBuffer(queue[gpu_id], pinned_saved_key, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, UNICODE_LENGTH * gws, 0, NULL, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_key"); memset(saved_key, 0, UNICODE_LENGTH * gws); pinned_saved_len = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, sizeof(cl_int) * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating page-locked memory"); cl_saved_len = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(cl_int) * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); saved_len = (unsigned int*)clEnqueueMapBuffer(queue[gpu_id], pinned_saved_len, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, sizeof(cl_int) * gws, 0, NULL, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_len"); for (i = 0; i < gws; i++) saved_len[i] = bench_len; pinned_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, 8, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating page-locked memory"); cl_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, 8, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); saved_salt = (unsigned char*) clEnqueueMapBuffer(queue[gpu_id], pinned_salt, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, 8, 0, NULL, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_salt"); memset(saved_salt, 0, 8); cl_OutputBuf = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(cl_int) * 5 * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); cl_round = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(cl_int) * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); // aes_key is uchar[16] but kernel treats it as uint[4] pinned_aes_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, sizeof(cl_uint) * 4 * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating page-locked memory"); cl_aes_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(cl_uint) * 4 * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); aes_key = (unsigned char*) clEnqueueMapBuffer(queue[gpu_id], pinned_aes_key, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, sizeof(cl_uint) * 4 * gws, 0, NULL, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory aes_key"); memset(aes_key, 0, 16 * gws); pinned_aes_iv = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, 16 * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating page-locked memory"); cl_aes_iv = clCreateBuffer(context[gpu_id], 
CL_MEM_READ_WRITE, 16 * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); aes_iv = (unsigned char*) clEnqueueMapBuffer(queue[gpu_id], pinned_aes_iv, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, 16 * gws, 0, NULL, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory aes_iv"); memset(aes_iv, 0, 16 * gws); HANDLE_CLERROR(clSetKernelArg(RarInit, 0, sizeof(cl_mem), (void*)&cl_OutputBuf), "Error setting argument 0"); HANDLE_CLERROR(clSetKernelArg(RarInit, 1, sizeof(cl_mem), (void*)&cl_round), "Error setting argument 1"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(cl_mem), (void*)&cl_saved_key), "Error setting argument 0"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(cl_mem), (void*)&cl_saved_len), "Error setting argument 1"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(cl_mem), (void*)&cl_round), "Error setting argument 2"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 3, sizeof(cl_mem), (void*)&cl_OutputBuf), "Error setting argument 3"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 4, sizeof(cl_mem), (void*)&cl_salt), "Error setting argument 4"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 5, sizeof(cl_mem), (void*)&cl_aes_iv), "Error setting argument 5"); HANDLE_CLERROR(clSetKernelArg(RarFinal, 0, sizeof(cl_mem), (void*)&cl_saved_len), "Error setting argument 0"); HANDLE_CLERROR(clSetKernelArg(RarFinal, 1, sizeof(cl_mem), (void*)&cl_OutputBuf), "Error setting argument 1"); HANDLE_CLERROR(clSetKernelArg(RarFinal, 2, sizeof(cl_mem), (void*)&cl_aes_key), "Error setting argument 2"); cracked = mem_alloc(sizeof(*cracked) * gws); } /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { return MIN( MIN(autotune_get_task_max_work_group_size(FALSE, 0, RarInit), autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel)), autotune_get_task_max_work_group_size(FALSE, 0, RarFinal)); } static size_t get_task_max_size() { return 0; } static size_t get_default_workgroup() { if (cpu(device_info[gpu_id])) return 1; else return 64; } static void release_clobj(void) { HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_aes_key, aes_key, 0, NULL, NULL), "Error Unmapping aes_key"); HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_aes_iv, aes_iv, 0, NULL, NULL), "Error Unmapping aes_iv"); HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_saved_key, saved_key, 0, NULL, NULL), "Error Unmapping saved_key"); HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_saved_len, saved_len, 0, NULL, NULL), "Error Unmapping saved_len"); HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_salt, saved_salt, 0, NULL, NULL), "Error Unmapping saved_salt"); HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error releasing memory mappings"); HANDLE_CLERROR(clReleaseMemObject(cl_aes_key), "Release aes_key"); HANDLE_CLERROR(clReleaseMemObject(cl_aes_iv), "Release aes_iv"); HANDLE_CLERROR(clReleaseMemObject(cl_saved_key), "Release saved_key"); HANDLE_CLERROR(clReleaseMemObject(cl_saved_len), "Release saved_len"); HANDLE_CLERROR(clReleaseMemObject(cl_salt), "Release salt"); HANDLE_CLERROR(clReleaseMemObject(pinned_aes_key), "Release aes_key"); HANDLE_CLERROR(clReleaseMemObject(pinned_aes_iv), "Release aes_iv"); HANDLE_CLERROR(clReleaseMemObject(pinned_saved_key), "Release saved_key"); HANDLE_CLERROR(clReleaseMemObject(pinned_saved_len), "Release saved_len"); HANDLE_CLERROR(clReleaseMemObject(pinned_salt), "Release salt"); HANDLE_CLERROR(clReleaseMemObject(cl_OutputBuf), "Release 
OutputBuf"); MEM_FREE(cracked); } static void done(void) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(RarInit), "Release kernel"); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseKernel(RarFinal), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); MEM_FREE(unpack_data); } static void clear_keys(void) { memset(saved_len, 0, sizeof(int) * global_work_size); } #undef set_key static void set_key(char *key, int index) { int plen; UTF16 buf[PLAINTEXT_LENGTH + 1]; /* UTF-16LE encode the password, encoding aware */ plen = enc_to_utf16(buf, PLAINTEXT_LENGTH, (UTF8*) key, strlen(key)); if (plen < 0) plen = strlen16(buf); memcpy(&saved_key[UNICODE_LENGTH * index], buf, UNICODE_LENGTH); saved_len[index] = plen << 1; new_keys = 1; } static void *get_salt(char *ciphertext) { unsigned int i, type, ex_len; static unsigned char *ptr; /* extract data from "salt" */ char *encoded_salt; char *saltcopy = strdup(ciphertext); char *keep_ptr = saltcopy; rarfile *psalt; unsigned char tmp_salt[8]; int inlined = 1; SHA_CTX ctx; if (!ptr) ptr = mem_alloc_tiny(sizeof(rarfile*),sizeof(rarfile*)); saltcopy += 7; /* skip over "$RAR3$*" */ type = atoi(strtok(saltcopy, "*")); encoded_salt = strtok(NULL, "*"); for (i = 0; i < 8; i++) tmp_salt[i] = atoi16[ARCH_INDEX(encoded_salt[i * 2])] * 16 + atoi16[ARCH_INDEX(encoded_salt[i * 2 + 1])]; if (type == 0) { /* rar-hp mode */ char *encoded_ct = strtok(NULL, "*"); psalt = mem_calloc(sizeof(*psalt)+16); psalt->type = type; ex_len = 16; memcpy(psalt->salt, tmp_salt, 8); for (i = 0; i < 16; i++) psalt->raw_data[i] = atoi16[ARCH_INDEX(encoded_ct[i * 2])] * 16 + atoi16[ARCH_INDEX(encoded_ct[i * 2 + 1])]; psalt->blob = psalt->raw_data; psalt->pack_size = 16; } else { char *p = strtok(NULL, "*"); char crc_c[4]; unsigned long long pack_size; unsigned long long unp_size; for (i = 0; i < 4; i++) crc_c[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; pack_size = atoll(strtok(NULL, "*")); unp_size = atoll(strtok(NULL, "*")); inlined = atoi(strtok(NULL, "*")); ex_len = pack_size; /* load ciphertext. We allocate and load all files here, and they are freed when password found. */ #if HAVE_MMAP psalt = mem_calloc(sizeof(*psalt) + (inlined ? ex_len : 0)); #else psalt = mem_calloc(sizeof(*psalt)+ex_len); #endif psalt->type = type; memcpy(psalt->salt, tmp_salt, 8); psalt->pack_size = pack_size; psalt->unp_size = unp_size; memcpy(psalt->crc.c, crc_c, 4); if (inlined) { unsigned char *d = psalt->raw_data; p = strtok(NULL, "*"); for (i = 0; i < psalt->pack_size; i++) *d++ = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; psalt->blob = psalt->raw_data; } else { FILE *fp; char *archive_name = strtok(NULL, "*"); long long pos = atoll(strtok(NULL, "*")); #if HAVE_MMAP if (!(fp = fopen(archive_name, "rb"))) { fprintf(stderr, "! %s: %s\n", archive_name, strerror(errno)); error(); } #ifdef RAR_DEBUG fprintf(stderr, "RAR mmap() len %llu offset 0\n", pos + psalt->pack_size); #endif psalt->blob = mmap(NULL, pos + psalt->pack_size, PROT_READ, MAP_SHARED, fileno(fp), 0); if (psalt->blob == MAP_FAILED) { fprintf(stderr, "Error loading file from " "archive '%s'. Archive possibly " "damaged.\n", archive_name); error(); } psalt->blob += pos; #else size_t count; if (!(fp = fopen(archive_name, "rb"))) { fprintf(stderr, "! 
%s: %s\n", archive_name, strerror(errno)); error(); } jtr_fseek64(fp, pos, SEEK_SET); count = fread(psalt->raw_data, 1, psalt->pack_size, fp); if (count != psalt->pack_size) { fprintf(stderr, "Error loading file from archive '%s', expected %llu bytes, got %zu. Archive possibly damaged.\n", archive_name, psalt->pack_size, count); error(); } psalt->blob = psalt->raw_data; #endif fclose(fp); } p = strtok(NULL, "*"); psalt->method = atoi16[ARCH_INDEX(p[0])] * 16 + atoi16[ARCH_INDEX(p[1])]; if (psalt->method != 0x30) #if ARCH_LITTLE_ENDIAN psalt->crc.w = ~psalt->crc.w; #else psalt->crc.w = JOHNSWAP(~psalt->crc.w); #endif } SHA1_Init(&ctx); SHA1_Update(&ctx, psalt->blob, psalt->pack_size); SHA1_Final(psalt->blob_hash, &ctx); MEM_FREE(keep_ptr); #if HAVE_MMAP psalt->dsalt.salt_alloc_needs_free = inlined; #else psalt->dsalt.salt_alloc_needs_free = 1; #endif psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(rarfile, salt); psalt->dsalt.salt_cmp_size = SALT_CMP_SIZE(rarfile, salt, raw_data, 0); memcpy(ptr, &psalt, sizeof(rarfile*)); return (void*)ptr; } static void set_salt(void *salt) { cur_file = *((rarfile**)salt); memcpy(saved_salt, cur_file->salt, 8); HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_salt, CL_FALSE, 0, 8, saved_salt, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_salt"); } static void init(struct fmt_main *self) { char build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DPLAINTEXT_LENGTH=%u", PLAINTEXT_LENGTH); opencl_init("$JOHN/kernels/rar_kernel.cl", gpu_id, build_opts); // create kernels to execute RarInit = clCreateKernel(program[gpu_id], "RarInit", &ret_code); HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?"); crypt_kernel = clCreateKernel(program[gpu_id], "RarHashLoop", &ret_code); HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?"); RarFinal = clCreateKernel(program[gpu_id], "RarFinal", &ret_code); HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?"); #ifdef DEBUG self->params.benchmark_comment = " (1-16 characters)"; #endif /* We mimic the lengths of cRARk for comparisons */ if (!cpu(device_info[gpu_id])) { #ifndef DEBUG self->params.benchmark_comment = " (length 5)"; #endif self->params.tests = gpu_tests; } //Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, HASH_LOOPS, split_events, warn, 3, self, create_clobj, release_clobj, UNICODE_LENGTH + sizeof(cl_int) * 14, 0); //Auto tune execution from shared/included code. self->methods.crypt_all = crypt_all_benchmark; autotune_run(self, ITERATIONS, 0, (cpu(device_info[gpu_id]) ? 1000000000 : 10000000000ULL)); self->methods.crypt_all = crypt_all; #if defined (_OPENMP) omp_t = omp_get_max_threads(); init_locks(); #endif /* _OPENMP */ if (pers_opts.target_enc == UTF_8) self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH); unpack_data = mem_calloc(sizeof(unpack_data_t) * omp_t); /* OpenSSL init */ init_aesni(); SSL_load_error_strings(); SSL_library_init(); OpenSSL_add_all_algorithms(); #ifndef __APPLE__ atexit(openssl_cleanup); #endif /* CRC-32 table init, do it before we start multithreading */ { CRC32_t crc; CRC32_Init(&crc); } } static int hexlen(char *q) { char *s = q; size_t len = strlen(q); while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; return (len == (size_t)(q - s)) ? 
(int)(q - s) : -1 - (int)(q - s); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *ptr, *keeptr; int mode; if (strncmp(ciphertext, "$RAR3$*", 7)) return 0; if (!(ctcopy = strdup(ciphertext))) { fprintf(stderr, "Memory allocation failed in %s, unable to check if hash is valid!", FORMAT_LABEL); return 0; } keeptr = ctcopy; ctcopy += 7; if (!(ptr = strtok(ctcopy, "*"))) /* -p or -h mode */ goto error; if (hexlen(ptr) != 1) goto error; mode = atoi(ptr); if (mode < 0 || mode > 1) goto error; if (!(ptr = strtok(NULL, "*"))) /* salt */ goto error; if (hexlen(ptr) != 16) /* 8 bytes of salt */ goto error; if (!(ptr = strtok(NULL, "*"))) goto error; if (mode == 0) { if (hexlen(ptr) != 32) /* 16 bytes of encrypted known plain */ goto error; MEM_FREE(keeptr); return 1; } else { int inlined; long long plen, ulen; if (hexlen(ptr) != 8) /* 4 bytes of CRC */ goto error; if (!(ptr = strtok(NULL, "*"))) /* pack_size */ goto error; if (strlen(ptr) > 12) { // pack_size > 1 TB? Really? fprintf(stderr, "pack_size > 1TB not supported (%s)\n", FORMAT_NAME); goto error; } if ((plen = atoll(ptr)) < 16) goto error; if (!(ptr = strtok(NULL, "*"))) /* unp_size */ goto error; if (strlen(ptr) > 12) { fprintf(stderr, "unp_size > 1TB not supported (%s)\n", FORMAT_NAME); goto error; } if ((ulen = atoll(ptr)) < 1) goto error; if (!(ptr = strtok(NULL, "*"))) /* inlined */ goto error; if (hexlen(ptr) != 1) goto error; inlined = atoi(ptr); if (inlined < 0 || inlined > 1) goto error; if (!(ptr = strtok(NULL, "*"))) /* pack_size / archive_name */ goto error; if (inlined) { if (hexlen(ptr) != plen * 2) goto error; } else { FILE *fp; char *archive_name; archive_name = ptr; if (!(fp = fopen(archive_name, "rb"))) { fprintf(stderr, "! %s: %s, skipping.\n", archive_name, strerror(errno)); goto error; } if (!(ptr = strtok(NULL, "*"))) /* pos */ goto error; /* We could go on and actually try seeking to pos but this is enough for now */ fclose(fp); } if (!(ptr = strtok(NULL, "*"))) /* method */ goto error; } MEM_FREE(keeptr); return 1; error: #ifdef RAR_DEBUG { char buf[68]; strnzcpy(buf, ciphertext, sizeof(buf)); fprintf(stderr, "rejecting %s\n", buf); } #endif MEM_FREE(keeptr); return 0; } static char *get_key(int index) { UTF16 tmpbuf[PLAINTEXT_LENGTH + 1]; memcpy(tmpbuf, &((UTF16*) saved_key)[index * PLAINTEXT_LENGTH], saved_len[index]); memset(&tmpbuf[saved_len[index] >> 1], 0, 2); return (char*) utf16_to_enc(tmpbuf); } #define ADD_BITS(n) \ { \ if (bits < 9) { \ hold |= ((unsigned int)*next++ << (24 - bits)); \ bits += 8; \ } \ hold <<= n; \ bits -= n; \ } /* * This function is loosely based on JimF's check_inflate_CODE2() from * pkzip_fmt. Together with the other bit-checks, we are rejecting over 96% * of the candidates without resorting to a slow full check (which in turn * may reject semi-early, especially if it's a PPM block) * * Input is first 16 bytes of RAR buffer decrypted, as-is. It also contain the * first 2 bits, which have already been decoded, and have told us we had an * LZ block (RAR always use dynamic Huffman table) and keepOldTable was not set. * * RAR use 20 x (4 bits length, optionally 4 bits zerocount), and reversed * byte order. 
*/ static MAYBE_INLINE int check_huffman(unsigned char *next) { unsigned int bits, hold, i; int left; unsigned int ncount[4]; unsigned char *count = (unsigned char*)ncount; unsigned char bit_length[20]; #ifdef DEBUG unsigned char *was = next; #endif #if ARCH_LITTLE_ENDIAN && ARCH_ALLOWS_UNALIGNED hold = JOHNSWAP(*(unsigned int*)next); #else hold = next[3] + (((unsigned int)next[2]) << 8) + (((unsigned int)next[1]) << 16) + (((unsigned int)next[0]) << 24); #endif next += 4; // we already have the first 32 bits hold <<= 2; // we already processed 2 bits, PPM and keepOldTable bits = 32 - 2; /* First, read 20 pairs of (bitlength[, zerocount]) */ for (i = 0 ; i < 20 ; i++) { int length, zero_count; length = hold >> 28; ADD_BITS(4); if (length == 15) { zero_count = hold >> 28; ADD_BITS(4); if (zero_count == 0) { bit_length[i] = 15; } else { zero_count += 2; while (zero_count-- > 0 && i < sizeof(bit_length) / sizeof(bit_length[0])) bit_length[i++] = 0; i--; } } else { bit_length[i] = length; } } #ifdef DEBUG if (next - was > 16) { fprintf(stderr, "*** (possible) BUG: check_huffman() needed %u bytes, we only have 16 (bits=%d, hold=0x%08x)\n", (int)(next - was), bits, hold); dump_stuff_msg("complete buffer", was, 16); error(); } #endif /* Count the number of codes for each code length */ memset(count, 0, 16); for (i = 0; i < 20; i++) { ++count[bit_length[i]]; } count[0] = 0; if (!ncount[0] && !ncount[1] && !ncount[2] && !ncount[3]) return 0; /* No codes at all */ left = 1; for (i = 1; i < 16; ++i) { left <<= 1; left -= count[i]; if (left < 0) { return 0; /* over-subscribed */ } } if (left) { return 0; /* incomplete set */ } return 1; /* Passed this check! */ } static int crypt_all_benchmark(int *pcount, struct db_salt *salt) { int count = *pcount; size_t *lws = local_work_size ? 
&local_work_size : NULL; size_t gws = GET_MULTIPLE_OR_BIGGER(count, local_work_size); BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_key, CL_FALSE, 0, UNICODE_LENGTH * gws, saved_key, 0, NULL, multi_profilingEvent[0]), "failed in clEnqueueWriteBuffer saved_key"); BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_len, CL_FALSE, 0, sizeof(int) * gws, saved_len, 0, NULL, multi_profilingEvent[1]), "failed in clEnqueueWriteBuffer saved_len"); BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], RarInit, 1, NULL, &gws, lws, 0, NULL, multi_profilingEvent[2]), "failed in clEnqueueNDRangeKernel"); BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &gws, lws, 0, NULL, NULL), "failed in clEnqueueNDRangeKernel"); BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &gws, lws, 0, NULL, multi_profilingEvent[3]), "failed in clEnqueueNDRangeKernel"); BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], RarFinal, 1, NULL, &gws, lws, 0, NULL, multi_profilingEvent[4]), "failed in clEnqueueNDRangeKernel"); // read back aes key & iv BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], cl_aes_key, CL_FALSE, 0, 16 * gws, aes_key, 0, NULL, multi_profilingEvent[5]), "failed in reading key back"); BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], cl_aes_iv, CL_TRUE, 0, 16 * gws, aes_iv, 0, NULL, multi_profilingEvent[6]), "failed in reading iv back"); return count; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; int k; size_t gws = ((count + (local_work_size - 1)) / local_work_size) * local_work_size; if (new_keys) { HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_key, CL_FALSE, 0, UNICODE_LENGTH * gws, saved_key, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_key"); HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_len, CL_FALSE, 0, sizeof(int) * gws, saved_len, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_len"); new_keys = 0; } HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], RarInit, 1, NULL, &gws, &local_work_size, 0, NULL, firstEvent), "failed in clEnqueueNDRangeKernel"); for (k = 0; k < 16; k++) { HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &gws, &local_work_size, 0, NULL, NULL), "failed in clEnqueueNDRangeKernel"); HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel"); opencl_process_event(); } HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], RarFinal, 1, NULL, &gws, &local_work_size, 0, NULL, lastEvent), "failed in clEnqueueNDRangeKernel"); // read back aes key & iv HANDLE_CLERROR(clEnqueueReadBuffer(queue[gpu_id], cl_aes_key, CL_FALSE, 0, 16 * gws, aes_key, 0, NULL, NULL), "failed in reading key back"); HANDLE_CLERROR(clEnqueueReadBuffer(queue[gpu_id], cl_aes_iv, CL_TRUE, 0, 16 * gws, aes_iv, 0, NULL, NULL), "failed in reading iv back"); #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { int i16 = index*16; unsigned int inlen = 16; int outlen; EVP_CIPHER_CTX aes_ctx; EVP_CIPHER_CTX_init(&aes_ctx); EVP_DecryptInit_ex(&aes_ctx, EVP_aes_128_cbc(), NULL, &aes_key[i16], &aes_iv[i16]); EVP_CIPHER_CTX_set_padding(&aes_ctx, 0); /* AES decrypt, uses aes_iv, aes_key and blob */ if (cur_file->type == 0) { /* rar-hp mode */ unsigned char plain[16]; outlen = 0; EVP_DecryptUpdate(&aes_ctx, plain, &outlen, cur_file->blob, inlen); EVP_DecryptFinal_ex(&aes_ctx, &plain[outlen], &outlen); cracked[index] = !memcmp(plain, "\xc4\x3d\x7b\x00\x40\x07\x00", 7); } else { if (cur_file->method == 
0x30) { /* stored, not deflated */ CRC32_t crc; unsigned char crc_out[4]; unsigned char plain[0x8010]; unsigned long long size = cur_file->unp_size; unsigned char *cipher = cur_file->blob; /* Use full decryption with CRC check. Compute CRC of the decompressed plaintext */ CRC32_Init(&crc); outlen = 0; while (size > 0x8000) { inlen = 0x8000; EVP_DecryptUpdate(&aes_ctx, plain, &outlen, cipher, inlen); CRC32_Update(&crc, plain, outlen > size ? size : outlen); size -= outlen; cipher += inlen; } EVP_DecryptUpdate(&aes_ctx, plain, &outlen, cipher, (size + 15) & ~0xf); EVP_DecryptFinal_ex(&aes_ctx, &plain[outlen], &outlen); size += outlen; CRC32_Update(&crc, plain, size); CRC32_Final(crc_out, crc); /* Compare computed CRC with stored CRC */ cracked[index] = !memcmp(crc_out, &cur_file->crc.c, 4); } else { const int solid = 0; unpack_data_t *unpack_t; unsigned char plain[20]; cracked[index] = 0; /* Decrypt just one block for early rejection */ outlen = 0; EVP_DecryptUpdate(&aes_ctx, plain, &outlen, cur_file->blob, 16); EVP_DecryptFinal_ex(&aes_ctx, &plain[outlen], &outlen); #if 1 /* Early rejection */ if (plain[0] & 0x80) { // PPM checks here. if (!(plain[0] & 0x20) || // Reset bit must be set (plain[1] & 0x80)) // MaxMB must be < 128 goto bailOut; } else { // LZ checks here. if ((plain[0] & 0x40) || // KeepOldTable can't be set !check_huffman(plain)) // Huffman table check goto bailOut; } #endif /* Reset stuff for full check */ EVP_DecryptInit_ex(&aes_ctx, EVP_aes_128_cbc(), NULL, &aes_key[i16], &aes_iv[i16]); EVP_CIPHER_CTX_set_padding(&aes_ctx, 0); #ifdef _OPENMP unpack_t = &unpack_data[omp_get_thread_num()]; #else unpack_t = unpack_data; #endif unpack_t->max_size = cur_file->unp_size; unpack_t->dest_unp_size = cur_file->unp_size; unpack_t->pack_size = cur_file->pack_size; unpack_t->iv = &aes_iv[i16]; unpack_t->ctx = &aes_ctx; unpack_t->key = &aes_key[i16]; if (rar_unpack29(cur_file->blob, solid, unpack_t)) cracked[index] = !memcmp(&unpack_t->unp_crc, &cur_file->crc.c, 4); bailOut:; } } EVP_CIPHER_CTX_cleanup(&aes_ctx); } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_ocl_rar = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP | FMT_DYNA_SALT, #if FMT_MAIN_VERSION > 11 { NULL }, #endif cpu_tests // Changed in init if GPU },{ init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash }, fmt_default_dyna_salt_hash, set_salt, set_key, get_key, clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */
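/*
 * The core of the -hp (type 0) check in crypt_all() above, as an isolated
 * sketch: decrypt the single stored AES block with the derived key/IV and
 * compare against the 7 constant plaintext bytes RAR writes there.  Assumes
 * the pre-1.1 OpenSSL EVP API this plugin is written against; the function
 * name is illustrative.
 */
static int rar_hp_check(const unsigned char *key, const unsigned char *iv,
                        const unsigned char *blob /* 16 encrypted bytes */)
{
	EVP_CIPHER_CTX ctx;
	unsigned char plain[16];
	int outlen = 0, hit;

	EVP_CIPHER_CTX_init(&ctx);
	EVP_DecryptInit_ex(&ctx, EVP_aes_128_cbc(), NULL, key, iv);
	EVP_CIPHER_CTX_set_padding(&ctx, 0);    /* raw block, no PKCS#7 */
	EVP_DecryptUpdate(&ctx, plain, &outlen, blob, 16);
	EVP_DecryptFinal_ex(&ctx, &plain[outlen], &outlen);

	/* the first 7 bytes of a decrypted -hp header are constant */
	hit = !memcmp(plain, "\xc4\x3d\x7b\x00\x40\x07\x00", 7);
	EVP_CIPHER_CTX_cleanup(&ctx);
	return hit;
}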
GB_binop__first_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__first_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__first_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__first_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__first_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__first_uint16) // A*D function (colscale): GB (_AxD__first_uint16) // D*A function (rowscale): GB (_DxB__first_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__first_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__first_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_uint16) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 1 // BinaryOp: cij = aij #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // true if values of B are not used #define GB_B_IS_PATTERN \ 1 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = x ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FIRST || GxB_NO_UINT16 || GxB_NO_FIRST_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__first_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__first_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__first_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__first_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__first_uint16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__first_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = 
(*((uint16_t *) alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__first_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__first_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__first_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__first_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = x ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = aij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = x ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary 
operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
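/*
 * Illustrative sketch (not generated code): the FIRST operator above is
 * GB_BINOP(z,x,y,i,j) with z = x, so an eWiseMult over the intersection
 * pattern simply copies A's values; B contributes only its pattern, which is
 * why GB_B_IS_PATTERN is 1 and GB_GETB expands to nothing. The helper name
 * emult_first_uint16 is hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

static void emult_first_uint16(uint16_t *Cx, const uint16_t *Ax,
                               size_t nz /* entries in the intersection */)
{
    for (size_t p = 0 ; p < nz ; p++)
    {
        Cx [p] = Ax [p] ;   /* z = x ; the y operand (B's value) is never read */
    }
}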
evolve_cc.c
/* * The Connected Components Hamiltonian split uses a connected component search * on the time step graph of the system to find isolated subsystems with fast * interactions. These subsystems are then evolved at greater accuracy compared * to the rest system. * Equation numbers in comments refer to: J\"anes, Pelupessy, Portegies Zwart, A&A 2014 (doi:10.1051/0004-6361/201423831) */ #include <tgmath.h> #include <stdio.h> #include <stdlib.h> #ifdef _OPENMP #include <omp.h> #endif #include "evolve.h" #include "evolve_kepler.h" #include "evolve_bs.h" //#define CC_DEBUG // perform (time-consuming, but thorough) CC sanity checks #define IS_ZEROSYS(SYS) (((SYS)->n == 0) && ((SYS)->part == NULL) && ((SYS)->last == NULL) && ((SYS)->next_cc == NULL)) #define IS_ZEROSYSs(SYS) (((SYS).n == 0) && ((SYS).part == NULL) && ((SYS).last == NULL) && ((SYS).next_cc == NULL)) #define LOG_CC_SPLIT(C, R) \ { \ LOG("clevel = %d s.n = %d c.n = {", clevel, s.n); \ for (struct sys *_ci = (C); !IS_ZEROSYS(_ci); _ci = _ci->next_cc) printf(" %d ", _ci->n ); \ printf("} r.n = %d\n", (R)->n); \ }; #define LOGSYS_ID(SYS) for (UINT i = 0; i < (SYS).n; i++) { printf("%u ", (SYS).part[i].id); } printf("\n"); #define LOGSYSp_ID(SYS) for (UINT i = 0; i < (SYS)->n; i++) { printf("%u ", (SYS)->part[i].id); } printf("\n"); #define LOGSYSC_ID(SYS) for (struct sys *_ci = &(SYS); !IS_ZEROSYS(_ci); _ci = _ci->next_cc) {printf("{"); for (UINT i = 0; i < _ci->n; i++) {printf("%u ", _ci->part[i].id); } printf("}\t");} printf("\n"); void split_cc(int clevel,struct sys s, struct sys *c, struct sys *r, DOUBLE dt) { /* * split_cc: run a connected component search on sys s with threshold dt, * creates a singly-linked list of connected components c and a rest system r * c or r is set to zerosys if no connected components/rest is found */ int dir=SIGN(dt); dt=fabs(dt); diag->tstep[clevel]++; // not directly comparable to corresponding SF-split statistics struct sys *c_next; c_next = c; *c_next = zerosys; UINT processed = 0; // increase if something is added from the stack to the cc UINT comp_next = 0; // increase to move a particle from stack to cc; points to the first element of the stack UINT comp_size = 0; // amount of particles added to the current cc UINT stack_next = 1; // swap this with s[i] to increase the stack UINT stack_size = 1; // first element of the stack is s[comp_next] // last element of the stack is s[comp_next + stack_size - 1] UINT rest_next = s.n - 1; // swap this to add to the rest-system // find connected components while (processed < s.n) { //LOG("split_cc: searching for connected components: %d / %d\n", processed, s.n); // search for the next connected component while (stack_size > 0) { // iterate over all unvisited elements for (UINT i = stack_next; i <= rest_next; i++) { // if element is connected to the first element of the stack DOUBLE timestep = (DOUBLE) timestep_ij(s.part+comp_next, s.part+i,dir); diag->tcount[clevel]++; if ( timestep <= dt) { // add i to the end of the stack by swapping stack_next and i SWAP( s.part[ stack_next ], s.part[i], struct particle ); stack_next++; stack_size++; } } // pop the stack; add to the connected component comp_size++; comp_next++; stack_size--; } processed += comp_size; // new component is non-trivial: create a new sys if (comp_size > 1) { //LOG("split_cc: found component with size: %d\n", comp_size); // create new component c from u[0] to u[cc_visited - 1] // remove components from u (u.n, u.part) c_next->n = comp_size; c_next->part = &( s.part[ comp_next - comp_size ]); 
c_next->last = &( s.part[ comp_next - 1 ]); c_next->next_cc = (struct sys*) malloc( sizeof(struct sys) ); c_next = c_next->next_cc; *c_next = zerosys; comp_next = stack_next; comp_size = 0; stack_next = stack_next + 1; stack_size = 1; // new component is trivial: add to rest } else { //LOG("found trivial component; adding to rest\n"); SWAP(s.part[ comp_next - 1 ], s.part[ rest_next ], struct particle ); rest_next--; comp_next = comp_next - 1; comp_size = 0; stack_next = comp_next + 1; stack_size = 1; } } if (processed != s.n) { ENDRUN("split_cc particle count mismatch: processed=%u s.n=%u r->n=%u\n", processed, s.n, r->n); } // create the rest system r->n = (s.n - 1) - rest_next; if (r->n > 0) { r->part = &( s.part[rest_next + 1] ); r->last = s.last; } else { r->part = NULL; r->last = NULL; } //LOG("split_cc: rest system size: %d\n", r->n); } void split_cc_verify(int clevel,struct sys s, struct sys *c, struct sys *r) { /* * split_cc_verify: explicit verification if connected components c and rest system r form a correct * connected components decomposition of the system. */ //LOG("split_cc_verify ping s.n=%d r->n=%d\n", s.n, r->n); //LOG_CC_SPLIT(c, r); UINT pcount_check = 0; for (UINT i = 0; i < s.n; i++) { pcount_check = 0; UINT particle_found = 0; struct particle *p = &( s.part[i] ); for (struct sys *cj = c; !IS_ZEROSYS(cj); cj = cj->next_cc) { pcount_check += cj->n; //LOG("%d\n", pcount_check); // search for p in connected components for (UINT k = 0; k < cj->n; k++) { struct particle * pk = &( cj->part[k] ); // is pk equal to p if (p->id == pk->id) { particle_found += 1; //LOG("split_cc_verify: found in a cc\n"); } } if (& ( cj->part[cj->n - 1] ) != cj->last) { LOG("split_cc_verify: last pointer for c is not set correctly!"); LOG_CC_SPLIT(c, r); ENDRUN("data structure corrupted\n"); } } // search for p in rest for (UINT k = 0; k < r->n; k++) { struct particle * pk = &( r->part[k] ); // is pk equal to p if (p->id == pk->id) { particle_found += 1; } } if (particle_found != 1) { LOG("split_cc_verify: particle %d particle_found=%d ", i, particle_found); LOG_CC_SPLIT(c, r); ENDRUN("data structure corrupted\n"); } } //if (& ( r->part[r->n - 1] ) != r->last) { // LOG("split_cc_verify: last pointer for r is not set correctly! 
%d %d",&( r->part[r->n - 1] ), r->last); // LOG_CC_SPLIT(c, r); // ENDRUN("data structure corrupted\n"); //} if (pcount_check + r->n != s.n) { LOG("split_cc_verify: particle count mismatch (%d %d)\n", pcount_check + r->n, s.n); LOG_CC_SPLIT(c, r); ENDRUN("data structure corrupted\n"); //ENDRUN("split_cc_verify: particle count mismatch\n"); } else { //LOG("split_cc_verify pong\n"); } //ENDRUN("Fin.\n"); } void split_cc_verify_ts(int clevel,struct sys *c, struct sys *r, DOUBLE dt) { DOUBLE ts_ij; int dir=SIGN(dt); dt=fabs(dt); // verify C-C interactions for (struct sys *ci = c; !IS_ZEROSYS(ci); ci = ci->next_cc) { for (UINT i = 0; i < ci->n; i++) { for (struct sys *cj = c; !IS_ZEROSYS(cj); cj = cj->next_cc) { if (ci == cj) { continue; } for (UINT j = 0; j < cj->n; j++) { ts_ij = (DOUBLE) timestep_ij((*ci).part+i, (*cj).part+j, dir); //LOG("comparing %d %d\n", ci->part[i].id, cj->part[j].id); //LOG("%f %f \n", ts_ij, dt); if (dt > ts_ij) { ENDRUN("split_cc_verify_ts C-C timestep underflow\n"); } } } } } // verify C-R interactions for (struct sys *ci = c; !IS_ZEROSYS(ci); ci = ci->next_cc) { for (UINT i = 0; i < ci->n; i++) { for (UINT j = 0; j < r->n; j++) { ts_ij = (DOUBLE) timestep_ij( (*ci).part+ i, (*r).part+ j,dir); if (ts_ij < dt) { ENDRUN("split_cc_verify_ts C-R timestep underflow\n"); } } } } // verify R interactions for (UINT i = 0; i < r->n; i++) { for (UINT j = 0; j < r->n; j++) { if (i == j) continue; ts_ij = (DOUBLE) timestep_ij( (*r).part+ i, (*r).part+j,dir); if (ts_ij < dt) { ENDRUN("split_cc_verify_ts R-R timestep underflow\n"); } } } } // TODO rename to cc_free_sys? void free_sys(struct sys * s) { if (s==NULL) return; if (s->next_cc != NULL) { free_sys(s->next_cc); } free(s); } DOUBLE sys_forces_max_timestep(struct sys s,int dir) { DOUBLE ts = 0.0; DOUBLE ts_ij; for (UINT i = 0; i < s.n-1; i++) { for (UINT j = i+1; j < s.n; j++) { ts_ij = (DOUBLE) timestep_ij(s.part+ i, s.part+j,dir); // check symm. 
if (ts_ij >= ts) { ts = ts_ij; }; } } return ts; } #define BS_SUBSYS_SIZE 10 #define TASKCONDITION (nc > 1 && s.n>BS_SUBSYS_SIZE) void evolve_cc2(int clevel,struct sys s, DOUBLE stime, DOUBLE etime, DOUBLE dt, int inttype, int recenter) { DOUBLE cmpos[3],cmvel[3]; int recentersub=0; struct sys c = zerosys, r = zerosys; CHECK_TIMESTEP(etime,stime,dt,clevel); if (s.n == 2 && (inttype==CCC_KEPLER || inttype==CC_KEPLER)) { evolve_kepler(clevel,s, stime, etime, dt); return; } if (s.n <= BS_SUBSYS_SIZE && (inttype==CCC_BS ||inttype==CC_BS)) { evolve_bs(clevel,s, stime, etime, dt); return; } if (s.n <= BS_SUBSYS_SIZE && (inttype==CCC_BSA ||inttype==CC_BSA)) { evolve_bs_adaptive(clevel,s, stime, etime, dt,1); return; } if(recenter && (inttype==CCC || inttype==CCC_KEPLER || inttype==CCC_BS || inttype==CCC_BSA)) { system_center_of_mass(s,cmpos,cmvel); move_system(s,cmpos,cmvel,-1); } // not actually helpful I think; needs testing #ifdef CC2_SPLIT_SHORTCUTS int dir=SIGN(dt); DOUBLE initial_timestep = sys_forces_max_timestep(s, dir); if(fabs(dt) > initial_timestep) { DOUBLE dt_step = dt; while (fabs(dt_step) > initial_timestep) { dt_step = dt_step / 2; clevel++; } LOG("CC2_SPLIT_SHORTCUTS clevel=%d dt/dt_step=%Le\n", clevel,(long double) (dt / dt_step)); for (DOUBLE dt_now = 0; dir*dt_now < dir*(dt-dt_step/2); dt_now += dt_step) evolve_cc2(clevel,s, dt_now, dt_now + dt_step, dt_step,inttype,0); return; } #endif #ifdef CC2_SPLIT_CONSISTENCY_CHECKS if (clevel == 0) { printf("consistency_checks: s.n=%u clevel=%d ", s.n, clevel); } #endif #ifdef CC2_SPLIT_CONSISTENCY_CHECKS // debug: make a copy of s to verify that the split has been done properly struct sys s_before_split; s_before_split.n = s.n; s_before_split.part = (struct particle*) malloc(s.n*sizeof(struct particle)); s_before_split.last = &( s_before_split.part[s.n - 1] ); s_before_split.next_cc = NULL; memcpy(s_before_split.part, s.part, s.n*sizeof(struct particle)); #endif /* split_cc() decomposes particles in H (eq 25) into: 1) K non-trivial connected components C_1..C_K 2) Rest set R */ split_cc(clevel,s, &c, &r, dt); //if (s.n != c.n) LOG_CC_SPLIT(&c, &r); // print out non-trivial splits #ifdef CC2_SPLIT_CONSISTENCY_CHECKS /* if (s.n != r.n) { LOG("s: "); LOGSYS_ID(s_before_split); LOG("c: "); LOGSYSC_ID(c); LOG("r: "); LOGSYS_ID(r); } */ // verify the split split_cc_verify(clevel,s_before_split, &c, &r); split_cc_verify_ts(clevel,&c, &r, dt); free(s_before_split.part); if (clevel == 0) { printf("ok "); } #endif if (IS_ZEROSYSs(c)) { diag->deepsteps++; diag->simtime+=dt; } // Independently integrate every C_i at reduced pivot time step h/2 (1st time) int nc=0; for (struct sys *ci = &c; !IS_ZEROSYS(ci); ci = ci->next_cc) nc++; if(nc>1 || r.n>0) recentersub=1; for (struct sys *ci = &c; !IS_ZEROSYS(ci); ci = ci->next_cc) { #ifdef _OPENMP if( TASKCONDITION ) { diag->ntasks[clevel]++; diag->taskcount[clevel]+=ci->n; #pragma omp task firstprivate(clevel,ci,stime,dt,recentersub) untied { struct sys lsys; lsys.n=ci->n; struct particle* lpart=(struct particle*) malloc(lsys.n*sizeof(struct particle)); lsys.part=lpart;lsys.last=lpart+lsys.n-1; for(UINT i=0;i<lsys.n;i++) lsys.part[i]=ci->part[i]; evolve_cc2(clevel+1,lsys, stime, stime+dt/2, dt/2,inttype,recentersub); for(UINT i=0;i<lsys.n;i++) ci->part[i]=lpart[i]; free(lpart); } } else #endif { evolve_cc2(clevel+1,*ci, stime, stime+dt/2, dt/2,inttype,recentersub); } } #pragma omp taskwait // Apply drifts and kicks at current pivot time step (eq 30) if(r.n>0) drift(clevel,r, stime+dt/2, dt/2); // drift r, 1st time // kick
ci <-> cj (eq 23) for (struct sys *ci = &c; !IS_ZEROSYS(ci); ci = ci->next_cc) { for (struct sys *cj = &c; !IS_ZEROSYS(cj); cj = cj->next_cc) { if (ci != cj) { kick(clevel,*ci, *cj, dt); //kick(*cj, *ci, dt); } } } // kick c <-> rest (eq 24) if(r.n>0) for (struct sys *ci = &c; !IS_ZEROSYS(ci); ci = ci->next_cc) { kick(clevel,r, *ci, dt); kick(clevel,*ci, r, dt); } if(r.n>0) kick(clevel,r, r, dt); // kick rest (V_RR) if(r.n>0) drift(clevel,r, etime, dt/2); // drift r, 2nd time // Independently integrate every C_i at reduced pivot time step h/2 (2nd time, eq 27) for (struct sys *ci = &c; !IS_ZEROSYS(ci); ci = ci->next_cc) { #ifdef _OPENMP if (TASKCONDITION) { diag->ntasks[clevel]++; diag->taskcount[clevel]+=ci->n; #pragma omp task firstprivate(clevel,ci,stime,etime,dt,recentersub) untied { struct sys lsys; lsys.n=ci->n; struct particle* lpart=(struct particle*) malloc(lsys.n*sizeof(struct particle)); lsys.part=lpart;lsys.last=lpart+lsys.n-1; for(UINT i=0;i<lsys.n;i++) lsys.part[i]=ci->part[i]; evolve_cc2(clevel+1,lsys, stime+dt/2, etime, dt/2,inttype,recentersub); for(UINT i=0;i<lsys.n;i++) ci->part[i]=lpart[i]; free(lpart); } } else #endif { evolve_cc2(clevel+1,*ci, stime+dt/2, etime, dt/2,inttype,recentersub); /* second half-step: stime+dt/2 -> etime, matching the task branch */ } } #pragma omp taskwait if(recenter && (inttype==CCC || inttype==CCC_KEPLER || inttype==CCC_BS || inttype==CCC_BSA)) { for(int i=0;i<3;i++) cmpos[i]+=cmvel[i]*dt; move_system(s,cmpos,cmvel,1); } free_sys(c.next_cc); } #undef TASKCONDITION
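/*
 * Illustrative sketch (reduced, with hypothetical names): split_cc() above
 * finds one connected component with no auxiliary storage by partitioning the
 * particle array in place. Elements connected to the current seed are swapped
 * into a growing "stack" prefix; each popped seed then joins the finished
 * component. The same bookkeeping on a plain int array, where two elements
 * are "connected" when they differ by at most dt:
 */
#include <stdlib.h>

#define SWAP_INT(a,b) { int _t = (a); (a) = (b); (b) = _t; }

/* stand-in for timestep_ij(...) <= dt */
static int connected(int a, int b, int dt) { return abs(a - b) <= dt; }

/* Reorder v[0..n-1] (n >= 1) so the component containing v[0] forms a prefix;
   returns the component size (mirrors one pass of split_cc's outer loop). */
static int grow_component(int *v, int n, int dt)
{
    int comp_next = 0, comp_size = 0;
    int stack_next = 1, stack_size = 1;   /* the stack initially holds v[0] */
    while (stack_size > 0) {
        for (int i = stack_next; i < n; i++)
            if (connected(v[comp_next], v[i], dt)) {
                SWAP_INT(v[stack_next], v[i]);  /* push i onto the stack */
                stack_next++;
                stack_size++;
            }
        comp_size++;   /* pop the seed into the finished component */
        comp_next++;
        stack_size--;
    }
    return comp_size;
}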
GB_unaryop__ainv_uint32_int64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_uint32_int64 // op(A') function: GB_tran__ainv_uint32_int64 // C type: uint32_t // A type: int64_t // cast: uint32_t cij = (uint32_t) aij // unaryop: cij = -aij #define GB_ATYPE \ int64_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, aij) \ uint32_t z = (uint32_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_uint32_int64 ( uint32_t *Cx, // Cx and Ax may be aliased int64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_uint32_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
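/*
 * Illustrative sketch (not generated code) of the scalar operation the file
 * above applies elementwise: the int64_t input is first typecast to uint32_t
 * (GB_CASTING), then negated (GB_OP, z = -x). Unsigned negation wraps modulo
 * 2^32, so ainv_uint32_int64(1) == 4294967295u. The function name is
 * hypothetical.
 */
#include <stdint.h>

static uint32_t ainv_uint32_int64(int64_t aij)
{
    uint32_t z = (uint32_t) aij ;   /* GB_CASTING: truncate to 32 bits */
    return (uint32_t) (-z) ;        /* GB_OP: additive inverse, mod 2^32 */
}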
softmax-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file softmax-inl.h * \brief */ #ifndef MXNET_OPERATOR_NN_SOFTMAX_INL_H_ #define MXNET_OPERATOR_NN_SOFTMAX_INL_H_ #include <vector> #include "../mxnet_op.h" #include "../operator_common.h" #include "../tensor/broadcast_reduce_op.h" namespace mxnet { namespace op { namespace mxnet_op { struct softmax_fwd { template<typename DType> MSHADOW_XINLINE static DType Map(DType a, DType b) { return DType(expf(a)/b); } }; struct log_softmax_fwd { template<typename DType> MSHADOW_XINLINE static DType Map(DType a, DType b) { return DType(a - logf(b)); } }; template<typename OP, typename DType, int ndim> inline void Softmax(Stream<cpu> *s, DType *in, DType *out, Shape<ndim> shape, int axis) { index_t M = shape[axis]; index_t N = shape.Size()/M; Shape<ndim> stride = calc_stride(shape); Shape<ndim> sshape = shape; sshape[axis] = 1; index_t sa = stride[axis]; #pragma omp parallel for for (int i = 0; i < static_cast<int>(N); ++i) { index_t base = unravel_dot(i, sshape, stride); DType mmax = in[base]; for (index_t j = 1; j < M; ++j) { if (mmax < in[base + j*sa]) mmax = in[base + j*sa]; } DType sum = DType(0); for (index_t j = 0; j < M; ++j) { sum += std::exp(in[base + j*sa] - mmax); } for (index_t j = 0; j < M; ++j) { out[base + j*sa] = OP::Map(in[base + j*sa] - mmax, sum); } } } struct softmax_bwd { template<typename DType> MSHADOW_XINLINE static DType Map(DType ograd, DType out, DType sum) { return DType(out * (ograd - sum)); } }; struct log_softmax_bwd { template<typename DType> MSHADOW_XINLINE static DType Map(DType ograd, DType out, DType sum) { return DType(ograd - expf(out)*sum); } }; template<typename OP1, typename OP2, typename DType, int ndim> inline void SoftmaxGrad(Stream<cpu> *s, DType *out, DType *ograd, DType *igrad, Shape<ndim> shape, int axis) { index_t M = shape[axis]; index_t N = shape.Size()/M; Shape<ndim> stride = calc_stride(shape); Shape<ndim> sshape = shape; sshape[axis] = 1; index_t sa = stride[axis]; #pragma omp parallel for for (int i = 0; i < static_cast<int>(N); ++i) { index_t base = unravel_dot(i, sshape, stride); DType sum = DType(0); for (index_t j = 0; j < M; ++j) { sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]); } for (index_t j = 0; j < M; ++j) { igrad[base + j*sa] = OP2::Map(ograd[base + j*sa], out[base + j*sa], sum); } } } #ifdef __CUDACC__ template<int x_bits, typename OP, typename DType, int ndim> __global__ void softmax_compute_kernel(DType *in, DType *out, index_t M, int axis, Shape<ndim> sshape, Shape<ndim> stride) { const unsigned x_size = 1 << x_bits; __shared__ DType smem[x_size]; index_t sa = stride[axis]; index_t base = unravel_dot(blockIdx.x, sshape, stride); index_t x = threadIdx.x; red::maximum::SetInitValue(smem[x]); for (index_t i = x; i 
< M; i += x_size) { red::maximum::Reduce(smem[x], in[base + i*sa]); } __syncthreads(); cuda::Reduce1D<red::maximum, x_bits>(smem); __syncthreads(); DType smax = smem[0]; __syncthreads(); red::sum::SetInitValue(smem[x]); for (index_t i = x; i < M; i += x_size) { red::sum::Reduce(smem[x], static_cast<DType>(expf(in[base + i*sa] - smax))); } __syncthreads(); cuda::Reduce1D<red::sum, x_bits>(smem); __syncthreads(); DType ssum = smem[0]; __syncthreads(); for (index_t i = x; i < M; i += x_size) { out[base + i*sa] = OP::Map(in[base + i*sa] - smax, ssum); } } template<typename OP, typename DType, int ndim> inline void Softmax(Stream<gpu> *s, DType *in, DType *out, Shape<ndim> shape, int axis) { const int x_bits = 7; const int x_size = 1 << x_bits; index_t M = shape[axis]; index_t N = shape.Size()/M; Shape<ndim> stride = calc_stride(shape); Shape<ndim> sshape = shape; sshape[axis] = 1; softmax_compute_kernel<x_bits, OP, DType, ndim> <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>( in, out, M, axis, sshape, stride); } template<int x_bits, typename OP1, typename OP2, typename DType, int ndim> __global__ void softmax_gradient_kernel(DType *out, DType *ograd, DType *igrad, index_t M, int axis, Shape<ndim> sshape, Shape<ndim> stride) { const unsigned x_size = 1 << x_bits; __shared__ DType smem[x_size]; index_t sa = stride[axis]; index_t base = unravel_dot(blockIdx.x, sshape, stride); index_t x = threadIdx.x; red::sum::SetInitValue(smem[x]); for (index_t i = x; i < M; i += x_size) { red::sum::Reduce(smem[x], OP1::Map(ograd[base + i*sa], out[base + i*sa])); } __syncthreads(); cuda::Reduce1D<red::sum, x_bits>(smem); __syncthreads(); DType ssum = smem[0]; __syncthreads(); for (index_t i = x; i < M; i += x_size) { igrad[base + i*sa] = OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum); } } template<typename OP1, typename OP2, typename DType, int ndim> inline void SoftmaxGrad(Stream<gpu> *s, DType *out, DType *ograd, DType *igrad, Shape<ndim> shape, int axis) { const int x_bits = 7; const int x_size = 1 << x_bits; index_t M = shape[axis]; index_t N = shape.Size()/M; Shape<ndim> stride = calc_stride(shape); Shape<ndim> sshape = shape; sshape[axis] = 1; softmax_gradient_kernel<x_bits, OP1, OP2, DType, ndim> <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>( out, ograd, igrad, M, axis, sshape, stride); } #endif } // namespace mxnet_op struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> { int axis; DMLC_DECLARE_PARAMETER(SoftmaxParam) { DMLC_DECLARE_FIELD(axis).set_default(-1) .describe("The axis along which to compute softmax."); } }; template<typename xpu, typename OP> void SoftmaxCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mxnet_op; if (req[0] == kNullOp) return; CHECK_NE(req[0], kAddTo); const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); int axis = CheckAxis(param.axis, inputs[0].ndim()); TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { if (shape.ndim() == 2) { Softmax<OP>(ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<DType>(), shape.get<2>(), axis); } else { Softmax<OP>(ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<DType>(), shape.get<3>(), axis); } }); } template<typename xpu, typename OP1, typename OP2> void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const 
std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mxnet_op; if (req[0] == kNullOp) return; CHECK_NE(req[0], kAddTo); const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); int axis = CheckAxis(param.axis, inputs[0].ndim()); TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { if (shape.ndim() == 2) { SoftmaxGrad<OP1, OP2>(ctx.get_stream<xpu>(), inputs[1].dptr<DType>(), inputs[0].dptr<DType>(), outputs[0].dptr<DType>(), shape.get<2>(), axis); } else { SoftmaxGrad<OP1, OP2>(ctx.get_stream<xpu>(), inputs[1].dptr<DType>(), inputs[0].dptr<DType>(), outputs[0].dptr<DType>(), shape.get<3>(), axis); } }); } } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_NN_SOFTMAX_INL_H_
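/*
 * Illustrative sketch (plain C, not part of the header above): the CPU and
 * GPU softmax paths both use the max-subtraction trick. Since exp(a - max)
 * never exceeds 1, the row sum cannot overflow even for large logits, and
 * the result is mathematically identical to exp(a)/sum(exp(a)).
 */
#include <math.h>
#include <stddef.h>

static void softmax_1d(const float *in, float *out, size_t M)
{
    float mmax = in[0];
    for (size_t j = 1; j < M; j++)          /* row maximum */
        if (in[j] > mmax) mmax = in[j];

    float sum = 0.0f;
    for (size_t j = 0; j < M; j++)          /* normalizer */
        sum += expf(in[j] - mmax);

    for (size_t j = 0; j < M; j++)          /* normalized exponentials */
        out[j] = expf(in[j] - mmax) / sum;
}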
GB_binop__lxor_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lxor_int64) // A.*B function (eWiseMult): GB (_AemultB_08__lxor_int64) // A.*B function (eWiseMult): GB (_AemultB_02__lxor_int64) // A.*B function (eWiseMult): GB (_AemultB_04__lxor_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_int64) // A*D function (colscale): GB (_AxD__lxor_int64) // D*A function (rowscale): GB (_DxB__lxor_int64) // C+=B function (dense accum): GB (_Cdense_accumB__lxor_int64) // C+=b function (dense accum): GB (_Cdense_accumb__lxor_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_int64) // C=scalar+B GB (_bind1st__lxor_int64) // C=scalar+B' GB (_bind1st_tran__lxor_int64) // C=A+scalar GB (_bind2nd__lxor_int64) // C=A'+scalar GB (_bind2nd_tran__lxor_int64) // C type: int64_t // A type: int64_t // A pattern? 0 // B type: int64_t // B pattern? 0 // BinaryOp: cij = ((aij != 0) != (bij != 0)) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) != (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LXOR || GxB_NO_INT64 || GxB_NO_LXOR_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__lxor_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lxor_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lxor_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lxor_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lxor_int64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lxor_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int64_t alpha_scalar ; int64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int64_t *) alpha_scalar_in)) ; beta_scalar 
= (*((int64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__lxor_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lxor_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__lxor_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lxor_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lxor_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = ((x != 0) != (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lxor_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = ((aij != 0) != (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((x != 0) != (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__lxor_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t 
*restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) != (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__lxor_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
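/*
 * Illustrative sketch (not generated code): the LXOR operator above first
 * collapses both int64_t operands to booleans, so any nonzero value counts
 * as true and the result is always 0 or 1, e.g. lxor_int64(3, 0) == 1 and
 * lxor_int64(3, -7) == 0. The function name is hypothetical.
 */
#include <stdint.h>

static int64_t lxor_int64(int64_t x, int64_t y)
{
    return ((x != 0) != (y != 0)) ;   /* GB_BINOP for LXOR */
}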
colorspace.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/property.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/enhance.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/utility.h" /* Typedef declarations. */ typedef struct _TransformPacket { MagickRealType x, y, z; } TransformPacket; /* Forward declarations. */ static MagickBooleanType TransformsRGBImage(Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C o l o r s p a c e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageColorspaceType() returns the potential type of image: % sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc. % % To ensure the image type matches its potential, use SetImageColorspaceType(): % % (void) SetImageColorspaceType(image,GetImageColorspaceType(image), % exception); % % The format of the GetImageColorspaceType method is: % % ColorspaceType GetImageColorspaceType(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ColorspaceType GetImageColorspaceType(const Image *image, ExceptionInfo *exception) { ColorspaceType colorspace; ImageType type; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); colorspace=image->colorspace; type=IdentifyImageType(image,exception); if (IsGrayImageType(type)) colorspace=GRAYColorspace; return(colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + s R G B T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % sRGBTransformImage() converts the reference image from sRGB to an alternate % colorspace. The transformation matrices are not the standard ones: the % weights are rescaled to normalize the range of the transformed values to % be [0..QuantumRange]. % % The format of the sRGBTransformImage method is: % % MagickBooleanType sRGBTransformImage(Image *image, % const ColorspaceType colorspace,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace to transform the image to. % % o exception: return any errors or warnings in this structure. % */ static inline void ConvertAdobe98ToRGB(const double r,const double g, const double b,double *red,double *green,double *blue) { double X, Y, Z; ConvertAdobe98ToXYZ(r,g,b,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline void ConvertDisplayP3ToRGB(const double r,const double g, const double b,double *red,double *green,double *blue) { double X, Y, Z; ConvertDisplayP3ToXYZ(r,g,b,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline void ConvertProPhotoToRGB(const double r,const double g, const double b,double *red,double *green,double *blue) { double X, Y, Z; ConvertProPhotoToXYZ(r,g,b,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline void ConvertRGBToCMY(const double red,const double green, const double blue,double *cyan,double *magenta,double *yellow) { *cyan=QuantumScale*(QuantumRange-red); *magenta=QuantumScale*(QuantumRange-green); *yellow=QuantumScale*(QuantumRange-blue); } static void ConvertRGBToAdobe98(const double red,const double green, const double blue,double *r,double *g,double *b) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToAdobe98(X,Y,Z,r,g,b); } static void ConvertRGBToDisplayP3(const double red,const double green, const double blue,double *r,double *g,double *b) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToDisplayP3(X,Y,Z,r,g,b); } static void ConvertRGBToProPhoto(const double red,const double green, const double blue,double *r,double *g,double *b) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToProPhoto(X,Y,Z,r,g,b); } static inline void ConvertXYZToLMS(const double x,const double y, const double z,double *L,double *M,double *S) { *L=0.7328*x+0.4296*y-0.1624*z; *M=(-0.7036*x+1.6975*y+0.0061*z); *S=0.0030*x+0.0136*y+0.9834*z; } static void ConvertRGBToLMS(const double red,const double green, const double blue,double *L,double *M,double *S) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLMS(X,Y,Z,L,M,S); } static void ConvertRGBToLuv(const double red,const double green, const double blue,const IlluminantType illuminant,double *L,double *u, double *v) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLuv(X,Y,Z,illuminant,L,u,v);
}

static void ConvertRGBToxyY(const double red,const double green,
  const double blue,double *low_x,double *low_y,double *cap_Y)
{
  double
    gamma,
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  gamma=PerceptibleReciprocal(X+Y+Z);
  *low_x=gamma*X;
  *low_y=gamma*Y;
  *cap_Y=Y;
}

static inline void ConvertXYZToJzazbz(const double X,const double Y,
  const double Z,const double white_luminance,double *Jz,double *az,double *bz)
{
#define Jzazbz_b  1.15  /* https://observablehq.com/@jrus/jzazbz */
#define Jzazbz_g  0.66
#define Jzazbz_c1  (3424.0/4096.0)
#define Jzazbz_c2  (2413.0/128.0)
#define Jzazbz_c3  (2392.0/128.0)
#define Jzazbz_n  (2610.0/16384.0)
#define Jzazbz_p  (1.7*2523.0/32.0)
#define Jzazbz_d  (-0.56)
#define Jzazbz_d0  (1.6295499532821566e-11)

  double
    gamma,
    Iz,
    L,
    Lp,
    M,
    Mp,
    S,
    Sp,
    Xp,
    Yp,
    Zp;

  Xp=(Jzazbz_b*X-Z*(Jzazbz_b-1));
  Yp=(Jzazbz_g*Y-X*(Jzazbz_g-1));
  Zp=Z;
  L=0.41478972*Xp+0.579999*Yp+0.0146480*Zp;
  M=(-0.2015100)*Xp+1.120649*Yp+0.0531008*Zp;
  S=(-0.0166008)*Xp+0.264800*Yp+0.6684799*Zp;
  gamma=pow(L*PerceptibleReciprocal(white_luminance),Jzazbz_n);
  Lp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
  gamma=pow(M*PerceptibleReciprocal(white_luminance),Jzazbz_n);
  Mp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
  gamma=pow(S*PerceptibleReciprocal(white_luminance),Jzazbz_n);
  Sp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
  Iz=0.5*Lp+0.5*Mp;
  *az=3.52400*Lp-4.066708*Mp+0.542708*Sp+0.5;
  *bz=0.199076*Lp+1.096799*Mp-1.295875*Sp+0.5;
  *Jz=((Jzazbz_d+1.0)*Iz)/(Jzazbz_d*Iz+1.0)-Jzazbz_d0;
}

static inline void ConvertJzazbzToXYZ(const double Jz,const double az,
  const double bz,const double white_luminance,double *X,double *Y,double *Z)
{
  double
    azz,
    bzz,
    gamma,
    Iz,
    L,
    Lp,
    M,
    Mp,
    S,
    Sp,
    Xp,
    Yp,
    Zp;

  gamma=Jz+Jzazbz_d0;
  Iz=gamma/(Jzazbz_d-Jzazbz_d*gamma+1.0);
  azz=az-0.5;
  bzz=bz-0.5;
  Lp=Iz+0.138605043271539*azz+0.0580473161561189*bzz;
  Mp=Iz-0.138605043271539*azz-0.0580473161561189*bzz;
  Sp=Iz-0.0960192420263189*azz-0.811891896056039*bzz;
  gamma=pow(Lp,1.0/Jzazbz_p);
  L=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
    Jzazbz_n);
  gamma=pow(Mp,1.0/Jzazbz_p);
  M=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
    Jzazbz_n);
  gamma=pow(Sp,1.0/Jzazbz_p);
  S=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
    Jzazbz_n);
  Xp=1.92422643578761*L-1.00479231259537*M+0.037651404030618*S;
  Yp=0.350316762094999*L+0.726481193931655*M-0.065384422948085*S;
  Zp=(-0.0909828109828476)*L-0.312728290523074*M+1.52276656130526*S;
  *X=(Xp+(Jzazbz_b-1.0)*Zp)/Jzazbz_b;
  *Y=(Yp+(Jzazbz_g-1.0)**X)/Jzazbz_g;
  *Z=Zp;
}

static void ConvertRGBToJzazbz(const double red,const double green,
  const double blue,const double white_luminance,double *Jz,double *az,
  double *bz)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToJzazbz(X,Y,Z,white_luminance,Jz,az,bz);
}

static void ConvertJzazbzToRGB(const double Jz,const double az,
  const double bz,const double white_luminance,double *red,double *green,
  double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertJzazbzToXYZ(Jz,az,bz,white_luminance,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

static void ConvertRGBToYDbDr(const double red,const double green,
  const double blue,double *Y,double *Db,double *Dr)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5;
  *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5;
}

static void ConvertRGBToYIQ(const double red,const double green,const
double blue,double *Y,double *I,double *Q) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5; *Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5; } static void ConvertRGBToYPbPr(const double red,const double green, const double blue,double *Y,double *Pb,double *Pr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5; *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5; } static void ConvertRGBToYCbCr(const double red,const double green, const double blue,double *Y,double *Cb,double *Cr) { ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr); } static void ConvertRGBToYUV(const double red,const double green, const double blue,double *Y,double *U,double *V) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5; *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5; } static MagickBooleanType sRGBTransformImage(Image *image, const ColorspaceType colorspace,ExceptionInfo *exception) { #define sRGBTransformImageTag "RGBTransform/Image" CacheView *image_view; const char *artifact; IlluminantType illuminant = D65Illuminant; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo primary_info; ssize_t i; ssize_t y; TransformPacket *x_map, *y_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(colorspace != sRGBColorspace); assert(colorspace != TransparentColorspace); assert(colorspace != UndefinedColorspace); artifact=GetImageArtifact(image,"color:illuminant"); if (artifact != (const char *) NULL) { illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions, MagickFalse,artifact); if ((ssize_t) illuminant < 0) illuminant=UndefinedIlluminant; } status=MagickTrue; progress=0; switch (colorspace) { case CMYKColorspace: { PixelInfo zero; /* Convert RGB to CMYK colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); ConvertRGBToCMYK(&pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->type=image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType : ColorSeparationAlphaType; if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LinearGRAYColorspace: { /* Transform image from sRGB to GRAY. 
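        The gray value is the ITU-R BT.709 luma of the gamma-decoded
        channels, 0.212656*R+0.715158*G+0.072186*B, computed in linear light
        via DecodePixelGamma() below.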
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType gray; gray=0.212656*DecodePixelGamma(GetPixelRed(image,q))+0.715158* DecodePixelGamma(GetPixelGreen(image,q))+0.072186* DecodePixelGamma(GetPixelBlue(image,q)); SetPixelGray(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case GRAYColorspace: { /* Transform image from sRGB to GRAY. */ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType gray; gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+ 0.072186*GetPixelBlue(image,q); SetPixelGray(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case CMYColorspace: case Adobe98Colorspace: case DisplayP3Colorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case JzazbzColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case ProPhotoColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { const char *value; double white_luminance; /* Transform image from sRGB to target colorspace. 
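        Each pixel is converted independently: the raw red, green, and blue
        quantums are routed through the ConvertRGBTo*() helper that matches
        the target colorspace, and the three normalized results are scaled
        by QuantumRange and clamped back into the pixel.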
*/ white_luminance=10000.0; value=GetImageProperty(image,"white-luminance",exception); if (value != (const char *) NULL) white_luminance=StringToDouble(value,(char **) NULL); if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); switch (colorspace) { case Adobe98Colorspace: { ConvertRGBToAdobe98(red,green,blue,&X,&Y,&Z); break; } case CMYColorspace: { ConvertRGBToCMY(red,green,blue,&X,&Y,&Z); break; } case DisplayP3Colorspace: { ConvertRGBToDisplayP3(red,green,blue,&X,&Y,&Z); break; } case HCLColorspace: { ConvertRGBToHCL(red,green,blue,&X,&Y,&Z); break; } case HCLpColorspace: { ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z); break; } case HSBColorspace: { ConvertRGBToHSB(red,green,blue,&X,&Y,&Z); break; } case HSIColorspace: { ConvertRGBToHSI(red,green,blue,&X,&Y,&Z); break; } case HSLColorspace: { ConvertRGBToHSL(red,green,blue,&X,&Y,&Z); break; } case HSVColorspace: { ConvertRGBToHSV(red,green,blue,&X,&Y,&Z); break; } case HWBColorspace: { ConvertRGBToHWB(red,green,blue,&X,&Y,&Z); break; } case JzazbzColorspace: { ConvertRGBToJzazbz(red,green,blue,white_luminance,&X,&Y,&Z); break; } case LabColorspace: { ConvertRGBToLab(red,green,blue,illuminant,&X,&Y,&Z); break; } case LCHColorspace: case LCHabColorspace: { ConvertRGBToLCHab(red,green,blue,illuminant,&X,&Y,&Z); break; } case LCHuvColorspace: { ConvertRGBToLCHuv(red,green,blue,illuminant,&X,&Y,&Z); break; } case LMSColorspace: { ConvertRGBToLMS(red,green,blue,&X,&Y,&Z); break; } case LuvColorspace: { ConvertRGBToLuv(red,green,blue,illuminant,&X,&Y,&Z); break; } case ProPhotoColorspace: { ConvertRGBToProPhoto(red,green,blue,&X,&Y,&Z); break; } case xyYColorspace: { ConvertRGBToxyY(red,green,blue,&X,&Y,&Z); break; } case XYZColorspace: { ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); break; } case YCbCrColorspace: { ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z); break; } case YDbDrColorspace: { ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z); break; } case YIQColorspace: { ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z); break; } case YPbPrColorspace: { ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z); break; } case YUVColorspace: { ConvertRGBToYUV(red,green,blue,&X,&Y,&Z); break; } default: { X=QuantumScale*red; Y=QuantumScale*green; Z=QuantumScale*blue; break; } } SetPixelRed(image,ClampToQuantum(QuantumRange*X),q); SetPixelGreen(image,ClampToQuantum(QuantumRange*Y),q); SetPixelBlue(image,ClampToQuantum(QuantumRange*Z),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { #define DisplayGamma (1.0/1.7) 
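      /*
        Cineon-style log encoding: reference black and white default to code
        values 95 and 685 on the traditional 10-bit (0..1023) printing-density
        scale; the 0.002 factor used below is the density step per code value.
      */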
#define FilmGamma 0.6 #define ReferenceBlack 95.0 #define ReferenceWhite 685.0 const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform RGB to Log colorspace. */ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma",exception); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma",exception); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black",exception); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white",exception); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002* PerceptibleReciprocal(film_gamma)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) logmap[i]=ScaleMapToQuantum((double) (MaxMap*(reference_white+ log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002* PerceptibleReciprocal(film_gamma)))/1024.0)); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=(double) DecodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=(double) DecodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=(double) DecodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,logmap[ScaleQuantumToMap(ClampToQuantum(red))],q); SetPixelGreen(image,logmap[ScaleQuantumToMap(ClampToQuantum(green))], q); SetPixelBlue(image,logmap[ScaleQuantumToMap(ClampToQuantum(blue))],q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform image from sRGB to linear RGB. 
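        DecodePixelGamma() applies the inverse sRGB transfer function to each
        channel, leaving the pixel values in linear light.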
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red; red=DecodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=DecodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=DecodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(&primary_info,0,sizeof(primary_info)); switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B I and Q, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.33333*(double) i); x_map[i].y=(MagickRealType) (0.50000*(double) i); x_map[i].z=(MagickRealType) (-0.25000*(double) i); y_map[i].x=(MagickRealType) (0.33334*(double) i); y_map[i].y=(MagickRealType) (0.00000*(double) i); y_map[i].z=(MagickRealType) (0.50000*(double) i); z_map[i].x=(MagickRealType) (0.33333*(double) i); z_map[i].y=(MagickRealType) (-0.50000*(double) i); z_map[i].z=(MagickRealType) (-0.25000*(double) i); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.601): Y = 0.2988390*R+0.5868110*G+0.1143500*B Cb= -0.1687367*R-0.3312640*G+0.5000000*B Cr= 0.5000000*R-0.4186880*G-0.0813120*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
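        The primary_info.y and primary_info.z offsets defined below supply
        this half-range bias after the matrix multiply.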
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); x_map[i].y=(MagickRealType) (-0.1687367*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); y_map[i].y=(MagickRealType) (-0.331264*(double) i); y_map[i].z=(MagickRealType) (-0.418688*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); z_map[i].z=(MagickRealType) (-0.081312*(double) i); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.709): Y = 0.212656*R+0.715158*G+0.072186*B Cb= -0.114572*R-0.385428*G+0.500000*B Cr= 0.500000*R-0.454153*G-0.045847*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212656*(double) i); x_map[i].y=(MagickRealType) (-0.114572*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].x=(MagickRealType) (0.715158*(double) i); y_map[i].y=(MagickRealType) (-0.385428*(double) i); y_map[i].z=(MagickRealType) (-0.454153*(double) i); z_map[i].x=(MagickRealType) (0.072186*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); z_map[i].z=(MagickRealType) (-0.045847*(double) i); } break; } case YCCColorspace: { /* Initialize YCC tables: Y = 0.298839*R+0.586811*G+0.114350*B C1= -0.298839*R-0.586811*G+0.88600*B C2= 0.70100*R-0.586811*G-0.114350*B YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156)); primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137)); for (i=0; i <= (ssize_t) (0.018*MaxMap); i++) { x_map[i].x=0.005382*i; x_map[i].y=(-0.003296)*i; x_map[i].z=0.009410*i; y_map[i].x=0.010566*i; y_map[i].y=(-0.006471)*i; y_map[i].z=(-0.007880)*i; z_map[i].x=0.002052*i; z_map[i].y=0.009768*i; z_map[i].z=(-0.001530)*i; } for ( ; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.298839*(1.099*i-0.099); x_map[i].y=(-0.298839)*(1.099*i-0.099); x_map[i].z=0.70100*(1.099*i-0.099); y_map[i].x=0.586811*(1.099*i-0.099); y_map[i].y=(-0.586811)*(1.099*i-0.099); y_map[i].z=(-0.586811)*(1.099*i-0.099); z_map[i].x=0.114350*(1.099*i-0.099); z_map[i].y=0.88600*(1.099*i-0.099); z_map[i].z=(-0.114350)*(1.099*i-0.099); } break; } default: { /* Linear conversion tables. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); x_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].x=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; z_map[i].y=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert from sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. 
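        Each output channel is assembled from one entry of each lookup table
        (x_map indexed by red, y_map by green, z_map by blue) plus the
        primary_info bias.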
      */
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        Quantum
          *magick_restrict q;

        ssize_t
          x;

        unsigned int
          blue,
          green,
          red;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelRed(image,q)));
          green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelGreen(image,q)));
          blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelBlue(image,q)));
          pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+
            primary_info.x;
          pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+
            primary_info.y;
          pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+
            primary_info.z;
          SetPixelRed(image,ScaleMapToQuantum(pixel.red),q);
          SetPixelGreen(image,ScaleMapToQuantum(pixel.green),q);
          SetPixelBlue(image,ScaleMapToQuantum(pixel.blue),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(image,sRGBTransformImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      break;
    }
    case PseudoClass:
    {
      unsigned int
        blue,
        green,
        red;

      /*
        Convert PseudoClass image.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        PixelInfo
          pixel;

        red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
        green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
        blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
        pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
        pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
        pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
        image->colormap[i].red=(double) ScaleMapToQuantum(pixel.red);
        image->colormap[i].green=(double) ScaleMapToQuantum(pixel.green);
        image->colormap[i].blue=(double) ScaleMapToQuantum(pixel.blue);
      }
      (void) SyncImage(image,exception);
      break;
    }
  }
  /*
    Relinquish resources.
  */
  z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
  y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
  x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
  if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
    return(MagickFalse);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C o l o r s p a c e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColorspace() sets the colorspace member of the Image structure.
%
%  The format of the SetImageColorspace method is:
%
%      MagickBooleanType SetImageColorspace(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
%    o exception: return any errors or warnings in this structure.
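%
%  Note that SetImageColorspace() only updates the image metadata (colorspace,
%  gamma, chromaticity, and rendering intent); to convert the pixel values
%  themselves, use TransformImageColorspace().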
% */ MagickExport MagickBooleanType SetImageColorspace(Image *image, const ColorspaceType colorspace,ExceptionInfo *exception) { ImageType type; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (image->colorspace == colorspace) return(MagickTrue); image->colorspace=colorspace; image->rendering_intent=UndefinedIntent; image->gamma=1.000/2.200; (void) memset(&image->chromaticity,0,sizeof(image->chromaticity)); type=image->type; if (IsGrayColorspace(colorspace) != MagickFalse) { if (colorspace == LinearGRAYColorspace) image->gamma=1.000; type=GrayscaleType; } else if ((IsRGBColorspace(colorspace) != MagickFalse) || (colorspace == XYZColorspace) || (colorspace == xyYColorspace)) image->gamma=1.000; else { image->rendering_intent=PerceptualIntent; image->chromaticity.red_primary.x=0.6400; image->chromaticity.red_primary.y=0.3300; image->chromaticity.red_primary.z=0.0300; image->chromaticity.green_primary.x=0.3000; image->chromaticity.green_primary.y=0.6000; image->chromaticity.green_primary.z=0.1000; image->chromaticity.blue_primary.x=0.1500; image->chromaticity.blue_primary.y=0.0600; image->chromaticity.blue_primary.z=0.7900; image->chromaticity.white_point.x=0.3127; image->chromaticity.white_point.y=0.3290; image->chromaticity.white_point.z=0.3583; } status=SyncImagePixelCache(image,exception); image->type=type; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageGray() returns MagickTrue if all the pixels in the image have the % same red, green, and blue intensities and changes the type of the image to % bi-level or grayscale. % % The format of the SetImageGray method is: % % MagickBooleanType SetImageGray(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageGray(Image *image, ExceptionInfo *exception) { const char *value; ImageType type; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (IsImageGray(image) != MagickFalse) return(MagickTrue); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(MagickFalse); value=GetImageProperty(image,"colorspace:auto-grayscale",exception); if (IsStringFalse(value) != MagickFalse) return(MagickFalse); type=IdentifyImageGray(image,exception); if (type == UndefinedType) return(MagickFalse); image->colorspace=GRAYColorspace; if (SyncImagePixelCache(image,exception) == MagickFalse) return(MagickFalse); image->type=type; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageMonochrome() returns MagickTrue if all the pixels in the image have % the same red, green, and blue intensities and the intensity is either % 0 or QuantumRange and changes the type of the image to bi-level. 
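%
%  If any pixel intensity falls between the two extremes, the image is left
%  unchanged and MagickFalse is returned.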
% % The format of the SetImageMonochrome method is: % % MagickBooleanType SetImageMonochrome(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageMonochrome(Image *image, ExceptionInfo *exception) { MagickBooleanType is_bilevel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (IsImageMonochrome(image) != MagickFalse) return(MagickTrue); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(MagickFalse); is_bilevel=IdentifyImageMonochrome(image,exception); if (is_bilevel == MagickFalse) return(MagickFalse); image->colorspace=GRAYColorspace; if (SyncImagePixelCache((Image *) image,exception) == MagickFalse) return(MagickFalse); image->type=BilevelType; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImageColorspace() transforms an image colorspace, changing the % image data to reflect the new colorspace. % % The format of the TransformImageColorspace method is: % % MagickBooleanType TransformImageColorspace(Image *image, % const ColorspaceType colorspace,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType TransformImageColorspace(Image *image, const ColorspaceType colorspace,ExceptionInfo *exception) { MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == colorspace) return(SetImageColorspace(image,colorspace,exception)); (void) DeleteImageProfile(image,"icc"); (void) DeleteImageProfile(image,"icm"); if (colorspace == UndefinedColorspace) return(SetImageColorspace(image,colorspace,exception)); /* Convert the reference image from an alternate colorspace to sRGB. */ if (IssRGBColorspace(colorspace) != MagickFalse) return(TransformsRGBImage(image,exception)); status=MagickTrue; if (IssRGBColorspace(image->colorspace) == MagickFalse) status=TransformsRGBImage(image,exception); if (status == MagickFalse) return(status); /* Convert the reference image from sRGB to an alternate colorspace. */ if (sRGBTransformImage(image,colorspace,exception) == MagickFalse) status=MagickFalse; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + T r a n s f o r m s R G B I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformsRGBImage() converts the reference image from an alternate % colorspace to sRGB. The transformation matrices are not the standard ones: % the weights are rescaled to normalize the range of the transformed values % to be [0..QuantumRange]. % % The format of the TransformsRGBImage method is: % % MagickBooleanType TransformsRGBImage(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o exception: return any errors or warnings in this structure. % */ static inline void ConvertCMYToRGB(const double cyan,const double magenta, const double yellow,double *red,double *green,double *blue) { *red=QuantumRange*(1.0-cyan); *green=QuantumRange*(1.0-magenta); *blue=QuantumRange*(1.0-yellow); } static inline void ConvertLMSToXYZ(const double L,const double M,const double S, double *X,double *Y,double *Z) { *X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S; *Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S; *Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S; } static inline void ConvertLMSToRGB(const double L,const double M, const double S,double *red,double *green,double *blue) { double X, Y, Z; ConvertLMSToXYZ(L,M,S,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline void ConvertLuvToRGB(const double L,const double u, const double v,const IlluminantType illuminant,double *red,double *green, double *blue) { double X, Y, Z; ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,illuminant,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline ssize_t RoundToYCC(const double value) { if (value <= 0.0) return(0); if (value >= 1388.0) return(1388); return((ssize_t) (value+0.5)); } static inline void ConvertLabToRGB(const double L,const double a, const double b,const IlluminantType illuminant,double *red,double *green, double *blue) { double X, Y, Z; ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),illuminant,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline void ConvertxyYToRGB(const double low_x,const double low_y, const double cap_Y,double *red,double *green,double *blue) { double gamma, X, Y, Z; gamma=PerceptibleReciprocal(low_y); X=gamma*cap_Y*low_x; Y=cap_Y; Z=gamma*cap_Y*(1.0-low_x-low_y); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr, double *red,double *green,double *blue) { *red=QuantumRange*(0.99999999999914679361*Y-1.2188941887145875e-06*(Pb-0.5)+ 1.4019995886561440468*(Pr-0.5)); *green=QuantumRange*(0.99999975910502514331*Y-0.34413567816504303521*(Pb-0.5)- 0.71413649331646789076*(Pr-0.5)); *blue=QuantumRange*(1.00000124040004623180*Y+1.77200006607230409200*(Pb-0.5)+ 2.1453384174593273e-06*(Pr-0.5)); } static void ConvertYCbCrToRGB(const double Y,const double Cb, const double Cr,double *red,double *green,double *blue) { ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue); } static void ConvertYIQToRGB(const double Y,const double I,const double Q, double *red,double *green,double *blue) { *red=QuantumRange*(Y+0.9562957197589482261*(I-0.5)+0.6210244164652610754* (Q-0.5)); *green=QuantumRange*(Y-0.2721220993185104464*(I-0.5)-0.6473805968256950427* (Q-0.5)); *blue=QuantumRange*(Y-1.1069890167364901945*(I-0.5)+1.7046149983646481374* (Q-0.5)); } static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr, double *red,double *green,double *blue) { *red=QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)- 0.52591263066186533*(Dr-0.5)); *green=QuantumRange*(Y-0.12913289889050927*(Db-0.5)+ 0.26789932820759876*(Dr-0.5)); *blue=QuantumRange*(Y+0.66467905997895482*(Db-0.5)- 7.9202543533108e-05*(Dr-0.5)); } static void ConvertYUVToRGB(const double Y,const double U,const double V, double *red,double *green,double *blue) { *red=QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+1.1398279671717170825* (V-0.5)); *green=QuantumRange*(Y-0.3946101641414141437*(U-0.5)-0.5805003156565656797* (V-0.5)); 
*blue=QuantumRange*(Y+2.0319996843434342537*(U-0.5)-4.813762626262513e-04* (V-0.5)); } static MagickBooleanType TransformsRGBImage(Image *image, ExceptionInfo *exception) { #define TransformsRGBImageTag "Transform/Image" static const float YCCMap[1389] = { 0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f, 0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f, 0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 
0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f, 0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f, 0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 
0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 
0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 0.912824f, 
0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f, 0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000f }; CacheView *image_view; const char *artifact; IlluminantType illuminant = D65Illuminant; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); artifact=GetImageArtifact(image,"color:illuminant"); if (artifact != (const char *) NULL) { illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions, MagickFalse,artifact); if ((ssize_t) illuminant < 0) illuminant=UndefinedIlluminant; } status=MagickTrue; progress=0; switch (image->colorspace) { case CMYKColorspace: { PixelInfo zero; /* Transform image from CMYK to sRGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); ConvertCMYKToRGB(&pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LinearGRAYColorspace: { /* Transform linear GRAY to sRGB colorspace. 
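        Each channel is first made non-linear with EncodePixelGamma(), then
        the BT.709-weighted sum is stored in the red, green, and blue
        channels.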
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
        return(MagickFalse);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=(ssize_t) image->columns; x != 0; x--)
        {
          MagickRealType
            gray;

          gray=0.212656*EncodePixelGamma(GetPixelRed(image,q))+0.715158*
            EncodePixelGamma(GetPixelGreen(image,q))+0.072186*
            EncodePixelGamma(GetPixelBlue(image,q));
          SetPixelRed(image,ClampToQuantum(gray),q);
          SetPixelGreen(image,ClampToQuantum(gray),q);
          SetPixelBlue(image,ClampToQuantum(gray),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case GRAYColorspace:
    {
      /*
        Transform GRAY to sRGB colorspace.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
        return(MagickFalse);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=(ssize_t) image->columns; x != 0; x--)
        {
          MagickRealType
            gray;

          gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
            0.072186*GetPixelBlue(image,q);
          SetPixelRed(image,ClampToQuantum(gray),q);
          SetPixelGreen(image,ClampToQuantum(gray),q);
          SetPixelBlue(image,ClampToQuantum(gray),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case Adobe98Colorspace:
    case CMYColorspace:
    case DisplayP3Colorspace:
    case HCLColorspace:
    case HCLpColorspace:
    case HSBColorspace:
    case HSIColorspace:
    case HSLColorspace:
    case HSVColorspace:
    case HWBColorspace:
    case JzazbzColorspace:
    case LabColorspace:
    case LCHColorspace:
    case LCHabColorspace:
    case LCHuvColorspace:
    case LMSColorspace:
    case LuvColorspace:
    case ProPhotoColorspace:
    case xyYColorspace:
    case XYZColorspace:
    case YCbCrColorspace:
    case YDbDrColorspace:
    case YIQColorspace:
    case YPbPrColorspace:
    case YUVColorspace:
    {
      const char
        *value;

      double
        white_luminance;

      /*
        Transform image from source colorspace to sRGB.
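        This is the inverse of the per-pixel switch in sRGBTransformImage():
        the stored quantums are scaled to [0..1], routed through the matching
        Convert*ToRGB() helper, and the resulting sRGB channels are clamped
        back into the pixel.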
*/ white_luminance=10000.0; value=GetImageProperty(image,"white-luminance",exception); if (value != (const char *) NULL) white_luminance=StringToDouble(value,(char **) NULL); if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; X=QuantumScale*GetPixelRed(image,q); Y=QuantumScale*GetPixelGreen(image,q); Z=QuantumScale*GetPixelBlue(image,q); switch (image->colorspace) { case Adobe98Colorspace: { ConvertAdobe98ToRGB(X,Y,Z,&red,&green,&blue); break; } case CMYColorspace: { ConvertCMYToRGB(X,Y,Z,&red,&green,&blue); break; } case DisplayP3Colorspace: { ConvertDisplayP3ToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLColorspace: { ConvertHCLToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue); break; } case HSBColorspace: { ConvertHSBToRGB(X,Y,Z,&red,&green,&blue); break; } case HSIColorspace: { ConvertHSIToRGB(X,Y,Z,&red,&green,&blue); break; } case HSLColorspace: { ConvertHSLToRGB(X,Y,Z,&red,&green,&blue); break; } case HSVColorspace: { ConvertHSVToRGB(X,Y,Z,&red,&green,&blue); break; } case HWBColorspace: { ConvertHWBToRGB(X,Y,Z,&red,&green,&blue); break; } case JzazbzColorspace: { ConvertJzazbzToRGB(X,Y,Z,white_luminance,&red,&green,&blue); break; } case LabColorspace: { ConvertLabToRGB(X,Y,Z,illuminant,&red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X,Y,Z,illuminant,&red,&green,&blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X,Y,Z,illuminant,&red,&green,&blue); break; } case LMSColorspace: { ConvertLMSToRGB(X,Y,Z,&red,&green,&blue); break; } case LuvColorspace: { ConvertLuvToRGB(X,Y,Z,illuminant,&red,&green,&blue); break; } case ProPhotoColorspace: { ConvertProPhotoToRGB(X,Y,Z,&red,&green,&blue); break; } case xyYColorspace: { ConvertxyYToRGB(X,Y,Z,&red,&green,&blue); break; } case XYZColorspace: { ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); break; } case YCbCrColorspace: { ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue); break; } case YIQColorspace: { ConvertYIQToRGB(X,Y,Z,&red,&green,&blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue); break; } case YUVColorspace: { ConvertYUVToRGB(X,Y,Z,&red,&green,&blue); break; } default: { red=QuantumRange*X; green=QuantumRange*Y; blue=QuantumRange*Z; break; } } SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, 
film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. */ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma",exception); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma",exception); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black",exception); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white",exception); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002* PerceptibleReciprocal(film_gamma)); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum(QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002* PerceptibleReciprocal(film_gamma))-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=(double) logmap[ScaleQuantumToMap(GetPixelRed(image,q))]; green=(double) logmap[ScaleQuantumToMap(GetPixelGreen(image,q))]; blue=(double) logmap[ScaleQuantumToMap(GetPixelBlue(image,q))]; SetPixelRed(image,ClampToQuantum(EncodePixelGamma((MagickRealType) red)),q); SetPixelGreen(image,ClampToQuantum(EncodePixelGamma((MagickRealType) green)),q); SetPixelBlue(image,ClampToQuantum(EncodePixelGamma((MagickRealType) blue)),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform linear RGB to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=EncodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=EncodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=EncodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (image->colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) (0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].x=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].y=(MagickRealType) (0.5*0.00000*(2.0*(double) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*1.33333*(2.0*(double) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) (-0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].z=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*(double) i; y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap); z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap); x_map[i].y=0.99999975910502514331*(double) i; y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap); z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap); x_map[i].z=1.00000124040004623180*(double) i; y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap); z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*i); y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*i); y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*i); y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*i-MaxMap)); z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000*(double) i); y_map[i].x=(MagickRealType) 0.0000000; z_map[i].x=(MagickRealType) (1.8215000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000*(double) i); y_map[i].y=(MagickRealType) (-0.4302726*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) (-0.9271435*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000*(double) i); y_map[i].z=(MagickRealType) (2.2179000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) 0.0000000; } break; } default: { /* Linear conversion tables. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. 
*/ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(image,q)); green=ScaleQuantumToMap(GetPixelGreen(image,q)); blue=ScaleQuantumToMap(GetPixelBlue(image,q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } SetPixelRed(image,ClampToQuantum(pixel.red),q); SetPixelGreen(image,ClampToQuantum(pixel.green),q); SetPixelBlue(image,ClampToQuantum(pixel.blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,TransformsRGBImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { /* Convert PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; size_t blue, green, red; red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red=(double) ClampToQuantum(pixel.red); image->colormap[i].green=(double) ClampToQuantum(pixel.green); image->colormap[i].blue=(double) ClampToQuantum(pixel.blue); } (void) SyncImage(image,exception); break; } } /* Relinquish resources. 
*/ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(MagickTrue); }
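/*
  Standalone sketch of the lookup-table scheme used by TransformsRGBImage()
  above.  Any 3x3 linear transform, e.g. the Rec.601 YCbCr -> RGB matrix
  (R = Y + 1.402*Cr; G = Y - 0.344136*Cb - 0.714136*Cr; B = Y + 1.772*Cb),
  can be precomputed into three tables indexed by the quantized channel
  value, so the per-pixel work becomes three lookups and two adds per
  channel.  MAXMAP, Packet, and main() here are self-contained stand-ins
  for MaxMap, TransformPacket, and the real conversion, not MagickCore API.
*/
#include <stdio.h>

#define MAXMAP 255 /* stand-in for ImageMagick's MaxMap */

typedef struct { double x, y, z; } Packet; /* mirrors TransformPacket */

static Packet x_map[MAXMAP+1], y_map[MAXMAP+1], z_map[MAXMAP+1];

int main(void)
{
  int i;
  /* Cb and Cr are stored biased around mid-scale, hence 0.5*(2*i-MAXMAP). */
  for (i = 0; i <= MAXMAP; i++) {
    x_map[i].x = (double) i;
    y_map[i].x = 0.0;
    z_map[i].x = 0.5*1.402000*(2.0*i-MAXMAP);
    x_map[i].y = (double) i;
    y_map[i].y = 0.5*(-0.344136)*(2.0*i-MAXMAP);
    z_map[i].y = 0.5*(-0.714136)*(2.0*i-MAXMAP);
    x_map[i].z = (double) i;
    y_map[i].z = 0.5*1.772000*(2.0*i-MAXMAP);
    z_map[i].z = 0.0;
  }
  {
    /* per pixel: index the three tables and sum, exactly as the
       DirectClass loop above does after ScaleQuantumToMap() */
    int Y = 180, Cb = 100, Cr = 150;
    double R = x_map[Y].x + y_map[Cb].x + z_map[Cr].x;
    double G = x_map[Y].y + y_map[Cb].y + z_map[Cr].y;
    double B = x_map[Y].z + y_map[Cb].z + z_map[Cr].z;
    printf("YCbCr(%d,%d,%d) -> RGB(%.2f, %.2f, %.2f)\n", Y, Cb, Cr, R, G, B);
  }
  return 0;
}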
Loop.c
#include "Loop.h" #include "omp.h" #include <stdio.h> #include <stdlib.h> void loop_free( void *p) { free(p); } void* loop_malloc( unsigned n) { void *i = malloc( sizeof(int) * n ); return i; } void loop_exec( void(*loop_kernal)(void* , unsigned, unsigned), void* arg, unsigned arg_bytes, unsigned n) { #pragma omp parrallel shared( arg ) { #pragma omp for for( unsigned i = 0 ; i<n ; i++) { (*loop_kernal)(arg, arg_bytes, i); //Problem of how do I know where I am??? } } } void GENDATA( void *p) { unsigned a[] = {1,1}; p=a; }
pyfr_gemm_cm.c
/******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved.                      *
* This file is part of the LIBXSMM library.                                   *
*                                                                             *
* For information on the license, see the LICENSE file.                       *
* Further information: https://github.com/libxsmm/libxsmm/                    *
* SPDX-License-Identifier: BSD-3-Clause                                       *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <stdlib.h>
#include <assert.h>
#include <stdio.h>
#include <math.h>

#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
# include <mkl.h>
#else
/* reference GEMM used when MKL is not available */
void my_dgemm( const int* M, const int* N, const int* K,
               const double* alpha, const double* a, const int* LDA,
               const double* b, const int* LDB,
               const double* beta, double* c, const int* LDC ) {
  const int my_M = *M;
  const int my_N = *N;
  const int my_K = *K;
  const int my_LDA = *LDA;
  const int my_LDB = *LDB;
  const int my_LDC = *LDC;
  /* keep alpha/beta in double: this routine is the baseline for the
     max-error check, so truncating to float would pollute the comparison */
  const double my_alpha = *alpha;
  const double my_beta = *beta;
  int m = 0, n = 0, k = 0;

  for ( n = 0; n < my_N; ++n ) {
    for ( m = 0; m < my_M; ++m ) {
      c[(n * my_LDC) + m] = my_beta * c[(n * my_LDC) + m];
      for ( k = 0; k < my_K; ++k ) {
        c[(n * my_LDC) + m] += my_alpha * a[(k * my_LDA) + m] * b[(n * my_LDB) + k];
      }
    }
  }
}
#endif

int main(int argc, char *argv[]) {
  int n,m,k;
  int lda,ldb,ldc;
  double* a;
  double* b;
  double* c1;
  double* c2;
  libxsmm_timer_tickint l_start, l_end;
  double l_total = 0.0;
  int reps, i, j;
  const int nblock = 16;
  double alpha = 1.0, beta = 1.0;
#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
  char transa = 'N', transb = 'N';
#endif
  int l_prefetch_op = LIBXSMM_PREFETCH_NONE;
  libxsmm_dmmfunction kernel = NULL;

  if (argc != 5) {
    assert(0 < argc);
    fprintf(stderr, "Invalid: try %s M N K reps\n", argv[0]);
    exit(-1);
  }

  m = atoi(argv[1]);
  n = atoi(argv[2]);
  k = atoi(argv[3]);
  reps = atoi(argv[4]);

  /* column-major layout, which is what you want for the sizes in question */
  lda = m;
  ldb = k;
  ldc = m;

  if (n % nblock != 0) {
    fprintf(stderr, "N needs to be divisible by %i\n", nblock);
    exit(-1);
  }

  a  = (double*)libxsmm_aligned_malloc(sizeof(double)*lda*k, 64);
  b  = (double*)libxsmm_aligned_malloc(sizeof(double)*ldb*n, 64);
  c1 = (double*)libxsmm_aligned_malloc(sizeof(double)*ldc*n, 64);
  c2 = (double*)libxsmm_aligned_malloc(sizeof(double)*ldc*n, 64);

#pragma omp parallel for
  for (i = 0; i < lda*k; i++) {
    a[i] = libxsmm_rng_f64();
  }
#pragma omp parallel for
  for (i = 0; i < ldb*n; i++) {
    b[i] = libxsmm_rng_f64();
  }
#pragma omp parallel for
  for (i = 0; i < ldc*n; i++) {
    c1[i] = 0;
    c2[i] = 0;
  }

  /* JIT kernel for one m x nblock x k panel */
  kernel = libxsmm_dmmdispatch(m, nblock, k, NULL, NULL, NULL, NULL, NULL, NULL, &l_prefetch_op );

  /* warm up MKL / the reference GEMM */
#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
  dgemm(&transa, &transb, &m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c1, &ldc);
#else
  my_dgemm(&m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c1, &ldc);
#endif
#pragma omp parallel for
  for (i = 0; i < ldc*n; i++) {
    c1[i] = 0;
    c2[i] = 0;
  }

  l_start = libxsmm_timer_tick();
  for ( j = 0; j < reps; j++ ) {
#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
    dgemm(&transa, &transb, &m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c1, &ldc);
#else
    my_dgemm(&m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c1, &ldc);
#endif
  }
  l_end = libxsmm_timer_tick();
  l_total = libxsmm_timer_duration(l_start, l_end);

  fprintf(stdout, "time[s] MKL (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps );
  fprintf(stdout, "GFLOPS MKL (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total );
  fprintf(stdout, "GB/s MKL (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total );

  l_start = libxsmm_timer_tick();
  for ( j = 0; j < reps; j++ ) {
    /* panel decomposition over blocks of nblock columns; see the
       equivalence sketch after main() */
#pragma omp parallel for private(i)
    for ( i = 0; i < n; i+=nblock) {
      kernel( a, &b[ldb*i], &c2[ldc*i] );
    }
  }
  /* stop the clock once, after all reps, not inside the timed loop */
  l_end = libxsmm_timer_tick();
  l_total = libxsmm_timer_duration(l_start, l_end);

  fprintf(stdout, "time[s] libxsmm (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps );
  fprintf(stdout, "GFLOPS libxsmm (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total );
  fprintf(stdout, "GB/s libxsmm (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total );

  /* test result */
  double max_error = 0.0;
  for ( i = 0; i < ldc*n; i++) {
    if (max_error < fabs(c1[i] - c2[i])) {
      max_error = fabs(c1[i] - c2[i]);
    }
  }
  printf("max error: %f\n\n", max_error);

  return EXIT_SUCCESS;
}
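#if !defined(__MKL) && !defined(MKL_DIRECT_CALL_SEQ) && !defined(MKL_DIRECT_CALL)
/* Minimal self-contained check of the panel decomposition the timed libxsmm
   loop relies on: in column-major storage, column j starts at offset ld*j,
   so computing C in nblock-wide panels via &b[ldb*i] and &c[ldc*i] must
   reproduce the full GEMM column for column.  This helper is a sketch that
   assumes the my_dgemm fallback above (a build without MKL); it is not
   libxsmm API.  Returns 1 when the two computations agree exactly. */
int check_panel_decomposition(void) {
  enum { CM = 3, CN = 4, CK = 2, CNB = 2 };
  double a[CM*CK] = { 1,2,3, 4,5,6 };
  double b[CK*CN] = { 1,0, 0,1, 1,1, 2,3 };
  double c_full[CM*CN] = { 0 }, c_panel[CM*CN] = { 0 };
  double alpha = 1.0, beta = 1.0;
  int m = CM, n = CN, k = CK, nb = CNB;
  int lda = CM, ldb = CK, ldc = CM, i, j;
  /* whole problem in one call ... */
  my_dgemm(&m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c_full, &ldc);
  /* ... versus one nb-wide panel per call, the shape of the timed loop */
  for (i = 0; i < n; i += nb) {
    my_dgemm(&m, &nb, &k, &alpha, a, &lda, &b[ldb*i], &ldb, &beta,
             &c_panel[ldc*i], &ldc);
  }
  for (j = 0; j < CM*CN; j++) {
    if (c_full[j] != c_panel[j]) return 0;
  }
  return 1; /* identical: each column is an independent set of dot products */
}
#endif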
fx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/fx-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Typedef declarations. 
*/ typedef enum { BitwiseAndAssignmentOperator = 0xd9U, BitwiseOrAssignmentOperator, LeftShiftAssignmentOperator, RightShiftAssignmentOperator, PowerAssignmentOperator, ModuloAssignmentOperator, PlusAssignmentOperator, SubtractAssignmentOperator, MultiplyAssignmentOperator, DivideAssignmentOperator, IncrementAssignmentOperator, DecrementAssignmentOperator, LeftShiftOperator, RightShiftOperator, LessThanEqualOperator, GreaterThanEqualOperator, EqualOperator, NotEqualOperator, LogicalAndOperator, LogicalOrOperator, ExponentialNotation } FxOperator; struct _FxInfo { const Image *images; char *expression; FILE *file; SplayTreeInfo *colors, *symbols; CacheView **view; RandomInfo *random_info; ExceptionInfo *exception; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireFxInfo() allocates the FxInfo structure. % % The format of the AcquireFxInfo method is: % % FxInfo *AcquireFxInfo(Image *images,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o expression: the expression. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression, ExceptionInfo *exception) { const Image *next; FxInfo *fx_info; ssize_t i; unsigned char fx_op[2]; fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info)); (void) memset(fx_info,0,sizeof(*fx_info)); fx_info->exception=AcquireExceptionInfo(); fx_info->images=images; fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, RelinquishMagickMemory); fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, RelinquishMagickMemory); fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength( fx_info->images),sizeof(*fx_info->view)); if (fx_info->view == (CacheView **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); i=0; next=GetFirstImageInList(fx_info->images); for ( ; next != (Image *) NULL; next=next->next) { fx_info->view[i]=AcquireVirtualCacheView(next,exception); i++; } fx_info->random_info=AcquireRandomInfo(); fx_info->expression=ConstantString(expression); fx_info->file=stderr; /* Convert compound to simple operators. 
*/ fx_op[1]='\0'; *fx_op=(unsigned char) BitwiseAndAssignmentOperator; (void) SubstituteString(&fx_info->expression,"&=",(char *) fx_op); *fx_op=(unsigned char) BitwiseOrAssignmentOperator; (void) SubstituteString(&fx_info->expression,"|=",(char *) fx_op); *fx_op=(unsigned char) LeftShiftAssignmentOperator; (void) SubstituteString(&fx_info->expression,"<<=",(char *) fx_op); *fx_op=(unsigned char) RightShiftAssignmentOperator; (void) SubstituteString(&fx_info->expression,">>=",(char *) fx_op); *fx_op=(unsigned char) PowerAssignmentOperator; (void) SubstituteString(&fx_info->expression,"^=",(char *) fx_op); *fx_op=(unsigned char) ModuloAssignmentOperator; (void) SubstituteString(&fx_info->expression,"%=",(char *) fx_op); *fx_op=(unsigned char) PlusAssignmentOperator; (void) SubstituteString(&fx_info->expression,"+=",(char *) fx_op); *fx_op=(unsigned char) SubtractAssignmentOperator; (void) SubstituteString(&fx_info->expression,"-=",(char *) fx_op); *fx_op=(unsigned char) MultiplyAssignmentOperator; (void) SubstituteString(&fx_info->expression,"*=",(char *) fx_op); *fx_op=(unsigned char) DivideAssignmentOperator; (void) SubstituteString(&fx_info->expression,"/=",(char *) fx_op); *fx_op=(unsigned char) IncrementAssignmentOperator; (void) SubstituteString(&fx_info->expression,"++",(char *) fx_op); *fx_op=(unsigned char) DecrementAssignmentOperator; (void) SubstituteString(&fx_info->expression,"--",(char *) fx_op); *fx_op=(unsigned char) LeftShiftOperator; (void) SubstituteString(&fx_info->expression,"<<",(char *) fx_op); *fx_op=(unsigned char) RightShiftOperator; (void) SubstituteString(&fx_info->expression,">>",(char *) fx_op); *fx_op=(unsigned char) LessThanEqualOperator; (void) SubstituteString(&fx_info->expression,"<=",(char *) fx_op); *fx_op=(unsigned char) GreaterThanEqualOperator; (void) SubstituteString(&fx_info->expression,">=",(char *) fx_op); *fx_op=(unsigned char) EqualOperator; (void) SubstituteString(&fx_info->expression,"==",(char *) fx_op); *fx_op=(unsigned char) NotEqualOperator; (void) SubstituteString(&fx_info->expression,"!=",(char *) fx_op); *fx_op=(unsigned char) LogicalAndOperator; (void) SubstituteString(&fx_info->expression,"&&",(char *) fx_op); *fx_op=(unsigned char) LogicalOrOperator; (void) SubstituteString(&fx_info->expression,"||",(char *) fx_op); *fx_op=(unsigned char) ExponentialNotation; (void) SubstituteString(&fx_info->expression,"**",(char *) fx_op); /* Force right-to-left associativity for unary negation. */ (void) SubstituteString(&fx_info->expression,"-","-1.0*"); (void) SubstituteString(&fx_info->expression,"^-1.0*","^-"); (void) SubstituteString(&fx_info->expression,"E-1.0*","E-"); (void) SubstituteString(&fx_info->expression,"e-1.0*","e-"); (void) SubstituteString(&fx_info->expression," ",""); /* compact string */ return(fx_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyFxInfo() deallocates memory associated with an FxInfo structure. % % The format of the DestroyFxInfo method is: % % ImageInfo *DestroyFxInfo(ImageInfo *fx_info) % % A description of each parameter follows: % % o fx_info: the fx info. 
% */ MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info) { ssize_t i; fx_info->exception=DestroyExceptionInfo(fx_info->exception); fx_info->expression=DestroyString(fx_info->expression); fx_info->symbols=DestroySplayTree(fx_info->symbols); fx_info->colors=DestroySplayTree(fx_info->colors); for (i=(ssize_t) GetImageListLength(fx_info->images)-1; i >= 0; i--) fx_info->view[i]=DestroyCacheView(fx_info->view[i]); fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view); fx_info->random_info=DestroyRandomInfo(fx_info->random_info); fx_info=(FxInfo *) RelinquishMagickMemory(fx_info); return(fx_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + F x E v a l u a t e C h a n n e l E x p r e s s i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FxEvaluateChannelExpression() evaluates an expression and returns the % results. % % The format of the FxEvaluateExpression method is: % % double FxEvaluateChannelExpression(FxInfo *fx_info, % const PixelChannel channel,const ssize_t x,const ssize_t y, % double *alpha,Exceptioninfo *exception) % double FxEvaluateExpression(FxInfo *fx_info, % double *alpha,Exceptioninfo *exception) % % A description of each parameter follows: % % o fx_info: the fx info. % % o channel: the channel. % % o x,y: the pixel position. % % o alpha: the result. % % o exception: return any errors or warnings in this structure. % */ static inline const double *GetFxSymbolValue(FxInfo *magick_restrict fx_info, const char *symbol) { return((const double *) GetValueFromSplayTree(fx_info->symbols,symbol)); } static inline MagickBooleanType SetFxSymbolValue( FxInfo *magick_restrict fx_info,const char *magick_restrict symbol, double const value) { double *object; object=(double *) GetValueFromSplayTree(fx_info->symbols,symbol); if (object != (double *) NULL) { *object=value; return(MagickTrue); } object=(double *) AcquireMagickMemory(sizeof(*object)); if (object == (double *) NULL) { (void) ThrowMagickException(fx_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", fx_info->images->filename); return(MagickFalse); } *object=value; return(AddValueToSplayTree(fx_info->symbols,ConstantString(symbol),object)); } static double FxChannelStatistics(FxInfo *fx_info,Image *image, PixelChannel channel,const char *symbol,ExceptionInfo *exception) { ChannelType channel_mask; char key[MagickPathExtent]; const double *value; double statistic; const char *p; channel_mask=UndefinedChannel; for (p=symbol; (*p != '.') && (*p != '\0'); p++) ; if (*p == '.') { ssize_t option; option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1); if (option >= 0) { channel=(PixelChannel) option; channel_mask=SetPixelChannelMask(image,(ChannelType) (1UL << channel)); } } (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image, (double) channel,symbol); value=GetFxSymbolValue(fx_info,key); if (value != (const double *) NULL) { if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); return(QuantumScale*(*value)); } statistic=0.0; if (LocaleNCompare(symbol,"depth",5) == 0) { size_t depth; depth=GetImageDepth(image,exception); statistic=(double) depth; } if (LocaleNCompare(symbol,"kurtosis",8) == 0) { double kurtosis, skewness; (void) GetImageKurtosis(image,&kurtosis,&skewness,exception); statistic=kurtosis; } if (LocaleNCompare(symbol,"maxima",6) == 0) { double maxima, minima; (void) 
GetImageRange(image,&minima,&maxima,exception); statistic=maxima; } if (LocaleNCompare(symbol,"mean",4) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); statistic=mean; } if (LocaleNCompare(symbol,"median",6) == 0) { double median; (void) GetImageMedian(image,&median,exception); statistic=median; } if (LocaleNCompare(symbol,"minima",6) == 0) { double maxima, minima; (void) GetImageRange(image,&minima,&maxima,exception); statistic=minima; } if (LocaleNCompare(symbol,"skewness",8) == 0) { double kurtosis, skewness; (void) GetImageKurtosis(image,&kurtosis,&skewness,exception); statistic=skewness; } if (LocaleNCompare(symbol,"standard_deviation",18) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); statistic=standard_deviation; } if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); if (SetFxSymbolValue(fx_info,key,statistic) == MagickFalse) return(0.0); return(QuantumScale*statistic); } static double FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t, const ssize_t,const char *,const size_t,double *,ExceptionInfo *); static inline MagickBooleanType IsFxFunction(const char *expression, const char *name,const size_t length) { int c; size_t i; for (i=0; i <= length; i++) if (expression[i] == '\0') return(MagickFalse); c=expression[length]; if ((LocaleNCompare(expression,name,length) == 0) && ((isspace((int) ((unsigned char) c)) == 0) || (c == '('))) return(MagickTrue); return(MagickFalse); } static inline double FxGCD(const double alpha,const double beta) { if (alpha < beta) return(FxGCD(beta,alpha)); if (fabs(beta) < 0.001) return(alpha); return(FxGCD(beta,alpha-beta*floor(alpha/beta))); } static inline const char *FxSubexpression(const char *expression, ExceptionInfo *exception) { const char *subexpression; ssize_t level; level=0; subexpression=expression; while ((*subexpression != '\0') && ((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL))) { if (strchr("(",(int) *subexpression) != (char *) NULL) level++; else if (strchr(")",(int) *subexpression) != (char *) NULL) level--; subexpression++; } if (*subexpression == '\0') (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnbalancedParenthesis","`%s'",expression); return(subexpression); } static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel, const ssize_t x,const ssize_t y,const char *expression,const size_t depth, ExceptionInfo *exception) { char *q, symbol[MagickPathExtent]; const char *artifact, *p; const double *value; double alpha, beta; Image *image; MagickBooleanType status; PixelInfo pixel; PointInfo point; ssize_t i; size_t level; p=expression; i=GetImageIndexInList(fx_info->images); level=0; point.x=(double) x; point.y=(double) y; if (isalpha((int) ((unsigned char) *(p+1))) == 0) { char *subexpression; subexpression=AcquireString(expression); if (strchr("suv",(int) *p) != (char *) NULL) { switch (*p) { case 's': default: { i=GetImageIndexInList(fx_info->images); break; } case 'u': i=0; break; case 'v': i=1; break; } p++; if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); i=(ssize_t) alpha; if (*p != '\0') p++; } if (*p == '.') p++; } if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0)) { p++; if (*p == '{') 
{ level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '{') level++; else if (*p == '}') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); point.x=alpha; point.y=beta; if (*p != '\0') p++; } else if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); point.x+=alpha; point.y+=beta; if (*p != '\0') p++; } if (*p == '.') p++; } subexpression=DestroyString(subexpression); } image=GetImageFromList(fx_info->images,i); if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "NoSuchImage","`%s'",expression); return(0.0); } i=GetImageIndexInList(image); GetPixelInfo(image,&pixel); status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate, point.x,point.y,&pixel,exception); (void) status; if ((*p != '\0') && (*(p+1) != '\0') && (*(p+2) != '\0') && (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) && (LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) && (LocaleCompare(p,"saturation") != 0) && (LocaleCompare(p,"lightness") != 0)) { char name[MagickPathExtent]; size_t length; (void) CopyMagickString(name,p,MagickPathExtent); length=strlen(name); for (q=name+length-1; q > name; q--) { if (*q == ')') break; if (*q == '.') { *q='\0'; break; } } q=name; if ((*q != '\0') && (*(q+1) != '\0') && (*(q+2) != '\0') && (GetFxSymbolValue(fx_info,name) == (const double *) NULL)) { PixelInfo *color; color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name); if (color != (PixelInfo *) NULL) { pixel=(*color); p+=length; } else { MagickBooleanType status; status=QueryColorCompliance(name,AllCompliance,&pixel, fx_info->exception); if (status != MagickFalse) { (void) AddValueToSplayTree(fx_info->colors, ConstantString(name),ClonePixelInfo(&pixel)); p+=length; } } } } (void) CopyMagickString(symbol,p,MagickPathExtent); (void) StripMagickString(symbol); if (*symbol == '\0') { switch (channel) { case RedPixelChannel: return(QuantumScale*pixel.red); case GreenPixelChannel: return(QuantumScale*pixel.green); case BluePixelChannel: return(QuantumScale*pixel.blue); case BlackPixelChannel: { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), ImageError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.black); } case AlphaPixelChannel: { if (pixel.alpha_trait == UndefinedPixelTrait) return(1.0); alpha=(double) (QuantumScale*pixel.alpha); return(alpha); } case CompositePixelChannel: { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image,&pixel,quantum_pixel); return(QuantumScale*GetPixelIntensity(image,quantum_pixel)); } case IndexPixelChannel: return(0.0); default: break; } (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",p); return(0.0); } switch (*symbol) { case 'A': case 'a': { if (LocaleCompare(symbol,"a") == 0) return((QuantumScale*pixel.alpha)); break; } case 'B': case 'b': { if (LocaleCompare(symbol,"b") == 0) return(QuantumScale*pixel.blue); break; } case 'C': case 'c': { if (IsFxFunction(symbol,"channel",7) != MagickFalse) { GeometryInfo channel_info; MagickStatusType flags; flags=ParseGeometry(symbol+7,&channel_info); if (image->colorspace == 
CMYKColorspace) switch (channel) { case CyanPixelChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case MagentaPixelChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case YellowPixelChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackPixelChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } case AlphaPixelChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } default: return(0.0); } switch (channel) { case RedPixelChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case GreenPixelChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case BluePixelChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackPixelChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } case AlphaPixelChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } default: return(0.0); } } if (LocaleCompare(symbol,"c") == 0) return(QuantumScale*pixel.red); break; } case 'D': case 'd': { if (LocaleNCompare(symbol,"depth",5) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'E': case 'e': { if (LocaleCompare(symbol,"extent") == 0) { if (image->extent != 0) return((double) image->extent); return((double) GetBlobSize(image)); } break; } case 'G': case 'g': { if (LocaleCompare(symbol,"g") == 0) return(QuantumScale*pixel.green); break; } case 'K': case 'k': { if (LocaleNCompare(symbol,"kurtosis",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"k") == 0) { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.black); } break; } case 'H': case 'h': { if (LocaleCompare(symbol,"h") == 0) return((double) image->rows); if (LocaleCompare(symbol,"hue") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(hue); } break; } case 'I': case 'i': { if ((LocaleCompare(symbol,"image.depth") == 0) || (LocaleCompare(symbol,"image.minima") == 0) || (LocaleCompare(symbol,"image.maxima") == 0) || (LocaleCompare(symbol,"image.mean") == 0) || (LocaleCompare(symbol,"image.kurtosis") == 0) || (LocaleCompare(symbol,"image.skewness") == 0) || (LocaleCompare(symbol,"image.standard_deviation") == 0)) return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception)); if (LocaleCompare(symbol,"image.resolution.x") == 0) return(image->resolution.x); if (LocaleCompare(symbol,"image.resolution.y") == 0) return(image->resolution.y); if (LocaleCompare(symbol,"intensity") == 0) { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image,&pixel,quantum_pixel); return(QuantumScale*GetPixelIntensity(image,quantum_pixel)); } if (LocaleCompare(symbol,"i") == 0) return((double) x); break; } case 'J': case 'j': { if (LocaleCompare(symbol,"j") == 0) return((double) y); break; } case 'L': case 'l': { if (LocaleCompare(symbol,"lightness") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(lightness); } if (LocaleCompare(symbol,"luma") == 0) { double luma; luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luma); } if 
(LocaleCompare(symbol,"luminance") == 0) { double luminence; luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luminence); } break; } case 'M': case 'm': { if (LocaleNCompare(symbol,"maxima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"mean",4) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"median",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"minima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"m") == 0) return(QuantumScale*pixel.green); break; } case 'N': case 'n': { if (LocaleCompare(symbol,"n") == 0) return((double) GetImageListLength(fx_info->images)); break; } case 'O': case 'o': { if (LocaleCompare(symbol,"o") == 0) return(QuantumScale*pixel.alpha); break; } case 'P': case 'p': { if (LocaleCompare(symbol,"page.height") == 0) return((double) image->page.height); if (LocaleCompare(symbol,"page.width") == 0) return((double) image->page.width); if (LocaleCompare(symbol,"page.x") == 0) return((double) image->page.x); if (LocaleCompare(symbol,"page.y") == 0) return((double) image->page.y); if (LocaleCompare(symbol,"printsize.x") == 0) return(PerceptibleReciprocal(image->resolution.x)*image->columns); if (LocaleCompare(symbol,"printsize.y") == 0) return(PerceptibleReciprocal(image->resolution.y)*image->rows); break; } case 'Q': case 'q': { if (LocaleCompare(symbol,"quality") == 0) return((double) image->quality); break; } case 'R': case 'r': { if (LocaleCompare(symbol,"resolution.x") == 0) return(image->resolution.x); if (LocaleCompare(symbol,"resolution.y") == 0) return(image->resolution.y); if (LocaleCompare(symbol,"r") == 0) return(QuantumScale*pixel.red); break; } case 'S': case 's': { if (LocaleCompare(symbol,"saturation") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(saturation); } if (LocaleNCompare(symbol,"skewness",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"standard_deviation",18) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'T': case 't': { if (LocaleCompare(symbol,"t") == 0) return((double) GetImageIndexInList(fx_info->images)); break; } case 'W': case 'w': { if (LocaleCompare(symbol,"w") == 0) return((double) image->columns); break; } case 'Y': case 'y': { if (LocaleCompare(symbol,"y") == 0) return(QuantumScale*pixel.blue); break; } case 'Z': case 'z': { if (LocaleCompare(symbol,"z") == 0) return((double) GetImageDepth(image,fx_info->exception)); break; } default: break; } value=GetFxSymbolValue(fx_info,symbol); if (value != (const double *) NULL) return(*value); artifact=GetImageArtifact(image,symbol); if (artifact != (const char *) NULL) return(StringToDouble(artifact,(char **) NULL)); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UndefinedVariable","`%s'",symbol); (void) SetFxSymbolValue(fx_info,symbol,0.0); return(0.0); } static const char *FxOperatorPrecedence(const char *expression, ExceptionInfo *exception) { typedef enum { UndefinedPrecedence, NullPrecedence, BitwiseComplementPrecedence, ExponentPrecedence, ExponentialNotationPrecedence, MultiplyPrecedence, AdditionPrecedence, ShiftPrecedence, RelationalPrecedence, EquivalencyPrecedence, BitwiseAndPrecedence, BitwiseOrPrecedence, 
LogicalAndPrecedence, LogicalOrPrecedence, TernaryPrecedence, AssignmentPrecedence, CommaPrecedence, SeparatorPrecedence } FxPrecedence; FxPrecedence precedence, target; const char *subexpression; int c; size_t level; c=(-1); level=0; subexpression=(const char *) NULL; target=NullPrecedence; while ((c != '\0') && (*expression != '\0')) { precedence=UndefinedPrecedence; if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@')) { expression++; continue; } switch (*expression) { case 'A': case 'a': { #if defined(MAGICKCORE_HAVE_ACOSH) if (IsFxFunction(expression,"acosh",5) != MagickFalse) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (IsFxFunction(expression,"asinh",5) != MagickFalse) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ATANH) if (IsFxFunction(expression,"atanh",5) != MagickFalse) { expression+=5; break; } #endif if (IsFxFunction(expression,"atan2",5) != MagickFalse) { expression+=5; break; } break; } case 'E': case 'e': { if ((isdigit((int) ((unsigned char) c)) != 0) && ((LocaleNCompare(expression,"E+",2) == 0) || (LocaleNCompare(expression,"E-",2) == 0))) { expression+=2; /* scientific notation */ break; } } case 'J': case 'j': { if ((IsFxFunction(expression,"j0",2) != MagickFalse) || (IsFxFunction(expression,"j1",2) != MagickFalse)) { expression+=2; break; } break; } case '#': { while (isxdigit((int) ((unsigned char) *(expression+1))) != 0) expression++; break; } default: break; } if ((c == (int) '{') || (c == (int) '[')) level++; else if ((c == (int) '}') || (c == (int) ']')) level--; if (level == 0) switch ((unsigned char) *expression) { case '~': case '!': { precedence=BitwiseComplementPrecedence; break; } case '^': case '@': { precedence=ExponentPrecedence; break; } default: { if (((c != 0) && ((isdigit((int) ((unsigned char) c)) != 0) || (strchr(")",c) != (char *) NULL))) && (((islower((int) ((unsigned char) *expression)) != 0) || (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) || ((isdigit((int) ((unsigned char) c)) == 0) && (isdigit((int) ((unsigned char) *expression)) != 0))) && (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL)) precedence=MultiplyPrecedence; break; } case '*': case '/': case '%': { precedence=MultiplyPrecedence; break; } case '+': case '-': { if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) || (isalpha((int) ((unsigned char) c)) != 0)) precedence=AdditionPrecedence; break; } case BitwiseAndAssignmentOperator: case BitwiseOrAssignmentOperator: case LeftShiftAssignmentOperator: case RightShiftAssignmentOperator: case PowerAssignmentOperator: case ModuloAssignmentOperator: case PlusAssignmentOperator: case SubtractAssignmentOperator: case MultiplyAssignmentOperator: case DivideAssignmentOperator: case IncrementAssignmentOperator: case DecrementAssignmentOperator: { precedence=AssignmentPrecedence; break; } case LeftShiftOperator: case RightShiftOperator: { precedence=ShiftPrecedence; break; } case '<': case LessThanEqualOperator: case GreaterThanEqualOperator: case '>': { precedence=RelationalPrecedence; break; } case EqualOperator: case NotEqualOperator: { precedence=EquivalencyPrecedence; break; } case '&': { precedence=BitwiseAndPrecedence; break; } case '|': { precedence=BitwiseOrPrecedence; break; } case LogicalAndOperator: { precedence=LogicalAndPrecedence; break; } case LogicalOrOperator: { precedence=LogicalOrPrecedence; break; } case ExponentialNotation: { precedence=ExponentialNotationPrecedence; break; } case ':': case '?': { 
precedence=TernaryPrecedence; break; } case '=': { precedence=AssignmentPrecedence; break; } case ',': { precedence=CommaPrecedence; break; } case ';': { precedence=SeparatorPrecedence; break; } } if ((precedence == BitwiseComplementPrecedence) || (precedence == TernaryPrecedence) || (precedence == AssignmentPrecedence)) { if (precedence > target) { /* Right-to-left associativity. */ target=precedence; subexpression=expression; } } else if (precedence >= target) { /* Left-to-right associativity. */ target=precedence; subexpression=expression; } if (strchr("(",(int) *expression) != (char *) NULL) expression=FxSubexpression(expression,exception); c=(int) (*expression++); } return(subexpression); } static double FxEvaluateSubexpression(FxInfo *fx_info, const PixelChannel channel,const ssize_t x,const ssize_t y, const char *expression,const size_t depth,double *beta, ExceptionInfo *exception) { #define FxMaxParenthesisDepth 58 #define FxMaxSubexpressionDepth 200 #define FxReturn(value) \ { \ subexpression=DestroyString(subexpression); \ return(value); \ } #define FxParseConditional(subexpression,sentinal,p,q) \ { \ p=subexpression; \ for (q=(char *) p; (*q != (sentinal)) && (*q != '\0'); q++) \ if (*q == '(') \ { \ for (q++; (*q != ')') && (*q != '\0'); q++); \ if (*q == '\0') \ break; \ } \ if (*q == '\0') \ { \ (void) ThrowMagickException(exception,GetMagickModule(), \ OptionError,"UnableToParseExpression","`%s'",subexpression); \ FxReturn(0.0); \ } \ if (strlen(q) == 1) \ *(q+1)='\0'; \ *q='\0'; \ } char *q, *subexpression; double alpha, gamma, sans, value; const char *p; *beta=0.0; sans=0.0; subexpression=AcquireString(expression); *subexpression='\0'; if (depth > FxMaxSubexpressionDepth) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",expression); FxReturn(0.0); } if (exception->severity >= ErrorException) FxReturn(0.0); while (isspace((int) ((unsigned char) *expression)) != 0) expression++; if (*expression == '\0') FxReturn(0.0); p=FxOperatorPrecedence(expression,exception); if (p != (const char *) NULL) { (void) CopyMagickString(subexpression,expression,(size_t) (p-expression+1)); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); switch ((unsigned char) *p) { case '~': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) (~(size_t) *beta); FxReturn(*beta); } case '!': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta == 0.0 ? 
1.0 : 0.0); } case '^': { *beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p, depth+1,beta,exception)); FxReturn(*beta); } case '*': case ExponentialNotation: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha*(*beta)); } case '/': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(PerceptibleReciprocal(*beta)*alpha); } case '%': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fmod(alpha,*beta)); } case '+': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha+(*beta)); } case '-': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha-(*beta)); } case BitwiseAndAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(double) ((size_t) (alpha+0.5) & (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case BitwiseOrAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(double) ((size_t) (alpha+0.5) | (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case LeftShiftAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (*beta+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } value=(double) ((size_t) (alpha+0.5) << (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case RightShiftAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (*beta+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } value=(double) ((size_t) (alpha+0.5) >> (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case PowerAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } 
ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=pow(alpha,*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case ModuloAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=fmod(alpha,*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case PlusAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha+(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case SubtractAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha-(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case MultiplyAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha*(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case DivideAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha*PerceptibleReciprocal(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case IncrementAssignmentOperator: { if (*subexpression == '\0') alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha+1.0; if (*subexpression == '\0') { if (SetFxSymbolValue(fx_info,p,value) == MagickFalse) return(0.0); } else if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case DecrementAssignmentOperator: { if (*subexpression == '\0') alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha-1.0; if (*subexpression == '\0') { if (SetFxSymbolValue(fx_info,p,value) == MagickFalse) return(0.0); } else if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case LeftShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) 
(gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5)); FxReturn(*beta); } case RightShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5)); FxReturn(*beta); } case '<': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha < *beta ? 1.0 : 0.0); } case LessThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha <= *beta ? 1.0 : 0.0); } case '>': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha > *beta ? 1.0 : 0.0); } case GreaterThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha >= *beta ? 1.0 : 0.0); } case EqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0); } case NotEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0); } case '&': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5)); FxReturn(*beta); } case '|': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5)); FxReturn(*beta); } case LogicalAndOperator: { p++; if (alpha <= 0.0) { *beta=0.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 1.0 : 0.0; FxReturn(*beta); } case LogicalOrOperator: { p++; if (alpha > 0.0) { *beta=1.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 
1.0 : 0.0; FxReturn(*beta); } case '?': { (void) CopyMagickString(subexpression,++p,MagickPathExtent-1); FxParseConditional(subexpression,':',p,q); if (fabs(alpha) >= MagickEpsilon) gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); FxReturn(gamma); } case '=': { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case ',': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha); } case ';': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta); } default: { gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1, beta,exception); FxReturn(gamma); } } } if (strchr("(",(int) *expression) != (char *) NULL) { size_t length; if (depth >= FxMaxParenthesisDepth) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "ParenthesisNestedTooDeeply","`%s'",expression); length=CopyMagickString(subexpression,expression+1,MagickPathExtent); if (length != 0) subexpression[length-1]='\0'; gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); FxReturn(gamma); } switch (*expression) { case '+': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(1.0*gamma); } case '-': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(-1.0*gamma); } case '~': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn((double) (~(size_t) (gamma+0.5))); } case 'A': case 'a': { if (IsFxFunction(expression,"abs",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(fabs(alpha)); } #if defined(MAGICKCORE_HAVE_ACOSH) if (IsFxFunction(expression,"acosh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(acosh(alpha)); } #endif if (IsFxFunction(expression,"acos",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(acos(alpha)); } #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"airy",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha); FxReturn(gamma*gamma); } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (IsFxFunction(expression,"asinh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(asinh(alpha)); } #endif if (IsFxFunction(expression,"asin",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(asin(alpha)); } if (IsFxFunction(expression,"alt",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(((ssize_t) alpha) & 0x01 ? 
-1.0 : 1.0); } if (IsFxFunction(expression,"atan2",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atan2(alpha,*beta)); } #if defined(MAGICKCORE_HAVE_ATANH) if (IsFxFunction(expression,"atanh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atanh(alpha)); } #endif if (IsFxFunction(expression,"atan",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(atan(alpha)); } if (LocaleCompare(expression,"a") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'B': case 'b': { if (LocaleCompare(expression,"b") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'C': case 'c': { if (IsFxFunction(expression,"ceil",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(ceil(alpha)); } if (IsFxFunction(expression,"clamp",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha < 0.0) FxReturn(0.0); if (alpha > 1.0) FxReturn(1.0); FxReturn(alpha); } if (IsFxFunction(expression,"cosh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(cosh(alpha)); } if (IsFxFunction(expression,"cos",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(cos(alpha)); } if (LocaleCompare(expression,"c") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'D': case 'd': { if (IsFxFunction(expression,"debug",5) != MagickFalse) { const char *type; size_t length; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); switch (fx_info->images->colorspace) { case CMYKColorspace: { switch (channel) { case CyanPixelChannel: type="cyan"; break; case MagentaPixelChannel: type="magenta"; break; case YellowPixelChannel: type="yellow"; break; case AlphaPixelChannel: type="alpha"; break; case BlackPixelChannel: type="black"; break; default: type="unknown"; break; } break; } case GRAYColorspace: { switch (channel) { case RedPixelChannel: type="gray"; break; case AlphaPixelChannel: type="alpha"; break; default: type="unknown"; break; } break; } default: { switch (channel) { case RedPixelChannel: type="red"; break; case GreenPixelChannel: type="green"; break; case BluePixelChannel: type="blue"; break; case AlphaPixelChannel: type="alpha"; break; default: type="unknown"; break; } break; } } *subexpression='\0'; length=1; if (strlen(expression) > 6) length=CopyMagickString(subexpression,expression+6, MagickPathExtent); if (length != 0) subexpression[length-1]='\0'; if (fx_info->file != (FILE *) NULL) (void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: " "%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type, subexpression,GetMagickPrecision(),alpha); FxReturn(alpha); } if (IsFxFunction(expression,"do",2) != MagickFalse) { size_t length; /* Parse do(expression,condition test). 
*/ length=CopyMagickString(subexpression,expression+3, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; } FxReturn(alpha); } if (IsFxFunction(expression,"drc",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((alpha/(*beta*(alpha-1.0)+1.0))); } break; } case 'E': case 'e': { if (LocaleCompare(expression,"epsilon") == 0) FxReturn(MagickEpsilon); #if defined(MAGICKCORE_HAVE_ERF) if (IsFxFunction(expression,"erf",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(erf(alpha)); } #endif if (IsFxFunction(expression,"exp",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(exp(alpha)); } if (LocaleCompare(expression,"e") == 0) FxReturn(2.7182818284590452354); break; } case 'F': case 'f': { if (IsFxFunction(expression,"floor",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(floor(alpha)); } if (IsFxFunction(expression,"for",3) != MagickFalse) { double sans = 0.0; size_t length; /* Parse for(initialization, condition test, expression). */ length=CopyMagickString(subexpression,expression+4, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); (void) CopyMagickString(subexpression,q+1,MagickPathExtent-1); FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); } FxReturn(alpha); } break; } case 'G': case 'g': { if (IsFxFunction(expression,"gauss",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI)); } if (IsFxFunction(expression,"gcd",3) != MagickFalse) { double gcd; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); if (IsNaN(alpha)) FxReturn(alpha); gcd=FxGCD(alpha,*beta); FxReturn(gcd); } if (LocaleCompare(expression,"g") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'H': case 'h': { if (LocaleCompare(expression,"h") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (LocaleCompare(expression,"hue") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"hypot",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(hypot(alpha,*beta)); } break; } case 'K': case 'k': { if (LocaleCompare(expression,"k") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'I': case 'i': { if (IsFxFunction(expression,"if",2) != MagickFalse) { double sans = 0.0; size_t length; /* Parse if(condition test, true-expression, false-expression). 
*/ length=CopyMagickString(subexpression,expression+3, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); (void) CopyMagickString(subexpression,q+1,MagickPathExtent-1); FxParseConditional(subexpression,',',p,q); if (fabs(alpha) >= MagickEpsilon) alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); FxReturn(alpha); } if (LocaleCompare(expression,"intensity") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"int",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(floor(alpha)); } if (IsFxFunction(expression,"isnan",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn((double) !!IsNaN(alpha)); } if (LocaleCompare(expression,"i") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'J': case 'j': { if (LocaleCompare(expression,"j") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); #if defined(MAGICKCORE_HAVE_J0) if (IsFxFunction(expression,"j0",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j0(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"j1",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j1(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"jinc",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); FxReturn((2.0*j1((MagickPI*alpha))/(MagickPI*alpha))); } #endif break; } case 'L': case 'l': { if (IsFxFunction(expression,"ln",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(log(alpha)); } if (IsFxFunction(expression,"logtwo",6) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn(log10(alpha)/log10(2.0)); } if (IsFxFunction(expression,"log",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(log10(alpha)); } if (LocaleCompare(expression,"lightness") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'M': case 'm': { if (LocaleCompare(expression,"MaxRGB") == 0) FxReturn(QuantumRange); if (LocaleNCompare(expression,"maxima",6) == 0) break; if (IsFxFunction(expression,"max",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha > *beta ? alpha : *beta); } if (LocaleNCompare(expression,"minima",6) == 0) break; if (IsFxFunction(expression,"min",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha < *beta ? 
alpha : *beta); } if (IsFxFunction(expression,"mod",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta)); } if (LocaleCompare(expression,"m") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'N': case 'n': { if (IsFxFunction(expression,"not",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((double) (alpha < MagickEpsilon)); } if (LocaleCompare(expression,"n") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'O': case 'o': { if (LocaleCompare(expression,"Opaque") == 0) FxReturn(1.0); if (LocaleCompare(expression,"o") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'P': case 'p': { if (LocaleCompare(expression,"phi") == 0) FxReturn(MagickPHI); if (LocaleCompare(expression,"pi") == 0) FxReturn(MagickPI); if (IsFxFunction(expression,"pow",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(pow(alpha,*beta)); } if (LocaleCompare(expression,"p") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Q': case 'q': { if (LocaleCompare(expression,"QuantumRange") == 0) FxReturn(QuantumRange); if (LocaleCompare(expression,"QuantumScale") == 0) FxReturn(QuantumScale); break; } case 'R': case 'r': { if (IsFxFunction(expression,"rand",4) != MagickFalse) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxEvaluateSubexpression) #endif alpha=GetPseudoRandomValue(fx_info->random_info); FxReturn(alpha); } if (IsFxFunction(expression,"round",5) != MagickFalse) { /* Round the fraction to nearest integer. */ alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if ((alpha-floor(alpha)) < (ceil(alpha)-alpha)) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression,"r") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'S': case 's': { if (LocaleCompare(expression,"saturation") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"sign",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(alpha < 0.0 ? 
-1.0 : 1.0); } if (IsFxFunction(expression,"sinc",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0) FxReturn(1.0); FxReturn(sin((MagickPI*alpha))/(MagickPI*alpha)); } if (IsFxFunction(expression,"sinh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sinh(alpha)); } if (IsFxFunction(expression,"sin",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(sin(alpha)); } if (IsFxFunction(expression,"sqrt",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sqrt(alpha)); } if (IsFxFunction(expression,"squish",6) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn((1.0/(1.0+exp(-alpha)))); } if (LocaleCompare(expression,"s") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'T': case 't': { if (IsFxFunction(expression,"tanh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(tanh(alpha)); } if (IsFxFunction(expression,"tan",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(tan(alpha)); } if (LocaleCompare(expression,"Transparent") == 0) FxReturn(0.0); if (IsFxFunction(expression,"trunc",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha >= 0.0) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression,"t") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'U': case 'u': { if (LocaleCompare(expression,"u") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'V': case 'v': { if (LocaleCompare(expression,"v") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'W': case 'w': { if (IsFxFunction(expression,"while",5) != MagickFalse) { size_t length; /* Parse while(condition test, expression). 
*/ length=CopyMagickString(subexpression,expression+6, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1, beta,exception); } FxReturn(alpha); } if (LocaleCompare(expression,"w") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Y': case 'y': { if (LocaleCompare(expression,"y") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Z': case 'z': { if (LocaleCompare(expression,"z") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } default: break; } subexpression=DestroyString(subexpression); q=(char *) expression; alpha=InterpretSiPrefixValue(expression,&q); if (q == expression) alpha=FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception); FxReturn(alpha); } MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info, double *alpha,ExceptionInfo *exception) { MagickBooleanType status; status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha, exception); return(status); } MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info, double *alpha,ExceptionInfo *exception) { FILE *file; MagickBooleanType status; file=fx_info->file; fx_info->file=(FILE *) NULL; status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha, exception); fx_info->file=file; return(status); } MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info, const PixelChannel channel,const ssize_t x,const ssize_t y, double *alpha,ExceptionInfo *exception) { double beta; beta=0.0; *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0, &beta,exception); return(exception->severity == OptionError ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FxImage() applies a mathematical expression to the specified image. % % The format of the FxImage method is: % % Image *FxImage(const Image *image,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o expression: A mathematical expression. % % o exception: return any errors or warnings in this structure. 
% */ static FxInfo **DestroyFxThreadSet(FxInfo **fx_info) { ssize_t i; assert(fx_info != (FxInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (fx_info[i] != (FxInfo *) NULL) fx_info[i]=DestroyFxInfo(fx_info[i]); fx_info=(FxInfo **) RelinquishMagickMemory(fx_info); return(fx_info); } static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression, ExceptionInfo *exception) { char *fx_expression; double alpha; FxInfo **fx_info; ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info)); if (fx_info == (FxInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return((FxInfo **) NULL); } (void) memset(fx_info,0,number_threads*sizeof(*fx_info)); if (*expression != '@') fx_expression=ConstantString(expression); else fx_expression=FileToString(expression+1,~0UL,exception); for (i=0; i < (ssize_t) number_threads; i++) { MagickBooleanType status; fx_info[i]=AcquireFxInfo(image,fx_expression,exception); if (fx_info[i] == (FxInfo *) NULL) break; status=FxPreprocessExpression(fx_info[i],&alpha,exception); if (status == MagickFalse) break; } fx_expression=DestroyString(fx_expression); if (i < (ssize_t) number_threads) fx_info=DestroyFxThreadSet(fx_info); return(fx_info); } MagickExport Image *FxImage(const Image *image,const char *expression, ExceptionInfo *exception) { #define FxImageTag "Fx/Image" CacheView *fx_view, *image_view; FxInfo **magick_restrict fx_info; Image *fx_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (expression == (const char *) NULL) return(CloneImage(image,0,0,MagickTrue,exception)); fx_info=AcquireFxThreadSet(image,expression,exception); if (fx_info == (FxInfo **) NULL) return((Image *) NULL); fx_image=CloneImage(image,0,0,MagickTrue,exception); if (fx_image == (Image *) NULL) { fx_info=DestroyFxThreadSet(fx_info); return((Image *) NULL); } if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse) { fx_info=DestroyFxThreadSet(fx_info); fx_image=DestroyImage(fx_image); return((Image *) NULL); } /* Fx image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); fx_view=AcquireAuthenticCacheView(fx_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(progress,status) \ magick_number_threads(image,fx_image,fx_image->rows, \ GlobExpression(fx_info[0]->expression,"debug(",MagickTrue) == 0 ? 
1 : 0) #endif for (y=0; y < (ssize_t) fx_image->rows; y++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) fx_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel); if ((traits == UndefinedPixelTrait) || (fx_traits == UndefinedPixelTrait)) continue; if ((fx_traits & CopyPixelTrait) != 0) { SetPixelChannel(fx_image,channel,p[i],q); continue; } alpha=0.0; (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha, exception); q[i]=ClampToQuantum(QuantumRange*alpha); } p+=GetPixelChannels(image); q+=GetPixelChannels(fx_image); } if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,FxImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } fx_view=DestroyCacheView(fx_view); image_view=DestroyCacheView(image_view); fx_info=DestroyFxThreadSet(fx_info); if (status == MagickFalse) fx_image=DestroyImage(fx_image); return(fx_image); }
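A minimal, hedged usage sketch for the evaluator above. Only FxImage() is exported from MagickCore (the Fx* evaluation helpers are private), so a standalone program drives it as below; the "rose:" builtin image, the program name, and the expression "u/2" are illustrative, and a MagickCore 7 installation is assumed.

#include <MagickCore/MagickCore.h>

int main(void)
{
  ExceptionInfo *exception;
  ImageInfo *info;
  Image *image, *fx;

  MagickCoreGenesis("fx_demo", MagickFalse);
  exception=AcquireExceptionInfo();
  info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(info->filename, "rose:", MagickPathExtent);
  image=ReadImage(info, exception);
  if (image != (Image *) NULL)
    {
      /* Halve every channel: the expression is evaluated once per channel
         of every pixel, equivalent to -fx "u/2" on the command line. */
      fx=FxImage(image, "u/2", exception);
      if (fx != (Image *) NULL)
        fx=DestroyImage(fx);
      image=DestroyImage(image);
    }
  info=DestroyImageInfo(info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}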
target_x86.c
/***************************************************************************** * * target_x86.c * * Implementation is serial or OpenMP. * * Edinburgh Soft Matter and Statistical Physics Group and * Edinburgh Parallel Computing Centre * * (c) 2018 The University of Edinburgh * * Contributing authors: * Alan Gray (Late of this parish) * Kevin Stratford ([email protected]) * *****************************************************************************/ #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "target.h" /* Globally reserved names. */ dim3 threadIdx; dim3 blockIdx; dim3 gridDim = {1, 1, 1}; dim3 blockDim = {1, 1, 1}; static tdpError_t lastError = tdpSuccess; static char lastErrorString[BUFSIZ] = ""; static int staticStream; /* Utilities */ static void error_boke(int line, tdpError_t error) { fprintf(stderr, "File %s line %d error %s\n", __FILE__, line, tdpGetErrorName(error)); exit(0); } #define errors_make_me_boke(error) error_boke(__LINE__, error) #define error_return_if(expr, error) \ do { if ((expr)) { \ lastError = error; \ errors_make_me_boke(error); \ return error; \ } \ } while(0) #define error_return(error) \ error_return_if(1, error) void tdpErrorHandler(tdpError_t ifail, const char * file, int line, int fatal) { if (ifail != tdpSuccess) { printf("tdpErrorHandler: %s:%d %s %s\n", file, line, tdpGetErrorName(ifail), tdpGetErrorString(ifail)); if (fatal) exit(ifail); } return; } __host__ void tdp_x86_prelaunch(dim3 nblocks, dim3 nthreads) { gridDim = nblocks; blockDim = nthreads; /* sanity checks on user settings here... */ gridDim.x = 1; /* Assert this for host implementation */ /* In case we request fewer threads than are available: */ omp_set_num_threads(blockDim.x*blockDim.y*blockDim.z); /* Check blockDim, blockIdx ? */ threadIdx.x = omp_get_thread_num(); threadIdx.y = 1; threadIdx.z = 1; return; } void tdp_x86_postlaunch(void) { /* Reset the default number of threads. */ omp_set_num_threads(omp_get_max_threads()); return; } /***************************************************************************** * * tdpDeviceGetCacheConfig * *****************************************************************************/ tdpError_t tdpDeviceGetCacheConfig(tdpFuncCache * cacheConfig) { *cacheConfig = tdpFuncCachePreferNone; return tdpSuccess; } /***************************************************************************** * * tdpDeviceSetCacheConfig * *****************************************************************************/ tdpError_t tdpDeviceSetCacheConfig(tdpFuncCache cacheConfig) { /* No op. 
*/ return tdpSuccess; } /***************************************************************************** * * tdpDeviceSynchronize * *****************************************************************************/ tdpError_t tdpDeviceSynchronize(void) { /* do nothing */ return tdpSuccess; } /***************************************************************************** * * tdpFree * *****************************************************************************/ tdpError_t tdpFree(void * devPtr) { error_return_if(devPtr == NULL, tdpErrorInvalidDevicePointer); free(devPtr); return tdpSuccess; } /***************************************************************************** * * tdpFreeHost * *****************************************************************************/ tdpError_t tdpFreeHost(void * ptr) { free(ptr); return tdpSuccess; } /***************************************************************************** * * tdpDeviceGetAttribute * *****************************************************************************/ tdpError_t tdpDeviceGetAttribute(int * value, tdpDeviceAttr attr, int device) { assert(value); assert(0); /* Return some useful information please */ return tdpSuccess; } /***************************************************************************** * * Return id of device currently being used. * *****************************************************************************/ tdpError_t tdpGetDevice(int * device) { assert(device); *device = 0; return tdpSuccess; } /***************************************************************************** * * tdpGetDeviceCount * * Return number of available devices * *****************************************************************************/ tdpError_t tdpGetDeviceCount(int * device) { *device = 0; #ifdef FAKE_DEVICE /* "Fake" device */ *device = 1; #endif /* Strictly, we should return tdpErrorInsufficientDriver or ... 
*/ return tdpErrorNoDevice; } /***************************************************************************** * * tdpGetDeviceProperties * *****************************************************************************/ tdpError_t tdpGetDeviceProperties(struct tdpDeviceProp * prop, int device) { prop->maxThreadsPerBlock = TARGET_MAX_THREADS_PER_BLOCK; prop->maxThreadsDim[0] = TARGET_MAX_THREADS_PER_BLOCK; prop->maxThreadsDim[1] = 1; prop->maxThreadsDim[2] = 1; return tdpSuccess; } /***************************************************************************** * * tdpSetDevice * *****************************************************************************/ tdpError_t tdpSetDevice(int device) { error_return_if(device < 0, tdpErrorInvalidDevice); return tdpSuccess; } /***************************************************************************** * * tdpGetErrorName * *****************************************************************************/ #define CASE_RETURN(x) case(x): return #x; break const char * tdpGetErrorName(tdpError_t error) { switch (error) { CASE_RETURN(tdpSuccess); CASE_RETURN(tdpErrorMissingConfiguration); CASE_RETURN(tdpErrorMemoryAllocation); CASE_RETURN(tdpErrorInitializationError); CASE_RETURN(tdpErrorLaunchFailure); CASE_RETURN(tdpErrorLaunchTimeout); CASE_RETURN(tdpErrorLaunchOutOfResources); CASE_RETURN(tdpErrorInvalidDeviceFunction); CASE_RETURN(tdpErrorInvalidSymbol); CASE_RETURN(tdpErrorInvalidDevicePointer); CASE_RETURN(tdpErrorInvalidResourceHandle); default: fprintf(stderr, "Unrecognised error code was %d\n", error); } return "Unrecognised error code"; } /***************************************************************************** * * tdpGetErrorString * *****************************************************************************/ const char * tdpGetErrorString(tdpError_t ifail) { return ""; } /***************************************************************************** * * tdpPeekAtLastError * *****************************************************************************/ tdpError_t tdpPeekAtLastError(void) { return lastError; } /***************************************************************************** * * tdpGetLastError * *****************************************************************************/ tdpError_t tdpGetLastError(void) { tdpError_t last = lastError; lastError = tdpSuccess; strcpy(lastErrorString, ""); return last; } /***************************************************************************** * * tdpGetSymbolAddress * *****************************************************************************/ tdpError_t tdpGetSymbolAddress(void ** devptr, const void * symbol) { assert(devptr); assert(symbol); error_return_if(symbol == NULL, tdpErrorInvalidSymbol); *devptr = (void *) symbol; return tdpSuccess; } /***************************************************************************** * * tdpHostAlloc * *****************************************************************************/ tdpError_t tdpHostAlloc(void ** phost, size_t size, unsigned int flags) { void * ptr = NULL; error_return_if(phost == NULL, tdpErrorInvalidValue); switch (flags) { case tdpHostAllocDefault: case tdpHostAllocPortable: case tdpHostAllocMapped: case tdpHostAllocWriteCombined: ptr = malloc(size); error_return_if(ptr == NULL, tdpErrorMemoryAllocation); *phost = ptr; break; default: error_return(tdpErrorInvalidValue); } return tdpSuccess; } /***************************************************************************** * * tdpMalloc * 
*****************************************************************************/

tdpError_t tdpMalloc(void ** devPtr, size_t size) {

  assert(devPtr);

  *devPtr = malloc(size);
  error_return_if(*devPtr == NULL, tdpErrorMemoryAllocation);

  return tdpSuccess;
}

/*****************************************************************************
 *
 *  tdpMallocManaged
 *
 *****************************************************************************/

tdpError_t tdpMallocManaged(void ** devptr, size_t size, unsigned int flag) {

  void * ptr = NULL;
  unsigned int valid = (tdpMemAttachGlobal | tdpMemAttachHost);

  assert(devptr);

  error_return_if(size < 1, tdpErrorInvalidValue);
  error_return_if((flag & (~valid)), tdpErrorInvalidValue);

  ptr = malloc(size);
  error_return_if(ptr == NULL, tdpErrorMemoryAllocation);

  *devptr = ptr;

  return tdpSuccess;
}

/*****************************************************************************
 *
 *  tdpMemcpy
 *
 *****************************************************************************/

tdpError_t tdpMemcpy(void * dst, const void * src, size_t count,
                     tdpMemcpyKind kind) {

  assert(dst);
  assert(src);

  error_return_if(count < 1, tdpErrorInvalidValue);

  switch (kind) {
  case tdpMemcpyHostToDevice:
    error_return_if(dst == NULL, tdpErrorInvalidDevicePointer);
    memcpy(dst, src, count);
    break;
  case tdpMemcpyDeviceToHost:
    error_return_if(src == NULL, tdpErrorInvalidDevicePointer);
    memcpy(dst, src, count);
    break;
  case tdpMemcpyHostToHost:
    memcpy(dst, src, count);
    break;
  case tdpMemcpyDeviceToDevice:
    memcpy(dst, src, count);
    break;
  case tdpMemcpyDefault:
  default:
    error_return(tdpErrorInvalidMemcpyDirection);
  }

  return tdpSuccess;
}

/*****************************************************************************
 *
 *  tdpMemcpyFromSymbol
 *
 *****************************************************************************/

tdpError_t tdpMemcpyFromSymbol(void * dst, const void * symbol, size_t count,
                               size_t offset, tdpMemcpyKind kind) {

  assert(dst);
  assert(symbol);

  error_return_if(count < 1, tdpErrorInvalidValue);
  error_return_if(offset != 0, tdpErrorInvalidValue);

  switch (kind) {
  case tdpMemcpyDefault:
  case tdpMemcpyDeviceToHost:
    error_return_if(symbol == NULL, tdpErrorInvalidSymbol);
    memcpy(dst, symbol, count);
    break;
  case tdpMemcpyDeviceToDevice:
    error_return_if(dst == NULL, tdpErrorInvalidDevicePointer);
    error_return_if(symbol == NULL, tdpErrorInvalidSymbol);
    memcpy(dst, symbol, count);
    break;
  case tdpMemcpyHostToDevice:
    assert(0);
  case tdpMemcpyHostToHost:
    assert(0);
  default:
    error_return(tdpErrorInvalidMemcpyDirection);
  }

  return tdpSuccess;
}

/*****************************************************************************
 *
 *  tdpMemcpyToSymbol
 *
 *  CUDA wants "const void * symbol", but this is avoided as we need
 *  a memcpy(void * dst, const void * src, ...).
 *
 *****************************************************************************/

tdpError_t tdpMemcpyToSymbol(void * symbol, const void * src, size_t count,
                             size_t offset, tdpMemcpyKind kind) {

  assert(symbol);
  assert(src);

  error_return_if(count < 1, tdpErrorInvalidValue);
  error_return_if(offset != 0, tdpErrorInvalidValue);

  switch (kind) {
  case tdpMemcpyDefault:
  case tdpMemcpyHostToDevice:
    error_return_if(symbol == NULL, tdpErrorInvalidSymbol);
    memcpy(symbol, src, count);
    break;
  case tdpMemcpyDeviceToDevice:
    error_return_if(src == NULL, tdpErrorInvalidDevicePointer);
    memcpy(symbol, src, count);
    break;
  case tdpMemcpyDeviceToHost:
  case tdpMemcpyHostToHost:
  default:
    error_return(tdpErrorInvalidMemcpyDirection);
  }

  return tdpSuccess;
}

/*****************************************************************************
 *
 *  tdpMemset
 *
 *****************************************************************************/

tdpError_t tdpMemset(void * devPtr, int value, size_t count) {

  error_return_if(devPtr == NULL, tdpErrorInvalidDevicePointer);
  error_return_if(value < 0, tdpErrorInvalidValue);
  error_return_if(value > 255, tdpErrorInvalidValue);

  memset(devPtr, value, count);

  return tdpSuccess;
}

/*****************************************************************************
 *
 *  tdpStreamCreate
 *
 *****************************************************************************/

tdpError_t tdpStreamCreate(tdpStream_t * stream) {

  error_return_if(stream == NULL, tdpErrorInvalidValue);

  *stream = &staticStream;

  return tdpSuccess;
}

/*****************************************************************************
 *
 *  tdpStreamDestroy
 *
 *****************************************************************************/

tdpError_t tdpStreamDestroy(tdpStream_t stream) {

  error_return_if(stream != &staticStream, tdpErrorInvalidResourceHandle);

  return tdpSuccess;
}

/*****************************************************************************
 *
 *  tdpStreamSynchronize
 *
 *****************************************************************************/

tdpError_t tdpStreamSynchronize(tdpStream_t stream) {

  error_return_if(stream != &staticStream, tdpErrorInvalidResourceHandle);

  /* Success */

  return tdpSuccess;
}

/*****************************************************************************
 *
 *  tdpMemcpyAsync
 *
 *****************************************************************************/

tdpError_t tdpMemcpyAsync(void * dst, const void * src, size_t count,
                          tdpMemcpyKind kind, tdpStream_t stream) {

  /* Just ignore the stream argument and copy immediately */

  return tdpMemcpy(dst, src, count, kind);
}

static int int_max(int a, int b) {return (a > b) ?a :b;}
static int int_min(int a, int b) {return (a < b) ?a :b;}

/*****************************************************************************
 *
 *  tdpAtomicAddInt
 *
 *****************************************************************************/

__device__ int tdpAtomicAddInt(int * sum, int val) {

  int old;

  assert(sum);

#ifdef _OPENMP
  /* Some compilers dislike capture: use #pragma omp critical(atomicAddInt) */
#pragma omp atomic capture
  {
    old = *sum; *sum += val;
  }
#else
  old = *sum;
  *sum += val;
#endif

  return old;
}

/*****************************************************************************
 *
 *  tdpAtomicMaxInt
 *
 *  maxval expected to be __shared__
 *
 *****************************************************************************/

__device__ int tdpAtomicMaxInt(int * maxval, int val) {

  int old;

  assert(maxval);

#ifdef _OPENMP
  /* Ug.
*/ #pragma omp critical (atomicMaxInt) { old = *maxval; *maxval = int_max(*maxval, val); } #else old = *maxval; *maxval = int_max(*maxval, val); #endif return old; } /***************************************************************************** * * tdpAtomicMinInt * *****************************************************************************/ __device__ int tdpAtomicMinInt(int * minval, int val) { int old; assert(minval); #ifdef _OPENMP #pragma omp critical (atomicMinInt) { old = *minval; *minval = int_min(*minval, val); } #else old = *minval; *minval = int_min(*minval, val); #endif return old; } /***************************************************************************** * * tdpAtomicAddDouble * *****************************************************************************/ __device__ double tdpAtomicAddDouble(double * sum, double val) { double old; assert(sum); #ifdef _OPENMP /* Some compilers dislike capture: use #pragma omp critical(atomicAddD) */ #pragma omp atomic capture { old = *sum; *sum += val; } #else old = *sum; *sum += val; #endif return old; } static double double_max(double a, double b) {return (a > b) ?a :b;} static double double_min(double a, double b) {return (a < b) ?a :b;} /***************************************************************************** * * tdpAtomicMaxDouble * *****************************************************************************/ __device__ double tdpAtomicMaxDouble(double * maxval, double val) { double old; assert(maxval); #ifdef _OPENMP #pragma omp critical (atomicMaxDouble) { old = *maxval; *maxval = double_max(*maxval, val); } #else old = *maxval; *maxval = double_max(*maxval, val); #endif return old; } /***************************************************************************** * * tdpAtomicMinDouble * *****************************************************************************/ __device__ double tdpAtomicMinDouble(double * minval, double val) { double old; assert(minval); #ifdef _OPENMP #pragma omp critical (atomicMinDouble) { old = *minval; *minval = double_min(*minval, val); } #else old = *minval; *minval = double_min(*minval, val); #endif return old; } /***************************************************************************** * * tdpAtomicBlockAddInt * * See, e.g., * https://devblogs.nvidia.com/parallelforall/ * faster-parallel-reductions-kepler/ * * The partial sums partsum must be __shared__; they are destroyed * on exit. * The result is only significant at thread zero. * *****************************************************************************/ __device__ int tdpAtomicBlockAddInt(int * partsum) { #ifdef _OPENMP int istr; int nblock; int nthread = omp_get_num_threads(); int idx = omp_get_thread_num(); nblock = pow(2, ceil(log(1.0*nthread)/log(2))); for (istr = nblock/2; istr > 0; istr /= 2) { #pragma omp barrier if (idx < istr && idx + istr < nthread) { partsum[idx] += partsum[idx + istr]; } } #endif return partsum[0]; } /***************************************************************************** * * tdpAtomicBlockAddDouble * *****************************************************************************/ __device__ double tdpAtomicBlockAddDouble(double * partsum) { #ifdef _OPENMP int istr; int nblock; int nthread = omp_get_num_threads(); int idx = omp_get_thread_num(); nblock = pow(2, ceil(log(1.0*nthread)/log(2))); for (istr = nblock/2; istr > 0; istr /= 2) { #pragma omp barrier if (idx < istr && idx + istr < nthread) { partsum[idx] += partsum[idx + istr]; } } #endif return partsum[0]; }
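A sketch of how the block-wide reduction above is typically driven from host code, assuming target.h declares tdpAtomicBlockAddDouble() and TARGET_MAX_THREADS_PER_BLOCK as used elsewhere in this file; sum_on_host() is an illustrative name, not part of the API. Every thread deposits a partial sum into the shared buffer and calls the reduction collectively (the barriers inside synchronise the tree levels); only thread zero's result is significant.

#include <omp.h>

#include "target.h"

double sum_on_host(const double * a, int n) {

  double result = 0.0;
  double partsum[TARGET_MAX_THREADS_PER_BLOCK]; /* must be shared */

  #pragma omp parallel shared(partsum, result)
  {
    int it = omp_get_thread_num();
    int nt = omp_get_num_threads(); /* assumed <= TARGET_MAX_THREADS_PER_BLOCK */
    double local = 0.0;
    int i;

    for (i = it; i < n; i += nt) local += a[i];
    partsum[it] = local;

    /* Collective call: every thread must reach it, so that all threads
     * pass the same sequence of barriers inside the reduction. */
    local = tdpAtomicBlockAddDouble(partsum);
    if (it == 0) result = local;
  }

  return result;
}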
matmul.c
#include <stdlib.h>
#include <sys/time.h>
#include <stdio.h>
#include <omp.h>
//#define _OPENACCM
#ifdef _OPENACCM
#include <openacc.h>
#endif

#ifndef _N_
#define _N_ 512
#endif

int N = _N_;
int M = _N_;
int P = _N_;

double my_timer ()
{
  struct timeval time;
  gettimeofday (&time, 0);
  return time.tv_sec + time.tv_usec / 1000000.0;
}

void MatrixMultiplication_openacc(float * a, float * b, float * c)
{
  int i, j, k;
#ifdef _OPENACCM
  acc_init(acc_device_default);
#endif
#pragma acc data copyout(a[0:(M*N)]), copyin(b[0:(M*P)],c[0:(P*N)])
  {
#pragma acc kernels loop independent gang
    for (i=0; i<M; i++){
#pragma acc loop worker
      for (j=0; j<N; j++) {
        float sum = 0.0;
#pragma acc loop seq
        for (k=0; k<P; k++) {
          sum += b[i*P+k]*c[k*N+j];
        }
        a[i*N+j] = sum;
      }
    }
  }
#ifdef _OPENACCM
  acc_shutdown(acc_device_default);
#endif
}

void MatrixMultiplication_openmp(float * a, float * b, float * c)
{
  int i, j, k;
  int chunk = N/4;

#pragma omp parallel shared(a,b,c,chunk) private(i,j,k)
  {
#ifdef _OPENMP
    if(omp_get_thread_num() == 0) {
      printf("Number of OpenMP threads %d\n", omp_get_num_threads());
    }
#endif
    /* Hand out rows in blocks of `chunk`; the variable was previously
       computed (and listed in shared()) but never used in the schedule. */
#pragma omp for schedule(static, chunk)
    for (i=0; i<M; i++){
      for (j=0; j<N; j++) {
        float sum = 0.0;
        for (k=0; k<P; k++)
          sum += b[i*P+k]*c[k*N+j];
        a[i*N+j] = sum;
      }
    }
  }
}

int main()
{
  float *a, *b, *c;
  int i;
  double elapsed_time;

  a = (float *) malloc(M*N*sizeof(float));
  b = (float *) malloc(M*P*sizeof(float));
  c = (float *) malloc(P*N*sizeof(float));

  for (i = 0; i < M*N; i++) {
    a[i] = (float) 0.0;
  }
  for (i = 0; i < M*P; i++) {
    b[i] = (float) i;
  }
  for (i = 0; i < P*N; i++) {
    c[i] = (float) 1.0;
  }

  elapsed_time = my_timer();
  MatrixMultiplication_openmp(a,b,c);
  elapsed_time = my_timer() - elapsed_time;
  printf("CPU Elapsed time = %lf sec\n", elapsed_time);

  elapsed_time = my_timer();
#pragma aspen modelregion
  MatrixMultiplication_openacc(a,b,c);
  elapsed_time = my_timer() - elapsed_time;
  printf("Accelerator Elapsed time = %lf sec\n", elapsed_time);

  free(a);
  free(b);
  free(c);
  return 0;
}
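Because b[i*P+k] = i*P+k and c is all ones, every entry of row i of the product equals P*(i*P) + P*(P-1)/2, so the output can be validated in O(M) without a reference multiply. A hedged helper along these lines (check_result and its tolerance are illustrative, not part of the benchmark) could be called after each of the two multiplications in main:

#include <math.h>
#include <stdio.h>

int check_result(const float * a, int m, int n, int p)
{
  int i, errors = 0;
  for (i = 0; i < m; i++) {
    float expected = (float) p * (float) i * (float) p
                   + (float) p * ((float) p - 1.0f) / 2.0f;
    /* All entries of row i are identical, so checking one per row suffices. */
    if (fabsf(a[i*n] - expected) > 1e-2f * fabsf(expected) + 1.0f)
      errors++;
  }
  if (errors) printf("check_result: %d bad rows\n", errors);
  return errors;
}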
dd_gurobi.h
// Header for LP computation of the temporal upper bound using Gurobi.
#pragma once

#include <gurobi_c++.h>

#include <map>
#include <random>
#include <sstream>
#include <string>
#include <utility>
#include <tuple>
#include <vector>

// LL suffix so the constant cannot overflow a 32-bit long.
const int64_t MAX_CONSTRAINTS = 5000000000LL;

std::string LP_name(const std::string &prefix, const std::initializer_list<int> &vertices) {
  std::ostringstream out;
  out << prefix;
  if (vertices.size() > 0) {
    out << "_{";
    for (auto v : vertices) {
      out << std::to_string(v) << ",";
    }
    out.seekp(-1, std::ios_base::end), out << "}";
  }
  return out.str();
}

inline int LP_get_variable_index(const int &u, const int &v, const int &n, const int &n0) {
  return (u - n0) * (n - n0) + (v - n0);
}

inline int LP_get_variable_index(
    const int &u, const int &i, const int &v, const int &j, const int &n, const int &n0) {
  int x = LP_get_variable_index(u, i, n, n0), y = LP_get_variable_index(v, j, n, n0);
  return LP_get_variable_index(n, n0, n, n0) + x * (n - n0) * (n - n0) + y;
}

inline void add_asymmetry_constraint(
    GRBModel *LP, const std::vector<GRBVar> &vars, const int &n, const int &n0,
    const GRBLinExpr &s) {
  #pragma omp parallel for
  for (int i = n0; i < n; i++) {
    for (int j = i + 1; j < n; j++) {
      #pragma omp critical
      {
        GRBLinExpr row = vars[LP_get_variable_index(i, j, n, n0)]
            + vars[LP_get_variable_index(j, i, n, n0)];
        LP->addConstr(row, GRB_LESS_EQUAL, s, LP_name("A", { i, j }));
      }
    }
  }
}

inline void add_transitivity_constraint(
    GRBModel *LP, const std::vector<GRBVar> &vars, const int &n, const int &n0,
    const GRBLinExpr &s, const int &i, const int &j, const int &k) {
  #pragma omp critical
  {
    GRBLinExpr row = vars[LP_get_variable_index(i, j, n, n0)]
        + vars[LP_get_variable_index(j, k, n, n0)]
        - vars[LP_get_variable_index(i, k, n, n0)];
    LP->addConstr(row, GRB_LESS_EQUAL, s, LP_name("T", { i, j, k }));
  }
}

inline void add_density_constraint(
    GRBModel *LP, const std::vector<GRBVar> &vars, const int &n, const int &n0,
    const double &density) {
  GRBLinExpr row = 0;
  for (int i = n0; i < n; i++) {
    for (int j = n0; j < n; j++) {
      if (i != j) {
        row += vars[LP_get_variable_index(i, j, n, n0)];
      }
    }
  }
  LP->addConstr(row, GRB_LESS_EQUAL, density, LP_name("D", {}));
}

std::map<std::pair<int, int>, double> retrieve_solution(
    GRBModel *LP, const int &n, const int &n0, const double &s) {
  std::map<std::pair<int, int>, double> solution;
  for (int i = n0; i < n; i++) {
    for (int j = n0; j < n; j++) {
      if (i == j) {
        continue;
      }
      // Look the variable up by name: the original read an out-of-scope
      // `vars` vector, which does not compile.
      double y_ij = LP->getVarByName(LP_name("y", {i, j})).get(GRB_DoubleAttr_X);
      solution.insert(std::make_pair(std::make_pair(i, j), y_ij / s));
    }
  }
  return solution;
}

std::tuple<double, std::map<std::pair<int, int>, double>> LP_ordering_solve(
    const std::map<std::pair<int, int>, long double> &p_uv, const int &n, const int &n0,
    const double &epsilon, const bool get_solution = false) {
  try {
    GRBEnv* environment = new GRBEnv();
    GRBModel *LP = new GRBModel(*environment);
    LP->set(GRB_StringAttr_ModelName, "Solve " + std::to_string(epsilon));
    LP->set(GRB_IntAttr_ModelSense, GRB_MAXIMIZE);
    double density = epsilon * (n - n0) * (n - n0 - 1) / 2;

    // Objective function
    std::vector<GRBVar> vars((n - n0) * (n - n0) + 1);
    int s_index = (n - n0) * (n - n0);
    for (int i = n0; i < n; i++) {
      for (int j = n0; j < n; j++) {
        auto index = LP_get_variable_index(i, j, n, n0);
        if (i != j) {
          const auto &p_ij = p_uv.find(std::make_pair(i, j));
          vars[index] = LP->addVar(
              0.0, 1.0, (p_ij != p_uv.end()) ? static_cast<double>(p_ij->second) : 0.0,
              GRB_CONTINUOUS, LP_name("y", {i, j}));
        } else {
          vars[index] = LP->addVar(0.0, 0.0, 0.0, GRB_CONTINUOUS, LP_name("y", {i, j}));
        }
      }
    }
    vars[s_index] = LP->addVar(0.0, 1 / density, 0.0, GRB_CONTINUOUS, "s");

    // Antisymmetry
    add_asymmetry_constraint(LP, vars, n, n0, vars[s_index]);

    // Transitivity
    if (MAX_CONSTRAINTS >= pow(n - n0, 3.0)) {
      #pragma omp parallel for
      for (int i = n0; i < n; i++) {
        for (int j = n0; j < n; j++) {
          for (int k = n0; k < n; k++) {
            if (i != j && j != k && i != k) {
              add_transitivity_constraint(LP, vars, n, n0, vars[s_index], i, j, k);
            }
          }
        }
      }
    } else {
      // One generator per thread: std::mt19937 is not safe to share
      // across OpenMP threads without synchronization.
      #pragma omp parallel
      {
        std::random_device device;
        std::mt19937 generator(device());
        std::uniform_int_distribution<int> index_distribution(n0, n - 1);
        #pragma omp for
        for (int64_t constraint = 0; constraint < MAX_CONSTRAINTS; constraint++) {
          int i = index_distribution(generator), j = index_distribution(generator),
              k = index_distribution(generator);
          if (i == j || j == k || i == k) {
            continue;
          }
          add_transitivity_constraint(LP, vars, n, n0, vars[s_index], i, j, k);
        }
      }
    }

    // Density
    add_density_constraint(LP, vars, n, n0, 1.0);

    LP->set(GRB_IntParam_OutputFlag, 0);
    LP->optimize();
    int status = LP->get(GRB_IntAttr_Status);
    if (status == GRB_OPTIMAL) {
      double objective = LP->get(GRB_DoubleAttr_ObjVal);
      std::map<std::pair<int, int>, double> solution;
      if (get_solution) {
        double s = vars[s_index].get(GRB_DoubleAttr_X);
        solution = retrieve_solution(LP, n, n0, s);
      }
      delete LP, delete environment;
      return std::make_tuple(objective, solution);
    } else {
      delete LP, delete environment;
      throw std::domain_error("Invalid LP status: " + std::to_string(status));
    }
  } catch (const GRBException &e) {
    throw std::domain_error(
        "LP solver exception code: " + std::to_string(e.getErrorCode())
        + ", message: " + e.getMessage());
  }
}

std::tuple<double, std::map<std::pair<int, int>, double>> LP_binning_solve(
    const std::map<std::pair<int, int>, long double> &p_uv, const int &n, const int &n0,
    const double &epsilon, const bool get_solution = false) {
  try {
    (void)get_solution;
    GRBEnv* environment = new GRBEnv();
    GRBModel *LP = new GRBModel(*environment);
    LP->set(GRB_StringAttr_ModelName, "Solve " + std::to_string(epsilon));
    LP->set(GRB_IntAttr_ModelSense, GRB_MAXIMIZE);
    double density = epsilon * (n - n0) * (n - n0 - 1) / 2;

    // Objective function
    int var_count = pow(n - n0, 4.0) + pow(n - n0, 2.0) + 1;
    std::vector<GRBVar> vars(var_count);
    for (int u = n0; u < n; u++) {
      for (int i = n0; i < n; i++) {
        auto index = LP_get_variable_index(u, i, n, n0);
        vars[index] = LP->addVar(0.0, 1.0, 0.0, GRB_CONTINUOUS, LP_name("y", {u, i}).c_str());
      }
    }
    for (int u = n0; u < n; u++) {
      for (int i = n0; i < n; i++) {
        for (int v = n0; v < n; v++) {
          for (int j = n0; j < n; j++) {
            auto index = LP_get_variable_index(u, i, v, j, n, n0);
            if (u != v && i < j) {
              const auto &p_ij = p_uv.find(std::make_pair(u, v));
              vars[index] = LP->addVar(
                  0.0, 1.0, (p_ij != p_uv.end()) ?
static_cast<double>(p_ij->second) : 0.0, GRB_CONTINUOUS, LP_name("w", {u, i, v, j}).c_str()); } else { vars[index] = LP->addVar(0.0, 1.0, 0.0, GRB_CONTINUOUS, LP_name("w", {u, i, v, j}).c_str()); } } } } } int s_index = var_count - 1; vars[s_index] = LP->addVar(0.0, 1 / density, 0.0, GRB_CONTINUOUS, "s"); // Identity for (int u = n0; u < n; u++) { for (int i = n0; i < n; i++) { GRBLinExpr row = vars[LP_get_variable_index(u, i, n, n0)] - vars[LP_get_variable_index(u, i, u, i, n, n0)]; LP->addConstr(row, GRB_EQUAL, 0.0, LP_name("I", {u, i})); } } // Symmetry for (int u = n0; u < n; u++) { for (int i = n0; i < n; i++) { for (int v = n0; v < n; v++) { for (int j = i + 1; j < n; j++) { GRBLinExpr row = vars[LP_get_variable_index(u, i, v, j, n, n0)] - vars[LP_get_variable_index(v, j, u, i, n, n0)]; LP->addConstr(row, GRB_EQUAL, 0.0, LP_name("S", {u, i, v, j})); } } } } // y-density for (int u = n0; u < n; u++) { GRBLinExpr row = 0; for (int i = n0; i < n; i++) { row += vars[LP_get_variable_index(u, i, n, n0)]; } row -= vars[s_index]; LP->addConstr(row, GRB_EQUAL, 0.0, LP_name("yD", {u})); } // w-density for (int u = n0; u < n; u++) { for (int i = n0; i < n; i++) { for (int v = n0; v < n; v++) { GRBLinExpr row = 0; for (int j = n0; j < n; j++) { row += vars[LP_get_variable_index(u, i, v, j, n, n0)]; } row -= vars[LP_get_variable_index(u, i, n, n0)]; LP->addConstr(row, GRB_EQUAL, 0.0, LP_name("wD", {u, i, v})); } } } // Density GRBLinExpr row = 0; for (int u = n0; u < n; u++) { for (int i = n0; i < n; i++) { for (int v = n0; v < n; v++) { for (int j = i + 1; j < n; j++) { if (u != v) { row += vars[LP_get_variable_index(u, i, v, j, n, n0)]; } } } } } LP->addConstr(row, GRB_EQUAL, 1.0, LP_name("D", {})); LP->set(GRB_IntParam_OutputFlag, 0); LP->optimize(); int status = LP->get(GRB_IntAttr_Status); if (status == GRB_OPTIMAL) { double objective = LP->get(GRB_DoubleAttr_ObjVal); std::map<std::pair<int, int>, double> solution; delete LP, delete environment; return std::make_tuple(objective, solution); } else { delete LP, delete environment; throw std::domain_error("Invalid LP status: " + std::to_string(status)); } } catch (const GRBException &e) { throw std::domain_error( "LP solver exception code: " + std::to_string(e.getErrorCode()) + ", message: " + e.getMessage()); } } std::tuple<double, std::map<std::pair<int, int>, double>> IP_ordering_solve( const std::map<std::pair<int, int>, long double> &p_uv, const int &n, const int &n0, const double &epsilon, const bool get_solution = false) { try { GRBEnv* environment = new GRBEnv(); GRBModel *IP = new GRBModel(*environment); IP->set(GRB_StringAttr_ModelName, "Solve " + std::to_string(epsilon)); IP->set(GRB_IntAttr_ModelSense, GRB_MAXIMIZE); int density = epsilon * (n - n0) * (n - n0 - 1) / 2; // Objective function std::vector<GRBVar> vars((n - n0) * (n - n0)); for (int i = n0; i < n; i++) { for (int j = n0; j < n; j++) { auto index = LP_get_variable_index(i, j, n, n0); if (i != j) { const auto &p_ij = p_uv.find(std::make_pair(i, j)); vars[index] = IP->addVar( 0.0, 1.0, (p_ij != p_uv.end()) ? 
static_cast<double>(p_ij->second) : 0.0, GRB_BINARY, LP_name("y", {i, j})); } else { vars[index] = IP->addVar(0.0, 0.0, 0.0, GRB_INTEGER, LP_name("y", {i, j})); } } } // Antisymmetry add_asymmetry_constraint(IP, vars, n, n0, 1.0); // Transitivity #pragma omp parallel for for (int i = n0; i < n; i++) { for (int j = n0; j < n; j++) { for (int k = n0; k < n; k++) { if (i != j && j != k && i != k) { add_transitivity_constraint(IP, vars, n, n0, 1.0, i, j, k); } } } } // Density add_density_constraint(IP, vars, n, n0, density); IP->set(GRB_IntParam_OutputFlag, 0); IP->optimize(); int status = IP->get(GRB_IntAttr_Status); if (status == GRB_OPTIMAL) { double objective = IP->get(GRB_DoubleAttr_ObjVal) / density; std::map<std::pair<int, int>, double> solution; if (get_solution) { solution = retrieve_solution(IP, n, n0, 1); } delete IP, delete environment; return std::make_tuple(objective, solution); } else { delete IP, delete environment; throw std::domain_error("Invalid IP status: " + std::to_string(status)); } } catch (const GRBException &e) { throw std::domain_error( "IP solver exception code: " + std::to_string(e.getErrorCode()) + ", message: " + e.getMessage()); } }
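/* Usage sketch (not part of the original header): a minimal driver that feeds
   LP_ordering_solve a small, hypothetical probability map p_uv and prints the
   resulting bound. It assumes a working Gurobi installation and license; the
   probabilities and sizes below are illustrative only. */
#ifdef LP_ORDERING_EXAMPLE
#include <iostream>

int main() {
  std::map<std::pair<int, int>, long double> p_uv;
  // Hypothetical pairwise probabilities over vertices 0..3 (n0 = 0, n = 4).
  p_uv[std::make_pair(0, 1)] = 0.6L;
  p_uv[std::make_pair(1, 2)] = 0.3L;
  p_uv[std::make_pair(2, 3)] = 0.1L;
  double objective;
  std::map<std::pair<int, int>, double> solution;
  std::tie(objective, solution) = LP_ordering_solve(p_uv, 4, 0, 0.5, true);
  std::cout << "objective = " << objective << std::endl;
  for (const auto &entry : solution)
    std::cout << LP_name("y", { entry.first.first, entry.first.second })
              << " = " << entry.second << std::endl;
  return 0;
}
#endif  // LP_ORDERING_EXAMPLE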
declare-variant-2.c
void f0 (void); void f1 (void); #pragma omp declare variant /* { dg-error "expected '\\(' before end of line" } */ void f2 (void); #pragma omp declare variant ( /* { dg-error "" } */ void f3 (void); #pragma omp declare variant () /* { dg-error "" } */ void f4 (void); #pragma omp declare variant match(user={condition(0)}) /* { dg-error "expected '\\(' before 'match'" } */ void f5 (void); #pragma omp declare variant (f1) /* { dg-error "expected 'match' before end of line" } */ void f6 (void); #pragma omp declare variant (f1) simd /* { dg-error "expected 'match' before 'simd'" } */ void f7 (void); #pragma omp declare variant (f1) match /* { dg-error "expected '\\(' before end of line" } */ void f8 (void); #pragma omp declare variant (f1) match( /* { dg-error "expected 'construct', 'device', 'implementation' or 'user' before end of line" } */ void f9 (void); #pragma omp declare variant (f1) match() /* { dg-error "expected 'construct', 'device', 'implementation' or 'user' before '\\)' token" } */ void f10 (void); #pragma omp declare variant (f1) match(foo) /* { dg-error "expected 'construct', 'device', 'implementation' or 'user' before 'foo'" } */ void f11 (void); #pragma omp declare variant (f1) match(something={something}) /* { dg-error "expected 'construct', 'device', 'implementation' or 'user' before 'something'" } */ void f12 (void); #pragma omp declare variant (f1) match(user) /* { dg-error "expected '=' before '\\)' token" } */ void f13 (void); #pragma omp declare variant (f1) match(user=) /* { dg-error "expected '\\\{' before '\\)' token" } */ void f14 (void); #pragma omp declare variant (f1) match(user= /* { dg-error "expected '\\\{' before end of line" } */ void f15 (void); #pragma omp declare variant (f1) match(user={) /* { dg-error "expected trait selector name before '\\)' token" } */ void f16 (void); /* { dg-error "expected '\\\}' before" "" { target c++ } .-1 } */ #pragma omp declare variant (f1) match(user={}) /* { dg-error "expected trait selector name before '\\\}' token" } */ void f17 (void); #pragma omp declare variant (f1) match(user={condition}) /* { dg-error "expected '\\(' before '\\\}' token" } */ void f18 (void); #pragma omp declare variant (f1) match(user={condition(}) /* { dg-error "expected \[^\n\r]*expression before '\\\}' token" } */ void f19 (void); #pragma omp declare variant (f1) match(user={condition()}) /* { dg-error "expected \[^\n\r]*expression before '\\)' token" } */ void f20 (void); #pragma omp declare variant (f1) match(user={condition(f1)}) /* { dg-error "property must be constant integer expression" "" { target { c || c++11 } } } */ void f21 (void); /* { dg-error "cannot appear in a constant-expression" "" { target c++98_only } .-1 } */ #pragma omp declare variant (f1) match(user={condition(1, 2, 3)}) /* { dg-error "expected '\\)' before ',' token" } */ void f22 (void); #pragma omp declare variant (f1) match(construct={master}) /* { dg-error "selector 'master' not allowed for context selector set 'construct'" } */ void f23 (void); #pragma omp declare variant (f1) match(construct={teams,parallel,master,for}) /* { dg-error "selector 'master' not allowed for context selector set 'construct'" } */ void f24 (void); /* { dg-error "expected '\\\}' before ',' token" "" { target c } .-1 } */ #pragma omp declare variant (f1) match(construct={parallel(1 /* { dg-error "selector 'parallel' does not accept any properties" } */ void f25 (void); /* { dg-error "expected '\\\}' before end of line" "" { target c++ } .-1 } */ /* { dg-error "expected '\\\}' before '\\(' 
token" "" { target c } .-2 } */ #pragma omp declare variant (f1) match(construct={parallel(1)}) /* { dg-error "selector 'parallel' does not accept any properties" } */ void f26 (void); /* { dg-error "expected '\\\}' before '\\(' token" "" { target c } .-1 } */ #pragma omp declare variant (f0) match(construct={simd(12)}) /* { dg-error "expected \[^\n\r]* clause before" } */ void f27 (void); /* { dg-error "'\\)' before numeric constant" "" { target c++ } .-1 } */ #pragma omp declare variant (f1) match(construct={parallel},construct={for}) /* { dg-error "selector set 'construct' specified more than once" } */ void f28 (void); #pragma omp declare variant (f1) match(construct={parallel},construct={parallel}) /* { dg-error "selector set 'construct' specified more than once" } */ void f29 (void); #pragma omp declare variant (f1) match(user={condition(0)},construct={target},user={condition(0)}) /* { dg-error "selector set 'user' specified more than once" } */ void f30 (void); #pragma omp declare variant (f1) match(user={condition(0)},user={condition(1)}) /* { dg-error "selector set 'user' specified more than once" } */ void f31 (void); #pragma omp declare variant (f1) match(device={kind}) /* { dg-error "expected '\\(' before '\\\}' token" } */ void f32 (void); #pragma omp declare variant (f1) match(device={isa}) /* { dg-error "expected '\\(' before '\\\}' token" } */ void f33 (void); #pragma omp declare variant (f1) match(device={arch}) /* { dg-error "expected '\\(' before '\\\}' token" } */ void f34 (void); #pragma omp declare variant (f1) match(device={kind,isa,arch}) /* { dg-error "expected '\\(' before ',' token" } */ void f35 (void); #pragma omp declare variant (f1) match(device={kind(}) /* { dg-error "expected identifier or string literal before '\\\}' token" } */ void f36 (void); #pragma omp declare variant (f1) match(device={kind(unknown)}) /* { dg-warning "unknown property 'unknown' of 'kind' selector" } */ void f37 (void); #pragma omp declare variant (f1) match(device={kind(unknown,foobar)}) /* { dg-warning "unknown property 'unknown' of 'kind' selector" } */ void f38 (void); /* { dg-warning "unknown property 'foobar' of 'kind' selector" "" { target *-*-* } .-1 } */ #pragma omp declare variant (f1) match(device={isa(1)}) /* { dg-error "expected identifier or string literal before numeric constant" } */ void f39 (void); #pragma omp declare variant (f1) match(device={arch(17)}) /* { dg-error "expected identifier or string literal before numeric constant" } */ void f40 (void); #pragma omp declare variant (f1) match(device={foobar(3)}) void f41 (void); #pragma omp declare variant (f1) match(device={arch(x86_64)},device={isa(avx512vl)}) /* { dg-error "selector set 'device' specified more than once" } */ void f42 (void); #pragma omp declare variant (f1) match(implementation={foobar(3)}) void f43 (void); #pragma omp declare variant (f1) match(implementation={vendor}) /* { dg-error "expected '\\(' before '\\\}' token" } */ void f44 (void); #pragma omp declare variant (f1) match(implementation={extension}) /* { dg-error "expected '\\(' before '\\\}' token" } */ void f45 (void); #pragma omp declare variant (f1) match(implementation={vendor()}) /* { dg-error "expected identifier or string literal before '\\)' token" } */ void f45 (void); #pragma omp declare variant (f1) match(implementation={vendor(123-234)}) /* { dg-error "expected identifier or string literal before numeric constant" } */ void f46 (void); #pragma omp declare variant (f1) match(implementation={vendor("foobar")}) /* { dg-warning 
"unknown property '.foobar.' of 'vendor' selector" } */ void f47 (void); #pragma omp declare variant (f1) match(implementation={unified_address(yes)}) /* { dg-error "selector 'unified_address' does not accept any properties" } */ void f48 (void); /* { dg-error "expected '\\\}' before '\\(' token" "" { target c } .-1 } */ #pragma omp declare variant (f1) match(implementation={unified_shared_memory(no)}) /* { dg-error "selector 'unified_shared_memory' does not accept any properties" } */ void f49 (void); /* { dg-error "expected '\\\}' before '\\(' token" "" { target c } .-1 } */ #pragma omp declare variant (f1) match(implementation={dynamic_allocators(42)}) /* { dg-error "selector 'dynamic_allocators' does not accept any properties" } */ void f50 (void); /* { dg-error "expected '\\\}' before '\\(' token" "" { target c } .-1 } */ #pragma omp declare variant (f1) match(implementation={reverse_offload()}) /* { dg-error "selector 'reverse_offload' does not accept any properties" } */ void f51 (void); /* { dg-error "expected '\\\}' before '\\(' token" "" { target c } .-1 } */ #pragma omp declare variant (f1) match(implementation={atomic_default_mem_order}) /* { dg-error "expected '\\(' before '\\\}' token" } */ void f52 (void); #pragma omp declare variant (f1) match(implementation={atomic_default_mem_order(acquire)}) /* { dg-error "incorrect property 'acquire' of 'atomic_default_mem_order' selector" } */ void f53 (void); #pragma omp declare variant (f1) match(implementation={atomic_default_mem_order(release)}) /* { dg-error "incorrect property 'release' of 'atomic_default_mem_order' selector" } */ void f54 (void); #pragma omp declare variant (f1) match(implementation={atomic_default_mem_order(foobar)}) /* { dg-error "incorrect property 'foobar' of 'atomic_default_mem_order' selector" } */ void f55 (void); #pragma omp declare variant (f1) match(implementation={atomic_default_mem_order(relaxed,seq_cst)}) /* { dg-error "expected '\\)' before ',' token" } */ void f56 (void); #pragma omp declare variant (f1) match(implementation={atomic_default_mem_order(relaxed)},implementation={atomic_default_mem_order(relaxed)}) /* { dg-error "selector set 'implementation' specified more than once" } */ void f57 (void); #pragma omp declare variant (f1) match(user={foobar(3)}) /* { dg-error "selector 'foobar' not allowed for context selector set 'user'" } */ void f58 (void); /* { dg-error "expected '\\\}' before '\\(' token" "" { target c } .-1 } */ #pragma omp declare variant (f1) match(construct={foobar(3)}) /* { dg-error "selector 'foobar' not allowed for context selector set 'construct'" } */ void f59 (void); /* { dg-error "expected '\\\}' before '\\(' token" "" { target c } .-1 } */ #pragma omp declare variant (f1) match(construct={parallel},foobar={bar}) /* { dg-error "expected 'construct', 'device', 'implementation' or 'user' before 'foobar'" } */ void f60 (void); #pragma omp declare variant (f1) match(construct={parallel,parallel}) /* { dg-error "selector 'parallel' specified more than once in set 'construct'" } */ void f61 (void); #pragma omp declare variant (f1) match(construct={target,parallel,for,simd,parallel}) /* { dg-error "selector 'parallel' specified more than once in set 'construct'" } */ void f62 (void); #pragma omp declare variant (f1) match(construct={target,teams,teams}) /* { dg-error "selector 'teams' specified more than once in set 'construct'" } */ void f63 (void); #pragma omp declare variant (f1) match(construct={single}) /* { dg-error "selector 'single' not allowed for context selector 
set 'construct'" } */ void f64 (void); #pragma omp declare variant (f1) match(construct={taskgroup}) /* { dg-error "selector 'taskgroup' not allowed for context selector set 'construct'" } */ void f65 (void); #pragma omp declare variant (f1) match(construct={do}) /* { dg-error "selector 'do' not allowed for context selector set 'construct'" } */ void f66 (void); #pragma omp declare variant (f1) match(construct={threadprivate}) /* { dg-error "selector 'threadprivate' not allowed for context selector set 'construct'" } */ void f67 (void); #pragma omp declare variant (f1) match(construct={critical}) /* { dg-error "selector 'critical' not allowed for context selector set 'construct'" } */ void f68 (void); #pragma omp declare variant (f1) match(construct={task}) /* { dg-error "selector 'task' not allowed for context selector set 'construct'" } */ void f69 (void); #pragma omp declare variant (f1) match(construct={taskloop}) /* { dg-error "selector 'taskloop' not allowed for context selector set 'construct'" } */ void f70 (void); #pragma omp declare variant (f1) match(construct={sections}) /* { dg-error "selector 'sections' not allowed for context selector set 'construct'" } */ void f71 (void); #pragma omp declare variant (f1) match(construct={section}) /* { dg-error "selector 'section' not allowed for context selector set 'construct'" } */ void f72 (void); #pragma omp declare variant (f1) match(construct={workshare}) /* { dg-error "selector 'workshare' not allowed for context selector set 'construct'" } */ void f73 (void); #pragma omp declare variant (f1) match(construct={requires}) /* { dg-error "selector 'requires' not allowed for context selector set 'construct'" } */ void f74 (void); #pragma omp declare variant (f1),match(construct={parallel}) /* { dg-error "expected 'match' before ','" } */ void f75 (void); #pragma omp declare variant (f1) match(implementation={atomic_default_mem_order("relaxed")}) /* { dg-error "expected identifier before string constant" } */ void f76 (void); #pragma omp declare variant (f1) match(user={condition(score(&f76):1)}) /* { dg-error "score argument must be constant integer expression" "" { target { ! c++98_only } } } */ void f77 (void); /* { dg-error "cannot appear in a constant-expression" "" { target c++98_only } .-1 } */ #pragma omp declare variant (f1) match(user={condition(score(-130):1)}) /* { dg-error "score argument must be non-negative" } */ void f78 (void);
CMS.c
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <omp.h> #include "CMS.h" // NOTICE: Shell quartet (MN|PQ) ERI result needs NCART(M)*NCART(N) // *NCART(P)*NCART(Q) + 8 doubles. +8 for statistics information. // Read all shell information in a .mol file and normalize all these shells void CMS_read_mol_file(const char *mol_fname, int *natom_, int *nshell_, shell_t **shells_) { int AM_map[128]; AM_map['S'] = 0; AM_map['P'] = 1; AM_map['D'] = 2; AM_map['F'] = 3; AM_map['G'] = 4; AM_map['H'] = 5; AM_map['I'] = 6; AM_map['J'] = 7; AM_map['K'] = 8; AM_map['L'] = 9; FILE *inf; inf = fopen(mol_fname, "r"); if (inf == NULL) { printf("[FATAL] CMS cannot open mol file %s\n", mol_fname); assert(inf != NULL); } // 1. First pass, get the nshell_total int natom, nshell_total = 0; fscanf(inf, "%d", &natom); for (int i = 0; i < natom; i++) { char sym[8]; int nshell, nprimall, nallprimg; double x, y, z; fscanf(inf, "%s %d %d %d", sym, &nshell, &nprimall, &nallprimg); fscanf(inf, "%lf %lf %lf", &x, &y, &z); for (int j = 0; j < nshell; j++) { char type[8]; int nprim, ngen; fscanf(inf, "%s %d %d", type, &nprim, &ngen); nshell_total += ngen; for (int k = 0; k < nprim; k++) { double alpha; fscanf(inf, "%lf", &alpha); for (int l = 0; l < ngen; l++) { double coef; fscanf(inf, "%lf", &coef); } } } } fclose(inf); // 2. Second pass, create Simint shells shell_t *shells = (shell_t *) malloc(sizeof(shell_t) * nshell_total); assert(shells != NULL); int shell_idx = 0; inf = fopen(mol_fname, "r"); fscanf(inf, "%d", &natom); for (int i = 0; i < natom; i++) { char sym[8], type[8]; int nshell, nprimall, nallprimg; int nprim, ngen, sidx; double x, y, z; double alpha, coef; fscanf(inf, "%s %d %d %d", sym, &nshell, &nprimall, &nallprimg); fscanf(inf, "%lf %lf %lf", &x, &y, &z); for (int j = 0; j < nshell; j++) { fscanf(inf, "%s %d %d", type, &nprim, &ngen); for (int l = 0; l < ngen; l++) { sidx = shell_idx + l; simint_initialize_shell(&shells[sidx]); simint_allocate_shell(nprim, &shells[sidx]); shells[sidx].am = AM_map[(char) type[l]]; shells[sidx].nprim = nprim; shells[sidx].x = x; shells[sidx].y = y; shells[sidx].z = z; } for (int k = 0; k < nprim; k++) { fscanf(inf, "%lf", &alpha); for (int l = 0; l < ngen; l++) { fscanf(inf, "%lf", &coef); sidx = shell_idx + l; shells[sidx].alpha[k] = alpha; shells[sidx].coef[k] = coef; } } shell_idx += ngen; } } fclose(inf); // 3.
Normalize all shells simint_normalize_shells(nshell_total, shells); *natom_ = natom; *nshell_ = nshell_total; *shells_ = shells; } // Destroy all Simint shells void CMS_destroy_shells(const int nshell, shell_t *shells) { for (int i = 0; i < nshell; i++) simint_free_shell(&shells[i]); } // Destroy all Simint shell pairs void CMS_destroy_shell_pairs(const int num_sp, multi_sp_t *sp) { for (int i = 0; i < num_sp; i++) simint_free_multi_shellpair(&sp[i]); } // Get the number of basis function pairs in a shell pair int CMS_get_sp_nbfp(const multi_sp_p sp) { return NCART(sp->am1) * NCART(sp->am2); } // Print all shell information, for debugging void CMS_print_shells(const int nshell, shell_t *shells) { printf("%d Shells:\n", nshell); for (int i = 0; i < nshell; i++) { printf( "%d, %2d, %.3lf, %.3lf, %.3lf, ", shells[i].am, shells[i].nprim, shells[i].x, shells[i].y, shells[i].z ); int nprim = shells[i].nprim; for (int j = 0; j < nprim; j++) printf("%.3lf, ", shells[i].alpha[j]); for (int j = 0; j < nprim; j++) printf("%.3lf, ", shells[i].coef[j]); printf("\n"); } } // Get the Schwarz screening value from a given set of shells double CMS_get_Schwarz_scrval(const int nshell, shell_t *shells, double *scr_vals) { // 1. Calculate the size of each shell and prepare Simint buffer int *shell_bf_num = (int*) malloc(sizeof(int) * nshell); int max_am = 0, max_am_ncart = 0; assert(shell_bf_num != NULL); for (int i = 0; i < nshell; i++) { int am = shells[i].am; int am_ncart = NCART(am); max_am = MAX(max_am, am); max_am_ncart = MAX(max_am_ncart, am_ncart); shell_bf_num[i] = am_ncart; } size_t work_msize = simint_ostei_workmem(0, max_am); size_t ERI_msize = sizeof(double) * (max_am_ncart * max_am_ncart * max_am_ncart * max_am_ncart); ERI_msize += sizeof(double) * 8; // 2. 
Calculate (MN|MN) and find the Schwarz screening value double global_max_scrval = 0.0; #pragma omp parallel { struct simint_multi_shellpair MN_pair; simint_initialize_multi_shellpair(&MN_pair); double *work_mem = SIMINT_ALLOC(work_msize); double *ERI_mem = SIMINT_ALLOC(ERI_msize); assert(work_mem != NULL && ERI_mem != NULL); #pragma omp for schedule(dynamic) reduction(max:global_max_scrval) for (int M = 0; M < nshell; M++) { int dimM = shell_bf_num[M]; for (int N = 0; N < nshell; N++) { int dimN = shell_bf_num[N]; simint_create_multi_shellpair(1, &shells[M], 1, &shells[N], &MN_pair, SIMINT_SCREEN_NONE); int ERI_size = simint_compute_eri(&MN_pair, &MN_pair, 0.0, work_mem, ERI_mem); if (ERI_size <= 0) continue; int ld_MNM_M = (dimM * dimN * dimM + dimM); int ld_NM_1 = (dimN * dimM + 1); double max_val = 0.0; for (int iM = 0; iM < dimM; iM++) { for (int iN = 0; iN < dimN; iN++) { int idx = iN * ld_MNM_M + iM * ld_NM_1; double val = fabs(ERI_mem[idx]); max_val = MAX(max_val, val); } } global_max_scrval = MAX(global_max_scrval, max_val); scr_vals[M * nshell + N] = max_val; scr_vals[N * nshell + M] = max_val; } } SIMINT_FREE(ERI_mem); SIMINT_FREE(work_mem); simint_free_multi_shellpair(&MN_pair); } free(shell_bf_num); return global_max_scrval; } // Initialize a Simint buffer structure void CMS_init_Simint_buff(const int max_am, simint_buff_p *buff_) { simint_buff_p buff = (simint_buff_p) malloc(sizeof(struct simint_buff)); assert(buff != NULL); int max_ncart = NCART(max_am); int max_int = max_ncart * max_ncart * max_ncart * max_ncart; buff->work_msize = simint_ostei_workmem(0, max_am); buff->ERI_msize = sizeof(double) * (max_int * NPAIR_SIMD + 8); buff->work_mem = SIMINT_ALLOC(buff->work_msize); buff->ERI_mem = SIMINT_ALLOC(buff->ERI_msize); assert(buff->work_mem != NULL && buff->ERI_mem != NULL); simint_initialize_shell(&buff->NAI_shell1); simint_initialize_shell(&buff->NAI_shell2); simint_initialize_multi_shellpair(&buff->bra_pair); simint_initialize_multi_shellpair(&buff->ket_pair); *buff_ = buff; } // Destroy a Simint buffer structure void CMS_destroy_Simint_buff(simint_buff_p buff) { if (buff == NULL) return; buff->work_msize = 0; buff->ERI_msize = 0; SIMINT_FREE(buff->work_mem); SIMINT_FREE(buff->ERI_mem); simint_free_shell(&buff->NAI_shell1); simint_free_shell(&buff->NAI_shell2); simint_free_multi_shellpair(&buff->bra_pair); simint_free_multi_shellpair(&buff->ket_pair); } // Initialize an ERI batch buffer structure void CMS_init_eri_batch_buff(const int max_am, const int num_param, eri_batch_buff_p *buff_) { eri_batch_buff_p buff = (eri_batch_buff_p) malloc(sizeof(struct eri_batch_buff)); assert(buff != NULL); int num_batch = (max_am + 1) * (max_am + 1); buff->max_am = max_am; buff->num_batch = num_batch; buff->num_param = num_param; int total_ket_pairs = num_batch * NPAIR_SIMD; buff->batch_cnt = (int*) malloc(sizeof(int) * num_batch); buff->sq_param = (int*) malloc(sizeof(int) * total_ket_pairs * num_param); buff->ket_pairs = (multi_sp_p*) malloc(sizeof(multi_sp_p) * total_ket_pairs); assert(buff->batch_cnt != NULL); assert(buff->sq_param != NULL); assert(buff->ket_pairs != NULL); memset(buff->batch_cnt, 0, sizeof(int) * num_batch); simint_initialize_multi_shellpair(&buff->ket_multipairs); *buff_ = buff; } // Destroy an ERI batch buffer structure void CMS_destroy_eri_batch_buff(eri_batch_buff_p buff) { if (buff == NULL) return; buff->max_am = -1; buff->num_batch = 0; buff->num_param = 0; free(buff->batch_cnt); free(buff->sq_param); free(buff->ket_pairs); 
simint_free_multi_shellpair(&buff->ket_multipairs); } // Push a ket pair into an ERI batch int CMS_push_ket_pair_to_eri_batch( eri_batch_buff_p buff, const int ket_am1, const int ket_am2, const multi_sp_p ket_pair, const int *param ) { int batch_id = ket_am1 * (buff->max_am + 1) + ket_am2; int batch_idx = buff->batch_cnt[batch_id]; if (batch_idx >= NPAIR_SIMD) { return 0; } else { int sq_offset = batch_id * NPAIR_SIMD + batch_idx; buff->ket_pairs[sq_offset] = ket_pair; int *sq_param_p = buff->sq_param + sq_offset * buff->num_param; memcpy(sq_param_p, param, sizeof(int) * buff->num_param); buff->batch_cnt[batch_id]++; return (batch_idx+1); } } // Calculate all shell quartets in an ERI batch void CMS_calc_ERI_batch( eri_batch_buff_p eri_batch_buff, simint_buff_p simint_buff, const int ket_am1, const int ket_am2, int *eri_size, int **batch_param ) { int batch_id = ket_am1 * (eri_batch_buff->max_am + 1) + ket_am2; int n_pair = eri_batch_buff->batch_cnt[batch_id]; multi_sp_p bra_pair = eri_batch_buff->bra_pair; multi_sp_p ket_multipairs = &eri_batch_buff->ket_multipairs; multi_sp_p *batch_ket_pairs = eri_batch_buff->ket_pairs + batch_id * NPAIR_SIMD; int bra_am1 = bra_pair->am1; int bra_am2 = bra_pair->am2; *eri_size = NCART(bra_am1) * NCART(bra_am2) * NCART(ket_am1) * NCART(ket_am2); *batch_param = eri_batch_buff->sq_param + batch_id * NPAIR_SIMD * eri_batch_buff->num_param; ket_multipairs->nprim = 0; simint_cat_shellpairs( n_pair, (const struct simint_multi_shellpair **) batch_ket_pairs, ket_multipairs, SIMINT_SCREEN_NONE ); double prim_scrval = 0.0; int ret = simint_compute_eri( bra_pair, ket_multipairs, prim_scrval, simint_buff->work_mem, simint_buff->ERI_mem ); if (ret == 0) { *eri_size = 0; *batch_param = NULL; } else { eri_batch_buff->batch_cnt[batch_id] = 0; } } void H2ERI_copy_ERI_to_mat( const int num_sq, double *ERI_mem, int *batch_param, double *mat, const int ldm ) { int ncart_MN = batch_param[0]; int ncart_PQ = batch_param[1]; int eri_size = ncart_MN * ncart_PQ; for (int i = 0; i < num_sq; i++) { int row_idx = batch_param[4 * i + 2]; int col_idx = batch_param[4 * i + 3]; double *mat_blk = mat + row_idx * ldm + col_idx; double *ERI_blk = ERI_mem + i * eri_size; for (int j = 0; j < ncart_MN; j++) { double *mat_blk_row = mat_blk + j * ldm; double *ERI_blk_row = ERI_blk + j * ncart_PQ; memcpy(mat_blk_row, ERI_blk_row, sizeof(double) * ncart_PQ); } } } // Calculate shell quartet pairs (N_i M_i|Q_j P_j) and unfold all ERI // results to form a matrix void H2ERI_calc_ERI_pairs_to_mat( const multi_sp_p sp, const int n_bra_pair, const int n_ket_pair, const int *bra_idx, const int *ket_idx, simint_buff_p simint_buff, double *mat, const int ldm, eri_batch_buff_p eri_batch_buff ) { int param[4]; int row_idx = 0; for (int i = 0; i < n_bra_pair; i++) { const multi_sp_p bra_pair = sp + bra_idx[i]; int am_M = bra_pair->am1; int am_N = bra_pair->am2; int ncart_MN = NCART(am_M) * NCART(am_N); eri_batch_buff->bra_pair = bra_pair; memset(eri_batch_buff->batch_cnt, 0, sizeof(int) * eri_batch_buff->num_batch); param[0] = ncart_MN; param[2] = row_idx; int col_idx = 0; for (int j = 0; j < n_ket_pair; j++) { const multi_sp_p ket_pair = sp + ket_idx[j]; int am_P = ket_pair->am1; int am_Q = ket_pair->am2; int ncart_PQ = NCART(am_P) * NCART(am_Q); param[1] = ncart_PQ; param[3] = col_idx; int num_sq = CMS_push_ket_pair_to_eri_batch( eri_batch_buff, am_P, am_Q, ket_pair, &param[0] ); if (num_sq == NPAIR_SIMD) { int eri_size, *batch_param; CMS_calc_ERI_batch( eri_batch_buff, simint_buff, am_P, am_Q, &eri_size, 
&batch_param ); assert(eri_size > 0); H2ERI_copy_ERI_to_mat(num_sq, simint_buff->ERI_mem, batch_param, mat, ldm); } col_idx += ncart_PQ; } for (int ibatch = 0; ibatch < eri_batch_buff->num_batch; ibatch++) { int num_sq = eri_batch_buff->batch_cnt[ibatch]; if (num_sq == 0) continue; int am_P = ibatch / (eri_batch_buff->max_am + 1); int am_Q = ibatch % (eri_batch_buff->max_am + 1); int eri_size, *batch_param; CMS_calc_ERI_batch( eri_batch_buff, simint_buff, am_P, am_Q, &eri_size, &batch_param ); assert(eri_size > 0); H2ERI_copy_ERI_to_mat(num_sq, simint_buff->ERI_mem, batch_param, mat, ldm); } row_idx += ncart_MN; } } // Calculate NAI pairs (N_i M_i|[x_j, y_j, z_j]) and unfold all NAI // results to form a matrix void H2ERI_calc_NAI_pairs_to_mat( const shell_t *sp_shells, const int num_sp, const int n_bra_pair, const int *sp_idx, const int n_point, double *x, double *y, double *z, double *mat, const int ldm, double *trans_buf ) { double atomic_nums = 1.0; int row_idx = 0; for (int j = 0; j < n_bra_pair; j++) { const shell_t *M_shell = sp_shells + sp_idx[j]; const shell_t *N_shell = sp_shells + sp_idx[j] + num_sp; int am_M = M_shell->am; int am_N = N_shell->am; int ncart_MN = NCART(am_M) * NCART(am_N); for (int i = 0; i < n_point; i++) { simint_compute_potential( 1, &atomic_nums, x + i, y + i, z + i, N_shell, M_shell, trans_buf ); double *mat_blk = mat + row_idx * ldm + i; for (int k = 0; k < ncart_MN; k++) mat_blk[k * ldm] = trans_buf[k]; } row_idx += ncart_MN; } }
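/* Usage sketch (not part of the original file): read a basis set from a .mol
   file, compute the Schwarz screening matrix, and release all resources.
   The file name is hypothetical and error handling is minimal. */
#ifdef CMS_EXAMPLE_MAIN
int main(void)
{
    int natom, nshell;
    shell_t *shells;
    CMS_read_mol_file("water.mol", &natom, &nshell, &shells);
    double *scr_vals = (double*) malloc(sizeof(double) * nshell * nshell);
    assert(scr_vals != NULL);
    /* (M N|M N) integrals bound every (M N|P Q) via the Schwarz inequality */
    double max_scrval = CMS_get_Schwarz_scrval(nshell, shells, scr_vals);
    printf("natom = %d, nshell = %d, max Schwarz value = %e\n", natom, nshell, max_scrval);
    free(scr_vals);
    CMS_destroy_shells(nshell, shells);
    free(shells);
    return 0;
}
#endif /* CMS_EXAMPLE_MAIN */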
SurfaceARAP.h
#ifndef SURFACE_ARAP_INCLUDED #define SURFACE_ARAP_INCLUDED #include "Mesh.h" #include <Eigen/Core> #include <Eigen/Geometry> #include <Eigen/StdVector> #include <Eigen/Sparse> #include <Eigen/SVD> #include <omp.h> Eigen::Matrix2f TriMetricTensor(const Eigen::Vector3f v0, const Eigen::Vector3f v1, const Eigen::Vector3f v2){ Eigen::Vector3f d[2]; d[0] = v1 - v0; d[1] = v2 - v0; Eigen::Matrix2f g; for (int i = 0; i < 2; i++)for (int j = 0; j < 2; j++){ g(i, j) = d[i].dot(d[j]); } return g; } Eigen::Matrix3f TriStiffnesMatrix(const Eigen::Matrix2f & g){ Eigen::Vector2f d[3]; d[0] = Eigen::Vector2f(-1.0, -1.0); d[1] = Eigen::Vector2f(1.0, 0.0); d[2] = Eigen::Vector2f(0.0, 1.0); Eigen::Matrix3f s; Eigen::Matrix2f g_inv = g.inverse(); double triArea = sqrt(g.determinant()); for (int i = 0; i < 3; i++)for (int j = 0; j < 3; j++) s(i, j) = -(d[i].dot(g_inv*d[j]))*triArea; return s; } void TriangleCotangentWeights(const std::vector<Eigen::Vector3f> & vertices, const std::vector<std::vector<TriangleIndex>> & triangles, std::vector<std::vector<Eigen::Vector3f>> & cotangentWeights){ cotangentWeights.resize(triangles.size()); for (int t = 0; t < triangles.size(); t++){ cotangentWeights[t].resize(triangles[t].size()); for (int n = 0; n < triangles[t].size(); n++){ Eigen::Matrix2f g = TriMetricTensor(vertices[triangles[t][n][0]], vertices[triangles[t][n][1]], vertices[triangles[t][n][2]]); if (g.determinant() <= 0.0){ printf("Non Positive Area Triangle!. %f\n", g.determinant()); printf("Mass matrix assigned to 0.000001*Id\n"); g(0, 0) = g(1, 1) = 0.000001; g(0, 1) = g(1, 0) = 0.0; } Eigen::Matrix3f s = TriStiffnesMatrix(g); for (int i = 0; i < 3; i++) cotangentWeights[t][n][i] = s(i, (i + 1) % 3); } } } class ARAPModel{ public: void Initialize(const std::vector<Eigen::Vector3f> & p_referenceVertices, const std::vector<std::vector<TriangleIndex>> & p_triangles, const std::vector<int> & p_fixedIndices, const float & p_softScale); void UpdteSoftWeight(const float & p_softScale); void Solve(const std::vector<Eigen::Vector3f> & softConstraints, std::vector<Eigen::Vector3f> & currentVertices); double Energy(const std::vector<Eigen::Vector3f> & currentVertices, std::vector<double> & energy); std::vector<Eigen::Vector3f> referenceVertices; std::vector<std::vector<TriangleIndex>> triangles; std::vector<std::vector<Eigen::Vector3f>> cotangentWeights; std::vector<float> softWeights; std::vector<int> variableIndex; std::vector<int> fixedIndices; Eigen::SimplicialLDLT<Eigen::SparseMatrix<float>> ARAPCholesky; Eigen::SparseMatrix<float> stiffnessMatrix; int freeVarCount; }; double ARAPModel::Energy(const std::vector<Eigen::Vector3f> & currentVertices, std::vector<double> & energy){ energy.resize(triangles.size()); double cumEnergy = 0; for (int t = 0; t < triangles.size(); t++){ Eigen::Matrix3f scatter = Eigen::Matrix3f::Zero(); for (int n = 0; n < triangles[t].size(); n++){ for (int i = 0; i < 3; i++){ Eigen::Vector3f restEdge = referenceVertices[triangles[t][n][(i + 1) % 3]] - referenceVertices[triangles[t][n][i]]; Eigen::Vector3f newEdge = currentVertices[triangles[t][n][(i + 1) % 3]] - currentVertices[triangles[t][n][i]]; scatter += cotangentWeights[t][n][i] * restEdge*newEdge.transpose(); } } Eigen::JacobiSVD<Eigen::Matrix3f> mSVD(scatter, Eigen::ComputeFullU | Eigen::ComputeFullV); Eigen::Matrix3f U = mSVD.matrixU(); Eigen::Matrix3f V = mSVD.matrixV(); if (U.determinant()*V.determinant() < 0.0) for (int k = 0; k < 3; k++) U(k, 2) *= -1.0; Eigen::Matrix3f rotation = V*U.transpose(); double cumNeighbourhoodEnergy 
= 0; for (int n = 0; n < triangles[t].size(); n++){ for (int j = 0; j < 3; j++){ Eigen::Vector3f newEdge = currentVertices[triangles[t][n][j]] - currentVertices[triangles[t][n][(j + 1) % 3]]; Eigen::Vector3f restEdge = referenceVertices[triangles[t][n][j]] - referenceVertices[triangles[t][n][(j + 1) % 3]]; Eigen::Vector3f rotatedEdge = rotation * restEdge; cumNeighbourhoodEnergy += (newEdge - rotatedEdge).squaredNorm() * cotangentWeights[t][n][j]; } } if (cumNeighbourhoodEnergy <0) printf("Negative neighbourhood Energy!!\n"); energy[t] = cumNeighbourhoodEnergy; cumEnergy += cumNeighbourhoodEnergy; } return cumEnergy; } void ARAPModel::Solve(const std::vector<Eigen::Vector3f> & softConstraints, std::vector<Eigen::Vector3f> & currentVertices){ Eigen::MatrixXf rhs = Eigen::MatrixXf::Zero(freeVarCount, 3); for (int t = 0; t < triangles.size(); t++){ Eigen::Matrix3f scatter = Eigen::Matrix3f::Zero(); for (int n = 0; n < triangles[t].size(); n++){ for (int i = 0; i < 3; i++){ Eigen::Vector3f restEdge = referenceVertices[triangles[t][n][(i + 1) % 3]] - referenceVertices[triangles[t][n][i]]; Eigen::Vector3f newEdge = currentVertices[triangles[t][n][(i + 1) % 3]] - currentVertices[triangles[t][n][i]]; scatter += cotangentWeights[t][n][i]*restEdge*newEdge.transpose(); } } Eigen::JacobiSVD<Eigen::Matrix3f> mSVD(scatter, Eigen::ComputeFullU | Eigen::ComputeFullV); Eigen::Matrix3f U = mSVD.matrixU(); Eigen::Matrix3f V = mSVD.matrixV(); if (U.determinant()*V.determinant() < 0.0) for (int k = 0; k < 3; k++) U(k, 2) *= -1.0; Eigen::Matrix3f rotation = V*U.transpose(); for (int n = 0; n < triangles[t].size(); n++){ for (int i = 0; i < 3; i++){ for (int j = 0; j < 3; j++){ int vP = variableIndex[triangles[t][n][j]]; int vN = variableIndex[triangles[t][n][(j + 1) % 3]]; Eigen::Vector3f restEdge = referenceVertices[triangles[t][n][j]] - referenceVertices[triangles[t][n][(j + 1) % 3]]; Eigen::Vector3f rotatedEdge = rotation * restEdge * cotangentWeights[t][n][j]; if (vP != -1){ rhs.row(vP) += rotatedEdge; if (vN == -1) rhs.row(vP) += currentVertices[triangles[t][n][(j + 1) % 3]] * cotangentWeights[t][n][j]; } if (vN != -1){ rhs.row(vN) -= rotatedEdge; if (vP == -1) rhs.row(vN) += currentVertices[triangles[t][n][j]] * cotangentWeights[t][n][j]; } } } } } int threads = omp_get_num_procs(); #pragma omp parallel for num_threads( threads ) for (int i = 0; i < referenceVertices.size(); i++){ int vi = variableIndex[i]; if(vi != -1) rhs.row(vi) += softConstraints[i] * softWeights[i]; } Eigen::MatrixXf solution = ARAPCholesky.solve(rhs); #pragma omp parallel for num_threads( threads ) for (int i = 0; i < referenceVertices.size(); i++){ int vi = variableIndex[i]; if (vi != -1) for (int k = 0; k < 3; k++) currentVertices[i][k] = solution(vi, k); } } void ARAPModel::UpdteSoftWeight(const float & p_softScale){ int vCount = referenceVertices.size(); softWeights.resize(vCount, p_softScale / float(vCount)); std::vector<Eigen::Triplet<double>> softWeighTriplets; softWeighTriplets.reserve(freeVarCount); for (int i = 0; i < vCount; i++){ int vi = variableIndex[i]; if (vi != -1) softWeighTriplets.push_back(Eigen::Triplet<double>(vi, vi, softWeights[i])); } Eigen::SparseMatrix<float> softWeightsMatrix; softWeightsMatrix.resize(freeVarCount, freeVarCount); softWeightsMatrix.setFromTriplets(softWeighTriplets.begin(), softWeighTriplets.end()); ARAPCholesky.factorize(stiffnessMatrix + softWeightsMatrix); } void ARAPModel::Initialize(const std::vector<Eigen::Vector3f> & p_referenceVertices, const std::vector<std::vector<TriangleIndex>> 
& p_triangles, const std::vector<int> & p_fixedIndices, const float & p_softScale){ fixedIndices = p_fixedIndices; referenceVertices = p_referenceVertices; triangles = p_triangles; TriangleCotangentWeights(referenceVertices, triangles, cotangentWeights); int vCount = referenceVertices.size(); softWeights.resize(vCount, p_softScale / float(vCount)); {//Set free variables std::vector<bool> isFixed(vCount, false); for (int i = 0; i < fixedIndices.size(); i++)isFixed[fixedIndices[i]] = true; variableIndex.resize(vCount, -1); int varCounter = 0; for (int i = 0; i < vCount; i++) if (!isFixed[i]){ variableIndex[i] = varCounter; varCounter++; } freeVarCount = varCounter; if (freeVarCount != (vCount - fixedIndices.size()))printf("Variable counters unexpected! \n"); } {//Set matrices std::vector<Eigen::Triplet<double>> stiffnessTriplets; for (int t = 0; t < triangles.size(); t++){ for (int n = 0; n < triangles[t].size(); n++){ for (int i = 0; i < 3; i++){ for (int j = 0; j < 3; j++){ int vP = variableIndex[triangles[t][n][j]]; int vN = variableIndex[triangles[t][n][(j + 1) % 3]]; if (vP != -1){ stiffnessTriplets.push_back(Eigen::Triplet<double>(vP, vP, cotangentWeights[t][n][j])); if (vN != -1) stiffnessTriplets.push_back(Eigen::Triplet<double>(vP, vN, -cotangentWeights[t][n][j])); } if (vN != -1){ stiffnessTriplets.push_back(Eigen::Triplet<double>(vN, vN, cotangentWeights[t][n][j])); if (vP != -1) stiffnessTriplets.push_back(Eigen::Triplet<double>(vN, vP, -cotangentWeights[t][n][j])); } } } } } stiffnessMatrix.resize(freeVarCount, freeVarCount); stiffnessMatrix.setFromTriplets(stiffnessTriplets.begin(), stiffnessTriplets.end()); std::vector<Eigen::Triplet<double>> softWeighTriplets; softWeighTriplets.reserve(freeVarCount); for (int i = 0; i < vCount; i++){ int vi = variableIndex[i]; if (vi != -1) softWeighTriplets.push_back(Eigen::Triplet<double>(vi, vi, softWeights[i])); } Eigen::SparseMatrix<float> softWeightsMatrix; softWeightsMatrix.resize(freeVarCount, freeVarCount); softWeightsMatrix.setFromTriplets(softWeighTriplets.begin(), softWeighTriplets.end()); ARAPCholesky.analyzePattern(stiffnessMatrix + softWeightsMatrix); ARAPCholesky.factorize(stiffnessMatrix + softWeightsMatrix); } } #endif //SURFACE_ARAP_INCLUDED
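/* Usage sketch (not part of the original header, placed outside the include
   guard and compiled only on demand): run a few local/global ARAP iterations.
   Vertex positions, per-vertex triangle fans, and the fixed set are assumed
   to come from the Mesh.h loader; all names here are hypothetical. */
#ifdef SURFACE_ARAP_EXAMPLE
inline std::vector<Eigen::Vector3f> DeformARAP(
	const std::vector<Eigen::Vector3f> & restVertices,
	const std::vector<std::vector<TriangleIndex>> & triangleFans,
	const std::vector<int> & fixedIndices,
	const std::vector<Eigen::Vector3f> & targetPositions,
	int iterations = 10, float softScale = 1000.f)
{
	ARAPModel model;
	model.Initialize(restVertices, triangleFans, fixedIndices, softScale);
	std::vector<Eigen::Vector3f> current = restVertices;
	for (int it = 0; it < iterations; it++)
		model.Solve(targetPositions, current); // fit per-fan rotations, then solve the global system
	return current;
}
#endif // SURFACE_ARAP_EXAMPLE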
NETSPLITLM_fmt_plug.c
/* * NETHALFLM_fmt.c * Written by DSK (Based on NetLM/NetNTLM patch by JoMo-Kun) * Performs brute-force cracking of the HalfLM challenge/response pairs. * * Modified for performance and OMP support by magnum 2011 * * Storage Format: * domain\username:::lm response:nt response:challenge * * NOTE, in loader.c, the format appeared to be domain\username:::lm response:challenge * so that format has been built into the 'prepare' function (JimF). * * Code is in public domain. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_NETHALFLM; #elif FMT_REGISTERS_H john_register_one(&fmt_NETHALFLM); #else #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #include "misc.h" #include "common.h" #include "formats.h" #include "unicode.h" #include <openssl/des.h> #include "memdbg.h" #ifndef uchar #define uchar unsigned char #endif #define FORMAT_LABEL "nethalflm" #define FORMAT_NAME "HalfLM C/R" #define ALGORITHM_NAME "DES 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 7 #define BINARY_SIZE 8 #define BINARY_ALIGN 4 #define SALT_SIZE 8 #define SALT_ALIGN 4 #define CIPHERTEXT_LENGTH 48 #define TOTAL_LENGTH 12 + 2 * SALT_SIZE + CIPHERTEXT_LENGTH // these may be altered in init() if running OMP // and that formula is subject to change #define MIN_KEYS_PER_CRYPT 1 #define THREAD_RATIO 256 #ifdef _OPENMP #define MAX_KEYS_PER_CRYPT 0x10000 #else #define MAX_KEYS_PER_CRYPT THREAD_RATIO #endif static struct fmt_tests tests[] = { {"", "G3RG3P00!", {"domain\\username", "", "", "6E1EC36D3417CE9E09A4424309F116C4C991948DAEB4ADAD", "", "1122334455667788"} }, {"$NETHALFLM$1122334455667788$6E1EC36D3417CE9E09A4424309F116C4C991948DAEB4ADAD", "G3RG3P00!"}, {"$NETHALFLM$1122334455667788$6E1EC36D3417CE9E09A4424309F116C4C991948DAEB4ADAD", "G3RG3P0"}, {"$NETHALFLM$1122334455667788$1354FD5ABF3B627B8B49587B8F2BBA0F9F6C5E420824E0A2", "ZEEEZ@1"}, {"", "G3RG3P0", {"domain\\username", "", "", "6E1EC36D3417CE9E09A4424309F116C4C991948DAEB4ADAD", "", "1122334455667788"} }, {"", "ZEEEZ@1", {"domain\\username", "", "", "1354FD5ABF3B627B8B49587B8F2BBA0F9F6C5E420824E0A2", "", "1122334455667788"} }, {NULL} }; static uchar (*saved_plain)[PLAINTEXT_LENGTH + 1]; static uchar (*saved_pre)[8]; static uchar (*output)[BINARY_SIZE]; static uchar *challenge; static void init(struct fmt_main *self) { #ifdef _OPENMP int n = MIN_KEYS_PER_CRYPT * omp_get_max_threads(); if (n < MIN_KEYS_PER_CRYPT) n = MIN_KEYS_PER_CRYPT; if (n > MAX_KEYS_PER_CRYPT) n = MAX_KEYS_PER_CRYPT; self->params.min_keys_per_crypt = n; n = n * n * ((n >> 1) + 1) * THREAD_RATIO; if (n > MAX_KEYS_PER_CRYPT) n = MAX_KEYS_PER_CRYPT; self->params.max_keys_per_crypt = n; #endif saved_plain = mem_calloc_tiny(sizeof(*saved_plain) * self->params.max_keys_per_crypt, MEM_ALIGN_NONE); saved_pre = mem_calloc_tiny(sizeof(*saved_pre) * self->params.max_keys_per_crypt, MEM_ALIGN_NONE); output = mem_calloc_tiny(sizeof(*output) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static int valid(char *ciphertext, struct fmt_main *self) { char *pos; if (strncmp(ciphertext, "$NETHALFLM$", 11)!=0) return 0; if (strlen(ciphertext) < TOTAL_LENGTH) return 0; if (ciphertext[27] != '$') return 0; if (strncmp(&ciphertext[28 + 2 * SALT_SIZE], "00000000000000000000000000000000", 32) == 0) return 0; // This is NTLM ESS C/R for (pos = &ciphertext[28]; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++) ; if (!*pos && pos - ciphertext - 28 == CIPHERTEXT_LENGTH) { return 1; } else return 0; } static char *prepare(char *split_fields[10], struct fmt_main *self) { char 
*tmp; if (!strncmp(split_fields[1], "$NETHALFLM$", 11)) return split_fields[1]; if (!split_fields[3]||!split_fields[4]||!split_fields[5]) return split_fields[1]; if (strlen(split_fields[3]) != CIPHERTEXT_LENGTH) return split_fields[1]; // if LMresp == NTresp then it's NTLM-only, not LM if (!strncmp(split_fields[3], split_fields[4], 48)) return split_fields[1]; // this string suggests we have an improperly formatted NTLMv2 if (strlen(split_fields[4]) > 31) { if (!strncmp(&split_fields[4][32], "0101000000000000", 16)) return split_fields[1]; } tmp = (char *) mem_alloc(12 + strlen(split_fields[3]) + strlen(split_fields[5]) + 1); sprintf(tmp, "$NETHALFLM$%s$%s", split_fields[5], split_fields[3]); if (valid(tmp,self)) { char *cp2 = str_alloc_copy(tmp); MEM_FREE(tmp); return cp2; } MEM_FREE(tmp); return split_fields[1]; } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[TOTAL_LENGTH + 1] = {0}; memcpy(out, ciphertext, TOTAL_LENGTH); strlwr(&out[10]); /* Exclude: $NETHALFLM$ */ return out; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD_32 dummy; } binary; int i; ciphertext+=28; for (i=0; i<BINARY_SIZE; i++) { binary.c[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])])<<4; binary.c[i] |= (atoi16[ARCH_INDEX(ciphertext[i*2+1])]); } return binary.c; } static inline void setup_des_key(unsigned char key_56[], DES_key_schedule *ks) { DES_cblock key; key[0] = key_56[0]; key[1] = (key_56[0] << 7) | (key_56[1] >> 1); key[2] = (key_56[1] << 6) | (key_56[2] >> 2); key[3] = (key_56[2] << 5) | (key_56[3] >> 3); key[4] = (key_56[3] << 4) | (key_56[4] >> 4); key[5] = (key_56[4] << 3) | (key_56[5] >> 5); key[6] = (key_56[5] << 2) | (key_56[6] >> 6); key[7] = (key_56[6] << 1); DES_set_key(&key, ks); } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; DES_key_schedule ks; int i; #ifdef _OPENMP #pragma omp parallel for default(none) private(i, ks) shared(count, output, challenge, saved_pre) #endif for(i=0; i<count; i++) { /* DES-encrypt challenge using the partial LM hash */ setup_des_key(saved_pre[i], &ks); DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)output[i], &ks, DES_ENCRYPT); } return count; } static int cmp_all(void *binary, int count) { int index; for(index=0; index<count; index++) if (!memcmp(output[index], binary, BINARY_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(output[index], binary, BINARY_SIZE); } static int cmp_exact(char *source, int index) { return !memcmp(output[index], get_binary(source), BINARY_SIZE); } static void *get_salt(char *ciphertext) { static union { unsigned char c[SALT_SIZE]; ARCH_WORD_32 dummy; } out; int i; ciphertext += 11; for (i = 0; i < SALT_SIZE; ++i) { out.c[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])]; } return (void*)out.c; } static void set_salt(void *salt) { challenge = salt; } static void netsplitlm_set_key(char *key, int index) { const unsigned char magic[] = {0x4b, 0x47, 0x53, 0x21, 0x40, 0x23, 0x24, 0x25}; DES_key_schedule ks; strnzcpyn((char *)saved_plain[index], key, PLAINTEXT_LENGTH + 1); /* Upper-case password */ enc_strupper((char *)saved_plain[index]); /* Generate first 8-bytes of LM hash */ setup_des_key(saved_plain[index], &ks); DES_ecb_encrypt((DES_cblock*)magic, (DES_cblock*)saved_pre[index], &ks, DES_ENCRYPT); } static char *get_key(int index) { return (char *)saved_plain[index]; } static int salt_hash(void *salt) { return *(ARCH_WORD_32 *)salt & 
(SALT_HASH_SIZE - 1); } static int get_hash_0(int index) { return *(ARCH_WORD_32 *)output[index] & 0xF; } static int get_hash_1(int index) { return *(ARCH_WORD_32 *)output[index] & 0xFF; } static int get_hash_2(int index) { return *(ARCH_WORD_32 *)output[index] & 0xFFF; } static int get_hash_3(int index) { return *(ARCH_WORD_32 *)output[index] & 0xFFFF; } static int get_hash_4(int index) { return *(ARCH_WORD_32 *)output[index] & 0xFFFFF; } static int get_hash_5(int index) { return *(ARCH_WORD_32 *)output[index] & 0xFFFFFF; } static int get_hash_6(int index) { return *(ARCH_WORD_32 *)output[index] & 0x7FFFFFF; } struct fmt_main fmt_NETHALFLM = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP, #if FMT_MAIN_VERSION > 11 { NULL }, #endif tests }, { init, fmt_default_done, fmt_default_reset, prepare, valid, split, get_binary, get_salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, set_salt, netsplitlm_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
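/* Standalone sketch (not part of the plugin): compute the 8-byte half-LM hash
   of an upper-cased 7-character password, mirroring what netsplitlm_set_key()
   precomputes. It assumes the legacy OpenSSL DES API; plain toupper() stands
   in for John's enc_strupper(). */
#ifdef HALFLM_EXAMPLE
#include <ctype.h>
#include <stdio.h>
#include <openssl/des.h>

int main(void)
{
    const unsigned char magic[8] = {0x4b, 0x47, 0x53, 0x21, 0x40, 0x23, 0x24, 0x25};
    unsigned char key56[8] = {0}, half_lm[8];
    const char *password = "g3rg3p0"; /* only the first 7 characters matter */
    for (int i = 0; i < 7 && password[i]; i++)
        key56[i] = toupper((unsigned char)password[i]);
    /* expand the 7-byte (56-bit) key to 8 bytes exactly as setup_des_key() does */
    DES_cblock key;
    key[0] = key56[0];
    key[1] = (key56[0] << 7) | (key56[1] >> 1);
    key[2] = (key56[1] << 6) | (key56[2] >> 2);
    key[3] = (key56[2] << 5) | (key56[3] >> 3);
    key[4] = (key56[3] << 4) | (key56[4] >> 4);
    key[5] = (key56[4] << 3) | (key56[5] >> 5);
    key[6] = (key56[5] << 2) | (key56[6] >> 6);
    key[7] = (key56[6] << 1);
    DES_key_schedule ks;
    DES_set_key_unchecked(&key, &ks);
    DES_ecb_encrypt((const_DES_cblock *)magic, (DES_cblock *)half_lm, &ks, DES_ENCRYPT);
    for (int i = 0; i < 8; i++) printf("%02X", half_lm[i]);
    printf("\n");
    return 0;
}
#endif /* HALFLM_EXAMPLE */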
indirectaccess4-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: [email protected], [email protected], [email protected], [email protected], [email protected]) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Two pointers have a distance of 12 (xa2 - xa1 = 12). They are used as base addresses for indirect array accesses using an index set (another array). The index set has two indices with a distance of 12: indexSet[1] - indexSet[0] = 533 - 521 = 12. So xa1[idx] and xa2[idx] may cause a loop-carried dependence between iterations i=0 and i=1. We use the default loop scheduling (static even) in OpenMP. It is possible that the two dependent iterations are scheduled within the same chunk to the same thread; in that case no data race occurs at runtime. N is 180, and the two iterations i=0 and i=1 have the loop-carried dependence. For static even scheduling, we need at least 180 threads (180/180 = 1 iteration per chunk) so that iterations 0 and 1 are scheduled to two different threads. Data race pair: xa1[idx]@128:5 vs.
xa2[idx]@129:5 */ #include <assert.h> #include <stdio.h> #include <stdlib.h> #define N 180 int indexSet[N] = { 521, 533, 525, 527, 529, 531, // 521+12=533 547, 549, 551, 553, 555, 557, 573, 575, 577, 579, 581, 583, 599, 601, 603, 605, 607, 609, 625, 627, 629, 631, 633, 635, 651, 653, 655, 657, 659, 661, 859, 861, 863, 865, 867, 869, 885, 887, 889, 891, 893, 895, 911, 913, 915, 917, 919, 921, 937, 939, 941, 943, 945, 947, 963, 965, 967, 969, 971, 973, 989, 991, 993, 995, 997, 999, 1197, 1199, 1201, 1203, 1205, 1207, 1223, 1225, 1227, 1229, 1231, 1233, 1249, 1251, 1253, 1255, 1257, 1259, 1275, 1277, 1279, 1281, 1283, 1285, 1301, 1303, 1305, 1307, 1309, 1311, 1327, 1329, 1331, 1333, 1335, 1337, 1535, 1537, 1539, 1541, 1543, 1545, 1561, 1563, 1565, 1567, 1569, 1571, 1587, 1589, 1591, 1593, 1595, 1597, 1613, 1615, 1617, 1619, 1621, 1623, 1639, 1641, 1643, 1645, 1647, 1649, 1665, 1667, 1669, 1671, 1673, 1675, 1873, 1875, 1877, 1879, 1881, 1883, 1899, 1901, 1903, 1905, 1907, 1909, 1925, 1927, 1929, 1931, 1933, 1935, 1951, 1953, 1955, 1957, 1959, 1961, 1977, 1979, 1981, 1983, 1985, 1987, 2003, 2005, 2007, 2009, 2011, 2013}; int main (int argc, char* argv[]) { double * base = (double*) malloc(sizeof(double)* (2013+12+1)); if (base == 0) { printf ("Error in malloc(). Aborting ...\n"); return 1; } double * xa1 = base; double * xa2 = xa1 + 12; int i; // initialize segments touched by indexSet for (i =521; i<= 2025; ++i) { base[i]=0.5*i; } #pragma omp parallel for // default static even scheduling may not trigger data race! for (i =0; i< N; ++i) { int idx = indexSet[i]; xa1[idx]+= 1.0; xa2[idx]+= 3.0; } printf("x1[999]=%f xa2[1285]=%f\n", xa1[999], xa2[1285]); free (base); return 0; }
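/* A race-free variant of the kernel above (a sketch, not part of the
   benchmark): since xa1[idx] and xa2[idx] can alias across iterations, each
   read-modify-write is made atomic. Addition commutes, so the final values
   are deterministic under any schedule. */
#ifdef INDIRECT_ACCESS_FIXED
void kernel_fixed(double *xa1, double *xa2)
{
  int i;
#pragma omp parallel for
  for (i = 0; i < N; ++i) {
    int idx = indexSet[i];
#pragma omp atomic update
    xa1[idx] += 1.0;
#pragma omp atomic update
    xa2[idx] += 3.0;
  }
}
#endif /* INDIRECT_ACCESS_FIXED */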
aix_smd5_fmt_plug.c
/* AIX smd5 cracker patch for JtR. Hacked together during April of 2013 by Dhiru * Kholia <dhiru at openwall.com>. * * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and * it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_smd5; #elif FMT_REGISTERS_H john_register_one(&fmt_smd5); #else #include <string.h> #include <assert.h> #include <errno.h> #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 16 // tuned on i7 w/HT #endif #endif #include "md5.h" #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "memdbg.h" #define FORMAT_LABEL "aix-smd5" #define FORMAT_NAME "AIX LPA {smd5} (modified crypt-md5)" #define FORMAT_TAG "{smd5}" #define FORMAT_TAG1 "$1$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define FORMAT_TAG1_LEN (sizeof(FORMAT_TAG1)-1) #define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define BINARY_SIZE 16 #define BINARY_ALIGN 4 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN sizeof(int) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests smd5_tests[] = { /* following hashes are AIX non-standard smd5 hashes */ {"{smd5}s8/xSJ/v$uGam4GB8hOjTLQqvBfxJ2/", "password"}, {"{smd5}alRJaSLb$aKM3H1.h1ycXl5GEVDH1e1", "aixsucks?"}, {"{smd5}eLB0QWeS$Eg.YfWY8clZuCxF0xNrKg.", "0123456789ABCDE"}, /* following hashes are AIX standard smd5 hashes (with corrected tag) * lpa_options = std_hash=true */ {"$1$JVDbGx8K$T9h8HK4LZxeLPMTAxCfpc1", "password"}, {"$1$1Cu6fEvv$42kuaJ5fMEqyVStPuFG040", "0123456789ABCDE"}, {"$1$ql5x.xXL$vYVDhExol2xUBBpERRWcn1", "jtr>hashcat"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static struct custom_salt { int is_standard; unsigned char salt[16]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *p; char *ctcopy; char *keeptr; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0 && strncmp(ciphertext, FORMAT_TAG1, FORMAT_TAG1_LEN)) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; if (!strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) ctcopy += FORMAT_TAG_LEN; else ctcopy += FORMAT_TAG1_LEN; if ((p = strtokm(ctcopy, "$")) == NULL) /* salt */ goto err; if (strlen(p) != 8) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* hash */ goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); keeptr = ctcopy; if (!strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) { ctcopy += FORMAT_TAG_LEN; cs.is_standard = 0; } else { ctcopy += FORMAT_TAG1_LEN; cs.is_standard = 1; } p = strtokm(ctcopy, "$"); strncpy((char*)cs.salt, p, 9); p = strtokm(NULL, "$"); 
MEM_FREE(keeptr); return (void *)&cs; } #define TO_BINARY(b1, b2, b3) \ value = \ (uint32_t)atoi64[ARCH_INDEX(pos[0])] | \ ((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6) | \ ((uint32_t)atoi64[ARCH_INDEX(pos[2])] << 12) | \ ((uint32_t)atoi64[ARCH_INDEX(pos[3])] << 18); \ pos += 4; \ out.b[b1] = value >> 16; \ out.b[b2] = value >> 8; \ out.b[b3] = value; static void* get_binary(char *ciphertext) { static union { char b[16]; ARCH_WORD w; } out; char *pos; uint32_t value; if (!strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) pos = ciphertext + FORMAT_TAG_LEN; else pos = ciphertext + FORMAT_TAG1_LEN; while (*pos++ != '$'); TO_BINARY(0, 6, 12); TO_BINARY(1, 7, 13); TO_BINARY(2, 8, 14); TO_BINARY(3, 9, 15); TO_BINARY(4, 10, 5); out.b[11] = (uint32_t)atoi64[ARCH_INDEX(pos[0])] | ((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6); return out.b; } #define COMMON_GET_HASH_VAR crypt_out #include "common-get-hash.h" static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } /* * $Id: md5_crypt.c,v 1.1 2002-05-11 14:42:35 cpbotha Exp $ * * ---------------------------------------------------------------------------- * "THE BEER-WARE LICENSE" (Revision 42): * <[email protected]> wrote this file. As long as you retain this notice you * can do whatever you want with this stuff. If we meet some day, and you think * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp * ---------------------------------------------------------------------------- * * Origin: Id: crypt.c,v 1.3 1995/05/30 05:42:22 rgrimes Exp * */ static void crypt_md5(char *pw, char *salt, int is_standard, char *passwd) { char *magic = "$1$"; /* This string is magic for this algorithm. Having * it this way, we can get get better later on */ char *sp, *ep; unsigned char final[16]; int sl, pl, i, j; MD5_CTX ctx, ctx1; /* Refine the Salt first */ sp = salt; /* If it starts with the magic string, then skip that */ if (!strncmp(sp, magic, strlen(magic))) sp += strlen(magic); /* It stops at the first '$', max 8 chars */ for (ep = sp; *ep && *ep != '$' && ep < (sp + 8); ep++) continue; /* get the length of the true salt */ sl = ep - sp; MD5_Init(&ctx); /* The password first, since that is what is most unknown */ MD5_Update(&ctx,(unsigned char *)pw,strlen(pw)); // The following license text applies to the "if" code block // License: belongs to the PUBLIC DOMAIN, donated to hashcat, credits MUST go to atom // (hashcat) and philsmd for their hard work. Thx // Disclaimer: WE PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER // EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // Furthermore, NO GUARANTEES THAT IT WORKS FOR YOU AND WORKS CORRECTLY if (is_standard) { /* Then our magic string */ MD5_Update(&ctx,(unsigned char *)magic,strlen(magic)); /* Then the raw salt */ MD5_Update(&ctx,(unsigned char *)sp,sl); } else { MD5_Update(&ctx,(unsigned char *)sp,sl); } /* Then just as many characters of the MD5_(pw,salt,pw) */ MD5_Init(&ctx1); MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw)); MD5_Update(&ctx1,(unsigned char *)sp,sl); MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw)); MD5_Final(final,&ctx1); for (pl = strlen(pw); pl > 0; pl -= 16) MD5_Update(&ctx,(unsigned char *)final,pl>16 ? 16 : pl); memset(final, 0, sizeof final); /* Then something really weird... 
*/ for (j = 0, i = strlen(pw); i; i >>= 1) if (i & 1) MD5_Update(&ctx, (unsigned char *)final+j, 1); else MD5_Update(&ctx, (unsigned char *)pw+j, 1); /* Now make the output string */ strcpy(passwd, magic); strncat(passwd, sp, sl); strcat(passwd, "$"); MD5_Final(final,&ctx); /* * and now, just to make sure things don't run too fast * On a 60 Mhz Pentium this takes 34 msec, so you would * need 30 seconds to build a 1000 entry dictionary... */ for (i = 0; i < 1000; i++) { MD5_Init(&ctx1); if (i & 1) MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw)); else MD5_Update(&ctx1,(unsigned char *)final,16); if (i % 3) MD5_Update(&ctx1,(unsigned char *)sp,sl); if (i % 7) MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw)); if (i & 1) MD5_Update(&ctx1,(unsigned char *)final,16); else MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw)); MD5_Final(final,&ctx1); } memcpy(passwd, final, 16); } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { crypt_md5(saved_key[index], (char*)cur_salt->salt, cur_salt->is_standard, (char *)crypt_out[index]); } return count; } static int cmp_all(void *binary, int count) { int index = 0; #ifdef _OPENMP for (; index < count; index++) #endif if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void smd5_set_key(char *key, int index) { strnzcpy(saved_key[index], key, sizeof(*saved_key)); } static char *get_key(int index) { return saved_key[index]; } static int salt_hash(void *salt) { return *(unsigned int*)salt & (SALT_HASH_SIZE - 1); } struct fmt_main fmt_smd5 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { FORMAT_TAG, FORMAT_TAG1 }, smd5_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, smd5_set_key, get_key, fmt_default_clear_keys, crypt_all, { #define COMMON_GET_HASH_LINK #include "common-get-hash.h" }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
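The only place the AIX {smd5} variant departs from standard crypt-md5 is the seeding of the first digest: the standard flavor feeds the password, then the "$1$" magic, then the salt, while the {smd5} flavor skips the magic. A minimal standalone sketch of just that difference (not JtR code; it assumes OpenSSL's legacy MD5_Init/MD5_Update/MD5_Final API and reuses a salt and password from the test vectors above):

/* illustration only -- shows where {smd5} and $1$ diverge in crypt_md5() */
#include <stdio.h>
#include <string.h>
#include <openssl/md5.h>

static void seed_ctx(MD5_CTX *ctx, const char *pw, const char *salt, int is_standard)
{
	MD5_Init(ctx);
	MD5_Update(ctx, pw, strlen(pw));        /* password first, as in crypt_md5() */
	if (is_standard)
		MD5_Update(ctx, "$1$", 3);      /* standard crypt-md5 includes the magic */
	MD5_Update(ctx, salt, strlen(salt));    /* both flavors include the raw salt */
}

int main(void)
{
	MD5_CTX a, b;
	unsigned char da[16], db[16];

	seed_ctx(&a, "password", "s8/xSJ/v", 0); /* AIX {smd5} (std_hash=false) */
	seed_ctx(&b, "password", "s8/xSJ/v", 1); /* standard $1$ */
	MD5_Final(da, &a);
	MD5_Final(db, &b);
	printf("digests differ: %s\n", memcmp(da, db, 16) ? "yes" : "no");
	return 0;
}

Everything after this seeding, including the 1000-iteration strengthening loop and the final memcpy of the raw digest into the output buffer, is identical for both flavors.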
c-tree.h
/* Definitions for C parsing and type checking. Copyright (C) 1987-2015 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_C_TREE_H #define GCC_C_TREE_H #include "c-family/c-common.h" #include "diagnostic.h" /* struct lang_identifier is private to c-decl.c, but langhooks.c needs to know how big it is. This is sanity-checked in c-decl.c. */ #define C_SIZEOF_STRUCT_LANG_IDENTIFIER \ (sizeof (struct c_common_identifier) + 3 * sizeof (void *)) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */ #define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */ #define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE) /* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE nonzero if the definition of the type has already started. */ #define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE) /* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable declarations whose type would be completed by completing that type. */ #define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE) /* In an IDENTIFIER_NODE, nonzero if this identifier is actually a keyword. C_RID_CODE (node) is then the RID_* value of the keyword, and C_RID_YYCODE is the token number wanted by Yacc. */ #define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID) /* Record whether a type or decl was written with nonconstant size. Note that TYPE_SIZE may have simplified to a constant. */ #define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE) #define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE) /* Record whether a type is defined inside a struct or union type. This is used for -Wc++-compat. */ #define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE) /* Record whether an "incomplete type" error was given for the type. */ #define C_TYPE_ERROR_REPORTED(TYPE) TYPE_LANG_FLAG_3 (TYPE) /* Record whether a typedef for type `int' was actually `signed int'. */ #define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP) /* For a FUNCTION_DECL, nonzero if it was defined without an explicit return type. */ #define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP) /* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */ #define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP) /* For a PARM_DECL, nonzero if it was declared as an array. */ #define C_ARRAY_PARAMETER(NODE) DECL_LANG_FLAG_0 (NODE) /* For FUNCTION_DECLs, evaluates true if the decl is built-in but has been declared. */ #define C_DECL_DECLARED_BUILTIN(EXP) \ DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP)) /* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a built-in prototype and does not have a non-built-in prototype. */ #define C_DECL_BUILTIN_PROTOTYPE(EXP) \ DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP)) /* Record whether a decl was declared register. 
This is strictly a front-end flag, whereas DECL_REGISTER is used for code generation; they may differ for structures with volatile fields. */ #define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP) /* Record whether a decl was used in an expression anywhere except an unevaluated operand of sizeof / typeof / alignof. This is only used for functions declared static but not defined, though outside sizeof and typeof it is set for other function decls as well. */ #define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP)) /* Record whether a variable has been declared threadprivate by #pragma omp threadprivate. */ #define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL)) /* Nonzero for a decl which either doesn't exist or isn't a prototype. N.B. Could be simplified if all built-in decls had complete prototypes (but this is presently difficult because some of them need FILE*). */ #define C_DECL_ISNT_PROTOTYPE(EXP) \ (EXP == 0 \ || (!prototype_p (TREE_TYPE (EXP)) \ && !DECL_BUILT_IN (EXP))) /* For FUNCTION_TYPE, a hidden list of types of arguments. The same as TYPE_ARG_TYPES for functions with prototypes, but created for functions without prototypes. */ #define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE) /* For a CONSTRUCTOR, whether some initializer contains a subexpression meaning it is not a constant expression. */ #define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR)) /* Record parser information about an expression that is irrelevant for code generation alongside a tree representing its value. */ struct c_expr { /* The value of the expression. */ tree value; /* Record the original unary/binary operator of an expression, which may have been changed by fold, STRING_CST for unparenthesized string constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls (even if parenthesized), for subexpressions, and for non-constant initializers, or ERROR_MARK for other expressions (including parenthesized expressions). */ enum tree_code original_code; /* If not NULL, the original type of an expression. This will differ from the type of the value field for an enum constant. The type of an enum constant is a plain integer type, but this field will be the enum type. */ tree original_type; }; /* Type alias for struct c_expr. This allows to use the structure inside the VEC types. */ typedef struct c_expr c_expr_t; /* A kind of type specifier. Note that this information is currently only used to distinguish tag definitions, tag references and typeof uses. */ enum c_typespec_kind { /* No typespec. This appears only in struct c_declspec. */ ctsk_none, /* A reserved keyword type specifier. */ ctsk_resword, /* A reference to a tag, previously declared, such as "struct foo". This includes where the previous declaration was as a different kind of tag, in which case this is only valid if shadowing that tag in an inner scope. */ ctsk_tagref, /* A reference to a tag, not previously declared in a visible scope. */ ctsk_tagfirstref, /* A definition of a tag such as "struct foo { int a; }". */ ctsk_tagdef, /* A typedef name. */ ctsk_typedef, /* An ObjC-specific kind of type specifier. */ ctsk_objc, /* A typeof specifier, or _Atomic ( type-name ). */ ctsk_typeof }; /* A type specifier: this structure is created in the parser and passed to declspecs_add_type only. */ struct c_typespec { /* What kind of type specifier this is. */ enum c_typespec_kind kind; /* Whether the expression has operands suitable for use in constant expressions. 
*/ bool expr_const_operands; /* The specifier itself. */ tree spec; /* An expression to be evaluated before the type specifier, in the case of typeof specifiers, or NULL otherwise or if no such expression is required for a particular typeof specifier. In particular, when typeof is applied to an expression of variably modified type, that expression must be evaluated in order to determine array sizes that form part of the type, but the expression itself (as opposed to the array sizes) forms no part of the type and so needs to be recorded separately. */ tree expr; }; /* A storage class specifier. */ enum c_storage_class { csc_none, csc_auto, csc_extern, csc_register, csc_static, csc_typedef }; /* A type specifier keyword "void", "_Bool", "char", "int", "float", "double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum", or none of these. */ enum c_typespec_keyword { cts_none, cts_void, cts_bool, cts_char, cts_int, cts_float, cts_int_n, cts_double, cts_dfloat32, cts_dfloat64, cts_dfloat128, cts_fract, cts_accum, cts_auto_type }; /* This enum lists all the possible declarator specifiers, storage class or attribute that a user can write. There is at least one enumerator per possible declarator specifier in the struct c_declspecs below. It is used to index the array of declspec locations in struct c_declspecs. */ enum c_declspec_word { cdw_typespec /* A catch-all for a typespec. */, cdw_storage_class /* A catch-all for a storage class */, cdw_attributes, cdw_typedef, cdw_explicit_signed, cdw_deprecated, cdw_default_int, cdw_long, cdw_long_long, cdw_short, cdw_signed, cdw_unsigned, cdw_complex, cdw_inline, cdw_noreturn, cdw_thread, cdw_const, cdw_volatile, cdw_restrict, cdw_saturating, cdw_alignas, cdw_address_space, cdw_number_of_elements /* This one must always be the last enumerator. */ }; /* A sequence of declaration specifiers in C. When a new declaration specifier is added, please update the enum c_declspec_word above accordingly. */ struct c_declspecs { source_location locations[cdw_number_of_elements]; /* The type specified, if a single type specifier such as a struct, union or enum specifier, typedef name or typeof specifies the whole type, or NULL_TREE if none or a keyword such as "void" or "char" is used. Does not include qualifiers. */ tree type; /* Any expression to be evaluated before the type, from a typeof specifier. */ tree expr; /* The attributes from a typedef decl. */ tree decl_attr; /* When parsing, the attributes. Outside the parser, this will be NULL; attributes (possibly from multiple lists) will be passed separately. */ tree attrs; /* The base-2 log of the greatest alignment required by an _Alignas specifier, in bytes, or -1 if no such specifiers with nonzero alignment. */ int align_log; /* For the __intN declspec, this stores the index into the int_n_* arrays. */ int int_n_idx; /* The storage class specifier, or csc_none if none. */ enum c_storage_class storage_class; /* Any type specifier keyword used such as "int", not reflecting modifiers such as "short", or cts_none if none. */ ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8; /* The kind of type specifier if one has been seen, ctsk_none otherwise. */ ENUM_BITFIELD (c_typespec_kind) typespec_kind : 3; /* Whether any expressions in typeof specifiers may appear in constant expressions. */ BOOL_BITFIELD expr_const_operands : 1; /* Whether any declaration specifiers have been seen at all. 
*/
  BOOL_BITFIELD declspecs_seen_p : 1;
  /* Whether something other than a storage class specifier or
     attribute has been seen.  This is used to warn for the
     obsolescent usage of storage class specifiers other than at the
     start of the list.  (Doing this properly would require function
     specifiers to be handled separately from storage class
     specifiers.)  */
  BOOL_BITFIELD non_sc_seen_p : 1;
  /* Whether the type is specified by a typedef or typeof name.  */
  BOOL_BITFIELD typedef_p : 1;
  /* Whether the type is explicitly "signed" or specified by a
     typedef whose type is explicitly "signed".  */
  BOOL_BITFIELD explicit_signed_p : 1;
  /* Whether the specifiers include a deprecated typedef.  */
  BOOL_BITFIELD deprecated_p : 1;
  /* Whether the type defaulted to "int" because there were no type
     specifiers.  */
  BOOL_BITFIELD default_int_p : 1;
  /* Whether "long" was specified.  */
  BOOL_BITFIELD long_p : 1;
  /* Whether "long" was specified more than once.  */
  BOOL_BITFIELD long_long_p : 1;
  /* Whether "short" was specified.  */
  BOOL_BITFIELD short_p : 1;
  /* Whether "signed" was specified.  */
  BOOL_BITFIELD signed_p : 1;
  /* Whether "unsigned" was specified.  */
  BOOL_BITFIELD unsigned_p : 1;
  /* Whether "complex" was specified.  */
  BOOL_BITFIELD complex_p : 1;
  /* Whether "inline" was specified.  */
  BOOL_BITFIELD inline_p : 1;
  /* Whether "_Noreturn" was specified.  */
  BOOL_BITFIELD noreturn_p : 1;
  /* Whether "__thread" or "_Thread_local" was specified.  */
  BOOL_BITFIELD thread_p : 1;
  /* Whether "__thread" rather than "_Thread_local" was specified.  */
  BOOL_BITFIELD thread_gnu_p : 1;
  /* Whether "const" was specified.  */
  BOOL_BITFIELD const_p : 1;
  /* Whether "volatile" was specified.  */
  BOOL_BITFIELD volatile_p : 1;
  /* Whether "restrict" was specified.  */
  BOOL_BITFIELD restrict_p : 1;
  /* Whether "_Atomic" was specified.  */
  BOOL_BITFIELD atomic_p : 1;
  /* Whether "_Sat" was specified.  */
  BOOL_BITFIELD saturating_p : 1;
  /* Whether any alignment specifier (even with zero alignment) was
     specified.  */
  BOOL_BITFIELD alignas_p : 1;
  /* The address space that the declaration belongs to.  */
  addr_space_t address_space;
};

/* The various kinds of declarators in C.  */
enum c_declarator_kind {
  /* An identifier.  */
  cdk_id,
  /* A function.  */
  cdk_function,
  /* An array.  */
  cdk_array,
  /* A pointer.  */
  cdk_pointer,
  /* Parenthesized declarator with nested attributes.  */
  cdk_attrs
};

typedef struct c_arg_tag_d {
  /* The argument name.  */
  tree id;
  /* The type of the argument.  */
  tree type;
} c_arg_tag;

/* Information about the parameters in a function declarator.  */
struct c_arg_info {
  /* A list of parameter decls.  */
  tree parms;
  /* A list of structure, union and enum tags defined.  */
  vec<c_arg_tag, va_gc> *tags;
  /* A list of argument types to go in the FUNCTION_TYPE.  */
  tree types;
  /* A list of non-parameter decls (notably enumeration constants)
     defined with the parameters.  */
  tree others;
  /* A compound expression of VLA sizes from the parameters, or NULL.
     In a function definition, these are used to ensure that
     side-effects in sizes of arrays converted to pointers (such as a
     parameter int i[n++]) take place; otherwise, they are
     ignored.  */
  tree pending_sizes;
  /* True when these arguments had [*].  */
  BOOL_BITFIELD had_vla_unspec : 1;
};

/* A declarator.  */
struct c_declarator {
  /* The kind of declarator.  */
  enum c_declarator_kind kind;
  location_t id_loc; /* Currently only set for cdk_id, cdk_array. */
  /* Except for cdk_id, the contained declarator.  For cdk_id, NULL.
*/ struct c_declarator *declarator; union { /* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract declarator. */ tree id; /* For functions. */ struct c_arg_info *arg_info; /* For arrays. */ struct { /* The array dimension, or NULL for [] and [*]. */ tree dimen; /* The qualifiers inside []. */ int quals; /* The attributes (currently ignored) inside []. */ tree attrs; /* Whether [static] was used. */ BOOL_BITFIELD static_p : 1; /* Whether [*] was used. */ BOOL_BITFIELD vla_unspec_p : 1; } array; /* For pointers, the qualifiers on the pointer type. */ int pointer_quals; /* For attributes. */ tree attrs; } u; }; /* A type name. */ struct c_type_name { /* The declaration specifiers. */ struct c_declspecs *specs; /* The declarator. */ struct c_declarator *declarator; }; /* A parameter. */ struct c_parm { /* The declaration specifiers, minus any prefix attributes. */ struct c_declspecs *specs; /* The attributes. */ tree attrs; /* The declarator. */ struct c_declarator *declarator; }; /* Used when parsing an enum. Initialized by start_enum. */ struct c_enum_contents { /* While defining an enum type, this is 1 plus the last enumerator constant value. */ tree enum_next_value; /* Nonzero means that there was overflow computing enum_next_value. */ int enum_overflow; }; /* A type of reference to a static identifier in an inline function. */ enum c_inline_static_type { /* Identifier with internal linkage used in function that may be an inline definition (i.e., file-scope static). */ csi_internal, /* Modifiable object with static storage duration defined in function that may be an inline definition (i.e., local static). */ csi_modifiable }; /* in c-parser.c */ extern void c_parse_init (void); /* in c-aux-info.c */ extern void gen_aux_info_record (tree, int, int, int); /* in c-decl.c */ struct c_spot_bindings; struct c_struct_parse_info; extern struct obstack parser_obstack; extern tree c_break_label; extern tree c_cont_label; extern bool global_bindings_p (void); extern void push_scope (void); extern tree pop_scope (void); extern void c_bindings_start_stmt_expr (struct c_spot_bindings *); extern void c_bindings_end_stmt_expr (struct c_spot_bindings *); extern void record_inline_static (location_t, tree, tree, enum c_inline_static_type); extern void c_init_decl_processing (void); extern void c_print_identifier (FILE *, tree, int); extern int quals_from_declspecs (const struct c_declspecs *); extern struct c_declarator *build_array_declarator (location_t, tree, struct c_declspecs *, bool, bool); extern tree build_enumerator (location_t, location_t, struct c_enum_contents *, tree, tree); extern tree check_for_loop_decls (location_t, bool); extern void mark_forward_parm_decls (void); extern void declare_parm_level (void); extern void undeclared_variable (location_t, tree); extern tree lookup_label_for_goto (location_t, tree); extern tree declare_label (tree); extern tree define_label (location_t, tree); extern struct c_spot_bindings *c_get_switch_bindings (void); extern void c_release_switch_bindings (struct c_spot_bindings *); extern bool c_check_switch_jump_warnings (struct c_spot_bindings *, location_t, location_t); extern void finish_decl (tree, location_t, tree, tree, tree); extern tree finish_enum (tree, tree, tree); extern void finish_function (void); extern tree finish_struct (location_t, tree, tree, tree, struct c_struct_parse_info *); extern struct c_arg_info *build_arg_info (void); extern struct c_arg_info *get_parm_info (bool, tree); extern tree grokfield (location_t, struct 
c_declarator *, struct c_declspecs *, tree, tree *); extern tree groktypename (struct c_type_name *, tree *, bool *); extern tree grokparm (const struct c_parm *, tree *); extern tree implicitly_declare (location_t, tree); extern void keep_next_level (void); extern void pending_xref_error (void); extern void c_push_function_context (void); extern void c_pop_function_context (void); extern void push_parm_decl (const struct c_parm *, tree *); extern struct c_declarator *set_array_declarator_inner (struct c_declarator *, struct c_declarator *); extern tree c_builtin_function (tree); extern tree c_builtin_function_ext_scope (tree); extern void shadow_tag (const struct c_declspecs *); extern void shadow_tag_warned (const struct c_declspecs *, int); extern tree start_enum (location_t, struct c_enum_contents *, tree); extern int start_function (struct c_declspecs *, struct c_declarator *, tree); extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool, tree); extern tree start_struct (location_t, enum tree_code, tree, struct c_struct_parse_info **); extern void store_parm_decls (void); extern void store_parm_decls_from (struct c_arg_info *); extern void temp_store_parm_decls (tree, tree); extern void temp_pop_parm_decls (void); extern tree xref_tag (enum tree_code, tree); extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree); extern struct c_parm *build_c_parm (struct c_declspecs *, tree, struct c_declarator *); extern struct c_declarator *build_attrs_declarator (tree, struct c_declarator *); extern struct c_declarator *build_function_declarator (struct c_arg_info *, struct c_declarator *); extern struct c_declarator *build_id_declarator (tree); extern struct c_declarator *make_pointer_declarator (struct c_declspecs *, struct c_declarator *); extern struct c_declspecs *build_null_declspecs (void); extern struct c_declspecs *declspecs_add_qual (source_location, struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_type (location_t, struct c_declspecs *, struct c_typespec); extern struct c_declspecs *declspecs_add_scspec (source_location, struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_attrs (source_location, struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_addrspace (source_location, struct c_declspecs *, addr_space_t); extern struct c_declspecs *declspecs_add_alignas (source_location, struct c_declspecs *, tree); extern struct c_declspecs *finish_declspecs (struct c_declspecs *); /* in c-objc-common.c */ extern bool c_objc_common_init (void); extern bool c_missing_noreturn_ok_p (tree); extern bool c_warn_unused_global_decl (const_tree); extern void c_initialize_diagnostics (diagnostic_context *); extern bool c_vla_unspec_p (tree x, tree fn); /* in c-typeck.c */ extern int in_alignof; extern int in_sizeof; extern int in_typeof; extern tree c_last_sizeof_arg; extern struct c_switch *c_switch_stack; extern tree c_objc_common_truthvalue_conversion (location_t, tree); extern tree require_complete_type (tree); extern int same_translation_unit_p (const_tree, const_tree); extern int comptypes (tree, tree); extern int comptypes_check_different_types (tree, tree, bool *); extern bool c_vla_type_p (const_tree); extern bool c_mark_addressable (tree); extern void c_incomplete_type_error (const_tree, const_tree); extern tree c_type_promotes_to (tree); extern struct c_expr default_function_array_conversion (location_t, struct c_expr); extern struct c_expr default_function_array_read_conversion (location_t, struct 
c_expr); extern struct c_expr convert_lvalue_to_rvalue (location_t, struct c_expr, bool, bool); extern void mark_exp_read (tree); extern tree composite_type (tree, tree); extern tree build_component_ref (location_t, tree, tree); extern tree build_array_ref (location_t, tree, tree); extern tree build_external_ref (location_t, tree, int, tree *); extern void pop_maybe_used (bool); extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr); extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *); extern struct c_expr parser_build_unary_op (location_t, enum tree_code, struct c_expr); extern struct c_expr parser_build_binary_op (location_t, enum tree_code, struct c_expr, struct c_expr); extern tree build_conditional_expr (location_t, tree, bool, tree, tree, tree, tree); extern tree build_compound_expr (location_t, tree, tree); extern tree c_cast_expr (location_t, struct c_type_name *, tree); extern tree build_c_cast (location_t, tree, tree); extern void store_init_value (location_t, tree, tree, tree); extern void maybe_warn_string_init (location_t, tree, struct c_expr); extern void start_init (tree, tree, int); extern void finish_init (void); extern void really_start_incremental_init (tree); extern void push_init_level (location_t, int, struct obstack *); extern struct c_expr pop_init_level (location_t, int, struct obstack *); extern void set_init_index (location_t, tree, tree, struct obstack *); extern void set_init_label (location_t, tree, struct obstack *); extern void process_init_element (location_t, struct c_expr, bool, struct obstack *); extern tree build_compound_literal (location_t, tree, tree, bool); extern void check_compound_literal_type (location_t, struct c_type_name *); extern tree c_start_case (location_t, location_t, tree, bool); extern void c_finish_case (tree, tree); extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool); extern tree build_asm_stmt (tree, tree); extern int c_types_compatible_p (tree, tree); extern tree c_begin_compound_stmt (bool); extern tree c_end_compound_stmt (location_t, tree, bool); extern void c_finish_if_stmt (location_t, tree, tree, tree, bool); extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, bool); extern tree c_begin_stmt_expr (void); extern tree c_finish_stmt_expr (location_t, tree); extern tree c_process_expr_stmt (location_t, tree); extern tree c_finish_expr_stmt (location_t, tree); extern tree c_finish_return (location_t, tree, tree); extern tree c_finish_bc_stmt (location_t, tree *, bool); extern tree c_finish_goto_label (location_t, tree); extern tree c_finish_goto_ptr (location_t, tree); extern tree c_expr_to_decl (tree, bool *, bool *); extern tree c_finish_oacc_parallel (location_t, tree, tree); extern tree c_finish_oacc_kernels (location_t, tree, tree); extern tree c_finish_oacc_data (location_t, tree, tree); extern tree c_begin_omp_parallel (void); extern tree c_finish_omp_parallel (location_t, tree, tree); extern tree c_begin_omp_task (void); extern tree c_finish_omp_task (location_t, tree, tree); extern void c_finish_omp_cancel (location_t, tree); extern void c_finish_omp_cancellation_point (location_t, tree); extern tree c_finish_omp_clauses (tree); extern tree c_build_va_arg (location_t, tree, tree); extern tree c_finish_transaction (location_t, tree, int); extern bool c_tree_equal (tree, tree); extern tree c_build_function_call_vec (location_t, vec<location_t>, tree, vec<tree, va_gc> *, vec<tree, va_gc> *); /* Set to 0 at beginning of a function definition, set to 
1 if a return statement that specifies a return value is seen. */ extern int current_function_returns_value; /* Set to 0 at beginning of a function definition, set to 1 if a return statement with no argument is seen. */ extern int current_function_returns_null; /* Set to 0 at beginning of a function definition, set to 1 if a call to a noreturn function is seen. */ extern int current_function_returns_abnormally; /* Mode used to build pointers (VOIDmode means ptr_mode). */ extern machine_mode c_default_pointer_mode; /* In c-decl.c */ /* Tell the binding oracle what kind of binding we are looking for. */ enum c_oracle_request { C_ORACLE_SYMBOL, C_ORACLE_TAG, C_ORACLE_LABEL }; /* If this is non-NULL, then it is a "binding oracle" which can lazily create bindings when needed by the C compiler. The oracle is told the name and type of the binding to create. It can call pushdecl or the like to ensure the binding is visible; or do nothing, leaving the binding untouched. c-decl.c takes note of when the oracle has been called and will not call it again if it fails to create a given binding. */ typedef void c_binding_oracle_function (enum c_oracle_request, tree identifier); extern c_binding_oracle_function *c_binding_oracle; extern void c_finish_incomplete_decl (tree); extern void c_write_global_declarations (void); extern tree c_omp_reduction_id (enum tree_code, tree); extern tree c_omp_reduction_decl (tree); extern tree c_omp_reduction_lookup (tree, tree); extern tree c_check_omp_declare_reduction_r (tree *, int *, void *); extern void c_pushtag (location_t, tree, tree); extern void c_bind (location_t, tree, bool); /* In c-errors.c */ extern void pedwarn_c90 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); extern bool pedwarn_c99 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); #endif /* ! GCC_C_TREE_H */
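Most of what this header declares revolves around struct c_declarator: each declarator operator wraps the declarator it applies to, with the cdk_id node innermost, and grokdeclarator peels the layers from the outside in while wrapping the base type at each step. A toy, self-contained analogue of that nesting (illustration only; these are not GCC's real types or API) for the declaration int *x[10]:

/* simplified stand-in for the cdk_pointer/cdk_array/cdk_id chain */
#include <stdio.h>
#include <string.h>

enum dk { dk_id, dk_pointer, dk_array };

struct declarator {
    enum dk kind;
    const struct declarator *inner; /* like c_declarator.declarator; NULL for dk_id */
    const char *id;                 /* dk_id only */
    int dimen;                      /* dk_array only */
};

int main(void)
{
    /* int *x[10]: the parser roughly builds pointer -> array[10] -> id("x") */
    struct declarator id  = { dk_id,      NULL, "x",  0  };
    struct declarator arr = { dk_array,   &id,  NULL, 10 };
    struct declarator ptr = { dk_pointer, &arr, NULL, 0  };

    char type[128] = "int";
    char tmp[128];
    const struct declarator *d;

    /* mimic grokdeclarator: peel layers outside-in, wrapping the base type */
    for (d = &ptr; d->kind != dk_id; d = d->inner) {
        if (d->kind == dk_pointer)
            snprintf(tmp, sizeof tmp, "pointer to %s", type);
        else
            snprintf(tmp, sizeof tmp, "array[%d] of %s", d->dimen, type);
        strcpy(type, tmp);
    }
    printf("%s is %s\n", d->id, type);
    return 0;
}

Running it prints "x is array[10] of pointer to int", which is how the inverted nesting of declarators turns back into the declared type.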
GB_unaryop__abs_uint32_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_uint32_fp64
// op(A') function:  GB_tran__abs_uint32_fp64

// C type:   uint32_t
// A type:   double
// cast:     uint32_t cij ; GB_CAST_UNSIGNED(cij,aij,32)
// unaryop:  cij = aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint32_t z ; GB_CAST_UNSIGNED(z,x,32) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_uint32_fp64
(
    uint32_t *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_uint32_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
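Since the output type is unsigned, GB_OP above is the identity (|x| is x for a uint32_t) and all of the real work happens in the cast from double. A hand-expanded sketch of what GB_unop__abs_uint32_fp64 boils down to (illustration only; the NaN/clamping behavior shown for GB_CAST_UNSIGNED is an assumption, and the authoritative macro lives in GB.h):

/* hypothetical hand-expansion, not generated GraphBLAS code */
#include <stdint.h>
#include <math.h>

void unop_abs_uint32_fp64_sketch
(
    uint32_t *Cx,
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;                   // GB_GETA
        uint32_t z ;                            // GB_CASTING, i.e. GB_CAST_UNSIGNED(z,aij,32)
        if (isnan (aij) || aij <= 0)
        {
            z = 0 ;                             // assumed: NaN and negatives clamp to 0
        }
        else if (aij >= (double) UINT32_MAX)
        {
            z = UINT32_MAX ;                    // assumed: overflow saturates
        }
        else
        {
            z = (uint32_t) aij ;
        }
        Cx [p] = z ;                            // GB_OP: ABS on uint32_t is the identity
    }
}

The generated file keeps the macro indirection instead of writing this out, so that a single template such as GB_unaryop_transpose.c can be instantiated for every type/operator pair.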
mclib.c
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <stdbool.h> #include <glob.h> #include <unistd.h> #include <dirent.h> #include <limits.h> #include "hdf5.h" #include <math.h> #include <time.h> #include <gsl/gsl_math.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include <gsl/gsl_blas.h> #include <gsl/gsl_vector.h> #include <gsl/gsl_matrix.h> #include <gsl/gsl_sf_bessel.h> #include <gsl/gsl_permutation.h> #include <gsl/gsl_sort.h> #include <gsl/gsl_sort_vector.h> //#include "mclib_3d.h" #include "mclib.h" #include <omp.h> #include "mpi.h" #include "mc_synch.h" #define PROP_DIM1 1 #define PROP_DIM2 8 #define PROP_DIM3 8 #define COORD_DIM1 2 #define R_DIM_2D 9120 #define THETA_DIM_2D 2000 //define constants const double A_RAD=7.56e-15, C_LIGHT=2.99792458e10, PL_CONST=6.6260755e-27, FINE_STRUCT=7.29735308e-3, CHARGE_EL= 4.8032068e-10; const double K_B=1.380658e-16, M_P=1.6726231e-24, THOM_X_SECT=6.65246e-25, M_EL=9.1093879e-28 , R_EL=2.817941499892705e-13; int getOrigNumProcesses(int *counted_cont_procs, int **proc_array, char dir[200], int angle_rank, int angle_procs, int last_frame) { int i=0, j=0, val=0, original_num_procs=-1, rand_num=0; int frame2=0, framestart=0, scatt_framestart=0, ph_num=0; double time=0; char mc_chkpt_files[200]="", restrt=""; //define new variable that wont write over the restrt variable in the main part of the code, when its put into the readCheckpoint function struct photon *phPtr=NULL; //pointer to array of photons //DIR * dirp; //struct dirent * entry; //struct stat st = {0}; glob_t files; //if (angle_rank==0) { //find number of mc_checkpt files there are //loop through them and find out which prior processes didnt finish and keep track of which ones didnt snprintf(mc_chkpt_files, sizeof(mc_chkpt_files), "%s%s", dir,"mc_chkpt_*" ); val=glob(mc_chkpt_files, 0, NULL,&files ); //printf("TEST: %s\n", mc_chkpt_files); //look @ a file by choosing rand int between 0 and files.gl_pathc and if the file exists open and read it to get the actual value for the old number of angle_procs srand(angle_rank); //printf("NUM_FILES: %d\n",files.gl_pathc); rand_num=rand() % files.gl_pathc; snprintf(mc_chkpt_files, sizeof(mc_chkpt_files), "%s%s%d%s", dir,"mc_chkpt_", rand_num,".dat" ); //printf("TEST: %s\n", mc_chkpt_files); if ( access( mc_chkpt_files, F_OK ) == -1 ) { while(( access( mc_chkpt_files, F_OK ) == -1 ) ) { rand_num=rand() % files.gl_pathc; snprintf(mc_chkpt_files, sizeof(mc_chkpt_files), "%s%s%d%s", dir,"mc_chkpt_", rand_num,".dat" ); //printf("TEST: %s\n", mc_chkpt_files); } } readCheckpoint(dir, &phPtr, &frame2, &framestart, &scatt_framestart, &ph_num, &restrt, &time, rand_num, &original_num_procs); //original_num_procs= 70; } int count_procs[original_num_procs], count=0; int cont_procs[original_num_procs]; //create array of files including any checkpoint file which may not have been created yet b/c old process was still in 1st frame of scattering for (j=0;j<original_num_procs;j++) { count_procs[j]=j; cont_procs[j]=-1; //set to impossible value for previous mpi process rank that needs to be con't } int limit= (angle_rank != angle_procs-1) ? 
(angle_rank+1)*original_num_procs/angle_procs : original_num_procs; //char mc_chkpt_files[200]=""; printf("Angle ID: %d, start_num: %d, limit: %d\n", angle_rank, (angle_rank*original_num_procs/angle_procs), limit); count=0; for (j=floor(angle_rank*original_num_procs/angle_procs);j<limit;j++) { snprintf(mc_chkpt_files, sizeof(mc_chkpt_files), "%s%s%d%s", dir,"mc_chkpt_", j,".dat" ); //printf("TEST: %s\n", mc_chkpt_files); if ( access( mc_chkpt_files, F_OK ) != -1 ) { readCheckpoint(dir, &phPtr, &frame2, &framestart, &scatt_framestart, &ph_num, &restrt, &time, count_procs[j], &i); free(phPtr); phPtr=NULL; if ((framestart<=frame2) && (scatt_framestart<=last_frame)) //add another condition here { cont_procs[count]=j; //printf("ACCEPTED: %s\n", mc_chkpt_files); count++; } } else { cont_procs[count]=j; //printf("ACCEPTED: %s\n", mc_chkpt_files); count++; } } (*proc_array)=malloc (count * sizeof (int )); //allocate space to pointer to hold the old process angle_id's count=0; for (i=0;i<original_num_procs;i++) { if (cont_procs[i]!=-1) { (*proc_array)[count]=cont_procs[i]; count++; } } //save number of old processes this process counted need to be restarted *counted_cont_procs=count; globfree(& files); return original_num_procs; } void printPhotons(struct photon *ph, int num_ph, int num_ph_abs, int num_ph_emit, int num_null_ph, int scatt_synch_num_ph, int frame,int frame_inj, int frame_last, char dir[200], int angle_rank, FILE *fPtr ) { //function to save the photons' positions and 4 momentum //now using hdf5 file for each process w/ group structure /(weights or Hydro File #)/(p0,p1,p2,p3, r0, r1, r2, s0, s1, s2, or num_scatt) //open the file if it exists and see if the group exists for the given frame, if frame doesnt exist then write datasets for all photons as extendable //if the frame does exist then read information from the prewritten data and then add new data to it as extended chunk int i=0, count=0, rank=1, net_num_ph=num_ph-num_ph_abs-num_null_ph, weight_net_num_ph= num_ph-num_ph_abs-num_null_ph, global_weight_net_num_ph=(frame==frame_inj) ? num_ph-num_ph_abs-num_null_ph : num_ph_emit-num_ph_abs ; //can have more photons absorbed than emitted, weight_net_num_ph=(frame==frame_inj) ? 
num_ph-num_ph_abs-num_null_ph : scatt_synch_num_ph #if defined(_OPENMP) int num_thread=omp_get_num_threads(); #endif char mc_file[200]="", group[200]="", group_weight[200]="", *ph_type=NULL; double p0[net_num_ph], p1[net_num_ph], p2[net_num_ph], p3[net_num_ph] , r0[net_num_ph], r1[net_num_ph], r2[net_num_ph], num_scatt[net_num_ph], weight[weight_net_num_ph], global_weight[net_num_ph]; double s0[net_num_ph], s1[net_num_ph], s2[net_num_ph], s3[net_num_ph], comv_p0[net_num_ph], comv_p1[net_num_ph], comv_p2[net_num_ph], comv_p3[net_num_ph]; hid_t file, file_init, dspace, dspace_weight, dspace_global_weight, fspace, mspace, prop, prop_weight, prop_global_weight, group_id; hid_t dset_p0, dset_p1, dset_p2, dset_p3, dset_r0, dset_r1, dset_r2, dset_s0, dset_s1, dset_s2, dset_s3, dset_num_scatt, dset_weight, dset_weight_2, dset_comv_p0, dset_comv_p1, dset_comv_p2, dset_comv_p3, dset_ph_type; herr_t status, status_group, status_weight, status_weight_2; hsize_t dims[1]={net_num_ph}, dims_weight[1]={weight_net_num_ph}, dims_old[1]={0}; //1 is the number of dimansions for the dataset, called rank hsize_t maxdims[1]={H5S_UNLIMITED}; hsize_t size[1]; hsize_t offset[1]; fprintf(fPtr, "num_ph %d num_ph_abs %d num_null_ph %d num_ph_emit %d\nAllocated weight to be %d values large and other arrays to be %d\n",num_ph,num_ph_abs,num_null_ph,num_ph_emit, weight_net_num_ph, net_num_ph); ph_type=malloc((net_num_ph)*sizeof(char)); //save photon data into large arrays, NEED TO KNOW HOW MANY NULL PHOTONS WE HAVE AKA SAVED SPACE THAT AREN'T ACTUALLY PHOTONS TO PROPERLY SAVE SPACE FOR ARRAYS ABOVE weight_net_num_ph=0; count=0;//used to keep track of weight values since it may not be the same as num_ph //#pragma omp parallel for num_threads(num_thread) reduction(+:weight_net_num_ph) for (i=0;i<num_ph;i++) { if ((ph+i)->weight != 0) { p0[count]= ((ph+i)->p0); p1[count]= ((ph+i)->p1); p2[count]= ((ph+i)->p2); p3[count]= ((ph+i)->p3); r0[count]= ((ph+i)->r0); r1[count]= ((ph+i)->r1); r2[count]= ((ph+i)->r2); s0[count]= ((ph+i)->s0); s1[count]= ((ph+i)->s1); s2[count]= ((ph+i)->s2); s3[count]= ((ph+i)->s3); num_scatt[count]= ((ph+i)->num_scatt); //if ((frame==frame_inj) || ((scatt_synch_num_ph > 0) && ((ph+i)->type == COMPTONIZED_PHOTON))) //if the frame is the same one that the photons were injected in, save the photon weights OR if there are synchrotron photons that havent been absorbed { weight[weight_net_num_ph]= ((ph+i)->weight); weight_net_num_ph++; //fprintf(fPtr, "%d %c %e %e %e %e %e %e %e %e\n", i, (ph+i)->type, (ph+i)->r0, (ph+i)->r1, (ph+i)->r2, (ph+i)->num_scatt, (ph+i)->weight, (ph+i)->p0, (ph+i)->comv_p0, (ph+i)->p0*C_LIGHT/1.6e-9); } if ((frame==frame_last)) { global_weight[count]=((ph+i)->weight); } *(ph_type+count)=(ph+i)->type; //printf("%d %c %e %e %e %e %e %e %e %e %c\n", i, (ph+i)->type, (ph+i)->r0, (ph+i)->r1, (ph+i)->r2, (ph+i)->num_scatt, (ph+i)->weight, (ph+i)->p0, (ph+i)->comv_p0, (ph+i)->p0*C_LIGHT/1.6e-9, *(ph_type+count)); count++; } } //make strings for file name and group snprintf(mc_file,sizeof(mc_file),"%s%s%d%s",dir,"mc_proc_", angle_rank, ".h5" ); snprintf(group,sizeof(mc_file),"%d",frame ); //see if file exists, if not create it, if it does just open it status = H5Eset_auto(NULL, NULL, NULL); //turn off automatic error printing file_init=H5Fcreate(mc_file, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT); //see if the file initially does/doesnt exist file=file_init; status = H5Eset_auto(H5E_DEFAULT, H5Eprint2, stderr); //turn on auto error printing if (file_init<0) { //the file exists, open it 
with read write file=H5Fopen(mc_file, H5F_ACC_RDWR, H5P_DEFAULT); //fprintf(fPtr,"In IF\n"); //see if the group exists status = H5Eset_auto(NULL, NULL, NULL); status_group = H5Gget_objinfo (file, group, 0, NULL); status = H5Eset_auto(H5E_DEFAULT, H5Eprint2, stderr); /* fprintf(fPtr, group); if (status_group == 0) { fprintf (fPtr, "The group exists.\n"); //now try to see if there's a weight data set for this group } else { fprintf (fPtr, "The group either does NOT exist\n or some other error occurred.\n"); } */ } if ((file_init>=0) || (status_group != 0) ) { //printf("In IF\n"); //if the file exists, see if the weight exists //snprintf(group_weight,sizeof(group),"/PW",i ); status = H5Eset_auto(NULL, NULL, NULL); status_weight = H5Gget_objinfo (file, "/PW", 0, NULL); status = H5Eset_auto(H5E_DEFAULT, H5Eprint2, stderr); fprintf(fPtr,"Status of /PW %d\n", status_weight); //the file has been newly created or if the group does not exist then create the group for the frame group_id = H5Gcreate2(file, group, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); /* Modify dataset creation properties, i.e. enable chunking */ prop = H5Pcreate (H5P_DATASET_CREATE); status = H5Pset_chunk (prop, rank, dims); if ((frame==frame_inj) || (scatt_synch_num_ph > 0)) { prop_weight= H5Pcreate (H5P_DATASET_CREATE); status = H5Pset_chunk (prop_weight, rank, dims_weight); } if ((frame==frame_last)) { status = H5Pset_chunk (prop, rank, dims); } /* Create the data space with unlimited dimensions. */ dspace = H5Screate_simple (rank, dims, maxdims); dspace_weight=H5Screate_simple (rank, dims_weight, maxdims); /* Create a new dataset within the file using chunk creation properties. */ dset_p0 = H5Dcreate2 (group_id, "P0", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, prop, H5P_DEFAULT); dset_p1 = H5Dcreate2 (group_id, "P1", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, prop, H5P_DEFAULT); dset_p2 = H5Dcreate2 (group_id, "P2", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, prop, H5P_DEFAULT); dset_p3 = H5Dcreate2 (group_id, "P3", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, prop, H5P_DEFAULT); //if (COMV_SWITCH!=0) #if COMV_SWITCH == ON { dset_comv_p0 = H5Dcreate2 (group_id, "COMV_P0", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, prop, H5P_DEFAULT); dset_comv_p1 = H5Dcreate2 (group_id, "COMV_P1", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, prop, H5P_DEFAULT); dset_comv_p2 = H5Dcreate2 (group_id, "COMV_P2", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, prop, H5P_DEFAULT); dset_comv_p3 = H5Dcreate2 (group_id, "COMV_P3", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, prop, H5P_DEFAULT); } #endif dset_r0 = H5Dcreate2 (group_id, "R0", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, prop, H5P_DEFAULT); dset_r1 = H5Dcreate2 (group_id, "R1", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, prop, H5P_DEFAULT); dset_r2 = H5Dcreate2 (group_id, "R2", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, prop, H5P_DEFAULT); //if (STOKES_SWITCH!=0) #if STOKES_SWITCH == ON { dset_s0 = H5Dcreate2 (group_id, "S0", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, prop, H5P_DEFAULT); dset_s1 = H5Dcreate2 (group_id, "S1", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, prop, H5P_DEFAULT); dset_s2 = H5Dcreate2 (group_id, "S2", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, prop, H5P_DEFAULT); dset_s3 = H5Dcreate2 (group_id, "S3", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, prop, H5P_DEFAULT); } #endif #if SAVE_TYPE == ON { dset_ph_type = H5Dcreate2 (group_id, "PT", H5T_NATIVE_CHAR, dspace, H5P_DEFAULT, prop, H5P_DEFAULT); } #endif dset_num_scatt = H5Dcreate2 (group_id, "NS", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, prop, H5P_DEFAULT); if ((frame==frame_inj) 
|| (scatt_synch_num_ph > 0)) //if the frame is the same one that the photons were injected in, save the photon weights or if there are emitted photons that havent been absorbed { dset_weight_2 = H5Dcreate2 (group_id, "PW", H5T_NATIVE_DOUBLE, dspace_weight, H5P_DEFAULT, prop_weight, H5P_DEFAULT); //save the new injected photons' weights } if ((frame==frame_last)) { //if saving the injected photons weight dont have to worry about the major ph_weight thats not in a group dset_weight = H5Dcreate2 (file, "PW", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, prop, H5P_DEFAULT); } /* Write data to dataset */ status = H5Dwrite (dset_p0, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, p0); status = H5Dwrite (dset_p1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, p1); status = H5Dwrite (dset_p2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, p2); status = H5Dwrite (dset_p3, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, p3); //if (COMV_SWITCH!=0) #if COMV_SWITCH == ON { status = H5Dwrite (dset_comv_p0, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, comv_p0); status = H5Dwrite (dset_comv_p1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, comv_p1); status = H5Dwrite (dset_comv_p2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, comv_p2); status = H5Dwrite (dset_comv_p3, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, comv_p3); } #endif status = H5Dwrite (dset_r0, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, r0); status = H5Dwrite (dset_r1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, r1); status = H5Dwrite (dset_r2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, r2); //if (STOKES_SWITCH!=0) #if STOKES_SWITCH == ON { status = H5Dwrite (dset_s0, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, s0); status = H5Dwrite (dset_s1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, s1); status = H5Dwrite (dset_s2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, s2); status = H5Dwrite (dset_s3, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, s3); } #endif #if SAVE_TYPE == ON { status = H5Dwrite (dset_ph_type, H5T_NATIVE_CHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, ph_type); } #endif status = H5Dwrite (dset_num_scatt, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, num_scatt); if ((frame==frame_inj) || (scatt_synch_num_ph > 0)) { status = H5Dwrite (dset_weight_2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, weight); status = H5Pclose (prop_weight); status = H5Dclose (dset_weight_2); } if ((frame==frame_last)) { //printf("Before write\n"); status = H5Dwrite (dset_weight, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, global_weight); //printf("After write\n"); } status = H5Pclose (prop); } else { //if the group already exists then extend it //find the size of it now /* Open an existing group of the specified file. 
*/ group_id = H5Gopen2(file, group, H5P_DEFAULT); dset_p0 = H5Dopen (group_id, "P0", H5P_DEFAULT); //open dataset //get dimensions of array and save it dspace = H5Dget_space (dset_p0); status=H5Sget_simple_extent_dims(dspace, dims_old, NULL); //save dimesnions in dims_old //extend the dataset size[0] = dims[0]+ dims_old[0]; status = H5Dset_extent (dset_p0, size); /* Select a hyperslab in extended portion of dataset */ fspace = H5Dget_space (dset_p0); offset[0] = dims_old[0]; status = H5Sselect_hyperslab (fspace, H5S_SELECT_SET, offset, NULL, dims, NULL); /* Define memory space */ mspace = H5Screate_simple (rank, dims, NULL); /* Write the data to the extended portion of dataset */ status = H5Dwrite (dset_p0, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, p0); status = H5Sclose (dspace); status = H5Sclose (mspace); status = H5Sclose (fspace); dset_p1 = H5Dopen (group_id, "P1", H5P_DEFAULT); //open dataset dspace = H5Dget_space (dset_p1); status=H5Sget_simple_extent_dims(dspace, dims_old, NULL); //save dimesnions in dims size[0] = dims[0]+ dims_old[0]; status = H5Dset_extent (dset_p1, size); fspace = H5Dget_space (dset_p1); offset[0] = dims_old[0]; status = H5Sselect_hyperslab (fspace, H5S_SELECT_SET, offset, NULL, dims, NULL); mspace = H5Screate_simple (rank, dims, NULL); status = H5Dwrite (dset_p1, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, p1); status = H5Sclose (dspace); status = H5Sclose (mspace); status = H5Sclose (fspace); dset_p2 = H5Dopen (group_id, "P2", H5P_DEFAULT); //open dataset dspace = H5Dget_space (dset_p2); status=H5Sget_simple_extent_dims(dspace, dims_old, NULL); //save dimesnions in dims size[0] = dims[0]+ dims_old[0]; status = H5Dset_extent (dset_p2, size); fspace = H5Dget_space (dset_p2); offset[0] = dims_old[0]; status = H5Sselect_hyperslab (fspace, H5S_SELECT_SET, offset, NULL, dims, NULL); mspace = H5Screate_simple (rank, dims, NULL); status = H5Dwrite (dset_p2, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, p2); status = H5Sclose (dspace); status = H5Sclose (mspace); status = H5Sclose (fspace); dset_p3 = H5Dopen (group_id, "P3", H5P_DEFAULT); //open dataset dspace = H5Dget_space (dset_p3); status=H5Sget_simple_extent_dims(dspace, dims_old, NULL); //save dimesnions in dims size[0] = dims[0]+ dims_old[0]; status = H5Dset_extent (dset_p3, size); fspace = H5Dget_space (dset_p3); offset[0] = dims_old[0]; status = H5Sselect_hyperslab (fspace, H5S_SELECT_SET, offset, NULL, dims, NULL); mspace = H5Screate_simple (rank, dims, NULL); status = H5Dwrite (dset_p3, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, p3); status = H5Sclose (dspace); status = H5Sclose (mspace); status = H5Sclose (fspace); //if (COMV_SWITCH!=0) #if COMV_SWITCH == ON { dset_comv_p0 = H5Dopen (group_id, "COMV_P0", H5P_DEFAULT); //open dataset dspace = H5Dget_space (dset_comv_p0); status=H5Sget_simple_extent_dims(dspace, dims_old, NULL); //save dimesnions in dims size[0] = dims[0]+ dims_old[0]; status = H5Dset_extent (dset_comv_p0, size); fspace = H5Dget_space (dset_comv_p0); offset[0] = dims_old[0]; status = H5Sselect_hyperslab (fspace, H5S_SELECT_SET, offset, NULL, dims, NULL); mspace = H5Screate_simple (rank, dims, NULL); status = H5Dwrite (dset_comv_p0, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, comv_p0); status = H5Sclose (dspace); status = H5Sclose (mspace); status = H5Sclose (fspace); dset_comv_p1 = H5Dopen (group_id, "COMV_P1", H5P_DEFAULT); //open dataset dspace = H5Dget_space (dset_comv_p1); status=H5Sget_simple_extent_dims(dspace, dims_old, NULL); //save dimesnions in dims size[0] = 
dims[0]+ dims_old[0]; status = H5Dset_extent (dset_comv_p1, size); fspace = H5Dget_space (dset_comv_p1); offset[0] = dims_old[0]; status = H5Sselect_hyperslab (fspace, H5S_SELECT_SET, offset, NULL, dims, NULL); mspace = H5Screate_simple (rank, dims, NULL); status = H5Dwrite (dset_comv_p1, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, comv_p1); status = H5Sclose (dspace); status = H5Sclose (mspace); status = H5Sclose (fspace); dset_comv_p2 = H5Dopen (group_id, "COMV_P2", H5P_DEFAULT); //open dataset dspace = H5Dget_space (dset_comv_p2); status=H5Sget_simple_extent_dims(dspace, dims_old, NULL); //save dimesnions in dims size[0] = dims[0]+ dims_old[0]; status = H5Dset_extent (dset_comv_p2, size); fspace = H5Dget_space (dset_comv_p2); offset[0] = dims_old[0]; status = H5Sselect_hyperslab (fspace, H5S_SELECT_SET, offset, NULL, dims, NULL); mspace = H5Screate_simple (rank, dims, NULL); status = H5Dwrite (dset_comv_p2, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, comv_p2); status = H5Sclose (dspace); status = H5Sclose (mspace); status = H5Sclose (fspace); dset_comv_p3 = H5Dopen (group_id, "COMV_P3", H5P_DEFAULT); //open dataset dspace = H5Dget_space (dset_comv_p3); status=H5Sget_simple_extent_dims(dspace, dims_old, NULL); //save dimesnions in dims size[0] = dims[0]+ dims_old[0]; status = H5Dset_extent (dset_comv_p3, size); fspace = H5Dget_space (dset_comv_p3); offset[0] = dims_old[0]; status = H5Sselect_hyperslab (fspace, H5S_SELECT_SET, offset, NULL, dims, NULL); mspace = H5Screate_simple (rank, dims, NULL); status = H5Dwrite (dset_comv_p3, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, comv_p3); status = H5Sclose (dspace); status = H5Sclose (mspace); status = H5Sclose (fspace); } #endif dset_r0 = H5Dopen (group_id, "R0", H5P_DEFAULT); //open dataset dspace = H5Dget_space (dset_r0); status=H5Sget_simple_extent_dims(dspace, dims_old, NULL); //save dimesnions in dims size[0] = dims[0]+ dims_old[0]; status = H5Dset_extent (dset_r0, size); fspace = H5Dget_space (dset_r0); offset[0] = dims_old[0]; status = H5Sselect_hyperslab (fspace, H5S_SELECT_SET, offset, NULL, dims, NULL); mspace = H5Screate_simple (rank, dims, NULL); status = H5Dwrite (dset_r0, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, r0); status = H5Sclose (dspace); status = H5Sclose (mspace); status = H5Sclose (fspace); dset_r1 = H5Dopen (group_id, "R1", H5P_DEFAULT); //open dataset dspace = H5Dget_space (dset_r1); status=H5Sget_simple_extent_dims(dspace, dims_old, NULL); //save dimesnions in dims size[0] = dims[0]+ dims_old[0]; status = H5Dset_extent (dset_r1, size); fspace = H5Dget_space (dset_r1); offset[0] = dims_old[0]; status = H5Sselect_hyperslab (fspace, H5S_SELECT_SET, offset, NULL, dims, NULL); mspace = H5Screate_simple (rank, dims, NULL); status = H5Dwrite (dset_r1, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, r1); status = H5Sclose (dspace); status = H5Sclose (mspace); status = H5Sclose (fspace); dset_r2 = H5Dopen (group_id, "R2", H5P_DEFAULT); //open dataset dspace = H5Dget_space (dset_r2); status=H5Sget_simple_extent_dims(dspace, dims_old, NULL); //save dimesnions in dims size[0] = dims[0]+ dims_old[0]; status = H5Dset_extent (dset_r2, size); fspace = H5Dget_space (dset_r2); offset[0] = dims_old[0]; status = H5Sselect_hyperslab (fspace, H5S_SELECT_SET, offset, NULL, dims, NULL); mspace = H5Screate_simple (rank, dims, NULL); status = H5Dwrite (dset_r2, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, r2); status = H5Sclose (dspace); status = H5Sclose (mspace); status = H5Sclose (fspace); //if (STOKES_SWITCH!=0) #if 
STOKES_SWITCH == ON { dset_s0 = H5Dopen (group_id, "S0", H5P_DEFAULT); //open dataset dspace = H5Dget_space (dset_s0); status=H5Sget_simple_extent_dims(dspace, dims_old, NULL); //save dimesnions in dims size[0] = dims[0]+ dims_old[0]; status = H5Dset_extent (dset_s0, size); fspace = H5Dget_space (dset_s0); offset[0] = dims_old[0]; status = H5Sselect_hyperslab (fspace, H5S_SELECT_SET, offset, NULL, dims, NULL); mspace = H5Screate_simple (rank, dims, NULL); status = H5Dwrite (dset_s0, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, s0); status = H5Sclose (dspace); status = H5Sclose (mspace); status = H5Sclose (fspace); dset_s1 = H5Dopen (group_id, "S1", H5P_DEFAULT); //open dataset dspace = H5Dget_space (dset_s1); status=H5Sget_simple_extent_dims(dspace, dims_old, NULL); //save dimesnions in dims size[0] = dims[0]+ dims_old[0]; status = H5Dset_extent (dset_s1, size); fspace = H5Dget_space (dset_s1); offset[0] = dims_old[0]; status = H5Sselect_hyperslab (fspace, H5S_SELECT_SET, offset, NULL, dims, NULL); mspace = H5Screate_simple (rank, dims, NULL); status = H5Dwrite (dset_s1, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, s1); status = H5Sclose (dspace); status = H5Sclose (mspace); status = H5Sclose (fspace); dset_s2 = H5Dopen (group_id, "S2", H5P_DEFAULT); //open dataset dspace = H5Dget_space (dset_s2); status=H5Sget_simple_extent_dims(dspace, dims_old, NULL); //save dimesnions in dims size[0] = dims[0]+ dims_old[0]; status = H5Dset_extent (dset_s2, size); fspace = H5Dget_space (dset_s2); offset[0] = dims_old[0]; status = H5Sselect_hyperslab (fspace, H5S_SELECT_SET, offset, NULL, dims, NULL); mspace = H5Screate_simple (rank, dims, NULL); status = H5Dwrite (dset_s2, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, s2); status = H5Sclose (dspace); status = H5Sclose (mspace); status = H5Sclose (fspace); dset_s3 = H5Dopen (group_id, "S3", H5P_DEFAULT); //open dataset dspace = H5Dget_space (dset_s3); status=H5Sget_simple_extent_dims(dspace, dims_old, NULL); //save dimesnions in dims size[0] = dims[0]+ dims_old[0]; status = H5Dset_extent (dset_s3, size); fspace = H5Dget_space (dset_s3); offset[0] = dims_old[0]; status = H5Sselect_hyperslab (fspace, H5S_SELECT_SET, offset, NULL, dims, NULL); mspace = H5Screate_simple (rank, dims, NULL); status = H5Dwrite (dset_s3, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, s3); status = H5Sclose (dspace); status = H5Sclose (mspace); status = H5Sclose (fspace); } #endif #if SAVE_TYPE == ON { dset_ph_type = H5Dopen (group_id, "PT", H5P_DEFAULT); //open dataset dspace = H5Dget_space (dset_ph_type); status=H5Sget_simple_extent_dims(dspace, dims_old, NULL); //save dimesnions in dims size[0] = dims[0]+ dims_old[0]; status = H5Dset_extent (dset_ph_type, size); fspace = H5Dget_space (dset_ph_type); offset[0] = dims_old[0]; status = H5Sselect_hyperslab (fspace, H5S_SELECT_SET, offset, NULL, dims, NULL); mspace = H5Screate_simple (rank, dims, NULL); status = H5Dwrite (dset_ph_type, H5T_NATIVE_CHAR, mspace, fspace, H5P_DEFAULT, ph_type); status = H5Sclose (dspace); status = H5Sclose (mspace); status = H5Sclose (fspace); } #endif dset_num_scatt = H5Dopen (group_id, "NS", H5P_DEFAULT); //open dataset dspace = H5Dget_space (dset_num_scatt); status=H5Sget_simple_extent_dims(dspace, dims_old, NULL); //save dimesnions in dims size[0] = dims[0]+ dims_old[0]; status = H5Dset_extent (dset_num_scatt, size); fspace = H5Dget_space (dset_num_scatt); offset[0] = dims_old[0]; status = H5Sselect_hyperslab (fspace, H5S_SELECT_SET, offset, NULL, dims, NULL); mspace = H5Screate_simple 
status = H5Dwrite (dset_num_scatt, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, num_scatt);

//see if the weights dataset exists; if it does we can extend it, otherwise we need to create it and write the new values to it
snprintf(group_weight, sizeof(group_weight), "PW");
status = H5Eset_auto(H5E_DEFAULT, NULL, NULL); //temporarily turn off automatic HDF5 error printing for the existence probe
status_weight = H5Gget_objinfo (group_id, "PW", 0, NULL);
status = H5Eset_auto(H5E_DEFAULT, H5Eprint2, stderr); //turn automatic error printing back on
fprintf(fPtr, "Status of /frame/PW %d\n", status_weight);

//if (((frame==frame_inj) || (scatt_synch_num_ph > 0)) )
{
    status = H5Sclose (dspace); status = H5Sclose (mspace); status = H5Sclose (fspace);

    if (frame==frame_last)
    {
        //make sure to append the weights of the newly injected/emitted photons from the most recent injection to the global weights
        dset_weight = H5Dopen (file, "PW", H5P_DEFAULT); //open dataset
        //get dimensions of the array and save them
        dspace = H5Dget_space (dset_weight);
        status = H5Sget_simple_extent_dims(dspace, dims_old, NULL); //save the old dimensions in dims_old
        //extend the dataset
        size[0] = dims_weight[0] + dims_old[0];
        status = H5Dset_extent (dset_weight, size);
        /* Select a hyperslab in extended portion of dataset */
        fspace = H5Dget_space (dset_weight);
        offset[0] = dims_old[0];
        status = H5Sselect_hyperslab (fspace, H5S_SELECT_SET, offset, NULL, dims_weight, NULL);
        /* Define memory space */
        mspace = H5Screate_simple (rank, dims_weight, NULL);
        /* Write the data to the extended portion of dataset */
        status = H5Dwrite (dset_weight, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, weight);
    }

    if (status_weight >= 0)
    {
        //may have to create the weight dataset for the new set of photons that have been injected, although it may already exist since photons are now also emitted
        //see if the dataset exists
        status = H5Eset_auto(H5E_DEFAULT, NULL, NULL);
        status_weight_2 = H5Gget_objinfo (group_id, "/PW", 0, NULL);
        status = H5Eset_auto(H5E_DEFAULT, H5Eprint2, stderr);

        if (status_weight_2 < 0)
        {
            //the dataset doesn't exist
            /* Modify dataset creation properties, i.e. enable chunking */
            prop = H5Pcreate (H5P_DATASET_CREATE);
            status = H5Pset_chunk (prop, rank, dims);
            /* Create the data space with unlimited dimensions. */
            dspace = H5Screate_simple (rank, dims, maxdims);
            dset_weight_2 = H5Dcreate2 (group_id, "PW", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, prop, H5P_DEFAULT);
            //save the new injected photons' weights
            status = H5Dwrite (dset_weight_2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, weight);
            status = H5Pclose (prop);
        }
        else
        {
            //it exists and needs to be extended
            dset_weight_2 = H5Dopen (group_id, "PW", H5P_DEFAULT); //open dataset
            //get dimensions of the array and save them
            dspace = H5Dget_space (dset_weight_2);
            status = H5Sget_simple_extent_dims(dspace, dims_old, NULL); //save the old dimensions in dims_old
            //extend the dataset
            size[0] = dims_weight[0] + dims_old[0];
            status = H5Dset_extent (dset_weight_2, size);
            /* Select a hyperslab in extended portion of dataset */
            fspace = H5Dget_space (dset_weight_2);
            offset[0] = dims_old[0];
            status = H5Sselect_hyperslab (fspace, H5S_SELECT_SET, offset, NULL, dims_weight, NULL);
            /* Define memory space */
            mspace = H5Screate_simple (rank, dims_weight, NULL);
            /* Write the data to the extended portion of dataset */
            status = H5Dwrite (dset_weight_2, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, weight);
        }
    }
    else
    {
        fprintf(fPtr, "The frame exists in the hdf5 file but the weight dataset for the frame doesn't exist, therefore creating it.\n");
        fflush(fPtr);
        prop_weight = H5Pcreate (H5P_DATASET_CREATE);
        status = H5Pset_chunk (prop_weight, rank, dims_weight);
        dspace_weight = H5Screate_simple (rank, dims_weight, maxdims);
        dset_weight_2 = H5Dcreate2 (group_id, "PW", H5T_NATIVE_DOUBLE, dspace_weight, H5P_DEFAULT, prop_weight, H5P_DEFAULT);
        status = H5Dwrite (dset_weight_2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, weight);
        status = H5Pclose (prop_weight);
    }

    status = H5Dclose (dset_weight_2);
}

status = H5Sclose (dspace); status = H5Sclose (mspace); status = H5Sclose (fspace);
}

/* Close resources */
free(ph_type);
//status = H5Sclose (dspace);
status = H5Dclose (dset_p0); status = H5Dclose (dset_p1); status = H5Dclose (dset_p2); status = H5Dclose (dset_p3);

//if (COMV_SWITCH!=0)
#if COMV_SWITCH == ON
{
    status = H5Dclose (dset_comv_p0); status = H5Dclose (dset_comv_p1); status = H5Dclose (dset_comv_p2); status = H5Dclose (dset_comv_p3);
}
#endif

status = H5Dclose (dset_r0); status = H5Dclose (dset_r1); status = H5Dclose (dset_r2);

//if (STOKES_SWITCH!=0)
#if STOKES_SWITCH == ON
{
    status = H5Dclose (dset_s0); status = H5Dclose (dset_s1); status = H5Dclose (dset_s2); status = H5Dclose (dset_s3);
}
#endif

#if SAVE_TYPE == ON
{
    status = H5Dclose (dset_ph_type);
}
#endif

status = H5Dclose (dset_num_scatt);

if (frame==frame_last)
{
    status = H5Dclose (dset_weight);
}

/* Close the group. */
status = H5Gclose(group_id);

/* Terminate access to the file. */
status = H5Fclose(file);
}

int saveCheckpoint(char dir[200], int frame, int frame2, int scatt_frame, int ph_num, double time_now, struct photon *ph, int last_frame, int angle_rank, int angle_size)
{
    //function to save the data necessary to restart the simulation if it ends
    //need to save all photon data
    FILE *fPtr=NULL;
    char checkptfile[2000]="";
    char command[2000]="";
    char restart;
    int i=0, success=0;

    snprintf(checkptfile, sizeof(checkptfile), "%s%s%d%s", dir, "mc_chkpt_", angle_rank, ".dat");
    //snprintf(checkptfile,sizeof(checkptfile),"%s%s%d%s%d%s",dir,"mc_chkpt_", angle_rank, "_frame_", scatt_frame, ".dat" );
    //look at frame 1341?
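/*
 * For reference (derived from the fwrite()/fread() calls below and in
 * readCheckpoint()), the binary layout of mc_chkpt_<angle_rank>.dat is:
 *   int    angle_size
 *   char   restart   ('c' = interrupted mid-frame, 'r' = frame fully finished)
 *   int    frame
 *   int    frame2
 *   only when restart == 'c':
 *     int    scatt_frame
 *     double time_now
 *     int    ph_num
 *     struct photon  x ph_num
 *   when restart == 'r', the photon structs follow frame2 directly and are
 *   not read back by readCheckpoint().
 */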
if ((scatt_frame!=last_frame) && (scatt_frame != frame)) { //quick way to preserve old chkpt file if the new one overwrites the old one and corrupts it for some reason snprintf(command, sizeof(command), "%s%s %s_old","exec cp ",checkptfile, checkptfile); system(command); fPtr=fopen(checkptfile, "wb"); //printf("%s\n", checkptfile); if (fPtr==NULL) { printf("Cannot open %s to save checkpoint\n", checkptfile); success=1; } else { //can call printPhotons here or return an int signifying if the checkpoint save worked fwrite(&angle_size, sizeof(int), 1, fPtr); restart='c'; fwrite(&restart, sizeof(char), 1, fPtr); //printf("Rank: %d wrote restart %c\n", angle_rank, restart); fflush(stdout); fwrite(&frame, sizeof(int), 1, fPtr); //printf("Rank: %d wrote frame\n", angle_rank); fflush(stdout); fwrite(&frame2, sizeof(int), 1, fPtr); //printf("Rank: %d wrote frame2\n", angle_rank); fflush(stdout); fwrite(&scatt_frame, sizeof(int), 1, fPtr); //printf("Rank: %d wrote scatt_frame\n", angle_rank); fflush(stdout); fwrite(&time_now, sizeof(double), 1, fPtr); //printf("Rank: %d wrote time_now\n", angle_rank); fflush(stdout); fwrite(&ph_num, sizeof(int), 1, fPtr); //printf("Rank: %d wrote ph_num\n", angle_rank); fflush(stdout); for(i=0;i<ph_num;i++) { #if SYNCHROTRON_SWITCH == ON if (((ph+i)->type == COMPTONIZED_PHOTON) && ((ph+i)->weight != 0)) { (ph+i)->type = OLD_COMPTONIZED_PHOTON; //set this to be an old synchrotron scattered photon } #endif fwrite((ph+i), sizeof(struct photon ), 1, fPtr); //fwrite((ph), sizeof(struct photon )*ph_num, ph_num, fPtr); } success=0; } //printf("Rank: %d wrote photons\n", angle_rank); fflush(stdout); } else if (scatt_frame == frame) { snprintf(command, sizeof(command), "%s%s","exec rm ",checkptfile); system(command); fPtr=fopen(checkptfile, "wb"); //printf("%s\n", checkptfile); fflush(stdout); if (fPtr==NULL) { printf("Cannot open %s to save checkpoint\n", checkptfile); success=1; } else { fwrite(&angle_size, sizeof(int), 1, fPtr); restart='c'; fwrite(&restart, sizeof(char), 1, fPtr); //printf("Rank: %d wrote restart %c\n", angle_rank, restart); fflush(stdout); fwrite(&frame, sizeof(int), 1, fPtr); //printf("Rank: %d wrote frame\n", angle_rank); fflush(stdout); fwrite(&frame2, sizeof(int), 1, fPtr); //printf("Rank: %d wrote frame2\n", angle_rank); fflush(stdout); fwrite(&scatt_frame, sizeof(int), 1, fPtr); //printf("Rank: %d wrote scatt_frame\n", angle_rank); fflush(stdout); fwrite(&time_now, sizeof(double), 1, fPtr); //printf("Rank: %d wrote time_now\n", angle_rank); fflush(stdout); fwrite(&ph_num, sizeof(int), 1, fPtr); //printf("Rank: %d wrote ph_num\n", angle_rank); fflush(stdout); for(i=0;i<ph_num;i++) { #if SYNCHROTRON_SWITCH == ON if (((ph+i)->type == COMPTONIZED_PHOTON) && ((ph+i)->weight != 0)) { (ph+i)->type = OLD_COMPTONIZED_PHOTON; //set this to be an old synchrotron scattered photon } #endif //fwrite((ph), sizeof(struct photon )*ph_num, ph_num, fPtr); fwrite((ph+i), sizeof(struct photon ), 1, fPtr); } //printf("Rank: %d wrote photons\n", angle_rank); success=0; } fflush(stdout); } else { //quick way to preserve old chkpt file if the new one overwrites the old one and corrupts it for some reason snprintf(command, sizeof(command), "%s%s %s_old","exec cp ",checkptfile, checkptfile); system(command); fPtr=fopen(checkptfile, "wb"); //printf("%s\n", checkptfile); if (fPtr==NULL) { printf("Cannot open %s to save checkpoint\n", checkptfile); success=1; } else { //just finished last iteration of scatt_frame fwrite(&angle_size, sizeof(int), 1, fPtr); restart='r'; 
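/* restart='r' here marks a fully completed scatt_frame loop: on a restart,
   readCheckpoint() advances framestart to the next frame instead of resuming
   mid-frame, which is what the 'c' flag written in the branches above denotes */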
fwrite(&restart, sizeof(char), 1, fPtr);
fwrite(&frame, sizeof(int), 1, fPtr);
fwrite(&frame2, sizeof(int), 1, fPtr);
for(i=0;i<ph_num;i++)
{
    #if SYNCHROTRON_SWITCH == ON
    if (((ph+i)->type == COMPTONIZED_PHOTON) && ((ph+i)->weight != 0))
    {
        (ph+i)->type = OLD_COMPTONIZED_PHOTON; //set this to be an old synchrotron scattered photon
    }
    #endif
    fwrite((ph+i), sizeof(struct photon), 1, fPtr);
}
success=0;
}
}

if (success==0)
{
    fclose(fPtr);
}

return success;
}

int readCheckpoint(char dir[200], struct photon **ph, int *frame2, int *framestart, int *scatt_framestart, int *ph_num, char *restart, double *time, int angle_rank, int *angle_size)
{
    //function to read in data from the checkpoint file
    FILE *fPtr=NULL;
    char checkptfile[2000]=""; //sized to match saveCheckpoint() so a long path cannot be truncated into a different file name
    int i=0;
    int scatt_synch_num_ph=0; //count the number of scattered synchrotron photons from the previous frame that were saved
    //int frame, scatt_frame, ph_num, i=0;
    struct photon *phHolder=NULL; //pointer to a struct to hold data read in from the checkpoint file

    snprintf(checkptfile, sizeof(checkptfile), "%s%s%d%s", dir, "mc_chkpt_", angle_rank, ".dat");

    printf("Checkpoint file: %s\n", checkptfile);

    if (access( checkptfile, F_OK ) != -1) //if you can access the file, open and read it
    {
        fPtr=fopen(checkptfile, "rb");
        //if ((angle_rank==2) || (angle_rank==3) || (angle_rank==4) || (angle_rank==5))
        {
            fread(angle_size, sizeof(int), 1, fPtr); //uncomment the if above once MCRaT is run for the sims that didn't save this originally
        }
        fread(restart, sizeof(char), 1, fPtr);
        //printf("%c\n", *restart);
        fread(framestart, sizeof(int), 1, fPtr);
        //printf("%d\n", *framestart);
        fread(frame2, sizeof(int), 1, fPtr);

        if((*restart)=='c')
        {
            fread(scatt_framestart, sizeof(int), 1, fPtr);

            //if ((riken_switch==1) && (strcmp(DIM_SWITCH, dim_3d_str)==0) && ((*scatt_framestart)>=3000))
            #if SIM_SWITCH == RIKEN && DIMENSIONS == 3
            if ((*scatt_framestart)>=3000)
            {
                *scatt_framestart+=10; //when the frame is >=3000 for RIKEN 3D hydro files, increment file numbers by 10 instead of by 1
            }
            #else
            {
                *scatt_framestart+=1; //add one to start at the next frame after the simulation was interrupted
            }
            #endif

            //printf("%d\n", *scatt_framestart);
            fread(time, sizeof(double), 1, fPtr);
            //printf("%e\n", *time);
            fread(ph_num, sizeof(int), 1, fPtr);
            //printf("%d\n", *ph_num);

            phHolder=malloc(sizeof(struct photon));
            (*ph)=malloc(sizeof(struct photon)*(*ph_num)); //allocate memory to hold the photon data

            for (i=0;i<(*ph_num);i++)
            {
                fread(phHolder, sizeof(struct photon), 1, fPtr);
                //printf("%e,%e,%e, %e,%e,%e, %e, %e\n",(ph)->p0, (ph)->p1, (ph)->p2, ph->p3, (ph)->r0, (ph)->r1, (ph)->r2, ph->num_scatt );

                (*ph)[i].p0=phHolder->p0; (*ph)[i].p1=phHolder->p1; (*ph)[i].p2=phHolder->p2; (*ph)[i].p3=phHolder->p3;
                (*ph)[i].comv_p0=phHolder->comv_p0; (*ph)[i].comv_p1=phHolder->comv_p1; (*ph)[i].comv_p2=phHolder->comv_p2; (*ph)[i].comv_p3=phHolder->comv_p3;
                (*ph)[i].r0=phHolder->r0; (*ph)[i].r1=phHolder->r1; (*ph)[i].r2=phHolder->r2;
                (*ph)[i].s0=phHolder->s0; (*ph)[i].s1=phHolder->s1; (*ph)[i].s2=phHolder->s2; (*ph)[i].s3=phHolder->s3;
                (*ph)[i].num_scatt=phHolder->num_scatt;
                (*ph)[i].weight=phHolder->weight;
                (*ph)[i].nearest_block_index=phHolder->nearest_block_index;
                (*ph)[i].type=phHolder->type;

                #if SYNCHROTRON_SWITCH == ON
                if (((*ph)[i].weight != 0) && (((*ph)[i].type == COMPTONIZED_PHOTON) || ((*ph)[i].type == OLD_COMPTONIZED_PHOTON)) && ((*ph)[i].p0 > 0))
                {
                    scatt_synch_num_ph++;
                }
                //printf("%d %c %e %e %e %e %e %e %e\n", i, (*ph)[i].type, (*ph)[i].r0, (*ph)[i].r1, (*ph)[i].r2, (*ph)[i].num_scatt, (*ph)[i].weight, (*ph)[i].p0*C_LIGHT/1.6e-9, (*ph)[i].comv_p0);
                #endif
            }
            free(phHolder);
            //printf("In readcheckpoint count=%d\n", count);
        }
        else
        {
            //if ((riken_switch==1) && (strcmp(DIM_SWITCH, dim_3d_str)==0) && ((*framestart)>=3000))
            #if SIM_SWITCH == RIKEN && DIMENSIONS == 3
            if ((*framestart)>=3000)
            {
                *framestart+=10; //when the frame is >=3000 for RIKEN 3D hydro files, increment file numbers by 10 instead of by 1
            }
            #else
            {
                *framestart+=1; //if the checkpoint file saved and the program was interrupted before the frame variable had increased and before the scatt_frame iteration was saved, add one to the frame start
            }
            #endif

            *scatt_framestart=(*framestart);
        }

        fclose(fPtr);
    }
    else //if not, use the defaults
    {
        //*framestart=(*framestart);
        *scatt_framestart=(*framestart);
        *restart='r';
    }

    return scatt_synch_num_ph;
}

void readMcPar(char file[200], double *fluid_domain_x, double *fluid_domain_y, double *fps, double *theta_jmin, double *theta_j, double *d_theta_j, double *inj_radius_small, double *inj_radius_large, int *frm0_small, int *frm0_large, int *last_frm, int *frm2_small, int *frm2_large, double *ph_weight_small, double *ph_weight_large, int *min_photons, int *max_photons, char *spect, char *restart)
{
    //function to read the mc.par file
    FILE *fptr=NULL;
    char buf[100]="";
    double theta_deg;

    //open the file
    fptr=fopen(file,"r");

    //read in frames per sec and the other variables outlined in main()
    fscanf(fptr, "%lf",fluid_domain_x);
    //printf("%lf\n", *fluid_domain_x );
    fgets(buf, 100,fptr);

    fscanf(fptr, "%lf",fluid_domain_y);
    //printf("%lf\n", *fluid_domain_y );
    fgets(buf, 100,fptr);

    fscanf(fptr, "%lf",fps);
    //printf("%f\n", *fps );
    fgets(buf, 100,fptr);

    fscanf(fptr, "%d",frm0_small);
    //printf("%d\n", *frm0_small );
    fgets(buf, 100,fptr);

    fscanf(fptr, "%d",frm0_large);
    //printf("%d\n", *frm0_large );
    fgets(buf, 100,fptr);

    fscanf(fptr, "%d",last_frm);
    //printf("%d\n", *last_frm );
    fgets(buf, 100,fptr);

    fscanf(fptr, "%d",frm2_small);
    *frm2_small+=*frm0_small; //the frame to go to is what is given in the file plus the starting frame
    //printf("%d\n", *frm2_small );
    fgets(buf, 100,fptr);

    //fscanf(fptr, "%d",photon_num); removed photon_num because we don't need it
    //printf("%d\n", *photon_num );

    fscanf(fptr, "%d",frm2_large);
    *frm2_large+=*frm0_large; //the frame to go to is what is given in the file plus the starting frame
    //printf("%d\n", *frm2_large );
    fgets(buf, 100,fptr);
    //fgets(buf, 100,fptr);

    fscanf(fptr, "%lf",inj_radius_small);
    //printf("%lf\n", *inj_radius_small );
    fgets(buf, 100,fptr);

    fscanf(fptr, "%lf",inj_radius_large);
    //printf("%lf\n", *inj_radius_large );
    fgets(buf, 100,fptr);

    //theta jmin
    fscanf(fptr, "%lf",&theta_deg);
    *theta_jmin=theta_deg; //*M_PI/180; left as degrees to divide the angle ranges among processes
    //printf("%f\n", *theta_jmin );
    fgets(buf, 100,fptr);

    fscanf(fptr, "%lf",&theta_deg);
    *theta_j=theta_deg; //*M_PI/180;
    //printf("%f\n", *theta_j );
    fgets(buf, 100,fptr);

    fscanf(fptr, "%lf",d_theta_j);
    //*theta_j=theta_deg;//*M_PI/180;
    //printf("%f\n", *theta_j );
    fgets(buf, 100,fptr);

    fscanf(fptr, "%lf",ph_weight_small);
    //printf("%f\n", *ph_weight_small );
    fgets(buf, 100,fptr);

    fscanf(fptr, "%lf",ph_weight_large);
    fgets(buf, 100,fptr);

    fscanf(fptr, "%d",min_photons);
    fgets(buf, 100,fptr);

    fscanf(fptr, "%d",max_photons);
    fgets(buf, 100,fptr);

    *spect=getc(fptr);
    fgets(buf, 100,fptr);
    //printf("%c\n",*spect);

    *restart=getc(fptr);
    fgets(buf, 100,fptr);

    //don't need this line of code for MPI
    //fscanf(fptr, "%d",num_threads);
    //printf("MAKE SURE THERE IS NO NUM_THREADS LINE IN THE MC.PAR FILE.\n");
    //fgets(buf, 100,fptr);

    //fscanf(fptr, "%d",dim_switch);
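/*
 * For reference, readMcPar() above consumes mc.par strictly in this order,
 * one value at the start of each line (the rest of each line is discarded by
 * the fgets() calls): fluid_domain_x, fluid_domain_y, fps, frm0_small,
 * frm0_large, last_frm, frm2_small (stored as an offset from frm0_small),
 * frm2_large (offset from frm0_large), inj_radius_small, inj_radius_large,
 * theta_jmin [deg], theta_j [deg], d_theta_j, ph_weight_small,
 * ph_weight_large, min_photons, max_photons, spect (one char),
 * restart (one char).
 */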
//printf("MAKE SURE THERE IS NO DIM_SWITCH LINE IN THE MC.PAR FILE.\n"); //printf("%d\n",*dim_switch); //close file fclose(fptr); } void readAndDecimate(char flash_file[200], double r_inj, double fps, double **x, double **y, double **szx, double **szy, double **r,\ double **theta, double **velx, double **vely, double **dens, double **pres, double **gamma, double **dens_lab, double **temp, int *number, int ph_inj_switch, double min_r, double max_r, double min_theta, double max_theta, FILE *fPtr) { //function to read in data from FLASH file hid_t file,dset, space; herr_t status; hsize_t dims[2]={0,0}; //hold dimension size for coordinate data set (mostly interested in dims[0]) double **vel_x_buffer=NULL, **vel_y_buffer=NULL, **dens_buffer=NULL, **pres_buffer=NULL, **coord_buffer=NULL, **block_sz_buffer=NULL; double *velx_unprc=NULL, *vely_unprc=NULL, *dens_unprc=NULL, *pres_unprc=NULL, *x_unprc=NULL, *y_unprc=NULL, *r_unprc=NULL, *szx_unprc=NULL, *szy_unprc=NULL; int i,j,count,x1_count, y1_count, r_count, **node_buffer=NULL, num_nodes=0, elem_factor=0; double x1[8]={-7.0/16,-5.0/16,-3.0/16,-1.0/16,1.0/16,3.0/16,5.0/16,7.0/16}; double ph_rmin=0, ph_rmax=0, ph_thetamin=0, ph_thetamax=0, r_grid_innercorner=0, r_grid_outercorner=0, theta_grid_innercorner=0, theta_grid_outercorner=0, track_min_r=DBL_MAX, track_max_r=0; #if defined(_OPENMP) int num_thread=omp_get_num_threads(); #endif if (ph_inj_switch==0) { ph_rmin=min_r; ph_rmax=max_r; ph_thetamin=min_theta-2*0.017453292519943295; //min_theta - 2*Pi/180 (2 degrees) ph_thetamax=max_theta+2*0.017453292519943295; //max_theta + 2*Pi/180 (2 degrees) } file = H5Fopen (flash_file, H5F_ACC_RDONLY, H5P_DEFAULT); //ret=H5Pclose(acc_tpl1); fprintf(fPtr, ">> MCRaT: Reading positional, density, pressure, and velocity information...\n"); fflush(fPtr); //printf("Reading coord\n"); dset = H5Dopen (file, "coordinates", H5P_DEFAULT); //get dimensions of array and save it space = H5Dget_space (dset); H5Sget_simple_extent_dims(space, dims, NULL); //save dimesnions in dims //status = H5Sclose (space); //status = H5Dclose (dset); //status = H5Fclose (file); /* * Allocate array of pointers to rows. */ coord_buffer = (double **) malloc (dims[0] * sizeof (double *)); coord_buffer[0] = (double *) malloc (dims[0] * dims[1] * sizeof (double)); block_sz_buffer= (double **) malloc (dims[0] * sizeof (double *)); block_sz_buffer[0] = (double *) malloc (dims[0] * COORD_DIM1 * sizeof (double)); node_buffer= (int **) malloc (dims[0] * sizeof (int *)); node_buffer[0] = (int *) malloc (dims[0] * sizeof (int)); vel_x_buffer= (double **) malloc (dims[0] * sizeof (double *)); vel_x_buffer[0]= (double *) malloc (dims[0] * PROP_DIM1 *PROP_DIM2*PROP_DIM3* sizeof (double)); vel_y_buffer= (double **) malloc (dims[0] * sizeof (double *)); vel_y_buffer[0]= (double *) malloc (dims[0] * PROP_DIM1 *PROP_DIM2*PROP_DIM3* sizeof (double)); dens_buffer= (double **) malloc (dims[0] * sizeof (double *)); dens_buffer[0]= (double *) malloc (dims[0] * PROP_DIM1 *PROP_DIM2*PROP_DIM3* sizeof (double)); pres_buffer= (double **) malloc (dims[0] * sizeof (double *)); pres_buffer[0]= (double *) malloc (dims[0] * PROP_DIM1 *PROP_DIM2*PROP_DIM3* sizeof (double)); /* * Set the rest of the pointers to rows to the correct addresses. 
*/ for (i=1; i<dims[0]; i++) { coord_buffer[i] = coord_buffer[0] + i * dims[1]; block_sz_buffer[i] = block_sz_buffer[0] + i * COORD_DIM1; node_buffer[i] = node_buffer[0] + i ; vel_x_buffer[i] = vel_x_buffer[0] + i * PROP_DIM1*PROP_DIM2*PROP_DIM3; vel_y_buffer[i] = vel_y_buffer[0] + i * PROP_DIM1*PROP_DIM2*PROP_DIM3; dens_buffer[i] = dens_buffer[0] + i * PROP_DIM1*PROP_DIM2*PROP_DIM3; pres_buffer[i] = pres_buffer[0] + i * PROP_DIM1*PROP_DIM2*PROP_DIM3; } //read data such that first column is x and second column is y //fprintf(fPtr, "Reading Dataset\n"); //fflush(fPtr); //dset = H5Dopen (file, "coordinates", H5P_DEFAULT); status = H5Dread (dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,coord_buffer[0]); //close dataset status = H5Sclose (space); status = H5Dclose (dset); //printf("Reading block size\n"); dset = H5Dopen (file, "block size", H5P_DEFAULT); //printf("Reading Dataset\n"); status = H5Dread (dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,block_sz_buffer[0]); // first column of buffer is x and second column is y status = H5Dclose (dset); //status = H5Fclose (file); dset = H5Dopen (file, "node type", H5P_DEFAULT); status = H5Dread (dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT,node_buffer[0]); status = H5Dclose (dset); dset = H5Dopen (file, "velx", H5P_DEFAULT); //printf("Reading Dataset\n"); status = H5Dread (dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,vel_x_buffer[0]); status = H5Dclose (dset); //status = H5Fclose (file); //printf("Reading vely\n"); dset = H5Dopen (file, "vely", H5P_DEFAULT); //printf("Reading Dataset\n"); status = H5Dread (dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,vel_y_buffer[0]); status = H5Dclose (dset); //status = H5Fclose (file); //printf("Reading dens\n"); dset = H5Dopen (file, "dens", H5P_DEFAULT); //printf("Reading Dataset\n"); status = H5Dread (dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,dens_buffer[0]); status = H5Dclose (dset); //printf("Reading pres\n"); dset = H5Dopen (file, "pres", H5P_DEFAULT); //printf("Reading Dataset\n"); status = H5Dread (dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,pres_buffer[0]); status = H5Dclose (dset); //H5Pclose(xfer_plist); status = H5Fclose (file); fprintf(fPtr,">> Selecting good node types (=1)\n"); //find out how many good nodes there are for (i=0;i<dims[0];i++) { if (node_buffer[i][0]==1 ){ num_nodes++; } } //allocate memory for arrays to hold unprocessed data pres_unprc=malloc (num_nodes* PROP_DIM1 *PROP_DIM2*PROP_DIM3 * sizeof (double )); dens_unprc=malloc (num_nodes* PROP_DIM1 *PROP_DIM2*PROP_DIM3 * sizeof (double )); velx_unprc=malloc (num_nodes* PROP_DIM1 *PROP_DIM2*PROP_DIM3 * sizeof (double )); vely_unprc=malloc (num_nodes* PROP_DIM1 *PROP_DIM2*PROP_DIM3 * sizeof (double )); x_unprc=malloc (num_nodes* PROP_DIM1 *PROP_DIM2*PROP_DIM3 * sizeof (double )); y_unprc=malloc (num_nodes* PROP_DIM1 *PROP_DIM2*PROP_DIM3 * sizeof (double )); r_unprc=malloc (num_nodes* PROP_DIM1 *PROP_DIM2*PROP_DIM3 * sizeof (double )); szx_unprc=malloc (num_nodes* PROP_DIM1 *PROP_DIM2*PROP_DIM3 * sizeof (double )); szy_unprc=malloc (num_nodes* PROP_DIM1 *PROP_DIM2*PROP_DIM3 * sizeof (double )); //find where the good values corresponding to the good gones (=1) and save them to the previously allocated pointers which are 1D arrays //also create proper x and y arrays and block size arrays //and then free up the buffer memory space fprintf(fPtr,">> Creating and reshaping arrays\n"); count=0; for (i=0;i<dims[0];i++) { if (node_buffer[i][0]==1 ) { x1_count=0; y1_count=0; for 
(j=0;j<(PROP_DIM1*PROP_DIM2*PROP_DIM3);j++) { *(pres_unprc+count)=pres_buffer[i][j]*HYDRO_P_SCALE; *(dens_unprc+count)=dens_buffer[i][j]*HYDRO_D_SCALE; *(velx_unprc+count)=vel_x_buffer[i][j]; *(vely_unprc+count)=vel_y_buffer[i][j]; *(szx_unprc+count)=((block_sz_buffer[i][0])/8)*HYDRO_L_SCALE; //divide by 8 for resolution, multiply by 1e9 to scale properly? *(szy_unprc+count)=((block_sz_buffer[i][1])/8)*HYDRO_L_SCALE; if (j%8==0) { x1_count=0; } if ((j%8==0) && (j!=0)) { y1_count++; } *(x_unprc+count)=(coord_buffer[i][0]+block_sz_buffer[i][0]*x1[x1_count])*HYDRO_L_SCALE; *(y_unprc+count)=(coord_buffer[i][1]+block_sz_buffer[i][1]*x1[y1_count])*HYDRO_L_SCALE; //printf("%d,%d,%d,%d\n",count,j,x1_count,y1_count); x1_count++; count++; } } } free (pres_buffer[0]); free (dens_buffer[0]);free (vel_x_buffer[0]);free (vel_y_buffer[0]); free(coord_buffer[0]);free(block_sz_buffer[0]);free(node_buffer[0]); free (pres_buffer);free(dens_buffer);free(vel_x_buffer);free(vel_y_buffer);free(coord_buffer);free(block_sz_buffer);free(node_buffer); //fill in radius array and find in how many places r > injection radius //have single thread execute this while loop and then have inner loop be parallel #if SYNCHROTRON_SWITCH == ON elem_factor=2; #else elem_factor=0; #endif r_count=0; while (r_count==0) { r_count=0; elem_factor++; for (i=0;i<count;i++) { *(r_unprc+i)=pow((*(x_unprc+i))*(*(x_unprc+i))+(*(y_unprc+i))*(*(y_unprc+i)),0.5); if (ph_inj_switch==0) { r_grid_innercorner = pow((*(x_unprc+i) - *(szx_unprc+i)/2.0) * ((*(x_unprc+i) - *(szx_unprc+i)/2.0))+(*(y_unprc+i) - *(szx_unprc+i)/2.0) * (*(y_unprc+i) - *(szx_unprc+i)/2.0),0.5); r_grid_outercorner = pow((*(x_unprc+i) + *(szx_unprc+i)/2.0) * ((*(x_unprc+i) + *(szx_unprc+i)/2.0))+(*(y_unprc+i) + *(szx_unprc+i)/2.0) * (*(y_unprc+i) + *(szx_unprc+i)/2.0),0.5); theta_grid_innercorner = acos( (*(y_unprc+i) - *(szx_unprc+i)/2.0) /r_grid_innercorner); //arccos of y/r for the bottom left corner theta_grid_outercorner = acos( (*(y_unprc+i) + *(szx_unprc+i)/2.0) /r_grid_outercorner); if (((ph_rmin - elem_factor*C_LIGHT/fps) <= r_grid_outercorner) && (r_grid_innercorner <= (ph_rmax + elem_factor*C_LIGHT/fps) ) && (theta_grid_outercorner >= ph_thetamin) && (theta_grid_innercorner <= ph_thetamax) ) { r_count++; } } else { if (*(r_unprc+i)> (0.95*r_inj) ) { r_count++; } } } //fprintf(fPtr, "r_count: %d count: %d\n", r_count, count); } fprintf(fPtr, "Elem factor: %d Ph_rmin: %e rmax: %e Chosen FLASH min_r: %e max_r: %e min_theta: %e degrees max_theta: %e degrees\n", elem_factor, ph_rmin, ph_rmax, ph_rmin - (elem_factor*C_LIGHT/fps), ph_rmax + (elem_factor*C_LIGHT/fps), ph_thetamin*180/M_PI, ph_thetamax*180/M_PI); fflush(fPtr); //allocate memory to hold processed data (*pres)=malloc (r_count * sizeof (double )); (*velx)=malloc (r_count * sizeof (double )); (*vely)=malloc (r_count * sizeof (double )); (*dens)=malloc (r_count * sizeof (double )); (*x)=malloc (r_count * sizeof (double )); (*y)=malloc (r_count * sizeof (double )); (*r)=malloc (r_count * sizeof (double )); (*theta)=malloc (r_count * sizeof (double )); (*gamma)=malloc (r_count * sizeof (double )); (*dens_lab)=malloc (r_count * sizeof (double )); (*szx)=malloc (r_count * sizeof (double )); (*szy)=malloc (r_count * sizeof (double )); (*temp)=malloc (r_count * sizeof (double )); //assign values based on r> 0.95*r_inj j=0; for (i=0;i<count;i++) { if (ph_inj_switch==0) { r_grid_innercorner = pow((*(x_unprc+i) - *(szx_unprc+i)/2.0) * ((*(x_unprc+i) - *(szx_unprc+i)/2.0))+(*(y_unprc+i) - *(szx_unprc+i)/2.0) * 
(*(y_unprc+i) - *(szx_unprc+i)/2.0),0.5); r_grid_outercorner = pow((*(x_unprc+i) + *(szx_unprc+i)/2.0) * ((*(x_unprc+i) + *(szx_unprc+i)/2.0))+(*(y_unprc+i) + *(szx_unprc+i)/2.0) * (*(y_unprc+i) + *(szx_unprc+i)/2.0),0.5); theta_grid_innercorner = acos( (*(y_unprc+i) - *(szx_unprc+i)/2.0) /r_grid_innercorner); //arccos of y/r for the bottom left corner theta_grid_outercorner = acos( (*(y_unprc+i) + *(szx_unprc+i)/2.0) /r_grid_outercorner); if (((ph_rmin - elem_factor*C_LIGHT/fps) <= r_grid_outercorner) && (r_grid_innercorner <= (ph_rmax + elem_factor*C_LIGHT/fps) ) && (theta_grid_outercorner >= ph_thetamin) && (theta_grid_innercorner <= ph_thetamax)) { (*pres)[j]=*(pres_unprc+i); (*velx)[j]=*(velx_unprc+i); (*vely)[j]=*(vely_unprc+i); (*dens)[j]=*(dens_unprc+i); (*x)[j]=*(x_unprc+i); (*y)[j]=*(y_unprc+i); (*r)[j]=*(r_unprc+i); (*szx)[j]=*(szx_unprc+i); (*szy)[j]=*(szy_unprc+i); (*theta)[j]=atan2( *(x_unprc+i) , *(y_unprc+i) );//theta in radians in relation to jet axis (*gamma)[j]=pow(pow(1.0-(pow(*(velx_unprc+i),2)+pow(*(vely_unprc+i),2)),0.5),-1); //v is in units of c (*dens_lab)[j]= (*(dens_unprc+i)) * (pow(pow(1.0-(pow(*(velx_unprc+i),2)+pow(*(vely_unprc+i),2)),0.5),-1)); (*temp)[j]=pow(3*(*(pres_unprc+i))/(A_RAD) ,1.0/4.0); j++; /* if (*(r_unprc+i)<track_min_r) { track_min_r=*(r_unprc+i); } if (*(r_unprc+i)>track_max_r) { track_max_r=*(r_unprc+i); } */ } } else { if (*(r_unprc+i)> (0.95*r_inj) ) { (*pres)[j]=*(pres_unprc+i); (*velx)[j]=*(velx_unprc+i); (*vely)[j]=*(vely_unprc+i); (*dens)[j]=*(dens_unprc+i); (*x)[j]=*(x_unprc+i); (*y)[j]=*(y_unprc+i); (*r)[j]=*(r_unprc+i); (*szx)[j]=*(szx_unprc+i); (*szy)[j]=*(szy_unprc+i); (*theta)[j]=atan2( *(x_unprc+i) , *(y_unprc+i) );//theta in radians in relation to jet axis (*gamma)[j]=pow(pow(1.0-(pow(*(velx_unprc+i),2)+pow(*(vely_unprc+i),2)),0.5),-1); //v is in units of c (*dens_lab)[j]= (*(dens_unprc+i)) * (pow(pow(1.0-(pow(*(velx_unprc+i),2)+pow(*(vely_unprc+i),2)),0.5),-1)); (*temp)[j]=pow(3*(*(pres_unprc+i))/(A_RAD) ,1.0/4.0); j++; } } } //fprintf(fPtr, "Actual Min and Max Flash grid radii are: %e %e\n", track_min_r, track_max_r); //fflush(fPtr); *number=r_count; free(pres_unprc); free(velx_unprc);free(vely_unprc);free(dens_unprc);free(x_unprc); free(y_unprc);free(r_unprc);free(szx_unprc);free(szy_unprc); //exit(0); } void photonInjection( struct photon **ph, int *ph_num, double r_inj, double ph_weight, int min_photons, int max_photons, char spect, int array_length, double fps, double theta_min, double theta_max,\ double *x, double *y, double *szx, double *szy, double *r, double *theta, double *temps, double *vx, double *vy, gsl_rng * rand, FILE *fPtr) { int i=0, block_cnt=0, *ph_dens=NULL, ph_tot=0, j=0,k=0; double ph_dens_calc=0.0, fr_dum=0.0, y_dum=0.0, yfr_dum=0.0, fr_max=0, bb_norm=0, position_phi, ph_weight_adjusted, rmin, rmax; double com_v_phi, com_v_theta, *p_comv=NULL, *boost=NULL; //comoving phi, theta, comoving 4 momentum for a photon, and boost for photon(to go to lab frame) double *l_boost=NULL; //pointer to hold array of lorentz boost, to lab frame, values float num_dens_coeff; double r_grid_innercorner=0, r_grid_outercorner=0, theta_grid_innercorner=0, theta_grid_outercorner=0; double position_rand=0, position2_rand=0; if (spect=='w') //from MCRAT paper, w for wien spectrum { num_dens_coeff=8.44; //printf("in wien spectrum\n"); } else { num_dens_coeff=20.29; //this is for black body spectrum //printf("in BB spectrum"); } //find how many blocks are near the injection radius within the angles defined in mc.par, get 
temperatures and calculate number of photons to allocate memory for //and then rcord which blocks have to have "x" amount of photons injected there rmin=r_inj - 0.5*C_LIGHT/fps; rmax=r_inj + 0.5*C_LIGHT/fps; for(i=0;i<array_length;i++) { #if GEOMETRY == CARTESIAN r_grid_innercorner = pow((*(x+i) - *(szx+i)/2.0) * ((*(x+i) - *(szx+i)/2.0))+(*(y+i) - *(szy+i)/2.0) * (*(y+i) - *(szy+i)/2.0),0.5); r_grid_outercorner = pow((*(x+i) + *(szx+i)/2.0) * ((*(x+i) + *(szx+i)/2.0))+(*(y+i) + *(szy+i)/2.0) * (*(y+i) + *(szy+i)/2.0),0.5); theta_grid_innercorner = acos( (*(y+i) - *(szx+i)/2.0) /r_grid_innercorner); //arccos of y/r for the bottom left corner theta_grid_outercorner = acos( (*(y+i) + *(szx+i)/2.0) /r_grid_outercorner); #elif GEOMETRY == SPHERICAL r_grid_innercorner = (*(r+i)) - 0.5 * (*(szx+i)); r_grid_outercorner = (*(r+i)) + 0.5 * (*(szx+i)); theta_grid_innercorner = (*(theta+i)) - 0.5 * (*(szy+i)); theta_grid_outercorner = (*(theta+i)) + 0.5 * (*(szy+i)); #endif //look at all boxes in width delta r=c/fps and within angles we are interested in NEED TO IMPLEMENT //if ((*(r+i) >= rmin) && (*(r+i) < rmax ) && (*(theta+i)< theta_max) && (*(theta+i) >=theta_min) ) if ((rmin <= r_grid_outercorner) && (r_grid_innercorner <= rmax ) && (theta_grid_outercorner >= theta_min) && (theta_grid_innercorner <= theta_max)) { block_cnt++; } } //printf("Blocks: %d\n", block_cnt); //allocate memory to record density of photons for each block ph_dens=malloc(block_cnt * sizeof(int)); //calculate the photon density for each block and save it to the array j=0; ph_tot=0; ph_weight_adjusted=ph_weight; //printf("%d %d\n", max_photons, min_photons); while ((ph_tot>max_photons) || (ph_tot<min_photons) ) { j=0; ph_tot=0; for (i=0;i<array_length;i++) { //printf("%d\n",i); //printf("%e, %e, %e, %e, %e, %e\n", *(r+i),(r_inj - C_LIGHT/fps), (r_inj + C_LIGHT/fps), *(theta+i) , theta_max, theta_min); #if GEOMETRY == CARTESIAN r_grid_innercorner = pow((*(x+i) - *(szx+i)/2.0) * ((*(x+i) - *(szx+i)/2.0))+(*(y+i) - *(szy+i)/2.0) * (*(y+i) - *(szy+i)/2.0),0.5); r_grid_outercorner = pow((*(x+i) + *(szx+i)/2.0) * ((*(x+i) + *(szx+i)/2.0))+(*(y+i) + *(szy+i)/2.0) * (*(y+i) + *(szy+i)/2.0),0.5); theta_grid_innercorner = acos( (*(y+i) - *(szx+i)/2.0) /r_grid_innercorner); //arccos of y/r for the bottom left corner theta_grid_outercorner = acos( (*(y+i) + *(szx+i)/2.0) /r_grid_outercorner); #elif GEOMETRY == SPHERICAL r_grid_innercorner = (*(r+i)) - 0.5 * (*(szx+i)); r_grid_outercorner = (*(r+i)) + 0.5 * (*(szx+i)); theta_grid_innercorner = (*(theta+i)) - 0.5 * (*(szy+i)); theta_grid_outercorner = (*(theta+i)) + 0.5 * (*(szy+i)); #endif //if ((*(r+i) >= rmin) && (*(r+i) < rmax ) && (*(theta+i)< theta_max) && (*(theta+i) >=theta_min) ) if ((rmin <= r_grid_outercorner) && (r_grid_innercorner <= rmax ) && (theta_grid_outercorner >= theta_min) && (theta_grid_innercorner <= theta_max)) { #if GEOMETRY == SPHERICAL { ph_dens_calc=(num_dens_coeff*2.0*M_PI*pow(*(r+i),2)*sin(*(theta+i))*pow(*(temps+i),3.0)*(*(szx+i))*(*(szy+i)) /(ph_weight_adjusted))*pow(pow(1.0-(pow(*(vx+i),2)+pow(*(vy+i),2)),0.5),-1); //dV=2 *pi* r^2 Sin(theta) dr dtheta } #else { //using FLASH ph_dens_calc=(4.0/3.0)*(num_dens_coeff*2.0*M_PI*(*(x+i))*pow(*(temps+i),3.0)*(*(szx+i))*(*(szy+i)) /(ph_weight_adjusted))*pow(pow(1.0-(pow(*(vx+i),2)+pow(*(vy+i),2)),0.5),-1) ; //a*T^3/(weight) dV, dV=2*PI*x*dx^2, } #endif (*(ph_dens+j))=gsl_ran_poisson(rand,ph_dens_calc) ; //choose from poission distribution with mean of ph_dens_calc //printf("%d, %lf \n",*(ph_dens+j), ph_dens_calc); 
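/* note on ph_dens_calc above: the trailing pow(pow(1.0-(...),0.5),-1) factor
   is the bulk Lorentz factor of the fluid element, which converts the
   comoving photon number density (num_dens_coeff*T^3, with 20.29 for a
   blackbody and 8.44 for a Wien spectrum as set above) into the expected
   lab-frame photon count for the cell volume; that expectation is then
   Poisson-sampled so the injected number fluctuates realistically */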
//sum up all the densities to get total number of photons ph_tot+=(*(ph_dens+j)); j++; } } if (ph_tot>max_photons) { //if the number of photons is too big make ph_weight larger ph_weight_adjusted*=10; } else if (ph_tot<min_photons) { ph_weight_adjusted*=0.5; } //printf("dens: %d, photons: %d\n", *(ph_dens+(j-1)), ph_tot); } //printf("%d\n", ph_tot); //allocate memory for that many photons and also allocate memory to hold comoving 4 momentum of each photon and the velocity of the fluid (*ph)=malloc (ph_tot * sizeof (struct photon )); p_comv=malloc(4*sizeof(double)); boost=malloc(3*sizeof(double)); l_boost=malloc(4*sizeof(double)); //go through blocks and assign random energies/locations to proper number of photons ph_tot=0; k=0; for (i=0;i<array_length;i++) { #if GEOMETRY == CARTESIAN r_grid_innercorner = pow((*(x+i) - *(szx+i)/2.0) * ((*(x+i) - *(szx+i)/2.0))+(*(y+i) - *(szy+i)/2.0) * (*(y+i) - *(szy+i)/2.0),0.5); r_grid_outercorner = pow((*(x+i) + *(szx+i)/2.0) * ((*(x+i) + *(szx+i)/2.0))+(*(y+i) + *(szy+i)/2.0) * (*(y+i) + *(szy+i)/2.0),0.5); theta_grid_innercorner = acos( (*(y+i) - *(szx+i)/2.0) /r_grid_innercorner); //arccos of y/r for the bottom left corner theta_grid_outercorner = acos( (*(y+i) + *(szx+i)/2.0) /r_grid_outercorner); #elif GEOMETRY == SPHERICAL r_grid_innercorner = (*(r+i)) - 0.5 * (*(szx+i)); r_grid_outercorner = (*(r+i)) + 0.5 * (*(szx+i)); theta_grid_innercorner = (*(theta+i)) - 0.5 * (*(szy+i)); theta_grid_outercorner = (*(theta+i)) + 0.5 * (*(szy+i)); #endif //if ((*(r+i) >= rmin) && (*(r+i) < rmax ) && (*(theta+i)< theta_max) && (*(theta+i) >=theta_min) ) if ((rmin <= r_grid_outercorner) && (r_grid_innercorner <= rmax ) && (theta_grid_outercorner >= theta_min) && (theta_grid_innercorner <= theta_max)) { //*(temps+i)=0.76*(*(temps+i)); for(j=0;j<( *(ph_dens+k) ); j++ ) { //have to get random frequency for the photon comoving frequency y_dum=1; //initalize loop yfr_dum=0; while (y_dum>yfr_dum) { fr_dum=gsl_rng_uniform_pos(rand)*6.3e11*(*(temps+i)); //in Hz //printf("%lf, %lf ",gsl_rng_uniform_pos(rand), (*(temps+i))); y_dum=gsl_rng_uniform_pos(rand); //printf("%lf ",fr_dum); if (spect=='w') { yfr_dum=(1.0/(1.29e31))*pow((fr_dum/(*(temps+i))),3.0)/(exp((PL_CONST*fr_dum)/(K_B*(*(temps+i)) ))-1); //curve is normalized to maximum } else { fr_max=(5.88e10)*(*(temps+i));//(C_LIGHT*(*(temps+i)))/(0.29); //max frequency of bb bb_norm=(PL_CONST*fr_max * pow((fr_max/C_LIGHT),2.0))/(exp(PL_CONST*fr_max/(K_B*(*(temps+i))))-1); //find value of bb at fr_max yfr_dum=((1.0/bb_norm)*PL_CONST*fr_dum * pow((fr_dum/C_LIGHT),2.0))/(exp(PL_CONST*fr_dum/(K_B*(*(temps+i))))-1); //curve is normalized to vaue of bb @ max frequency } //printf("%lf, %lf,%lf,%e \n",(*(temps+i)),fr_dum, y_dum, yfr_dum); } //printf("i: %d freq:%lf\n ",ph_tot, fr_dum); position_phi=gsl_rng_uniform(rand)*2*M_PI; com_v_phi=gsl_rng_uniform(rand)*2*M_PI; com_v_theta=acos((gsl_rng_uniform(rand)*2)-1); //printf("%lf, %lf, %lf\n", position_phi, com_v_phi, com_v_theta); //populate 4 momentum comoving array *(p_comv+0)=PL_CONST*fr_dum/C_LIGHT; *(p_comv+1)=(PL_CONST*fr_dum/C_LIGHT)*sin(com_v_theta)*cos(com_v_phi); *(p_comv+2)=(PL_CONST*fr_dum/C_LIGHT)*sin(com_v_theta)*sin(com_v_phi); *(p_comv+3)=(PL_CONST*fr_dum/C_LIGHT)*cos(com_v_theta); //populate boost matrix, not sure why multiplying by -1, seems to give correct answer in old python code... 
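/* the factor of -1 noted above is expected: lorentzBoost() boosts by the
   velocity it is handed, and taking a comoving 4 momentum to the lab frame
   requires boosting by minus the fluid velocity (the lab frame moves at -v
   as seen from the fluid), hence the sign flip on the velocity components */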
*(boost+0)=-1*(*(vx+i))*cos(position_phi); *(boost+1)=-1*(*(vx+i))*sin(position_phi); *(boost+2)=-1*(*(vy+i)); //boost to lab frame lorentzBoost(boost, p_comv, l_boost, 'p', fPtr); //printf("Assignemnt: %e, %e, %e, %e\n", *(l_boost+0), *(l_boost+1), *(l_boost+2),*(l_boost+3)); (*ph)[ph_tot].p0=(*(l_boost+0)); (*ph)[ph_tot].p1=(*(l_boost+1)); (*ph)[ph_tot].p2=(*(l_boost+2)); (*ph)[ph_tot].p3=(*(l_boost+3)); (*ph)[ph_tot].comv_p0=(*(p_comv+0)); (*ph)[ph_tot].comv_p1=(*(p_comv+1)); (*ph)[ph_tot].comv_p2=(*(p_comv+2)); (*ph)[ph_tot].comv_p3=(*(p_comv+3)); //place photons in rand positions within fluid element #if GEOMETRY == CARTESIAN position_rand=gsl_rng_uniform_pos(rand)*(*(szx+i))-(*(szx+i))/2.0; //choose between -size/2 to size/2 (*ph)[ph_tot].r0= (*(x+i)+position_rand)*cos(position_phi); //put photons @ center of box that they are supposed to be in with random phi (*ph)[ph_tot].r1=(*(x+i)+position_rand)*sin(position_phi) ; position_rand=gsl_rng_uniform_pos(rand)*(*(szx+i))-(*(szx+i))/2.0; (*ph)[ph_tot].r2=(*(y+i)+position_rand); //y coordinate in flash becomes z coordinate in MCRaT #elif GEOMETRY == SPHERICAL position_rand=gsl_rng_uniform_pos(rand)*(*(szx+i))-(*(szx+i))/2.0; //choose between -size/2 to size/2 position2_rand=gsl_rng_uniform_pos(rand)*(*(szy+i))-(*(szy+i))/2.0; (*ph)[ph_tot].r0= (*(r+i)+position_rand)*sin(*(theta+i)+position2_rand)*cos(position_phi); //put photons @ center of box that they are supposed to be in with random phi (*ph)[ph_tot].r1=(*(r+i)+position_rand)*sin(*(theta+i)+position2_rand)*sin(position_phi) ; (*ph)[ph_tot].r2=(*(r+i)+position_rand)*cos(*(theta+i)+position2_rand); //y coordinate in flash becomes z coordinate in MCRaT #endif (*ph)[ph_tot].s0=1; //initalize stokes parameters as non polarized photon, stokes parameterized are normalized such that I always =1 (*ph)[ph_tot].s1=0; (*ph)[ph_tot].s2=0; (*ph)[ph_tot].s3=0; (*ph)[ph_tot].num_scatt=0; (*ph)[ph_tot].weight=ph_weight_adjusted; (*ph)[ph_tot].nearest_block_index=0; (*ph)[ph_tot].type=INJECTED_PHOTON; //i for injected //printf("%d\n",ph_tot); ph_tot++; } k++; } } *ph_num=ph_tot; //save number of photons //printf(" %d: %d\n", *(ph_dens+(k-1)), *ph_num); free(ph_dens); free(p_comv);free(boost); free(l_boost); } void lorentzBoost(double *boost, double *p_ph, double *result, char object, FILE *fPtr) { //function to perform lorentz boost //if doing boost for an electron last argument is 'e' and there wont be a check for zero norm //if doing boost for a photon last argument is 'p' and there will be a check for zero norm double beta=0, gamma=0, *boosted_p=NULL; gsl_vector_view b=gsl_vector_view_array(boost, 3); //make boost pointer into vector gsl_vector_view p=gsl_vector_view_array(p_ph, 4); //make boost pointer into vector gsl_matrix *lambda1= gsl_matrix_calloc (4, 4); //create matrix thats 4x4 to do lorentz boost gsl_vector *p_ph_prime =gsl_vector_calloc(4); //create vestor to hold lorentz boosted vector /* fprintf(fPtr,"Boost: %e, %e, %e, %e\n",gsl_blas_dnrm2(&b.vector), *(boost+0), *(boost+1), *(boost+2)); fflush(fPtr); fprintf(fPtr,"4 Momentum to Boost: %e, %e, %e, %e\n",*(p_ph+0), *(p_ph+1), *(p_ph+2), *(p_ph+3)); fflush(fPtr); */ //if magnitude of fluid velocity is != 0 do lorentz boost otherwise dont need to do a boost if (gsl_blas_dnrm2(&b.vector) > 0) { //fprintf(fPtr,"in If\n"); //fflush(fPtr); beta=gsl_blas_dnrm2(&b.vector); gamma=1.0/sqrt(1-pow(beta, 2.0)); //fprintf(fPtr,"Beta: %e\tGamma: %e\n",beta,gamma ); //fflush(fPtr); //initalize matrix values gsl_matrix_set(lambda1, 0,0, gamma); 
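/* for reference, the entries being filled in here form the standard Lorentz
   boost matrix for a velocity beta (in units of c):
       Lambda[0][0] = gamma
       Lambda[0][i] = Lambda[i][0] = -gamma*beta_i
       Lambda[i][j] = delta_ij + (gamma-1)*beta_i*beta_j/beta^2
   only the diagonal and upper triangle are set explicitly; the symmetric
   entries are mirrored afterwards */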
gsl_matrix_set(lambda1, 0,1, -1*gsl_vector_get(&b.vector,0)*gamma);
gsl_matrix_set(lambda1, 0,2, -1*gsl_vector_get(&b.vector,1)*gamma);
gsl_matrix_set(lambda1, 0,3, -1*gsl_vector_get(&b.vector,2)*gamma);
gsl_matrix_set(lambda1, 1,1, 1+((gamma-1)*(gsl_vector_get(&b.vector,0)*gsl_vector_get(&b.vector,0))/(beta*beta) ) );
gsl_matrix_set(lambda1, 1,2, ((gamma-1)*(gsl_vector_get(&b.vector,0)*gsl_vector_get(&b.vector,1))/(beta*beta) ) );
gsl_matrix_set(lambda1, 1,3, ((gamma-1)*(gsl_vector_get(&b.vector,0)*gsl_vector_get(&b.vector,2))/(beta*beta) ) );
gsl_matrix_set(lambda1, 2,2, 1+((gamma-1)*(gsl_vector_get(&b.vector,1)*gsl_vector_get(&b.vector,1))/(beta*beta) ) );
gsl_matrix_set(lambda1, 2,3, ((gamma-1)*(gsl_vector_get(&b.vector,1)*gsl_vector_get(&b.vector,2))/(beta*beta) ) );
gsl_matrix_set(lambda1, 3,3, 1+((gamma-1)*(gsl_vector_get(&b.vector,2)*gsl_vector_get(&b.vector,2))/(beta*beta) ) );
//fill in the symmetric lower-triangle entries
gsl_matrix_set(lambda1, 1,0, gsl_matrix_get(lambda1,0,1));
gsl_matrix_set(lambda1, 2,0, gsl_matrix_get(lambda1,0,2));
gsl_matrix_set(lambda1, 3,0, gsl_matrix_get(lambda1,0,3));
gsl_matrix_set(lambda1, 2,1, gsl_matrix_get(lambda1,1,2));
gsl_matrix_set(lambda1, 3,1, gsl_matrix_get(lambda1,1,3));
gsl_matrix_set(lambda1, 3,2, gsl_matrix_get(lambda1,2,3));

gsl_blas_dgemv(CblasNoTrans, 1, lambda1, &p.vector, 0, p_ph_prime);

/*
fprintf(fPtr,"Lorentz Boost Matrix 0: %e,%e, %e, %e\n", gsl_matrix_get(lambda1, 0,0), gsl_matrix_get(lambda1, 0,1), gsl_matrix_get(lambda1, 0,2), gsl_matrix_get(lambda1, 0,3));
fflush(fPtr);
fprintf(fPtr,"Lorentz Boost Matrix 1: %e,%e, %e, %e\n", gsl_matrix_get(lambda1, 1,0), gsl_matrix_get(lambda1, 1,1), gsl_matrix_get(lambda1, 1,2), gsl_matrix_get(lambda1, 1,3));
fflush(fPtr);
fprintf(fPtr,"Lorentz Boost Matrix 2: %e,%e, %e, %e\n", gsl_matrix_get(lambda1, 2,0), gsl_matrix_get(lambda1, 2,1), gsl_matrix_get(lambda1, 2,2), gsl_matrix_get(lambda1, 2,3));
fflush(fPtr);
fprintf(fPtr,"Lorentz Boost Matrix 3: %e,%e, %e, %e\n", gsl_matrix_get(lambda1, 3,0), gsl_matrix_get(lambda1, 3,1), gsl_matrix_get(lambda1, 3,2), gsl_matrix_get(lambda1, 3,3));
fflush(fPtr);
fprintf(fPtr,"Before Check: %e %e %e %e\n ",gsl_vector_get(p_ph_prime, 0), gsl_vector_get(p_ph_prime, 1), gsl_vector_get(p_ph_prime, 2), gsl_vector_get(p_ph_prime, 3));
fflush(fPtr);
*/

//double check the vector for the zero norm condition if this is a photon
if (object == 'p')
{
    //fprintf(fPtr,"In if\n");
    boosted_p=zeroNorm(gsl_vector_ptr(p_ph_prime, 0));
}
else
{
    boosted_p=gsl_vector_ptr(p_ph_prime, 0);
}
/*
fprintf(fPtr,"After Check: %e %e %e %e\n ", *(boosted_p+0),*(boosted_p+1),*(boosted_p+2),*(boosted_p+3) );
fflush(fPtr);
*/
}
else
{
    /*
    fprintf(fPtr,"in else");
    fflush(fPtr);
    */
    //double check the vector for the zero norm condition
    if (object=='p')
    {
        boosted_p=zeroNorm(p_ph);
    }
    else
    {
        //if the 4 momentum isn't for a photon and there is no boost to be done, we don't care about normality and just want back what was passed in
        boosted_p=gsl_vector_ptr(&p.vector, 0);
    }
}

//assign the values to result
*(result+0)=*(boosted_p+0);
*(result+1)=*(boosted_p+1);
*(result+2)=*(boosted_p+2);
*(result+3)=*(boosted_p+3);

//free up memory
//free(boosted_p);
gsl_matrix_free (lambda1);
gsl_vector_free(p_ph_prime);
}

double *zeroNorm(double *p_ph)
{
    //ensures that the zero norm condition of the photon 4 momentum holds
    int i=0;
    double normalizing_factor=0;
    gsl_vector_view p=gsl_vector_view_array((p_ph+1), 3); //make the last 3 elements of the p_ph pointer into a vector

    if (*(p_ph+0) != gsl_blas_dnrm2(&p.vector)) //exact floating point comparison: this nearly always triggers, and the rescaling below is harmless when the norm is already (almost) correct
    {
        normalizing_factor=(gsl_blas_dnrm2(&p.vector));
        //fprintf(fPtr,"in zero norm if\n");
        //fflush(fPtr);

        //go through and correct the 4 momentum assuming the energy is correct
        *(p_ph+1)= ((*(p_ph+1))/(normalizing_factor))*(*(p_ph+0));
        *(p_ph+2)= ((*(p_ph+2))/(normalizing_factor))*(*(p_ph+0));
        *(p_ph+3)= ((*(p_ph+3))/(normalizing_factor))*(*(p_ph+0));
    }
    /*
    if (pow((*(p_ph+0)),2) != ( pow((*(p_ph+1)),2)+pow((*(p_ph+2)),2)+pow((*(p_ph+3)),2) ) )
    {
        printf("This isnt normalized in the function\nThe difference is: %e\n", pow((*(p_ph+0)),2) - ( pow((*(p_ph+1)),2)+pow((*(p_ph+2)),2)+pow((*(p_ph+3)),2) ) );
    }
    */
    //normalized within a factor of 10^-53
    return p_ph;
}

int findNearestBlock(int array_num, double ph_x, double ph_y, double ph_z, double *x, double *y, double *z)
{
    double dist=0, dist_min=1e15, block_dist=0;
    int min_index=0, j=0;

    dist_min=1e15; //set dist_min to an impossibly large value to make sure at least the first distance calculated is saved
    block_dist=3e9;
    while (dist_min==1e15) //if this is still true, the algorithm hasn't found any blocks within the acceptable range given by block_dist
    {
        for(j=0;j<array_num;j++)
        {
            //if the componentwise separation is within block_dist, to restrict the number of possible calculations, calculate the total distance between the box and the photon
            #if DIMENSIONS == 2
            if ((fabs(ph_x- (*(x+j)))<block_dist) && (fabs(ph_y- (*(y+j)))<block_dist))
            {
                dist= pow(pow(ph_x- (*(x+j)), 2.0) + pow(ph_y- (*(y+j)) , 2.0),0.5);
                //fprintf(fPtr,"Dist calculated as: %e, index: %d\n", dist, j);
                //printf("In outer if statement, OLD: %e, %d\n", dist_min, min_index);
                if((dist<dist_min))
                {
                    //fprintf(fPtr,"In innermost if statement, OLD: %e, %d\n", dist_min, min_index);
                    dist_min=dist; //save the new minimum distance
                    min_index=j; //save the index
                    //printf("New Min dist: %e, New min Index: %d, Array_Num: %d\n", dist_min, min_index, array_num);
                }
            }
            #elif DIMENSIONS == 3
            if ((fabs(ph_x- (*(x+j)))<block_dist) && (fabs(ph_y- (*(y+j)))<block_dist) && (fabs(ph_z- (*(z+j)))<block_dist))
            {
                dist= pow(pow(ph_x- (*(x+j)), 2.0) + pow(ph_y- (*(y+j)),2.0 ) + pow(ph_z- (*(z+j)) , 2.0),0.5);
                if((dist<dist_min))
                {
                    //printf("In innermost if statement, OLD: %e, %d\n", dist_min, min_index);
                    dist_min=dist; //save the new minimum distance
                    min_index=j; //save the index
                    //fprintf(fPtr,"New Min dist: %e, New min Index: %d, Array_Num: %e\n", dist_min, min_index, array_num);
                }
            }
            #endif
        }
        block_dist*=10; //increase the size of the accepted distances to grid points; if dist_min is still 1e15, the acceptance range will be larger on the next pass
    }

    return min_index;
}

int findContainingBlock(int array_num, double ph_x, double ph_y, double ph_z, double *x, double *y, double *z, double *szx, double *szy, int old_block_index, int find_block_switch, FILE *fPtr)
{
    int i=0, within_block_index=0;
    bool is_in_block=0; //boolean to determine if the photon is within a given grid block
    //can parallelize here to save time?
for (i=0;i<array_num;i++) { is_in_block=checkInBlock(i, ph_x, ph_y, ph_z, x, y, z, szx, szy); if (is_in_block) { within_block_index=i; //change for loop index once the block is found so the code doesnt search the rest of the grids to see if the photon is within those grids i=array_num; } } //printf("Within Block Index: %d\n",within_block_index); //if ((strcmp(DIM_SWITCH, dim_3d_str)==0) || (riken_switch==1)) #if SIM_SWITCH == RIKEN || DIMENSIONS == 3 { fprintf(fPtr, "3D switch is: %d and SIM switch is: %d\n", DIMENSIONS, SIM_SWITCH); } #endif if (is_in_block==0) { fprintf(fPtr, "Couldn't find a block that the photon is in\nx: %e y:%e\n", ph_x, ph_y); fflush(fPtr); within_block_index=-1; } return within_block_index; } int checkInBlock(int block_index, double ph_x, double ph_y, double ph_z, double *x, double *y, double *z, double *szx, double *szy) { bool is_in_block=0; //boolean to determine if the photon is outside of its previously noted block double x0=0, x1=0, x2=0, sz_x0=0, sz_x1=0, sz_x2=0; //coordinate and sizes of grid block, in cartesian its x,y,z in spherical its r,theta,phi int return_val=0; //if (strcmp(DIM_SWITCH, dim_2d_str)==0) #if DIMENSIONS == 2 { #if GEOMETRY == SPHERICAL { x0=pow(pow((*(x+block_index)),2.0)+pow((*(y+block_index)),2.0), 0.5); //radius x1=atan2((*(x+block_index)), (*(y+block_index))); //theta sz_x0=(*(szx+block_index)); sz_x1=(*(szy+block_index)); //pow(pow( ph_x, 2.0) + pow(ph_y, 2.0),0.5) atan2(ph_x, ph_y) is_in_block= (2*fabs( ph_x - x0)- sz_x0 <= 0) && (2*fabs(ph_y - x1 ) - sz_x1 <= 0); //ph_x is ph_r for this geometry } #else { x0=(*(x+block_index)); x1=(*(y+block_index)); sz_x0=(*(szx+block_index)); sz_x1=(*(szy+block_index)); is_in_block= (2*fabs(ph_x-x0)-sz_x0 <= 0) && (2*fabs(ph_y-x1)-sz_x1 <= 0); } #endif } #endif /* else { if (riken_switch==1) { x0=pow(pow((*(x+block_index)), 2.0) + pow((*(y+block_index)),2.0 ) + pow((*(z+block_index)) , 2.0),0.5); x1=acos((*(z+block_index))/pow(pow((*(x+block_index)), 2.0) + pow((*(y+block_index)),2.0 ) + pow((*(z+block_index)) , 2.0),0.5)); x2=atan2((*(y+block_index)), (*(x+block_index))); sz_x0=(*(szy+block_index)); sz_x1=(*(szx+block_index)); sz_x2=(*(szx+block_index)); is_in_block= (fabs(pow(pow( ph_x, 2.0) + pow(ph_y, 2.0)+pow(ph_z, 2.0),0.5) - x0) <= sz_x0/2.0) && (fabs(acos(ph_z/pow(pow(ph_x, 2.0) + pow(ph_y,2.0 ) + pow(ph_z , 2.0),0.5)) - x1 ) <= sz_x1/2.0) && (fabs(atan2(ph_y, ph_x) - x2 ) <= sz_x2/2.0); //not sure why the code was going to this line above here for spherical test } } */ if (is_in_block) { return_val=1; } else { return_val=0; } return return_val; } int findNearestPropertiesAndMinMFP( struct photon *ph, int num_ph, int array_num, double hydro_domain_x, double hydro_domain_y, double epsilon_b, double *x, double *y, double *z, double *szx, double *szy, double *velx, double *vely, double *velz, double *dens_lab,\ double *temp, double *all_time_steps, int *sorted_indexes, gsl_rng * rand, int find_nearest_block_switch, FILE *fPtr) { int i=0, min_index=0, ph_block_index=0, num_thread=1, thread_id=0; double ph_x=0, ph_y=0, ph_phi=0, ph_z=0, ph_r=0, ph_theta=0; double fl_v_x=0, fl_v_y=0, fl_v_z=0; //to hold the fluid velocity in MCRaT coordinates double ph_v_norm=0, fl_v_norm=0, synch_x_sect=0; double n_cosangle=0, n_dens_lab_tmp=0,n_vx_tmp=0, n_vy_tmp=0, n_vz_tmp=0, n_temp_tmp=0 ; double rnd_tracker=0, n_dens_min=0, n_vx_min=0, n_vy_min=0, n_vz_min=0, n_temp_min=0; #if defined(_OPENMP) num_thread=omp_get_num_threads(); //default is one above if theres no openmp usage #endif bool is_in_block=0; 
//boolean to determine if the photon is outside of its previously noted block int index=0, num_photons_find_new_element=0; double mfp=0,min_mfp=0, beta=0; double el_p[4]; double ph_p_comv[4], ph_p[4], fluid_beta[3]; //initialize gsl random number generator fo each thread const gsl_rng_type *rng_t; gsl_rng **rng; gsl_rng_env_setup(); rng_t = gsl_rng_ranlxs0; rng = (gsl_rng **) malloc((num_thread ) * sizeof(gsl_rng *)); rng[0]=rand; //#pragma omp parallel for num_threads(nt) for(i=1;i<num_thread;i++) { rng[i] = gsl_rng_alloc (rng_t); gsl_rng_set(rng[i],gsl_rng_get(rand)); } //go through each photon and find the blocks around it and then get the distances to all of those blocks and choose the one thats the shortest distance away //can optimize here, exchange the for loops and change condition to compare to each of the photons is the radius of the block is .95 (or 1.05) times the min (max) photon radius //or just parallelize this part here min_mfp=1e12; #pragma omp parallel for num_threads(num_thread) firstprivate( is_in_block, ph_block_index, ph_x, ph_y, ph_z, ph_phi, ph_r, min_index, n_dens_lab_tmp,n_vx_tmp, n_vy_tmp, n_vz_tmp, n_temp_tmp, fl_v_x, fl_v_y, fl_v_z, fl_v_norm, ph_v_norm, n_cosangle, mfp, beta, rnd_tracker, ph_p_comv, el_p, ph_p, fluid_beta) private(i) shared(min_mfp ) reduction(+:num_photons_find_new_element) for (i=0;i<num_ph; i++) { //fprintf(fPtr, "%d, %d,%e\n", i, ((ph+i)->nearest_block_index), ((ph+i)->weight)); //fflush(fPtr); if (find_nearest_block_switch==0) { ph_block_index=(ph+i)->nearest_block_index; //if starting a new frame the number of indexes can change and cause a seg fault here } else { ph_block_index=0; // therefore if starting a new frame set index=0 to avoid this issue } //if (strcmp(DIM_SWITCH, dim_2d_str)==0) #if DIMENSIONS == 2 { #if GEOMETRY == SPHERICAL ph_x=pow(pow(((ph+i)->r0),2.0)+pow(((ph+i)->r1),2.0), 0.5); //convert back to 2d spherical coordinate ph_y=((ph+i)->r2); ph_r=pow(ph_x*ph_x + ph_y*ph_y, 0.5); ph_theta=acos(ph_y/ph_r); //this is actually theta in this context ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0)); #elif GEOMETRY == CARTESIAN ph_x=pow(pow(((ph+i)->r0),2.0)+pow(((ph+i)->r1),2.0), 0.5); //convert back to FLASH x coordinate (2d cartesian hydro coordinates) ph_y=((ph+i)->r2); ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0)); ph_r=pow(ph_x*ph_x + ph_y*ph_y, 0.5); #endif } #else { ph_x=((ph+i)->r0); ph_y=((ph+i)->r1); ph_z=((ph+i)->r2); ph_r=pow(ph_x*ph_x + ph_y*ph_y+ph_z*ph_z, 0.5); } #endif //printf("ph_x:%e, ph_y:%e\n", ph_x, ph_y); //if the location of the photon is less than the domain of the hydro simulation then do all of this, otherwise assing huge mfp value so no scattering occurs and the next frame is loaded // absorbed photons have ph_block_index=-1, therefore if this value is not less than 0, calulate the mfp properly but doesnt work when go to new frame and find new indexes (will change b/c will get rid of these photons when printing) //alternatively make decision based on 0 weight if (((ph_y<hydro_domain_y) && (ph_x<hydro_domain_x)) && ((ph+i)->nearest_block_index != -1) ) //can use sorted index to see which photons have been absorbed efficiently before printing and get the indexes { #if GEOMETRY == SPHERICAL is_in_block=checkInBlock(ph_block_index, ph_r, ph_theta, ph_z, x, y, z, szx, szy); #elif GEOMETRY == CARTESIAN is_in_block=checkInBlock(ph_block_index, ph_x, ph_y, ph_z, x, y, z, szx, szy); #endif //when rebinning photons can have comoving 4 momenta=0 and nearest_block_index=0 (and block 0 be the actual block the photon is in 
making it not refind the proper index and reclaulate the comoving 4 momenta) which can make counting synch scattered photons be thrown off, thus take care of this case by forcing the function to recalc things #if SYNCHROTRON_SWITCH == ON if ((ph_block_index==0) && ( ((ph+i)->comv_p0)+((ph+i)->comv_p1)+((ph+i)->comv_p2)+((ph+i)->comv_p3) == 0 ) ) { is_in_block=0; //say that photon is not in the block, force it to recompute things } #endif if (find_nearest_block_switch==0 && is_in_block) { //keep the saved grid index min_index=ph_block_index; } else { //find the new index of the block closest to the photon //min_index=findNearestBlock(array_num, ph_x, ph_y, ph_z, x, y, z); //stop doing this one b/c nearest grid could be one that the photon isnt actually in due to adaptive mesh //find the new index of the block that the photon is actually in #if DIMENSIONS == 2 { #if GEOMETRY == SPHERICAL min_index=findContainingBlock(array_num, ph_r, ph_theta, ph_z, x, y, z, szx, szy, ph_block_index, find_nearest_block_switch, fPtr); #elif GEOMETRY == CARTESIAN min_index=findContainingBlock(array_num, ph_x, ph_y, ph_z, x, y, z, szx, szy, ph_block_index, find_nearest_block_switch, fPtr); #endif } #endif if (min_index != -1) { (ph+i)->nearest_block_index=min_index; //save the index if min_index != -1 //also recalculate the photons' comoving frequency in this new fluid element ph_p[0]=((ph+i)->p0); ph_p[1]=((ph+i)->p1); ph_p[2]=((ph+i)->p2); ph_p[3]=((ph+i)->p3); //if (strcmp(DIM_SWITCH, dim_2d_str)==0) #if DIMENSIONS == 2 { fluid_beta[0]=(*(velx+min_index))*cos(ph_phi); fluid_beta[1]=(*(velx+min_index))*sin(ph_phi); fluid_beta[2]=(*(vely+min_index)); } #else { fluid_beta[0]=(*(velx+min_index)); fluid_beta[1]=(*(vely+min_index)); fluid_beta[2]=(*(velz+min_index)); } #endif lorentzBoost(&fluid_beta, &ph_p, &ph_p_comv, 'p', fPtr); ((ph+i)->comv_p0)=ph_p_comv[0]; ((ph+i)->comv_p1)=ph_p_comv[1]; ((ph+i)->comv_p2)=ph_p_comv[2]; ((ph+i)->comv_p3)=ph_p_comv[3]; num_photons_find_new_element+=1; } else { fprintf(fPtr, "Photon number %d FLASH index not found, making sure it doesnt scatter.\n", i); } } //if min_index!= -1 (know which fluid element photon is in) do all this stuff, otherwise make sure photon doesnt scatter if (min_index != -1) { //fprintf(fPtr,"Min Index: %d\n", min_index); //save values (n_dens_lab_tmp)= (*(dens_lab+min_index)); (n_vx_tmp)= (*(velx+min_index)); (n_vy_tmp)= (*(vely+min_index)); (n_temp_tmp)= (*(temp+min_index)); //if (strcmp(DIM_SWITCH, dim_3d_str)==0) #if DIMENSIONS == 3 { (n_vz_tmp)= (*(velz+min_index)); } #endif //if (strcmp(DIM_SWITCH, dim_2d_str)==0) #if DIMENSIONS == 2 { fl_v_x=(*(velx+min_index))*cos(ph_phi); fl_v_y=(*(velx+min_index))*sin(ph_phi); fl_v_z=(*(vely+min_index)); } #else { fl_v_x=(*(velx+min_index)); fl_v_y=(*(vely+min_index)); fl_v_z=(*(velz+min_index)); } #endif fl_v_norm=pow(pow(fl_v_x, 2.0)+pow(fl_v_y, 2.0)+pow(fl_v_z, 2.0), 0.5); ph_v_norm=pow(pow(((ph+i)->p1), 2.0)+pow(((ph+i)->p2), 2.0)+pow(((ph+i)->p3), 2.0), 0.5); //(*(n_cosangle+i))=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //find cosine of the angle between the photon and the fluid velocities via a dot product (n_cosangle)=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //make 1 for cylindrical otherwise its undefined //if (strcmp(DIM_SWITCH, dim_2d_str)==0) #if DIMENSIONS == 2 { beta=pow((n_vx_tmp*n_vx_tmp)+(n_vy_tmp*n_vy_tmp),0.5); } #else { 
beta=pow((pow((n_vx_tmp),2)+pow((n_vy_tmp),2)+pow((n_vz_tmp),2)),0.5); } #endif *(ph_p+0)=((ph+i)->p0); *(ph_p+1)=((ph+i)->p1); *(ph_p+2)=((ph+i)->p2); *(ph_p+3)=((ph+i)->p3); //ph_p_comv[0]=((ph+i)->comv_p0); //ph_p_comv[1]=((ph+i)->comv_p1); //ph_p_comv[2]=((ph+i)->comv_p2); //ph_p_comv[3]=((ph+i)->comv_p3); //printf("ph: p0 %e p1 %e p2 %e p3 %e\n", *(ph_p_comv+0), *(ph_p_comv+1), *(ph_p_comv+2), *(ph_p_comv+3)); //singleElectron(&el_p[0], n_temp_tmp, &ph_p_comv[0], rng[omp_get_thread_num()], fPtr); //get random electron //printf("after singleElectron n_temp_tmp %e from ptr %e n_dens_tmp %e from ptr %e\n", n_temp_tmp, (*(temp+min_index)), n_dens_tmp, (*(dens+min_index))); //printf("Chosen el: p0 %e p1 %e p2 %e p3 %e\nph: p0 %e p1 %e p2 %e p3 %e\n", *(el_p+0), *(el_p+1), *(el_p+2), *(el_p+3), *(ph_p+0), *(ph_p+1), *(ph_p+2), *(ph_p+3)); //synch_x_sect=synCrossSection(n_dens_tmp/M_P, n_temp_tmp, ph_p_comv[0]*C_LIGHT/PL_CONST, sqrt((el_p[0]*el_p[0]/(M_EL*M_EL*C_LIGHT*C_LIGHT))-1), epsilon_b); //printf("i: %d flash_array_idx %d synch_x_sect %e freq %e temp %e el_dens %e\n", i, min_index, synch_x_sect, *(ph_p+0)*C_LIGHT/PL_CONST, n_temp_tmp, n_dens_tmp/M_P); //if (synch_x_sect==0) //{ //*(will_scatter+i)=1; //this photon will scatter b/c probability of absorption=0 //} /* else { if (gsl_rng_uniform_pos(rng[omp_get_thread_num()])>(THOM_X_SECT/(THOM_X_SECT+synch_x_sect))) { //this photon will be absorbed *(will_scatter+i)=0; } else { *(will_scatter+i)=1; } } photons can onlt scatter now */ //put this in to double check that random number is between 0 and 1 (exclusive) because there was a problem with this for parallel case rnd_tracker=0; #if defined(_OPENMP) thread_id=omp_get_thread_num(); #endif rnd_tracker=gsl_rng_uniform_pos(rng[thread_id]); //printf("Rnd_tracker: %e Thread number %d \n",rnd_tracker, omp_get_thread_num() ); //mfp=(-1)*log(rnd_tracker)*(M_P/((n_dens_tmp))/(THOM_X_SECT)); ///(1.0-beta*((n_cosangle)))) ; //calulate the mfp and then multiply it by the ln of a random number to simulate distribution of mean free paths DO EVERYTHING IN COMOV FRAME NOW mfp=(-1)*(M_P/((n_dens_lab_tmp))/THOM_X_SECT/(1.0-beta*((n_cosangle))))*log(rnd_tracker) ; //if (mfp/C_LIGHT < 1e-100) //{ // fprintf("Photon %d has a mfp of %d\n", i, mfp); // exit(0); //} } else { mfp=min_mfp; } } else { mfp=min_mfp; //fprintf(fPtr,"Photon %d In ELSE\n", i); //exit(0); } *(all_time_steps+i)=mfp/C_LIGHT; } //exit(0); //free rand number generator for (i=1;i<num_thread;i++) { gsl_rng_free(rng[i]); } free(rng); //printf("HERE\n"); for (i=0;i<num_ph;i++) { *(sorted_indexes+i)= i; //save indexes to array to use in qsort } //printf("before QSORT\n"); #if (defined _GNU_SOURCE || defined __GNU__ || defined __linux__) qsort_r(sorted_indexes, num_ph, sizeof (int), compare2, all_time_steps); #elif (defined __APPLE__ || defined __MACH__ || defined __DARWIN__ || defined __FREEBSD__ || defined __BSD__ || defined OpenBSD3_1 || defined OpenBSD3_9) qsort_r(sorted_indexes, num_ph, sizeof (int), all_time_steps, compare); #else #error Cannot detect operating system #endif //for (i=0;i<num_ph;i++) //{ // fprintf(fPtr, "Qsort: %d GSL: %d\n", *(sorted_indexes_2+i), *(sorted_indexes+i)); //} //exit(0); //print number of times we had to refind the index of the elemtn photons were located in if (find_nearest_block_switch!=0) { num_photons_find_new_element=0; //force this to be 0 since we forced MCRaT to find the indexes for all the photons here } //fprintf(fPtr, "MCRat had to refind where %d photons were located in the grid\n", 
num_photons_find_new_element); //(*time_step)=*(all_time_steps+(*(sorted_indexes+0))); //dont need to return index b/c photonEvent doesnt use this, but mcrat.c uses this info //index= *(sorted_indexes+0);//first element of sorted array //free(el_p);free(ph_p_comv); return num_photons_find_new_element; } int compare (void *ar, const void *a, const void *b) { //from https://phoxis.org/2012/07/12/get-sorted-index-orderting-of-an-array/ int aa = *(int *) a; int bb = *(int *) b; double *arr=NULL; arr=ar; //printf("%d, %d\n", aa, bb); //printf("%e, %e\n", arr[aa] , arr[bb]); //return (aa - bb); /* if (arr[aa] < arr[bb]) return -1; if (arr[aa] == arr[bb]) return 0; if (arr[aa] > arr[bb]) return 1; */ return ((arr[aa] > arr[bb]) - (arr[aa] < arr[bb])); } int compare2 ( const void *a, const void *b, void *ar) { //have 2 compare funcions b/c of changes in qsort_r between BSD and GNU //from https://phoxis.org/2012/07/12/get-sorted-index-orderting-of-an-array/ int aa = *(int *) a; int bb = *(int *) b; double *arr=NULL; arr=ar; //printf("%d, %d\n", aa, bb); //printf("%e, %e\n", arr[aa] , arr[bb]); //return (aa - bb); /* if (arr[aa] < arr[bb]) return -1; if (arr[aa] == arr[bb]) return 0; if (arr[aa] > arr[bb]) return 1; */ return ((arr[aa] > arr[bb]) - (arr[aa] < arr[bb])); } int interpolatePropertiesAndMinMFP( struct photon *ph, int num_ph, int array_num, double *time_step, double *x, double *y, double *z, double *szx, double *szy, double *velx, double *vely, double *velz, double *dens_lab,\ double *temp, double *n_dens_lab, double *n_vx, double *n_vy, double *n_vz, double *n_temp, gsl_rng * rand, int find_nearest_block_switch, FILE *fPtr) { /* * THIS FUNCTION IS WRITTEN JUST FOR 2D SIMS AS OF NOW, not used */ int i=0, j=0, min_index=0, ph_block_index=0, thread_id=0; int left_block_index=0, right_block_index=0, bottom_block_index=0, top_block_index=0, all_adjacent_block_indexes[4]; double ph_x=0, ph_y=0, ph_phi=0, ph_z=0, dist=0, left_dist_min=0, right_dist_min=0, top_dist_min=0, bottom_dist_min=0, dv=0, v=0; double fl_v_x=0, fl_v_y=0, fl_v_z=0; //to hold the fluid velocity in MCRaT coordinates double r=0, theta=0; double ph_v_norm=0, fl_v_norm=0; double n_cosangle=0, n_dens_lab_tmp=0,n_vx_tmp=0, n_vy_tmp=0, n_vz_tmp=0, n_temp_tmp=0; double rnd_tracker=0, n_dens_lab_min=0, n_vx_min=0, n_vy_min=0, n_vz_min=0, n_temp_min=0; int num_thread=2;//omp_get_max_threads(); bool is_in_block=0; //boolean to determine if the photon is outside of its previously noted block int index=0; double mfp=0,min_mfp=0, beta=0; //initialize gsl random number generator fo each thread const gsl_rng_type *rng_t; gsl_rng **rng; gsl_rng_env_setup(); rng_t = gsl_rng_ranlxs0; rng = (gsl_rng **) malloc((num_thread ) * sizeof(gsl_rng *)); rng[0]=rand; //#pragma omp parallel for num_threads(nt) for(i=1;i<num_thread;i++) { rng[i] = gsl_rng_alloc (rng_t); gsl_rng_set(rng[i],gsl_rng_get(rand)); } //go through each photon and find the blocks around it and then get the distances to all of those blocks and choose the one thats the shortest distance away //can optimize here, exchange the for loops and change condition to compare to each of the photons is the radius of the block is .95 (or 1.05) times the min (max) photon radius //or just parallelize this part here min_mfp=1e12; #pragma omp parallel for num_threads(num_thread) firstprivate( r, theta,dv, v, all_adjacent_block_indexes, j, left_block_index, right_block_index, top_block_index, bottom_block_index, is_in_block, ph_block_index, ph_x, ph_y, ph_z, ph_phi, min_index, 
n_dens_lab_tmp,n_vx_tmp, n_vy_tmp, n_vz_tmp, n_temp_tmp, fl_v_x, fl_v_y, fl_v_z, fl_v_norm, ph_v_norm, n_cosangle, mfp, beta, rnd_tracker) private(i) shared(min_mfp ) for (i=0;i<num_ph; i++) { //printf("%d, %e,%e\n", i, ((ph+i)->r0), ((ph+i)->r1)); if (find_nearest_block_switch==0) { ph_block_index=(ph+i)->nearest_block_index; //if starting a new frame the number of indexes can change and cause a seg fault } else { ph_block_index=0; //if starting a new frame set index=0 to avoid this issue } //if (strcmp(DIM_SWITCH, dim_2d_str)==0) #if DIMENSIONS == 2 { ph_x=pow(pow(((ph+i)->r0),2.0)+pow(((ph+i)->r1),2.0), 0.5); //convert back to FLASH x coordinate ph_y=((ph+i)->r2); ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0)); } #else { ph_x=((ph+i)->r0); ph_y=((ph+i)->r1); ph_z=((ph+i)->r2); } #endif //printf("ph_x:%e, ph_y:%e\n", ph_x, ph_y); is_in_block=checkInBlock(ph_block_index, ph_x, ph_y, ph_z, x, y, z, szx, szy); if (find_nearest_block_switch==0 && is_in_block) { //keep the saved grid index min_index=ph_block_index; } else { //find the new index of the block closest to the photon //min_index=findNearestBlock(array_num, ph_x, ph_y, ph_z, x, y, z); //stop doing this one b/c nearest grid could be one that the photon isnt actually in due to adaptive mesh //find the new index of the block that the photon is actually in min_index=findContainingBlock(array_num, ph_x, ph_y, ph_z, x, y, z, szx, szy, ph_block_index, find_nearest_block_switch, fPtr); (ph+i)->nearest_block_index=min_index; //save the index } //look for the blocks surounding the block of interest and order them by the left_dist_min=1e15;//set dist to impossible value to make sure at least first distance calulated is saved right_dist_min=1e15; top_dist_min=1e15; bottom_dist_min=1e15; for (j=0;j<array_num;j++) { //if (strcmp(DIM_SWITCH, dim_2d_str)==0) #if DIMENSIONS == 2 { dist= pow(pow((*(x+min_index))- (*(x+j)), 2.0) + pow((*(y+min_index))- (*(y+j)) , 2.0),0.5); } #else { dist= pow(pow((*(x+min_index))- (*(x+j)), 2.0) + pow((*(y+min_index))- (*(y+j)),2.0 ) + pow((*(z+min_index))- (*(z+j)) , 2.0),0.5); } #endif if ((*(x+j))<(*(x+min_index)) && (dist < left_dist_min) ) { left_block_index=j; left_dist_min=dist; } else if ((*(x+j))>(*(x+min_index)) && (dist < right_dist_min)) { right_block_index=j; right_dist_min=dist; } if ((*(y+j))<(*(y+min_index)) && (dist < bottom_dist_min) ) { bottom_block_index=j; bottom_dist_min=dist; } else if ((*(y+j))>(*(y+min_index)) && (dist < top_dist_min) ) { top_block_index=j; top_dist_min=dist; } } all_adjacent_block_indexes[0]=left_block_index; all_adjacent_block_indexes[1]=right_block_index; all_adjacent_block_indexes[2]=bottom_block_index; all_adjacent_block_indexes[3]=top_block_index; //do a weighted average of the 4 nearest grids based on volume v=0; (n_dens_lab_tmp)=0; (n_vx_tmp)= 0; (n_vy_tmp)= 0; (n_temp_tmp)= 0; (n_vz_tmp)= 0; for (j=0;j<4;j++) { #if SIM_SWITCH == RIKEN { r=pow(pow((*(x+all_adjacent_block_indexes[j])),2.0)+pow((*(y+all_adjacent_block_indexes[j])),2.0), 0.5); theta=atan2((*(x+all_adjacent_block_indexes[j])), (*(y+all_adjacent_block_indexes[j]))); dv=2.0*M_PI*pow(r,2)*sin(theta)*(*(szx+all_adjacent_block_indexes[j]))*(*(szy+all_adjacent_block_indexes[j])) ; } #else { //using FLASH dv=2.0*M_PI*(*(x+all_adjacent_block_indexes[j]))*pow(*(szx+all_adjacent_block_indexes[j]),2.0) ; } #endif v+=dv; //save values (n_dens_lab_tmp)+= (*(dens_lab+all_adjacent_block_indexes[j]))*dv; (n_vx_tmp)+= (*(velx+all_adjacent_block_indexes[j]))*dv; (n_vy_tmp)+= (*(vely+all_adjacent_block_indexes[j]))*dv; 
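/* note: this loop accumulates volume-weighted sums, sum_j q_j*dv_j, over the four adjacent blocks; dividing by the total volume v after the loop gives the volume-averaged fluid quantities at the photon's position */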
(n_temp_tmp)+= (*(temp+all_adjacent_block_indexes[j]))*dv; //if (strcmp(DIM_SWITCH, dim_3d_str)==0) #if DIMENSIONS == 3 { (n_vz_tmp)+= (*(velz+all_adjacent_block_indexes[j]))*dv; } #endif } //fprintf(fPtr,"Outside\n"); //save values (n_dens_lab_tmp)/= v; (n_vx_tmp)/= v; (n_vy_tmp)/= v; (n_temp_tmp)/= v; //if (strcmp(DIM_SWITCH, dim_3d_str)==0) #if DIMENSIONS == 3 { (n_vz_tmp)/= v; } #endif //if (strcmp(DIM_SWITCH, dim_2d_str)==0) #if DIMENSIONS == 2 { fl_v_x=n_vx_tmp*cos(ph_phi); fl_v_y=n_vx_tmp*sin(ph_phi); fl_v_z=n_vy_tmp; } #else { fl_v_x=n_vx_tmp; fl_v_y=n_vy_tmp; fl_v_z=n_vz_tmp; } #endif fl_v_norm=pow(pow(fl_v_x, 2.0)+pow(fl_v_y, 2.0)+pow(fl_v_z, 2.0), 0.5); ph_v_norm=pow(pow(((ph+i)->p1), 2.0)+pow(((ph+i)->p2), 2.0)+pow(((ph+i)->p3), 2.0), 0.5); //(*(n_cosangle+i))=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //find cosine of the angle between the photon and the fluid velocities via a dot product (n_cosangle)=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //make 1 for cylindrical otherwise its undefined //if (strcmp(DIM_SWITCH, dim_2d_str)==0) #if DIMENSIONS == 2 { beta=pow((pow((n_vx_tmp),2)+pow((n_vy_tmp),2)),0.5); } #else { beta=pow((pow((n_vx_tmp),2)+pow((n_vy_tmp),2)+pow((n_vz_tmp),2)),0.5); } #endif //put this in to double check that random number is between 0 and 1 (exclusive) because there was a problem with this for parallel case rnd_tracker=0; #if defined(_OPENMP) thread_id=omp_get_thread_num(); #endif rnd_tracker=gsl_rng_uniform_pos(rng[thread_id]); mfp=(-1)*(M_P/((n_dens_lab_tmp))/THOM_X_SECT/(1.0-beta*((n_cosangle))))*log(rnd_tracker) ; //calulate the mfp and then multiply it by the ln of a random number to simulate distribution of mean free paths #pragma omp critical if ( mfp<min_mfp) { min_mfp=mfp; n_dens_lab_min= n_dens_lab_tmp; n_vx_min= n_vx_tmp; n_vy_min= n_vy_tmp; //if (strcmp(DIM_SWITCH, dim_3d_str)==0) #if DIMENSIONS == 3 { n_vz_min= n_vz_tmp; } #endif n_temp_min= n_temp_tmp; index=i; //fprintf(fPtr, "Thread is %d. 
new min: %e for photon %d with block properties: %e, %e, %e Located at: %e, %e, Dist: %e\n", omp_get_thread_num(), mfp, index, n_vx_tmp, n_vy_tmp, n_temp_tmp, *(x+min_index), *(y+min_index), dist_min); //fflush(fPtr); #pragma omp flush(min_mfp) } } //free rand number generator for (i=1;i<num_thread;i++) { gsl_rng_free(rng[i]); } free(rng); *(n_dens_lab)= n_dens_lab_min; *(n_vx)= n_vx_min; *(n_vy)= n_vy_min; //if (strcmp(DIM_SWITCH, dim_3d_str)==0) #if DIMENSIONS == 3 { *(n_vz)= n_vz_min; } #endif *(n_temp)= n_temp_min; (*time_step)=min_mfp/C_LIGHT; return index; } void updatePhotonPosition(struct photon *ph, int num_ph, double t, FILE *fPtr) { //move photons by speed of light int i=0; #if defined(_OPENMP) int num_thread=omp_get_num_threads(); #endif double old_position=0, new_position=0, divide_p0=0; #pragma omp parallel for num_threads(num_thread) firstprivate(old_position, new_position, divide_p0) for (i=0;i<num_ph;i++) { if (((ph+i)->type != SYNCHROTRON_POOL_PHOTON) && ((ph+i)->weight != 0)) { old_position= pow( pow((ph+i)->r0,2)+pow((ph+i)->r1,2)+pow((ph+i)->r2,2), 0.5 ); //uncommented checks since they were not necessary anymore divide_p0=1.0/((ph+i)->p0); ((ph+i)->r0)+=((ph+i)->p1)*divide_p0*C_LIGHT*t; //update x position ((ph+i)->r1)+=((ph+i)->p2)*divide_p0*C_LIGHT*t;//update y ((ph+i)->r2)+=((ph+i)->p3)*divide_p0*C_LIGHT*t;//update z new_position= pow( pow((ph+i)->r0,2)+pow((ph+i)->r1,2)+pow((ph+i)->r2,2), 0.5 ); /* if ((new_position-old_position)/t > C_LIGHT) { fprintf(fPtr, "PHOTON NUMBER %d IS SUPERLUMINAL. ITS SPEED IS %e c.\n", i, ((new_position-old_position)/t)/C_LIGHT); } */ //if ( (ph+i)->s0 != 1) { // fprintf(fPtr, "PHOTON NUMBER %d DOES NOT HAVE I=1. Instead it is: %e\n", i, (ph+i)->s0); } //printf("In update function: %e, %e, %e, %e, %e, %e, %e\n",((ph+i)->r0), ((ph+i)->r1), ((ph+i)->r2), t, ((ph+i)->p1)/((ph+i)->p0), ((ph+i)->p2)/((ph+i)->p0), ((ph+i)->p3)/((ph+i)->p0) ); } } //printf("In update function: %e, %e, %e, %e\n",t, ((ph)->p1)/((ph)->p0), ((ph)->p2)/((ph)->p0), ((ph)->p3)/((ph)->p0) ); } void mullerMatrixRotation(double theta, double *s, FILE *fPtr) { //makes a CCW rotation od the stokes parameters when the photon velocity vector is pointed towards the observer, follows Lundman gsl_matrix *M= gsl_matrix_calloc (4, 4); //create matrix thats 4x4 to do rotation as defined in McMaster 1961 (has it to rotate CW in that paper) gsl_vector *result= gsl_vector_alloc(4); gsl_vector_view stokes; stokes=gsl_vector_view_array(s, 4); //fprintf(fPtr, "sokes parameter before= %e %e %e %e\n", gsl_vector_get(&stokes.vector, 0), gsl_vector_get(&stokes.vector, 1), gsl_vector_get(&stokes.vector, 2), gsl_vector_get(&stokes.vector, 3)); gsl_matrix_set(M, 0,0,1); gsl_matrix_set(M, 3,3,1); gsl_matrix_set(M, 1,1,cos(2*theta)); gsl_matrix_set(M, 2,2,cos(2*theta)); gsl_matrix_set(M, 1,2,-1*sin(2*theta)); gsl_matrix_set(M, 2,1,sin(2*theta)); gsl_blas_dgemv(CblasNoTrans, 1, M, &stokes.vector, 0, result); //Ms=s //fprintf(fPtr, "stokes parameter after= %e %e %e %e\n\n", gsl_vector_get(result, 0), gsl_vector_get(result, 1), gsl_vector_get(result, 2), gsl_vector_get(result, 3)); //save back to the original stokes vector *(s+0)=gsl_vector_get(result, 0); *(s+1)=gsl_vector_get(result, 1); *(s+2)=gsl_vector_get(result, 2); *(s+3)=gsl_vector_get(result, 3); gsl_vector_free(result); gsl_matrix_free (M); } void findXY(double *v_ph, double *vector, double *x, double *y) { //finds the stokes plane coordinate x,y axis for the photon velocity with respect to some reference vector //assumes that 
pointers point to arrays of 3 doubles in length
    double norm=0;

    *(y+0)= ((*(v_ph+1))*(*(vector+2))-(*(v_ph+2))*(*(vector+1)));
    *(y+1)= -1*((*(v_ph+0))*(*(vector+2))-(*(v_ph+2))*(*(vector+0)));
    *(y+2)= ((*(v_ph+0))*(*(vector+1))-(*(v_ph+1))*(*(vector+0))); // y = v_ph X vector (cross product), perpendicular to both
    norm=1.0/sqrt( (*(y+0))*(*(y+0)) + (*(y+1))*(*(y+1)) + (*(y+2))*(*(y+2)));
    *(y+0) *= norm;
    *(y+1) *= norm;
    *(y+2) *= norm;

    *(x+0)= (*(y+1))*(*(v_ph+2))-(*(y+2))*(*(v_ph+1));
    *(x+1)= -1*((*(y+0))*(*(v_ph+2))-(*(y+2))*(*(v_ph+0)));
    *(x+2)= (*(y+0))*(*(v_ph+1))-(*(y+1))*(*(v_ph+0)); // x = y X v_ph, completing the orthonormal triad {x, y, v_ph}
    norm=1.0/sqrt( (*(x+0))*(*(x+0)) + (*(x+1))*(*(x+1)) + (*(x+2))*(*(x+2)));
    *(x+0) *= norm;
    *(x+1) *= norm;
    *(x+2) *= norm;
}

double findPhi(double *x_old, double *y_old, double *x_new, double *y_new)
{
    //find the angle to rotate the stokes vector by, to transform from one set of stokes coordinates to another
    //this is given by Lundman
    gsl_vector_view y=gsl_vector_view_array(y_old, 3);
    gsl_vector_view x=gsl_vector_view_array(x_old, 3);
    gsl_vector_view y_prime=gsl_vector_view_array(y_new, 3);
    gsl_vector_view x_prime=gsl_vector_view_array(x_new, 3);
    double factor=0, dot_prod_result=0;

    gsl_blas_ddot(&x.vector, &y_prime.vector, &dot_prod_result);
    if (dot_prod_result>0)
    {
        factor=1;
    }
    else if (dot_prod_result<0)
    {
        factor=-1;
    }
    else
    {
        factor=0;
    }

    gsl_blas_ddot(&y.vector, &y_prime.vector, &dot_prod_result);
    if ((dot_prod_result<-1) || (dot_prod_result>1))
    {
        //printf("The old dot product was %e, the new one is %e\n",dot_prod_result, round(dot_prod_result));
        dot_prod_result=round(dot_prod_result); //round away the numerical error that can push the value slightly below -1 or above 1, which would make acos() return nan
    }

    return -1*factor*acos(dot_prod_result);
}

void stokesRotation(double *v, double *v_ph, double *v_ph_boosted, double *s, FILE *fPtr)
{
    //takes the 3-velocity of the initial photon, v_ph, of the boosted photon, v_ph_boosted, and the boost vector, v
    double z_hat[3]={0,0,1}; //z axis used to define the lab stokes basis
    double x[3]={0,0,0}, y[3]={0,0,0}, x_new[3]={0,0,0}, y_new[3]={0,0,0}; //initialize arrays to hold the stokes coordinate system
    double phi=0;

    //if (i==0)
    {
        //find the stokes coordinate sys in the orig frame with respect to the z axis
        findXY(v_ph, z_hat, x, y); //the arrays are passed directly, since findXY() expects double* arguments
    }

    //find the stokes coordinate sys in the orig frame with respect to the boost vector
    findXY(v_ph, v, x_new, y_new);

    phi=findPhi(x, y, x_new, y_new); //now find the rotation between the two coordinate systems

    //rotate the stokes vector now, to put it in the coordinate system of the boosted photon and the boost vector
    mullerMatrixRotation(phi, s, fPtr);
    /*
    if ( isnan(*(s+0)) || isnan(*(s+1)) || isnan(*(s+2)) || isnan(*(s+3)) )
    {
        printf("A stokes value is nan\n\n");
    }
    */

    //find the new coordinates of the rotated stokes vector with the boosted photon and the boost vector
    findXY(v_ph_boosted, v, x, y);

    //find the stokes coordinate sys in the orig frame with respect to the z axis
    findXY(v_ph_boosted, z_hat, x_new, y_new);

    phi=findPhi(x, y, x_new, y_new); //now find the rotation between the two coordinate systems

    //do the rotation of the stokes vector to put it in the coordinate system of the boosted photon and the z axis
    mullerMatrixRotation(phi, s, fPtr);
    /*
    if ( isnan(*(s+0)) || isnan(*(s+1)) || isnan(*(s+2)) || isnan(*(s+3)) )
    {
        printf("A stokes value is nan\n\n");
    }
    */
}

double photonEvent(struct photon *ph, int num_ph, double dt_max, double *all_time_steps, int *sorted_indexes, double *all_flash_vx, double *all_flash_vy, double *all_flash_vz, double *all_fluid_temp, int *scattered_ph_index, int *frame_scatt_cnt, int *frame_abs_cnt, gsl_rng * rand, FILE *fPtr)
{
    //function to perform a single photon scattering event
    int i=0, index=0, ph_index=0, event_did_occur=0; //event_did_occur keeps track of whether a scattering or absorption actually occurred or not
    double scatt_time=0, old_scatt_time=0; //keep track of the new time to scatter vs the old time to scatter, to know how much to incrementally propagate the photons if necessary
    double phi=0, theta=0; //phi and theta for the 4 momentum
    double ph_phi=0, flash_vx=0, flash_vy=0, flash_vz=0, fluid_temp=0;
    double *ph_p=malloc(4*sizeof(double)); //pointer to hold only the photon 4 momentum @ start
    double *el_p_comov=malloc(4*sizeof(double)); //pointer to hold the electron 4 momentum in the comoving frame
    double *ph_p_comov=malloc(4*sizeof(double)); //pointer to hold the comoving photon 4 momentum
    double *fluid_beta=malloc(3*sizeof(double)); //pointer to hold the fluid velocity vector
    double *negative_fluid_beta=malloc(3*sizeof(double)); //pointer to hold the negative fluid velocity vector
    double *s=malloc(4*sizeof(double)); //vector to hold the stokes parameters for a given photon

    i=0;
    old_scatt_time=0;
    event_did_occur=0;
    //fprintf(fPtr,"In this function Num_ph %d\n", num_ph);
    //fflush(fPtr);

    while (i<num_ph && event_did_occur==0 )
    {
        ph_index=(*(sorted_indexes+i));
        scatt_time= *(all_time_steps+ph_index); //get the time until the photon scatters

        //IF THE TIME IS GREATER THAN dt_max don't let the photon positions be updated
        if (scatt_time<dt_max)
        {
            updatePhotonPosition(ph, num_ph, scatt_time-old_scatt_time, fPtr);
            //fprintf(fPtr,"i: %d, Photon: %d, Delta t=%e\n", i, ph_index, scatt_time-old_scatt_time);
            //fflush(fPtr);

            //if the photon should scatter then do so, will_scatter==1
            //if (*(will_scatter+ph_index) != 0 ) not needed b/c all photons are able to scatter and none can be explicitly absorbed
            //{
            //WHAT IF THE PHOTON MOVES TO A NEW BLOCK BETWEEN WHEN WE CALC MFP AND MOVE IT TO DO THE
SCATTERING???? //it mostly happens at low optical depth, near the photosphere so we would have a large mfp anyways so we probably wouldn't be in this function in that case index=(ph+ph_index)->nearest_block_index; //the sorted_indexes gives index of photon with smallest time to potentially scatter then extract the index of the block closest to that photon flash_vx=*(all_flash_vx+ index); flash_vy=*(all_flash_vy+ index); fluid_temp=*(all_fluid_temp+ index); //if (strcmp(DIM_SWITCH, dim_3d_str)==0) #if DIMENSIONS == 3 { flash_vz=*(all_flash_vz+ index); } #endif ph_phi=atan2(((ph+ph_index)->r1), (((ph+ph_index)->r0))); /* if (isnan((ph+ph_index)->r0) || isnan((ph+ph_index)->r1) || isnan((ph+ph_index)->r2)) { printf("Not a number\n"); } fprintf(fPtr,"ph_phi=%e\n", ph_phi); fflush(fPtr); */ //convert flash coordinated into MCRaT coordinates //printf("Getting fluid_beta\n"); //if (strcmp(DIM_SWITCH, dim_2d_str)==0) #if DIMENSIONS == 2 { (*(fluid_beta+0))=flash_vx*cos(ph_phi); (*(fluid_beta+1))=flash_vx*sin(ph_phi); (*(fluid_beta+2))=flash_vy; } #else { (*(fluid_beta+0))=flash_vx; (*(fluid_beta+1))=flash_vy; (*(fluid_beta+2))=flash_vz; } #endif /* fprintf(fPtr,"FLASH v: %e, %e\n", flash_vx,flash_vy); fflush(fPtr); */ //fill in photon 4 momentum //printf("filling in 4 momentum in photonScatter for photon index %d\n", ph_index); //if ((ph+ph_index)->type == SYNCHROTRON_POOL_PHOTON) { //printf("The scattering photon is a seed photon w/ comv freq %e Hz.\n", ((ph+ph_index)->comv_p0)*C_LIGHT/PL_CONST); //*nu_c_scatt=((ph+ph_index)->comv_p0)*C_LIGHT/PL_CONST;//dont need this anymore b/c the SYNCHROTRON_POOL_PHOTON photon doesnt move from its cell } *(ph_p+0)=((ph+ph_index)->p0); *(ph_p+1)=((ph+ph_index)->p1); *(ph_p+2)=((ph+ph_index)->p2); *(ph_p+3)=((ph+ph_index)->p3); *(ph_p_comov+0)=((ph+ph_index)->comv_p0); *(ph_p_comov+1)=((ph+ph_index)->comv_p1); *(ph_p_comov+2)=((ph+ph_index)->comv_p2); *(ph_p_comov+3)=((ph+ph_index)->comv_p3); //fill in stokes parameters *(s+0)=((ph+ph_index)->s0); //I ==1 *(s+1)=((ph+ph_index)->s1); //Q/I *(s+2)=((ph+ph_index)->s2); //U/I *(s+3)=((ph+ph_index)->s3); //V/I /* fprintf(fPtr,"Unscattered Photon in Lab frame: %e, %e, %e,%e, %e, %e, %e\nStokes params %e %e %e %e\n", *(ph_p+0), *(ph_p+1), *(ph_p+2), *(ph_p+3), (ph->r0), (ph->r1), (ph->r2), *(s+0), *(s+1), *(s+2), *(s+3)); fflush(fPtr); fprintf(fPtr,"Fluid Beta: %e, %e, %e\n", *(fluid_beta+0),*(fluid_beta+1), *(fluid_beta+2)); fflush(fPtr); */ //first we bring the photon to the fluid's comoving frame //lorentzBoost(fluid_beta, ph_p, ph_p_comov, 'p', fPtr); //*(ph_p_comov+0)=((ph+ph_index)->comv_p0); //*(ph_p_comov+1)=((ph+ph_index)->comv_p1); //*(ph_p_comov+2)=((ph+ph_index)->comv_p2); //*(ph_p_comov+3)=((ph+ph_index)->comv_p3); /* fprintf(fPtr,"Old: %e, %e, %e,%e\n", ph->p0, ph->p1, ph->p2, ph->p3); fflush(fPtr); fprintf(fPtr, "Before Scattering, In Comov_frame:\n"); fflush(fPtr); fprintf(fPtr, "ph_comov: %e, %e, %e,%e\n", *(ph_p_comov+0), *(ph_p_comov+1), *(ph_p_comov+2), *(ph_p_comov+3)); fflush(fPtr); */ //fprintf(fPtr, "Theta: %e Phi %e Lab: x_tilde: %e, %e, %e, y_tilde: %e %e %e\n", theta, phi, *(x_tilde+0), *(x_tilde+1), *(x_tilde+2), *(y_tilde+0), *(y_tilde+1), *(y_tilde+2)); //then rotate the stokes plane by some angle such that we are in the stokes coordinat eystsem after the lorentz boost //if (STOKES_SWITCH != 0) #if STOKES_SWITCH == ON { stokesRotation(fluid_beta, (ph_p+1), (ph_p_comov+1), s, fPtr); } #endif //exit(0); //second we generate a thermal electron at the correct temperature 
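/* the electron is drawn from a relativistic Maxwell-Juttner distribution when the fluid temperature is >= 1e7 K, and from three Gaussian velocity components (a non-relativistic Maxwellian) otherwise; see singleElectron() below */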
singleElectron(el_p_comov, fluid_temp, ph_p_comov, rand, fPtr);
            //fprintf(fPtr,"el_comov: %e, %e, %e,%e\n", *(el_p_comov+0), *(el_p_comov+1), *(el_p_comov+2), *(el_p_comov+3));
            //fflush(fPtr);

            //third we perform the scattering and save the scattered photon 4 momentum in ph_p_comov @ the end of the function
            event_did_occur=singleScatter(el_p_comov, ph_p_comov, s, rand, fPtr);
            //fprintf(fPtr,"After Scattering, After Lorentz Boost to Comov frame: %e, %e, %e,%e\n", *(ph_p_comov+0), *(ph_p_comov+1), *(ph_p_comov+2), *(ph_p_comov+3));
            //fflush(fPtr);
            //event_did_occur=0;

            if (event_did_occur==1)
            {
                //fprintf(fPtr,"Within the if!\n");
                //fflush(fPtr);
                //the scattering occurred, so we have to update the photon 4 momentum; if the photon didn't scatter nothing changes
                //fourth we bring the photon back to the lab frame
                *(negative_fluid_beta+0)=-1*( *(fluid_beta+0));
                *(negative_fluid_beta+1)=-1*( *(fluid_beta+1));
                *(negative_fluid_beta+2)=-1*( *(fluid_beta+2));
                lorentzBoost(negative_fluid_beta, ph_p_comov, ph_p, 'p', fPtr);
                //fprintf(fPtr,"Scattered Photon in Lab frame: %e, %e, %e,%e\n", *(ph_p+0), *(ph_p+1), *(ph_p+2), *(ph_p+3));
                //fflush(fPtr);

                #if STOKES_SWITCH == ON
                {
                    stokesRotation(negative_fluid_beta, (ph_p_comov+1), (ph_p+1), s, fPtr); //rotate to boost back to the lab frame
                    //save the stokes parameters
                    ((ph+ph_index)->s0)= *(s+0); //I ==1
                    ((ph+ph_index)->s1)= *(s+1);
                    ((ph+ph_index)->s2)= *(s+2);
                    ((ph+ph_index)->s3)= *(s+3);
                }
                #endif

                if (((*(ph_p+0))*C_LIGHT/1.6e-9) > 1e4) //p0*C_LIGHT/1.6e-9 is the photon energy in keV, so this warns above ~10 MeV
                {
                    fprintf(fPtr,"Extremely High Photon Energy!!!!!!!!\n");
                    fflush(fPtr);
                }

                //fprintf(fPtr,"Old: %e, %e, %e,%e\n", ph->p0, ph->p1, ph->p2, ph->p3);
                //fprintf(fPtr, "Old: %e, %e, %e,%e\n", *(ph_p_comov+0), *(ph_p_comov+1), *(ph_p_comov+2), *(ph_p_comov+3));

                //assign the photon its new lab 4 momentum
                ((ph+ph_index)->p0)=(*(ph_p+0));
                ((ph+ph_index)->p1)=(*(ph_p+1));
                ((ph+ph_index)->p2)=(*(ph_p+2));
                ((ph+ph_index)->p3)=(*(ph_p+3));

                //assign it the comoving frame 4 momentum
                ((ph+ph_index)->comv_p0)=(*(ph_p_comov+0));
                ((ph+ph_index)->comv_p1)=(*(ph_p_comov+1));
                ((ph+ph_index)->comv_p2)=(*(ph_p_comov+2));
                ((ph+ph_index)->comv_p3)=(*(ph_p_comov+3));
                //printf("Done assigning values to original struct\n");

                //increment that photon's number of scatterings
                ((ph+ph_index)->num_scatt)+=1;
                *frame_scatt_cnt+=1; //increment the total number of scatterings
            }
        }
        else
        {
            //the photon's scatt_time > dt_max: adjust the time properly so that it is now appropriate for the next frame
            scatt_time=dt_max;
            updatePhotonPosition(ph, num_ph, scatt_time-old_scatt_time, fPtr);
            event_did_occur=1; //set equal to 1 to get out of the loop, b/c the other, subsequent photons will also have scatt_time > dt_max
        }

        old_scatt_time=scatt_time;
        i++;
    }
    //exit(0);

    *scattered_ph_index=ph_index; //save the index of the photon that was scattered
    //fprintf(fPtr,"scattered_ph_index: %d %d\n", *scattered_ph_index, (*(sorted_indexes+i-1)));
    //fflush(fPtr);

    free(el_p_comov); free(ph_p_comov); free(fluid_beta); free(negative_fluid_beta); free(ph_p); free(s);
    ph_p=NULL; negative_fluid_beta=NULL; ph_p_comov=NULL; el_p_comov=NULL;

    //return the total time elapsed to scatter a photon
    return scatt_time;
}

void singleElectron(double *el_p, double temp, double *ph_p, gsl_rng * rand, FILE *fPtr)
{
    //generates an electron with a random energy
    double factor=0, gamma=0;
    double y_dum=0, f_x_dum=0, x_dum=0, beta_x_dum=0, beta=0, phi=0, theta=0, ph_theta=0, ph_phi=0;
    gsl_matrix *rot= gsl_matrix_calloc (3, 3); //create a 3x3 matrix to do the rotation
    gsl_vector_view el_p_prime ; //create a vector to hold the rotated electron 4 momentum
    gsl_vector *result=gsl_vector_alloc (3);

    //fprintf(fPtr, "Temp in singleElectron: %e\n", temp);
    if (temp>= 1e7)
    {
        //printf("In if\n");
        factor=K_B*temp/(M_EL*pow(C_LIGHT,2.0));
        y_dum=1; //initialize the loop to get a random gamma from the distribution of electron velocities
        f_x_dum=0;
        while ((isnan(f_x_dum) !=0) || (y_dum>f_x_dum) )
        {
            x_dum=gsl_rng_uniform_pos(rand)*(1+100*factor);
            beta_x_dum=pow(1-(pow(x_dum, -2.0)) ,0.5);
            y_dum=gsl_rng_uniform(rand)/2.0;
            f_x_dum=pow(x_dum,2)*(beta_x_dum/gsl_sf_bessel_Kn (2, 1.0/factor))*exp(-1*x_dum/factor);
            //fprintf(fPtr,"Choosing a Gamma: xdum: %e, f_x_dum: %e, y_dum: %e\n", x_dum, f_x_dum, y_dum);
        }
        gamma=x_dum;
    }
    else
    {
        //printf("In else\n");
        factor=pow(K_B*temp/M_EL,0.5);
        //calculate a random gamma from 3 random velocities drawn from a gaussian distribution with a std deviation of "factor"
        gamma=pow( 1- (pow(gsl_ran_gaussian(rand, factor)/C_LIGHT, 2)+ pow(gsl_ran_gaussian(rand, factor)/C_LIGHT, 2)+pow(gsl_ran_gaussian(rand, factor)/C_LIGHT, 2) ) ,-0.5); //each velocity component is normally distributed -> maxwellian when combined
    }
    //fprintf(fPtr,"Chosen Gamma: %e\n",gamma);

    beta=pow( 1- (1/pow( gamma,2.0 )) ,0.5);
    //printf("Beta is: %e in singleElectron\n", beta);
    phi=gsl_rng_uniform(rand)*2*M_PI;

    y_dum=1; //initialize the loop to get a random theta
    f_x_dum=0;
    while (y_dum>f_x_dum)
    {
        y_dum=gsl_rng_uniform(rand)*1.3;
        x_dum=gsl_rng_uniform(rand)*M_PI;
        f_x_dum=sin(x_dum)*(1-(beta*cos(x_dum)));
    }
    theta=x_dum;
    //fprintf(fPtr,"Beta: %e\tPhi: %e\tTheta: %e\n",beta,phi, theta);

    //fill in the electron 4 momentum (NOTE: not sure why the order is as such; it seems to be E/c, pz, py, px)
    *(el_p+0)=gamma*(M_EL)*(C_LIGHT);
    *(el_p+1)=gamma*(M_EL)*(C_LIGHT)*beta*cos(theta);
    *(el_p+2)=gamma*(M_EL)*(C_LIGHT)*beta*sin(theta)*sin(phi);
    *(el_p+3)=gamma*(M_EL)*(C_LIGHT)*beta*sin(theta)*cos(phi);
    //printf("Old: %e, %e, %e,%e\n", *(el_p+0), *(el_p+1), *(el_p+2), *(el_p+3));
    el_p_prime=gsl_vector_view_array((el_p+1), 3);

    /* find the angles of the photon (NOTE: not sure why we are changing reference frames here) */
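/* a plausible reading of the rotations below: theta above was sampled relative to the incoming photon direction (hence the sin(theta)*(1-beta*cos(theta)) weighting), so the electron momentum, constructed in a frame whose x-axis lies along the photon, is rotated here back into the comoving coordinate frame */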
ph_phi=atan2(*(ph_p+2), *(ph_p+3)); //Double Check ph_theta=atan2(pow( pow(*(ph_p+2),2)+ pow(*(ph_p+3),2) , 0.5) , (*(ph_p+1)) ); //printf("Calculated Photon phi and theta in singleElectron:%e, %e\n", ph_phi, ph_theta); //fill in rotation matrix to rotate around x axis to get rid of phi angle gsl_matrix_set(rot, 1,1,1); gsl_matrix_set(rot, 2,2,cos(ph_theta)); gsl_matrix_set(rot, 0,0,cos(ph_theta)); gsl_matrix_set(rot, 0,2,-sin(ph_theta)); gsl_matrix_set(rot, 2,0,sin(ph_theta)); gsl_blas_dgemv(CblasNoTrans, 1, rot, &el_p_prime.vector, 0, result); /* printf("Rotation Matrix 0: %e,%e, %e\n", gsl_matrix_get(rot, 0,0), gsl_matrix_get(rot, 0,1), gsl_matrix_get(rot, 0,2)); printf("Rotation Matrix 1: %e,%e, %e\n", gsl_matrix_get(rot, 1,0), gsl_matrix_get(rot, 1,1), gsl_matrix_get(rot, 1,2)); printf("Rotation Matrix 2: %e,%e, %e\n", gsl_matrix_get(rot, 2,0), gsl_matrix_get(rot, 2,1), gsl_matrix_get(rot, 2,2)); printf("Middle: %e, %e, %e,%e\n", *(el_p+0), gsl_vector_get(result,0), gsl_vector_get(result,1), gsl_vector_get(result,2)); */ gsl_matrix_set_all(rot,0); gsl_matrix_set(rot, 0,0,1); gsl_matrix_set(rot, 1,1,cos(-ph_phi)); gsl_matrix_set(rot, 2,2,cos(-ph_phi)); gsl_matrix_set(rot, 1,2,-sin(-ph_phi)); gsl_matrix_set(rot, 2,1,sin(-ph_phi)); gsl_blas_dgemv(CblasNoTrans, 1, rot, result, 0, &el_p_prime.vector); /* printf("Rotation Matrix 0: %e,%e, %e\n", gsl_matrix_get(rot, 0,0), gsl_matrix_get(rot, 0,1), gsl_matrix_get(rot, 0,2)); printf("Rotation Matrix 1: %e,%e, %e\n", gsl_matrix_get(rot, 1,0), gsl_matrix_get(rot, 1,1), gsl_matrix_get(rot, 1,2)); printf("Rotation Matrix 2: %e,%e, %e\n", gsl_matrix_get(rot, 2,0), gsl_matrix_get(rot, 2,1), gsl_matrix_get(rot, 2,2)); printf("Final EL_P_vec: %e, %e, %e,%e\n", *(el_p+0), gsl_vector_get(&el_p_prime.vector,0), gsl_vector_get(&el_p_prime.vector,1), gsl_vector_get(&el_p_prime.vector,2)); */ gsl_matrix_free (rot);gsl_vector_free(result); } int singleScatter(double *el_comov, double *ph_comov, double *s, gsl_rng * rand, FILE *fPtr) { //This routine performs a scattering between a photon and a moving electron. 
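/* outline of singleScatter(): (1) boost the photon into the electron rest frame, (2) rotate the axes so the photon travels along +x, (3) sample scattering angles from the Klein-Nishina (or Thomson) differential cross section, (4) compute the scattered 4-momenta, (5) undo the rotations, and (6) boost back to the comoving frame; when STOKES_SWITCH == ON the Stokes vector is rotated and scattered alongside */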
int i=0, scattering_occured=0; double dotprod_1; //to test orthogonality double *z_axis_electron_rest_frame=malloc(3*sizeof(double)); //basis vector of the z axis in the elctron rest frame double *el_v=malloc(3*sizeof(double)); double *negative_el_v=malloc(3*sizeof(double)); double *ph_p_prime=malloc(4*sizeof(double));//use this to keep track of how the ph 4 momentum changes with each rotation double *el_p_prime=malloc(4*sizeof(double)); double phi0=0, phi1=0, phi=0, theta=0; double y_dum, f_x_dum, x_dum; double x_tilde[3]={0,0,0}, y_tilde[3]={0,0,0}, x_tilde_new[3]={0,0,0}, y_tilde_new[3]={0,0,0};//initalize arrays to hold stokes coordinate system gsl_matrix *rot0= gsl_matrix_calloc (3, 3); //create matricies thats 3x3 to do rotations gsl_matrix *rot1= gsl_matrix_calloc (3, 3); gsl_matrix *scatt= gsl_matrix_calloc (4, 4); //fano's matrix for scattering stokes parameters gsl_vector *scatt_result=gsl_vector_alloc (4); gsl_vector *result0=gsl_vector_alloc (3); //vectors to hold results of rotations gsl_vector *result1=gsl_vector_alloc (3); gsl_vector *result=gsl_vector_alloc (4); gsl_vector *whole_ph_p=gsl_vector_alloc (4); gsl_vector *ph_p_orig=gsl_vector_alloc (4) ;//vector to hold the original incoming photon velocity vector in the electron rest frame gsl_vector_view ph_p ;//create vector to hold comoving photon and electron 4 momentum gsl_vector_view el_p ; gsl_vector_view stokes, test, test_x, test_y; /* Dont need these vectors anymore, plus didnt have code to free allocations so it was causing memory leaks gsl_vector *result0_x=gsl_vector_alloc (3); //vectors to hold results of rotations for stokes coordinates gsl_vector *result1_x=gsl_vector_alloc (3); gsl_vector *result0_y=gsl_vector_alloc (3); //vectors to hold results of rotations for stokes coordinates gsl_vector *result1_y=gsl_vector_alloc (3); */ //fill in z-axis basis vector *(z_axis_electron_rest_frame+0)=0; *(z_axis_electron_rest_frame+1)=0; *(z_axis_electron_rest_frame+2)=1; /* was for testing against Kraw *(s+0)=1; //should be 1.0 *(s+1)=1; *(s+2)=0; *(s+3)=0; *(ph_comov+0)=PL_CONST*1e12/C_LIGHT; *(ph_comov+1)=0; //set values of photon prime momentum from doing the scattering to use the vector view of it in dot product *(ph_comov+2)=0; *(ph_comov+3)=PL_CONST*1e12/C_LIGHT; theta=85*M_PI/180; phi=0; dotprod_1=pow(1-(pow(100, -2.0)) ,0.5); *(el_comov+0)=100*M_EL*C_LIGHT; *(el_comov+1)=100*M_EL*C_LIGHT*dotprod_1*sin(theta)*cos(phi); //set values of photon prime momentum from doing the scattering to use the vector view of it in dot product *(el_comov+2)=100*M_EL*C_LIGHT*dotprod_1*sin(theta)*sin(phi); *(el_comov+3)=100*M_EL*C_LIGHT*dotprod_1*cos(theta); */ //fill in electron velocity array and photon 4 momentum *(el_v+0)=(*(el_comov+1))/(*(el_comov+0)); *(el_v+1)=(*(el_comov+2))/(*(el_comov+0)); *(el_v+2)=(*(el_comov+3))/(*(el_comov+0)); //printf("el_v: %e, %e, %e\n", *(el_v+0), *(el_v+1), *(el_v+2)); //lorentz boost into frame where the electron is stationary lorentzBoost(el_v, el_comov, el_p_prime, 'e', fPtr); lorentzBoost(el_v, ph_comov, ph_p_prime, 'p', fPtr); //printf("New ph_p in electron rest frame: %e, %e, %e,%e\n", *(ph_p_prime+0), *(ph_p_prime+1), *(ph_p_prime+2), *(ph_p_prime+3)); //rotate 'stokes plane' //if (STOKES_SWITCH != 0) #if STOKES_SWITCH == ON { stokesRotation(el_v, (ph_comov+1), (ph_p_prime+1), s, fPtr); stokes=gsl_vector_view_array(s, 4); } #endif //printf(fPtr, "y_tilde: %e, %e, %e\n", *(y_tilde+0), *(y_tilde+1), *(y_tilde+2)); ph_p=gsl_vector_view_array((ph_p_prime+1), 3); 
el_p=gsl_vector_view_array(el_p_prime,4); gsl_vector_set(ph_p_orig, 0, *(ph_p_prime+0)); gsl_vector_set(ph_p_orig, 1, *(ph_p_prime+1)); gsl_vector_set(ph_p_orig, 2, *(ph_p_prime+2)); gsl_vector_set(ph_p_orig, 3, *(ph_p_prime+3)); //gsl_blas_ddot(&y_tilde_rot.vector, &ph_p.vector, &dotprod_1); //fprintf(fPtr, "After lorentz boost Angle between the y_tilde_rot and the photon velocity vector is: %e\n", acos(dotprod_1/ gsl_blas_dnrm2(&ph_p.vector))*180/M_PI); phi0=atan2(*(ph_p_prime+2), *(ph_p_prime+1) ); //fprintf(fPtr,"Photon Phi: %e\n", phi0); //rotate the axes so that the photon incomes along the x-axis gsl_matrix_set(rot0, 2,2,1); gsl_matrix_set(rot0, 0,0,cos(-phi0)); gsl_matrix_set(rot0, 1,1,cos(-phi0)); gsl_matrix_set(rot0, 0,1,-sin(-phi0)); gsl_matrix_set(rot0, 1,0,sin(-phi0)); gsl_blas_dgemv(CblasNoTrans, 1, rot0, &ph_p.vector, 0, result0); //printf("Before Scatter rot0: stokes x=(%e, %e, %e) y=(%e, %e, %e)", gsl_vector_get(result0_x,0), gsl_vector_get(result0_x,1), gsl_vector_get(result0_x,2), gsl_vector_get(result0_y,0), gsl_vector_get(result0_y,1), gsl_vector_get(result0_y,2)); //fprintf(fPtr, "y_tilde: %e, %e, %e y_tilde_rot_result: %e, %e, %e\n", *(y_tilde+0), *(y_tilde+1), *(y_tilde+2), gsl_vector_get(y_tilde_rot_result,0), gsl_vector_get(y_tilde_rot_result,1), gsl_vector_get(y_tilde_rot_result,2)); /* printf("Rotation Matrix 0: %e,%e, %e\n", gsl_matrix_get(rot0, 0,0), gsl_matrix_get(rot0, 0,1), gsl_matrix_get(rot0, 0,2)); printf("Rotation Matrix 1: %e,%e, %e\n", gsl_matrix_get(rot0, 1,0), gsl_matrix_get(rot0, 1,1), gsl_matrix_get(rot0, 1,2)); printf("Rotation Matrix 2: %e,%e, %e\n", gsl_matrix_get(rot0, 2,0), gsl_matrix_get(rot0, 2,1), gsl_matrix_get(rot0, 2,2)); */ //set values of ph_p_prime equal to the result and get new phi from result *(ph_p_prime+1)=gsl_vector_get(result0,0); *(ph_p_prime+2)=0;//gsl_vector_get(result,1); //just directly setting it to 0 now? *(ph_p_prime+3)=gsl_vector_get(result0,2); phi1=atan2(gsl_vector_get(result0,2), gsl_vector_get(result0,0)); //printf("rotation 1: %e, %e, %e\n", *(ph_p_prime+1), *(ph_p_prime+2), *(ph_p_prime+3)); //fprintf(fPtr, "Photon Phi: %e\n", phi1); //printf("make sure the vector view is good: %e, %e, %e,%e\n", *(ph_p_prime+0), gsl_vector_get(&ph_p.vector,0), gsl_vector_get(&ph_p.vector,1), gsl_vector_get(&ph_p.vector,2)); //rotate around y to bring it all along x gsl_matrix_set(rot1, 1,1,1); gsl_matrix_set(rot1, 0,0,cos(-phi1)); gsl_matrix_set(rot1, 2,2,cos(-phi1)); gsl_matrix_set(rot1, 0,2,-sin(-phi1)); gsl_matrix_set(rot1, 2,0,sin(-phi1)); gsl_blas_dgemv(CblasNoTrans, 1, rot1, &ph_p.vector, 0, result1); //fprintf(fPtr, "y_tilde: %e, %e, %e y_tilde_rot vector view: %e, %e, %e\n", *(y_tilde+0), *(y_tilde+1), *(y_tilde+2), gsl_vector_get(&y_tilde_rot.vector,0), gsl_vector_get(&y_tilde_rot.vector,1), gsl_vector_get(&y_tilde_rot.vector,2)); /* printf("Rotation Matrix 0: %e,%e, %e\n", gsl_matrix_get(rot1, 0,0), gsl_matrix_get(rot1, 0,1), gsl_matrix_get(rot1, 0,2)); printf("Rotation Matrix 1: %e,%e, %e\n", gsl_matrix_get(rot1, 1,0), gsl_matrix_get(rot1, 1,1), gsl_matrix_get(rot1, 1,2)); printf("Rotation Matrix 2: %e,%e, %e\n", gsl_matrix_get(rot1, 2,0), gsl_matrix_get(rot1, 2,1), gsl_matrix_get(rot1, 2,2)); */ //set values of ph_p_prime equal to the result and get new phi from result *(ph_p_prime+1)=*(ph_p_prime+0);//why setting it to the energy? *(ph_p_prime+2)=gsl_vector_get(result1,1); *(ph_p_prime+3)=0; //just directly setting it to 0 now? 
//printf("rotation 2: %e, %e, %e, %e\n", *(ph_p_prime+0), *(ph_p_prime+1), *(ph_p_prime+2), *(ph_p_prime+3)); //know that the stokes y axis is in -y_hat direction and stokes x asis is in the z_hat direction due to rotations and making inclimg photn come along x_hat direction, dont need to rotate the stokes plane/vector. this happens as the rotations occur (tested in python code) //double checking here //printf("Before Scatter: stokes x=(%e, %e, %e) y=(%e, %e, %e) ph_p=(%e, %e, %e, %e)\n", gsl_vector_get(result1_x,0), gsl_vector_get(result1_x,1), gsl_vector_get(result1_x,2), gsl_vector_get(result1_y,0), gsl_vector_get(result1_y,1), gsl_vector_get(result1_y,2), *(ph_p_prime+0), *(ph_p_prime+1), *(ph_p_prime+2), *(ph_p_prime+3)); //determine if the scattering will occur between photon and electron //scattering_occured=comptonScatter(&theta, &phi, rand, fPtr); //determine the angles phi and theta for the photon to scatter into using thompson differential cross section scattering_occured=kleinNishinaScatter(&theta, &phi, *(ph_p_prime+0), *(s+1), *(s+2), rand, fPtr);//determine the angles phi and theta for the photon to scatter into using KN differential cross section, if the photon will end up scattering //fprintf(fPtr,"Phi: %e, Theta: %e\n", phi, theta); //theta=2.4475668271885342; //phi=4.014719957630734; //*(s+0)=1; //should be 1.0 //*(s+1)=1; //*(s+2)=0; //*(s+3)=0; if (scattering_occured==1) { //perform scattering and compute new 4-momenta of electron and photon //scattered photon 4 momentum gsl_vector_set(result, 0, (*(ph_p_prime+0))/(1+ (( (*(ph_p_prime+0))*(1-cos(theta)) )/(M_EL*C_LIGHT )) ) ); // scattered energy of photon gsl_vector_set(result, 1, gsl_vector_get(result,0)*cos(theta) ); gsl_vector_set(result, 2, gsl_vector_get(result,0)*sin(theta)*sin(phi) );//assume phi is clockwise from z to y gsl_vector_set(result, 3, gsl_vector_get(result,0)*sin(theta)*cos(phi) ); //fprintf(fPtr, "New ph_p0=%e Old= %e\n", gsl_vector_get(result,0), *(ph_p_prime+0)); //gsl_vector_fprintf(fPtr,result, "%e" ); //recalc x_tilde from rotation about y by angle theta do x_tilde=y_tilde X v_ph //test =gsl_vector_view_array(gsl_vector_ptr(result, 1), 3); //scatt_result is a dummy, dont need to change the stokes parameters here, just need to find the axis such that y is out of the plane of k_o-k see Ito figure 12 in polarized emission from stratisfied jets //gsl_blas_ddot(&y_tilde_rot.vector, &test.vector, &dotprod_1); //fprintf(fPtr, "Angle between the y_tilde_rot and the photon velocity vector is: %e\n", acos(dotprod_1/ gsl_blas_dnrm2(&test.vector))*180/M_PI); //gsl_vector_fprintf(fPtr,&y_tilde_rot.vector, "%e" ); //gsl_vector_fprintf(fPtr,&x_tilde_rot.vector, "%e" ); //exit(0); //calculate electron 4 momentum //prescattered photon 4 momentum gsl_vector_set(whole_ph_p, 0, (*(ph_p_prime+0))); gsl_vector_set(whole_ph_p, 1, (*(ph_p_prime+1))); gsl_vector_set(whole_ph_p, 2, (*(ph_p_prime+2))); gsl_vector_set(whole_ph_p, 3, (*(ph_p_prime+3))); gsl_vector_sub(whole_ph_p,result); //resut is saved into ph_p vector, unscattered-scattered 4 mometum of photon gsl_vector_add(&el_p.vector ,whole_ph_p); /* printf("After scattering:\n"); printf("el_p: %e, %e, %e,%e\n", gsl_vector_get(&el_p.vector,0), gsl_vector_get(&el_p.vector,1), gsl_vector_get(&el_p.vector,2), gsl_vector_get(&el_p.vector,3)); printf("ph_p: %e, %e, %e,%e\n", gsl_vector_get(result,0), gsl_vector_get(result,1), gsl_vector_get(result,2), gsl_vector_get(result,3)); */ //rotate back to comoving frame *(ph_p_prime+0)=gsl_vector_get(result,0); 
*(ph_p_prime+1)=gsl_vector_get(result,1); //set values of photon prime momentum from doing the scattering to use the vector view of it in dot product *(ph_p_prime+2)=gsl_vector_get(result,2); *(ph_p_prime+3)=gsl_vector_get(result,3); gsl_matrix_set_all(rot1,0); gsl_matrix_set(rot1, 1,1,1); gsl_matrix_set(rot1, 0,0,cos(-phi1)); gsl_matrix_set(rot1, 2,2,cos(-phi1)); gsl_matrix_set(rot1, 0,2,sin(-phi1)); gsl_matrix_set(rot1, 2,0,-sin(-phi1)); gsl_blas_dgemv(CblasNoTrans, 1, rot1, &ph_p.vector, 0, result1); /* printf("Photon Phi: %e\n", phi1); printf("Rotation Matrix 0: %e,%e, %e\n", gsl_matrix_get(rot1, 0,0), gsl_matrix_get(rot1, 0,1), gsl_matrix_get(rot1, 0,2)); printf("Rotation Matrix 1: %e,%e, %e\n", gsl_matrix_get(rot1, 1,0), gsl_matrix_get(rot1, 1,1), gsl_matrix_get(rot1, 1,2)); printf("Rotation Matrix 2: %e,%e, %e\n", gsl_matrix_get(rot1, 2,0), gsl_matrix_get(rot1, 2,1), gsl_matrix_get(rot1, 2,2)); */ //set values of ph_p_prime to result1 from undoing 2nd rotation *(ph_p_prime+1)=gsl_vector_get(result1,0); *(ph_p_prime+2)=gsl_vector_get(result1,1); *(ph_p_prime+3)=gsl_vector_get(result1,2); //printf("Undo rotation 2: %e, %e, %e, %e\n", *(ph_p_prime+0), *(ph_p_prime+1), *(ph_p_prime+2), *(ph_p_prime+3)); //ignore the electron, dont care about it, undo the first rotation gsl_matrix_set_all(rot0,0); gsl_matrix_set(rot0, 2,2,1); gsl_matrix_set(rot0, 0,0,cos(-phi0)); gsl_matrix_set(rot0, 1,1,cos(-phi0)); gsl_matrix_set(rot0, 0,1,sin(-phi0)); gsl_matrix_set(rot0, 1,0,-sin(-phi0)); gsl_blas_dgemv(CblasNoTrans, 1, rot0, &ph_p.vector, 0, result0); /* printf("Photon Phi: %e\n", phi0); printf("Rotation Matrix 0: %e,%e, %e\n", gsl_matrix_get(rot0, 0,0), gsl_matrix_get(rot0, 0,1), gsl_matrix_get(rot0, 0,2)); printf("Rotation Matrix 1: %e,%e, %e\n", gsl_matrix_get(rot0, 1,0), gsl_matrix_get(rot0, 1,1), gsl_matrix_get(rot0, 1,2)); printf("Rotation Matrix 2: %e,%e, %e\n", gsl_matrix_get(rot0, 2,0), gsl_matrix_get(rot0, 2,1), gsl_matrix_get(rot0, 2,2)); */ //do the scattering of the stokes vector //rotate it by phi and then scatter it and rotate back and then renormalize it such that i=1 //if (STOKES_SWITCH != 0) #if STOKES_SWITCH == ON { //orient the stokes coordinate system such that its perpendicular to the scattering plane findXY(gsl_vector_ptr(ph_p_orig, 1),z_axis_electron_rest_frame, x_tilde, y_tilde); findXY(gsl_vector_ptr(result0,0),gsl_vector_ptr(ph_p_orig, 1), x_tilde_new, y_tilde_new); phi=findPhi(x_tilde, y_tilde, x_tilde_new, y_tilde_new); mullerMatrixRotation(phi, s, fPtr); //find the theta between the incoming and scattered photons, by doing dot product and taking arccos of it theta=acos((gsl_vector_get(ph_p_orig,1)*gsl_vector_get(result0,0)+gsl_vector_get(ph_p_orig,2)*gsl_vector_get(result0,1)+gsl_vector_get(ph_p_orig,3)*gsl_vector_get(result0,2) )/(gsl_vector_get(ph_p_orig,0)*(*(ph_p_prime+0))) ); //do the scattering of the stokes parameters gsl_matrix_set(scatt, 0,0,1.0+pow(cos(theta), 2.0)+((1-cos(theta))*(gsl_vector_get(ph_p_orig,0) - gsl_vector_get(result,0))/(M_EL*C_LIGHT ) ) ); //following lundman's matrix gsl_matrix_set(scatt, 0,1, sin(theta)*sin(theta)); gsl_matrix_set(scatt, 1,0, sin(theta)*sin(theta)); gsl_matrix_set(scatt, 1,1,1.0+cos(theta)*cos(theta)); gsl_matrix_set(scatt, 2,2, 2.0*cos(theta)); gsl_matrix_set(scatt, 3,3, 2.0*cos(theta)+ ((cos(theta))*(1-cos(theta))*(gsl_vector_get(ph_p_orig,0) - gsl_vector_get(result,0))/(M_EL*C_LIGHT )) ); //gsl_matrix_scale(scatt, 
(gsl_vector_get(result,0)/(*(ph_p_prime+0)))*((gsl_vector_get(result,0)/(*(ph_p_prime+0))))*0.5*3*THOM_X_SECT/(8*M_PI) ); //scale the matrix by 0.5*r_0^2 (\epsilon/\epsilon_0)^2 DONT NEED THIS BECAUSE WE NORMALIZE STOKES VECTOR SO THIS CANCELS ITSELF OUT gsl_blas_dgemv(CblasNoTrans, 1, scatt, &stokes.vector, 0, scatt_result); /* fprintf(fPtr,"before s: %e, %e, %e,%e\n", gsl_vector_get(&stokes.vector,0), gsl_vector_get(&stokes.vector,1), gsl_vector_get(&stokes.vector,2), gsl_vector_get(&stokes.vector,3)); fprintf(fPtr,"Scatt Matrix 0: %e,%e, %e, %e\n", gsl_matrix_get(scatt, 0,0), gsl_matrix_get(scatt, 0,1), gsl_matrix_get(scatt, 0,2), gsl_matrix_get(scatt, 0,3)); fprintf(fPtr,"Scatt Matrix 1: %e,%e, %e, %e\n", gsl_matrix_get(scatt, 1,0), gsl_matrix_get(scatt, 1,1), gsl_matrix_get(scatt, 1,2), gsl_matrix_get(scatt, 1,3)); fprintf(fPtr,"Scatt Matrix 2: %e,%e, %e, %e\n", gsl_matrix_get(scatt, 2,0), gsl_matrix_get(scatt, 2,1), gsl_matrix_get(scatt, 2,2), gsl_matrix_get(scatt, 2,3)); fprintf(fPtr,"Scatt Matrix 3: %e,%e, %e, %e\n", gsl_matrix_get(scatt, 3,0), gsl_matrix_get(scatt, 3,1), gsl_matrix_get(scatt, 3,2), gsl_matrix_get(scatt, 3,3)); fprintf(fPtr,"s: %e, %e, %e,%e\n", gsl_vector_get(scatt_result,0), gsl_vector_get(scatt_result,1), gsl_vector_get(scatt_result,2), gsl_vector_get(scatt_result,3)); */ //normalize and rotate back *(s+0)=gsl_vector_get(scatt_result,0)/gsl_vector_get(scatt_result,0); //should be 1.0 *(s+1)=gsl_vector_get(scatt_result,1)/gsl_vector_get(scatt_result,0); *(s+2)=gsl_vector_get(scatt_result,2)/gsl_vector_get(scatt_result,0); *(s+3)=gsl_vector_get(scatt_result,3)/gsl_vector_get(scatt_result,0); //fprintf(fPtr,"s after norm: %e, %e, %e,%e\n", gsl_vector_get(&stokes.vector,0), gsl_vector_get(&stokes.vector,1), gsl_vector_get(&stokes.vector,2), gsl_vector_get(&stokes.vector,3)); //need to find current stokes coordinate system defined in the plane of k-k_0 findXY(gsl_vector_ptr(result0,0),gsl_vector_ptr(ph_p_orig, 1), x_tilde, y_tilde); //then find the new coordinate system between scattered photon 4 onetum and the z axis findXY(gsl_vector_ptr(result0,0),z_axis_electron_rest_frame, x_tilde_new, y_tilde_new); //find phi to transform between the two coodinate systems phi=findPhi(x_tilde, y_tilde, x_tilde_new, y_tilde_new); //do the rotation mullerMatrixRotation(phi, s, fPtr); } #endif //now update the array with the new scattered photon 4 monetum *(ph_p_prime+1)=gsl_vector_get(result0,0); *(ph_p_prime+2)=gsl_vector_get(result0,1); *(ph_p_prime+3)=gsl_vector_get(result0,2); //gsl_blas_ddot(&y_tilde_rot.vector, &ph_p.vector, &dotprod_1); //fprintf(fPtr, "Angle between the y_tilde_rot and the photon velocity vector is: %e\n", acos(dotprod_1/ gsl_blas_dnrm2(&ph_p.vector))*180/M_PI); //printf("Undo rotation 1: %e, %e, %e, %e\n", *(ph_p_prime+0), *(ph_p_prime+1), *(ph_p_prime+2), *(ph_p_prime+3)); //deboost photon to lab frame *(negative_el_v+0)=(-1*(*(el_v+0))); *(negative_el_v+1)=(-1*(*(el_v+1))); *(negative_el_v+2)=(-1*(*(el_v+2))); lorentzBoost(negative_el_v, ph_p_prime, ph_comov, 'p', fPtr); //printf("Undo boost 1: %e, %e, %e, %e\n", *(ph_comov+0), *(ph_comov+1), *(ph_comov+2), *(ph_comov+3)); //dont need to find stokes vector and do previosu rotations, can just find the stokes coordinates in function because the stokes coordinate vectors rotate with the photon vector and no rotations to a new stokes coordinate system are needed //if (STOKES_SWITCH != 0) #if STOKES_SWITCH == ON { stokesRotation(negative_el_v, (ph_p_prime+1), (ph_comov+1), s, fPtr); } #endif //exit(0); } 
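/* the GSL matrices and vectors are freed below whether or not a scattering occurred; the return value tells photonEvent() whether the photon 4-momentum was actually updated */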
gsl_matrix_free(rot0); gsl_matrix_free(rot1); gsl_matrix_free(scatt);
    gsl_vector_free(result0); gsl_vector_free(result1); gsl_vector_free(result);
    gsl_vector_free(scatt_result); gsl_vector_free(ph_p_orig); gsl_vector_free(whole_ph_p);
    free(ph_p_prime); free(el_p_prime); free(el_v); free(negative_el_v); free(z_axis_electron_rest_frame);
    return scattering_occured;
}

int comptonScatter(double *theta, double *phi, gsl_rng * rand, FILE *fPtr)
{
    double y_dum, f_x_dum, x_dum;

    //generate random theta and phi angles for the scattering
    *phi=gsl_rng_uniform(rand)*2*M_PI;
    //printf("Phi: %e\n", phi);

    y_dum=1; //initialize the loop to get a random theta
    f_x_dum=0;
    while (y_dum>f_x_dum)
    {
        y_dum=gsl_rng_uniform(rand)*1.09;
        x_dum=gsl_rng_uniform(rand)*M_PI;
        f_x_dum=sin(x_dum)*(1+pow(cos(x_dum),2));
    }
    *theta=x_dum;

    return 1;
}

int kleinNishinaScatter(double *theta, double *phi, double p0, double q, double u, gsl_rng * rand, FILE *fPtr)
{
    //sample theta using: https://doi.org/10.13182/NSE11-57
    double phi_dum=0, cos_theta_dum=0, f_phi_dum=0, f_cos_theta_dum=0, f_theta_dum=0, phi_y_dum=0, cos_theta_y_dum=0, KN_x_section_over_thomson_x_section=0, rand_num=0;
    double mu=0, phi_norm=0, phi_max=0, norm=0;
    int will_scatter=0;
    double energy_ratio= p0/(M_EL*C_LIGHT ); //h*nu / (m_e*c^2); the units of p0 are erg/c

    //determine the KN cross section over the Thomson cross section, from RYBICKI AND LIGHTMAN pg 197
    KN_x_section_over_thomson_x_section= (3.0/4.0)*( ( ((1+energy_ratio)/ pow(energy_ratio,3.0))*(((2*energy_ratio)*(1+energy_ratio)/(1+2*energy_ratio)) - log(1+2*energy_ratio))) + (log(1+2*energy_ratio)/(2*energy_ratio)) - ((1+3*energy_ratio)/pow((1+2*energy_ratio),2.0)) );

    rand_num=gsl_rng_uniform(rand);

    if ((rand_num<= KN_x_section_over_thomson_x_section) || (p0 < 1e-2*(M_EL*C_LIGHT ) ))
    {
        //the last condition is included so low energy seed photons can scatter (as they should under Thomson scattering); calculating KN_x_section_over_thomson_x_section incurs numerical error at very low frequencies
        //fprintf(fPtr,"In If!\n");
        //fflush(fPtr);

        //sample a theta and phi from the differential cross sections
        phi_y_dum=1; //initialize the loop to get a random phi and theta
        cos_theta_y_dum=1;
        f_cos_theta_dum=0;
        f_phi_dum=0;
        while ((cos_theta_y_dum>f_cos_theta_dum))
        {
            //do phi and theta separately, sampling theta using: https://doi.org/10.13182/NSE11-57
            cos_theta_y_dum=gsl_rng_uniform(rand)*2;
            cos_theta_dum=gsl_rng_uniform(rand)*2-1;
            f_cos_theta_dum=pow((1+energy_ratio*(1-cos_theta_dum)),-2)*(energy_ratio*(1-cos_theta_dum)+(1/(1+energy_ratio*(1-cos_theta_dum))) + cos_theta_dum*cos_theta_dum);
        }
        *theta=acos(cos_theta_dum);

        mu=1+energy_ratio*(1-cos(*theta));
        f_theta_dum=(pow(mu, -1.0) + pow(mu, -3.0) - pow(mu, -2.0)*pow(sin(*theta), 2.0))*sin(*theta);

        while ((phi_y_dum>f_phi_dum) )
        {
            #if STOKES_SWITCH == OFF
            {
                //not considering polarization, therefore we can just sample phi uniformly between 0 and 2*pi
                phi_dum=gsl_rng_uniform(rand)*2*M_PI;
                phi_y_dum=-1; //this is to exit the while statement
                //fprintf(fPtr," phi_dum: %e\n", phi_dum);
                //fflush(fPtr);
            }
            #else
            {
                if (u==0 && q==0)
                {
                    phi_dum=gsl_rng_uniform(rand)*2*M_PI;
                    phi_y_dum=-1; //this is to exit the while statement
                }
                else
                {
                    //if we are considering polarization, calculate the norm for the distribution so it lies between 0 and 1
                    phi_max=fabs(atan2(-u,q))/2.0; //fabs, since abs() would truncate the double to an int
                    norm=(f_theta_dum + pow(mu, -2.0)*pow(sin(*theta), 3.0) * (q*cos(2*phi_max)-u*sin(2*phi_max)));
                    //fprintf(fPtr,"norm: %e\n", norm);
                    //fflush(fPtr);
                    phi_y_dum=gsl_rng_uniform(rand);
                    phi_dum=gsl_rng_uniform(rand)*2*M_PI;
                    f_phi_dum=(f_theta_dum + pow(mu, -2.0)*pow(sin(*theta), 3.0)
* (q*cos(2*phi_dum)-u*sin(2*phi_dum)))/norm; //signs on q and u based on Lundman/ McMaster //fprintf(fPtr,"phi_y_dum: %e, theta_dum: %e, mu: %e, f_theta_dum: %e, phi_dum: %e, f_phi_dum: %e, u: %e, q: %e\n", phi_y_dum, theta_dum, mu, f_theta_dum, phi_dum, f_phi_dum, u, q); //fflush(fPtr); } } #endif } *phi=phi_dum; will_scatter=1; } else { will_scatter=0; } return will_scatter; } double averagePhotonEnergy(struct photon *ph, int num_ph) { //to calculate weighted photon energy in ergs int i=0; #if defined(_OPENMP) int num_thread=omp_get_num_threads(); #endif double e_sum=0, w_sum=0; #pragma omp parallel for reduction(+:e_sum) reduction(+:w_sum) for (i=0;i<num_ph;i++) { #if SYNCHROTRON_SWITCH == ON if (((ph+i)->weight != 0)) //dont want account for null or absorbed OLD_COMPTONIZED_PHOTON photons #endif { e_sum+=(((ph+i)->p0)*((ph+i)->weight)); w_sum+=((ph+i)->weight); } } return (e_sum*C_LIGHT)/w_sum; } void phScattStats(struct photon *ph, int ph_num, int *max, int *min, double *avg, double *r_avg, FILE *fPtr ) { int temp_max=0, temp_min=INT_MAX, i=0, count=0, count_synch=0, count_comp=0, count_i=0; #if defined(_OPENMP) int num_thread=omp_get_num_threads(); #endif double sum=0, avg_r_sum=0, avg_r_sum_synch=0, avg_r_sum_comp=0, avg_r_sum_inject=0; //printf("Num threads: %d", num_thread); #pragma omp parallel for num_threads(num_thread) reduction(min:temp_min) reduction(max:temp_max) reduction(+:sum) reduction(+:avg_r_sum) reduction(+:count) for (i=0;i<ph_num;i++) { #if SYNCHROTRON_SWITCH == ON if (((ph+i)->weight != 0)) //dont want account for null or absorbed OLD_COMPTONIZED_PHOTON photons #endif { sum+=((ph+i)->num_scatt); avg_r_sum+=pow(((ph+i)->r0)*((ph+i)->r0) + ((ph+i)->r1)*((ph+i)->r1) + ((ph+i)->r2)*((ph+i)->r2), 0.5); //printf("%d %c %e %e %e %e %e %e\n", i, (ph+i)->type, (ph+i)->p0, (ph+i)->comv_p0, (ph+i)->r0, (ph+i)->r1, (ph+i)->r2, (ph+i)->num_scatt); if (((ph+i)->num_scatt) > temp_max ) { temp_max=((ph+i)->num_scatt); //printf("The new max is: %d\n", temp_max); } //if ((i==0) || (((ph+i)->num_scatt)<temp_min)) if (((ph+i)->num_scatt)<temp_min) { temp_min=((ph+i)->num_scatt); //printf("The new min is: %d\n", temp_min); } if (((ph+i)->type) == INJECTED_PHOTON ) { avg_r_sum_inject+=pow(((ph+i)->r0)*((ph+i)->r0) + ((ph+i)->r1)*((ph+i)->r1) + ((ph+i)->r2)*((ph+i)->r2), 0.5); count_i++; } if ((((ph+i)->type) == COMPTONIZED_PHOTON) || (((ph+i)->type) == OLD_COMPTONIZED_PHOTON)) { avg_r_sum_comp+=pow(((ph+i)->r0)*((ph+i)->r0) + ((ph+i)->r1)*((ph+i)->r1) + ((ph+i)->r2)*((ph+i)->r2), 0.5); count_comp++; } count++; } if (((ph+i)->type) == SYNCHROTRON_POOL_PHOTON ) { avg_r_sum_synch+=pow(((ph+i)->r0)*((ph+i)->r0) + ((ph+i)->r1)*((ph+i)->r1) + ((ph+i)->r2)*((ph+i)->r2), 0.5); count_synch++; } } fprintf(fPtr, "In this frame Avg r for i type: %e c and o type: %e and s type: %e\n", avg_r_sum_inject/count_i, avg_r_sum_comp/count_comp, avg_r_sum_synch/count_synch); fflush(fPtr); //exit(0); *avg=sum/count; *r_avg=avg_r_sum/count; *max=temp_max; *min=temp_min; } void cylindricalPrep(double *gamma, double *vx, double *vy, double *dens, double *dens_lab, double *pres, double *temp, int num_array) { double gamma_infinity=100, t_comov=1*pow(10, 5), ddensity=3e-7;// the comoving temperature in Kelvin, and the comoving density in g/cm^2 int i=0; double vel=pow(1-pow(gamma_infinity, -2.0) ,0.5), lab_dens=gamma_infinity*ddensity; for (i=0; i<num_array;i++) { *(gamma+i)=gamma_infinity; *(vx+i)=0; *(vy+i)=vel; *(dens+i)=ddensity; *(dens_lab+i)=lab_dens; *(pres+i)=(A_RAD*pow(t_comov, 4.0))/(3); 
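/* radiation-dominated equation of state: pres = a*T^4/3, so the inversion below, T = (3*pres/a)^(1/4), just recovers t_comov */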
*(temp+i)=pow(3*(*(pres+i))/(A_RAD) ,1.0/4.0); //just assign t_comov } } void sphericalPrep(double *r, double *x, double *y, double *gamma, double *vx, double *vy, double *dens, double *dens_lab, double *pres, double *temp, int num_array, FILE *fPtr) { double gamma_infinity=100, lumi=1e52, r00=1e8; //shopuld be 10^57 //double gamma_infinity=5, lumi=1e52, r00=1e8; //shopuld be 10^57 double vel=0; int i=0; for (i=0;i<num_array;i++) { if ((*(r+i)) >= (r00*gamma_infinity)) { *(gamma+i)=gamma_infinity; *(pres+i)=(lumi*pow(r00, 2.0/3.0)*pow(*(r+i), -8.0/3.0) )/(12.0*M_PI*C_LIGHT*pow(gamma_infinity, 4.0/3.0)); } else { *(gamma+i)=(*(r+i))/r00; *(pres+i)=(lumi*pow(r00, 2.0))/(12.0*M_PI*C_LIGHT*pow(*(r+i), 4.0) ); } vel=pow(1-(pow(*(gamma+i), -2.0)) ,0.5); *(vx+i)=(vel*(*(x+i)))/pow(pow(*(x+i), 2)+ pow(*(y+i), 2) ,0.5); *(vy+i)=(vel*(*(y+i)))/pow(pow(*(x+i), 2)+ pow(*(y+i), 2) ,0.5); *(dens+i)=lumi/(4*M_PI*pow(*(r+i), 2.0)*pow(C_LIGHT, 3.0)*gamma_infinity*(*(gamma+i))); *(dens_lab+i)=(*(dens+i))*(*(gamma+i)); *(temp+i)=pow(3*(*(pres+i))/(A_RAD) ,1.0/4.0); //fprintf(fPtr,"Gamma: %lf\nR: %lf\nPres: %e\nvel %lf\nX: %lf\nY %lf\nVx: %lf\nVy: %lf\nDens: %e\nLab_Dens: %e\nTemp: %lf\n", *(gamma+i), *(r+i), *(pres+i), vel, *(x+i), *(y+i), *(vx+i), *(vy+i), *(dens+i), *(dens_lab+i), *(temp+i)); } } void structuredFireballPrep(double *r, double *theta, double *x, double *y, double *gamma, double *vx, double *vy, double *dens, double *dens_lab, double *pres, double *temp, int num_array, FILE *fPtr) { //This model is provided by Lundman, Peer, Ryde 2014, use this to compare our MCRaT polarization to their polarizations double gamma_0=100, lumi=1e52, r00=1e8, theta_j=1e-2, p=4; //theta_j in paper is 1e-2, 3e-2, 1e-1 and p is 1,2,4 double T_0=pow(lumi/(4*M_PI*r00*r00*A_RAD*C_LIGHT), 1.0/4.0); double eta=0, r_sat=0; double vel=0, theta_ratio=0; int i=0; for (i=0;i<num_array;i++) { theta_ratio=(*(theta+i))/theta_j; eta=gamma_0*pow(1+pow(theta_ratio, 2*p) , -0.5); if (*(theta+i) >= theta_j*pow(gamma_0/2, 1.0/p)) { //*(gamma+i)=2; //outside with of shear layer have gamma be 2 like in paper eta=2.0; } r_sat=eta*r00; if ((*(r+i)) >= r_sat) { *(gamma+i)=eta; *(temp+i)=T_0*pow(r_sat/(*(r+i)), 2.0/3.0)/eta; } else { *(gamma+i)=(*(r+i))/r_sat; //not sure if this is right but it shouldn't matter since we're injecting our photons far from r00 *(temp+i)=T_0; } vel=pow(1-(pow(*(gamma+i), -2.0)) ,0.5); *(vx+i)=(vel*(*(x+i)))/pow(pow(*(x+i), 2)+ pow(*(y+i), 2) ,0.5); *(vy+i)=(vel*(*(y+i)))/pow(pow(*(x+i), 2)+ pow(*(y+i), 2) ,0.5); *(dens+i)=M_P*lumi/(4*M_PI*M_P*C_LIGHT*C_LIGHT*C_LIGHT*eta*vel*(*(gamma+i))*(*(r+i))*(*(r+i))); //equation paper has extra c, but then units dont work out *(dens_lab+i)=(*(dens+i))*(*(gamma+i)); *(pres+i)=(A_RAD*pow(*(temp+i), 4.0))/(3); //fprintf(fPtr,"eta: %lf\nr_sat: %lf\nGamma: %lf\nR: %lf\nTheta: %lf\nPres: %e\nvel %lf\nX: %lf\nY %lf\nVx: %lf\nVy: %lf\nDens: %e\nLab_Dens: %e\nTemp: %lf\n\n", eta, r_sat, *(gamma+i), *(r+i), (*(theta+i)), *(pres+i), vel, *(x+i), *(y+i), *(vx+i), *(vy+i), *(dens+i), *(dens_lab+i), *(temp+i)); } } void dirFileMerge(char dir[200], int start_frame, int last_frame, int numprocs, int angle_id, FILE *fPtr ) { //function to merge files in mcdir produced by various threads double *p0=NULL, *p1=NULL, *p2=NULL, *p3=NULL, *comv_p0=NULL, *comv_p1=NULL, *comv_p2=NULL, *comv_p3=NULL, *r0=NULL, *r1=NULL, *r2=NULL, *s0=NULL, *s1=NULL, *s2=NULL, *s3=NULL, *num_scatt=NULL, *weight=NULL; int i=0, j=0, k=0, isNotCorrupted=0, num_types=9; //just save lab 4 momentum, position and 
num_scatt by default int increment=1; char filename_k[2000]="", file_no_thread_num[2000]="", cmd[2000]="", mcdata_type[20]=""; char group[200]="", *ph_type=NULL; hid_t file, file_new, group_id, dspace; hsize_t dims[1]={0}; herr_t status, status_group; hid_t dset_p0, dset_p1, dset_p2, dset_p3, dset_comv_p0, dset_comv_p1, dset_comv_p2, dset_comv_p3, dset_r0, dset_r1, dset_r2, dset_s0, dset_s1, dset_s2, dset_s3, dset_num_scatt, dset_weight, dset_weight_frame, dset_ph_type; //printf("Merging files in %s\n", dir); //#pragma omp parallel for num_threads(num_thread) firstprivate( filename_k, file_no_thread_num, cmd,mcdata_type,num_files, increment ) private(i,j,k) // i < last frame because calculation before this function gives last_frame as the first frame of the next process set of frames to merge files for #if COMV_SWITCH == ON && STOKES_SWITCH == ON { num_types=17;//both switches on, want to save comv and stokes } #elif COMV_SWITCH == ON || STOKES_SWITCH == ON { num_types=13;//either switch acivated, just subtract 4 datasets } #else { num_types=9;//just save lab 4 momentum, position and num_scatt } #endif #if SAVE_TYPE == ON { num_types+=1; } #endif for (i=start_frame;i<last_frame;i=i+increment) { fprintf(fPtr, "Merging files for frame: %d\n", i); fflush(fPtr); #if SIM_SWITCH == RIKEN && DIMENSIONS == 3 if (i>=3000) { increment=10; //when the frame ==3000 for RIKEN 3D hydro files, increment file numbers by 10 instead of by 1 } #endif j=0; for (k=0;k<numprocs;k++) { //for each process' file, find out how many elements and add up to find total number of elements needed in the data set for the frame number snprintf(filename_k,sizeof(filename_k),"%s%s%d%s",dir,"mc_proc_", k, ".h5" ); //open the file file=H5Fopen(filename_k, H5F_ACC_RDONLY, H5P_DEFAULT); //see if the frame exists snprintf(group,sizeof(group),"%d",i ); status = H5Eset_auto(NULL, NULL, NULL); status_group = H5Gget_objinfo (file, group, 0, NULL); status = H5Eset_auto(H5E_DEFAULT, H5Eprint2, stderr); //if it does open it and read in the size if (status_group == 0) { //open the datatset group_id = H5Gopen2(file, group, H5P_DEFAULT); dset_p0 = H5Dopen (group_id, "P0", H5P_DEFAULT); //open dataset //get the number of points dspace = H5Dget_space (dset_p0); status=H5Sget_simple_extent_dims(dspace, dims, NULL); //save dimesnions in dims j+=dims[0];//calculate the total number of photons to save to new hdf5 file status = H5Sclose (dspace); status = H5Dclose (dset_p0); status = H5Gclose(group_id); } status = H5Fclose(file); } //for continuing if the simulation gets stopped, check to see if the new file exists and if the information is correct //if the information is incorrect, create file by overwriting it, otherwise dont need to do anything snprintf(file_no_thread_num,sizeof(file_no_thread_num),"%s%s%d%s",dir,"mcdata_", i, ".h5" ); status = H5Eset_auto(NULL, NULL, NULL); //turn off automatic error printing file_new=H5Fcreate(file_no_thread_num, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT); //see if the file initially does/doesnt exist status = H5Eset_auto(H5E_DEFAULT, H5Eprint2, stderr); //turn on auto error printing if (file_new<0) { //fprintf(fPtr, "Checking File %s\n",file_no_thread_num ); //fflush(fPtr); //the file exists, open it with read write file_new=H5Fopen(file_no_thread_num, H5F_ACC_RDWR, H5P_DEFAULT); for (k=0;k<num_types;k++) { #if COMV_SWITCH == ON && STOKES_SWITCH == ON { switch (k) { case 0: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "P0"); break; case 1: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "P1");break; case 2: 
snprintf(mcdata_type,sizeof(mcdata_type), "%s", "P2"); break; case 3: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "P3"); break; case 4: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "COMV_P0"); break; case 5: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "COMV_P1");break; case 6: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "COMV_P2"); break; case 7: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "COMV_P3"); break; case 8: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "R0"); break; case 9: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "R1"); break; case 10: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "R2"); break; case 11: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "S0"); break; case 12: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "S1");break; case 13: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "S2"); break; case 14: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "S3"); break; case 15: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "NS"); break; case 16: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "PW"); break; #if SAVE_TYPES == ON { case 17: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "PT"); break; } #endif } } #elif STOKES_SWITCH == ON && COMV_SWITCH == OFF { switch (k) { case 0: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "P0"); break; case 1: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "P1");break; case 2: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "P2"); break; case 3: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "P3"); break; case 4: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "R0"); break; case 5: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "R1"); break; case 6: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "R2"); break; case 7: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "S0"); break; case 8: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "S1");break; case 9: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "S2"); break; case 10: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "S3"); break; case 11: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "NS"); break; case 12: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "PW"); break; #if SAVE_TYPES == ON { case 13: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "PT"); break; } #endif } } #elif STOKES_SWITCH == OFF && COMV_SWITCH == ON { switch (k) { case 0: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "P0"); break; case 1: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "P1");break; case 2: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "P2"); break; case 3: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "P3"); break; case 4: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "COMV_P0"); break; case 5: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "COMV_P1");break; case 6: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "COMV_P2"); break; case 7: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "COMV_P3"); break; case 8: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "R0"); break; case 9: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "R1"); break; case 10: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "R2"); break; case 11: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "NS"); break; case 12: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "PW"); break; #if SAVE_TYPES == ON { case 13: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "PT"); break; } #endif } } #else { switch (k) { case 0: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "P0"); break; case 1: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "P1");break; case 2: 
snprintf(mcdata_type,sizeof(mcdata_type), "%s", "P2"); break; case 3: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "P3"); break; case 4: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "R0"); break; case 5: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "R1"); break; case 6: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "R2"); break; case 7: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "NS"); break; case 8: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "PW"); break; #if SAVE_TYPES == ON { case 9: snprintf(mcdata_type,sizeof(mcdata_type), "%s", "PT"); break; } #endif } } #endif //open the datatset dset_p0 = H5Dopen (file_new, mcdata_type, H5P_DEFAULT); //open dataset //get the number of points dspace = H5Dget_space (dset_p0); status=H5Sget_simple_extent_dims(dspace, dims, NULL); //save dimesnions in dims //fprintf(fPtr, "j:%d, dim: %d\n",j, dims[0] ); //fflush(fPtr); isNotCorrupted += fmod(dims[0], j); //if the dimension is the dame then the fmod ==0 (remainder of 0), if all datatsets are ==0 then you get a truth value of 0 meaning that it isnt corrupted status = H5Sclose (dspace); status = H5Dclose (dset_p0); } status = H5Fclose(file_new); file_new=-1; //do this so if the file exists it doesnt go into the rewriting portion if the file does exist } //fprintf(fPtr, "file %s has isNotCorrupted=%d\n", file_no_thread_num, isNotCorrupted ); //fflush(fPtr); //if the new file doesnt have the dimensions that it should, open it and write over the file, or if the file doesnt exist if ((file_new>=0) || (isNotCorrupted != 0 )) { //fprintf(fPtr, "In IF\n" ); //fflush(fPtr); if (isNotCorrupted != 0) { //if the data is corrupted overwrite the file file_new = H5Fcreate (file_no_thread_num, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); } //now allocate enough ememory for j number of points p0=malloc(j*sizeof(double)); p1=malloc(j*sizeof(double)); p2=malloc(j*sizeof(double)); p3=malloc(j*sizeof(double)); comv_p0=malloc(j*sizeof(double)); comv_p1=malloc(j*sizeof(double)); comv_p2=malloc(j*sizeof(double)); comv_p3=malloc(j*sizeof(double)); r0=malloc(j*sizeof(double)); r1=malloc(j*sizeof(double)); r2=malloc(j*sizeof(double)); s0=malloc(j*sizeof(double)); s1=malloc(j*sizeof(double)); s2=malloc(j*sizeof(double)); s3=malloc(j*sizeof(double)); num_scatt=malloc(j*sizeof(double)); weight=malloc(j*sizeof(double)); ph_type=malloc((j)*sizeof(char)); j=0; for (k=0;k<numprocs;k++) { //for each process open and read the contents of the dataset snprintf(filename_k,sizeof(filename_k),"%s%s%d%s",dir,"mc_proc_", k, ".h5" ); file=H5Fopen(filename_k, H5F_ACC_RDONLY, H5P_DEFAULT); snprintf(group,sizeof(group),"%d",i ); status = H5Eset_auto(NULL, NULL, NULL); status_group = H5Gget_objinfo (file, group, 0, NULL); status = H5Eset_auto(H5E_DEFAULT, H5Eprint2, stderr); if (status_group == 0) { //open the datatset group_id = H5Gopen2(file, group, H5P_DEFAULT); dset_p0 = H5Dopen (group_id, "P0", H5P_DEFAULT); //open dataset dset_p1 = H5Dopen (group_id, "P1", H5P_DEFAULT); dset_p2 = H5Dopen (group_id, "P2", H5P_DEFAULT); dset_p3 = H5Dopen (group_id, "P3", H5P_DEFAULT); #if COMV_SWITCH == ON { dset_comv_p0 = H5Dopen (group_id, "COMV_P0", H5P_DEFAULT); //open dataset dset_comv_p1 = H5Dopen (group_id, "COMV_P1", H5P_DEFAULT); dset_comv_p2 = H5Dopen (group_id, "COMV_P2", H5P_DEFAULT); dset_comv_p3 = H5Dopen (group_id, "COMV_P3", H5P_DEFAULT); } #endif dset_r0 = H5Dopen (group_id, "R0", H5P_DEFAULT); dset_r1 = H5Dopen (group_id, "R1", H5P_DEFAULT); dset_r2 = H5Dopen (group_id, "R2", H5P_DEFAULT); #if STOKES_SWITCH == ON { dset_s0 = 
H5Dopen (group_id, "S0", H5P_DEFAULT); dset_s1 = H5Dopen (group_id, "S1", H5P_DEFAULT); dset_s2 = H5Dopen (group_id, "S2", H5P_DEFAULT); dset_s3 = H5Dopen (group_id, "S3", H5P_DEFAULT); } #endif dset_num_scatt = H5Dopen (group_id, "NS", H5P_DEFAULT); #if SYNCHROTRON_SWITCH == ON { dset_weight = H5Dopen (group_id, "PW", H5P_DEFAULT); // have to account for this only being used for synchrotron emission switch being on } #else { dset_weight = H5Dopen (file, "PW", H5P_DEFAULT); //for non synch runs look at the global /PW dataset } #endif #if SAVE_TYPE == ON { dset_ph_type = H5Dopen (group_id, "PT", H5P_DEFAULT); } #endif //read the data in status = H5Dread(dset_p0, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, (p0+j)); status = H5Dread(dset_p1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, (p1+j)); status = H5Dread(dset_p2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, (p2+j)); status = H5Dread(dset_p3, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, (p3+j)); #if COMV_SWITCH == ON { status = H5Dread(dset_comv_p0, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, (comv_p0+j)); status = H5Dread(dset_comv_p1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, (comv_p1+j)); status = H5Dread(dset_comv_p2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, (comv_p2+j)); status = H5Dread(dset_comv_p3, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, (comv_p3+j)); } #endif status = H5Dread(dset_r0, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, (r0+j)); status = H5Dread(dset_r1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, (r1+j)); status = H5Dread(dset_r2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, (r2+j)); #if STOKES_SWITCH == ON { status = H5Dread(dset_s0, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, (s0+j)); status = H5Dread(dset_s1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, (s1+j)); status = H5Dread(dset_s2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, (s2+j)); status = H5Dread(dset_s3, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, (s3+j)); } #endif status = H5Dread(dset_num_scatt, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, (num_scatt+j)); status = H5Dread(dset_weight, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, (weight+j)); #if SAVE_TYPE == ON { status = H5Dread(dset_ph_type, H5T_NATIVE_CHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, (ph_type+j)); } #endif //get the number of points dspace = H5Dget_space (dset_p0); status=H5Sget_simple_extent_dims(dspace, dims, NULL); //save dimesnions in dims j+=dims[0];//calculate the total number of photons to save to new hdf5 file status = H5Sclose (dspace); status = H5Dclose (dset_p0); status = H5Dclose (dset_p1); status = H5Dclose (dset_p2); status = H5Dclose (dset_p3); #if COMV_SWITCH == ON { status = H5Dclose (dset_comv_p0); status = H5Dclose (dset_comv_p1); status = H5Dclose (dset_comv_p2); status = H5Dclose (dset_comv_p3); } #endif status = H5Dclose (dset_r0); status = H5Dclose (dset_r1); status = H5Dclose (dset_r2); #if STOKES_SWITCH == ON { status = H5Dclose (dset_s0); status = H5Dclose (dset_s1); status = H5Dclose (dset_s2); status = H5Dclose (dset_s3); } #endif #if SAVE_TYPE == ON { status = H5Dclose (dset_ph_type); } #endif status = H5Dclose (dset_num_scatt); status = H5Dclose (dset_weight); status = H5Gclose(group_id); } status = H5Fclose(file); } //create the datatspace and dataset dims[0]=j; dspace = H5Screate_simple(1, dims, NULL); dset_p0=H5Dcreate2(file_new, "P0", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); dset_p1=H5Dcreate2(file_new, "P1", H5T_NATIVE_DOUBLE, dspace, 
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); dset_p2=H5Dcreate2(file_new, "P2", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); dset_p3=H5Dcreate2(file_new, "P3", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); #if COMV_SWITCH == ON { dset_comv_p0=H5Dcreate2(file_new, "COMV_P0", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); dset_comv_p1=H5Dcreate2(file_new, "COMV_P1", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); dset_comv_p2=H5Dcreate2(file_new, "COMV_P2", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); dset_comv_p3=H5Dcreate2(file_new, "COMV_P3", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); } #endif dset_r0=H5Dcreate2(file_new, "R0", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); dset_r1=H5Dcreate2(file_new, "R1", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); dset_r2=H5Dcreate2(file_new, "R2", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); #if STOKES_SWITCH == ON { dset_s0=H5Dcreate2(file_new, "S0", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); dset_s1=H5Dcreate2(file_new, "S1", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); dset_s2=H5Dcreate2(file_new, "S2", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); dset_s3=H5Dcreate2(file_new, "S3", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); } #endif dset_num_scatt=H5Dcreate2(file_new, "NS", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); dset_weight=H5Dcreate2(file_new, "PW", H5T_NATIVE_DOUBLE, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); #if SAVE_TYPE == ON { dset_ph_type=H5Dcreate2(file_new, "PT", H5T_NATIVE_CHAR, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); } #endif //save the data in the new file status = H5Dwrite (dset_p0, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, p0); status = H5Dwrite (dset_p1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, p1); status = H5Dwrite (dset_p2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, p2); status = H5Dwrite (dset_p3, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, p3); #if COMV_SWITCH == ON { status = H5Dwrite (dset_comv_p0, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, comv_p0); status = H5Dwrite (dset_comv_p1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, comv_p1); status = H5Dwrite (dset_comv_p2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, comv_p2); status = H5Dwrite (dset_comv_p3, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, comv_p3); } #endif status = H5Dwrite (dset_r0, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, r0); status = H5Dwrite (dset_r1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, r1); status = H5Dwrite (dset_r2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, r2); #if STOKES_SWITCH == ON { status = H5Dwrite (dset_s0, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, s0); status = H5Dwrite (dset_s1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, s1); status = H5Dwrite (dset_s2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, s2); status = H5Dwrite (dset_s3, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, s3); } #endif #if SAVE_TYPE == ON { status = H5Dwrite (dset_ph_type, H5T_NATIVE_CHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, ph_type); } #endif status = H5Dwrite (dset_num_scatt, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, num_scatt); status = H5Dwrite (dset_weight, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, weight); status = H5Sclose (dspace); status 
= H5Dclose (dset_p0); status = H5Dclose (dset_p1); status = H5Dclose (dset_p2); status = H5Dclose (dset_p3); #if COMV_SWITCH == ON { status = H5Dclose (dset_comv_p0); status = H5Dclose (dset_comv_p1); status = H5Dclose (dset_comv_p2); status = H5Dclose (dset_comv_p3); } #endif status = H5Dclose (dset_r0); status = H5Dclose (dset_r1); status = H5Dclose (dset_r2); #if STOKES_SWITCH == ON { status = H5Dclose (dset_s0); status = H5Dclose (dset_s1); status = H5Dclose (dset_s2); status = H5Dclose (dset_s3); } #endif #if SAVE_TYPE == ON { status = H5Dclose (dset_ph_type); } #endif status = H5Dclose (dset_num_scatt); status = H5Dclose (dset_weight); status = H5Fclose (file_new); free(p0);free(p1); free(p2);free(p3); free(comv_p0);free(comv_p1); free(comv_p2);free(comv_p3); free(r0);free(r1); free(r2); free(s0);free(s1); free(s2);free(s3); free(num_scatt); free(weight); free(ph_type); isNotCorrupted=0; } } //exit(0); } void modifyFlashName(char flash_file[200], char prefix[200], int frame) { int lim1=0, lim2=0, lim3=0; char test[200]="" ; //if (strcmp(DIM_SWITCH, dim_2d_str)==0) #if DIMENSIONS == 2 { //2D case lim1=10; lim2=100; lim3=1000; } #else { //3d case lim1=100; lim2=1000; lim3=10000; } #endif if (frame<lim1) { //snprintf(flash_file,sizeof(flash_file), "%s%.3d%d",prefix,000,frame); //FILEPATH,FILEROOT snprintf(test,sizeof(test), "%s%s%.3d%d",FILEPATH,FILEROOT,000,frame); } else if (frame<lim2) { //snprintf(flash_file,sizeof(flash_file), "%s%.2d%d",prefix,00,frame); snprintf(test,sizeof(test), "%s%s%.2d%d",FILEPATH,FILEROOT,00,frame); } else if (frame<lim3) { //snprintf(flash_file,sizeof(flash_file), "%s%d%d",prefix,0,frame); snprintf(test,sizeof(test), "%s%s%d%d",FILEPATH,FILEROOT,0,frame); } else { //snprintf(flash_file,sizeof(flash_file), "%s%d",prefix,frame); snprintf(test,sizeof(test), "%s%s%d",FILEPATH,FILEROOT,frame); } strncpy(flash_file, test, sizeof(test));//had to do this workaround for some weird reason //printf("test: %s\n", flash_file); } void readHydro2D(char hydro_prefix[200], int frame, double r_inj, double fps, double **x, double **y, double **szx, double **szy, double **r, double **theta, double **velx, double **vely, double **dens, double **pres, double **gamma, double **dens_lab, double **temp, int *number, int ph_inj_switch, double min_r, double max_r, FILE *fPtr) { FILE *hydroPtr=NULL; char hydrofile[200]="", file_num[200]="", full_file[200]="", file_end[200]="" ; char buf[10]=""; int i=0, j=0, k=0, elem=0, elem_factor=0; int all_index_buffer=0, r_min_index=0, r_max_index=0, theta_min_index=0, theta_max_index=0; //all_index_buffer contains phi_min, phi_max, theta_min, theta_max, r_min, r_max indexes to get from grid files int r_index=0, theta_index=0; float buffer=0; float *dens_unprc=NULL,*vel_r_unprc=NULL, *vel_theta_unprc=NULL,*pres_unprc=NULL; double ph_rmin=0, ph_rmax=0; double r_in=1e10; //double *r_edge=malloc(sizeof(double)*(R_DIM_2D+1)); //double *dr=malloc(sizeof(double)*(R_DIM_2D)); double *r_unprc=malloc(sizeof(double)*R_DIM_2D); double *theta_unprc=malloc(sizeof(double)*THETA_DIM_2D); if (ph_inj_switch==0) { ph_rmin=min_r; ph_rmax=max_r; } snprintf(file_end,sizeof(file_end),"%s","small.data" ); //density snprintf(hydrofile,sizeof(hydrofile),"%s%s%d%s",hydro_prefix,"u0", 1,"-" ); modifyFlashName(file_num, hydrofile, frame); fprintf(fPtr,">> Opening file %s\n", file_num); fflush(fPtr); snprintf(full_file, sizeof(full_file), "%s%s", file_num, file_end); hydroPtr=fopen(full_file, "rb"); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file 
from fortran fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); //min and max indexes for the grid fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&theta_min_index, sizeof(int)*1, 1,hydroPtr); fread(&theta_max_index, sizeof(int)*1, 1,hydroPtr); fread(&r_min_index, sizeof(int)*1, 1,hydroPtr); fread(&r_max_index, sizeof(int)*1, 1,hydroPtr); fclose(hydroPtr); //fortran indexing starts @ 1, but C starts @ 0 r_min_index--;//=r_min_index-1; r_max_index--;//=r_max_index-1; theta_min_index--;//=theta_min_index-1; theta_max_index--;//=theta_max_index-1; elem=(r_max_index+1-r_min_index)*(theta_max_index+1-theta_min_index); //max index is max number of elements minus 1, there add one to get total number of elements fprintf(fPtr,"Elem %d\n", elem); fprintf(fPtr,"Limits %d, %d, %d, %d, %d, %d\n", all_index_buffer, all_index_buffer, theta_min_index, theta_max_index, r_min_index, r_max_index); fflush(fPtr); //now with number of elements allocate data dens_unprc=malloc(elem*sizeof(float)); vel_r_unprc=malloc(elem*sizeof(float)); vel_theta_unprc=malloc(elem*sizeof(float)); pres_unprc=malloc(elem*sizeof(float)); hydroPtr=fopen(full_file, "rb"); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); //min and max indexes for the grid fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); //min and max indexes for the grid fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran /* for (i=0;i<elem;i++) { fread((dens_unprc+i), sizeof(float),1, hydroPtr); //read data } */ fread(dens_unprc, sizeof(float),elem, hydroPtr); fclose(hydroPtr); //V_r snprintf(hydrofile,sizeof(hydrofile),"%s%s%d%s",hydro_prefix,"u0", 2,"-" ); modifyFlashName(file_num, hydrofile, frame); snprintf(full_file, sizeof(full_file), "%s%s", file_num, file_end); hydroPtr=fopen(full_file, "rb"); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); //min and max indexes for the grid fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); //min and max indexes for the grid fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(vel_r_unprc, sizeof(float),elem, hydroPtr); //data fclose(hydroPtr); //V_theta snprintf(hydrofile,sizeof(hydrofile),"%s%s%d%s",hydro_prefix,"u0", 3,"-" ); modifyFlashName(file_num, hydrofile, frame); snprintf(full_file, sizeof(full_file), "%s%s", file_num, file_end); hydroPtr=fopen(full_file, "rb"); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); //min and max indexes for the grid fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); //min and max indexes for the grid fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 
1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(vel_theta_unprc, sizeof(float), elem, hydroPtr); //data fclose(hydroPtr); //u04 is phi component but is all 0 //pres snprintf(hydrofile,sizeof(hydrofile),"%s%s%d%s",hydro_prefix,"u0", 8,"-" ); modifyFlashName(file_num, hydrofile, frame); snprintf(full_file, sizeof(full_file), "%s%s", file_num, file_end); //fprintf(fPtr,">> Opening file %s\n", full_file); //fflush(fPtr); hydroPtr=fopen(full_file, "rb"); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); //min and max indexes for the grid fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); //min and max indexes for the grid fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran //elem=(r_max_index-r_min_index)*(theta_max_index-theta_min_index); //fprintf(fPtr,"Elem %d\n", elem); //fprintf(fPtr,"Limits %d, %d, %d, %d, %d, %d\n", all_index_buffer, all_index_buffer, theta_min_index, theta_max_index, r_min_index, r_max_index); //fflush(fPtr); fread(pres_unprc, sizeof(float),elem, hydroPtr); //data fclose(hydroPtr); /* for (j=0 ;j<(theta_max_index+1-theta_min_index); j++) { for (k=0; k<(r_max_index+1-r_min_index); k++) { fprintf(fPtr,"Pres %d: %e\n", ( j*(r_max_index+1-r_min_index)+k ), *(pres_unprc+( j*(r_max_index+1-r_min_index)+k ))); //fprintf(fPtr,"Pres %d: %e\n", ( j*(r_max_index)+k ), *(pres_unprc+( j*(r_max_index)+k ))); fflush(fPtr); } } exit(0); */ //R snprintf(hydrofile,sizeof(hydrofile),"%s%s",hydro_prefix,"grid-x1.data" ); hydroPtr=fopen(hydrofile, "r"); //fprintf(fPtr,">> Opening file %s\n", hydrofile); //fflush(fPtr); i=0; while (i<R_DIM_2D) { fscanf(hydroPtr, "%lf", (r_unprc+i)); //read value fgets(buf, 3,hydroPtr); //read comma /* if (i<5) { //printf("Here\n"); fprintf(fPtr,"R %d: %e\n", i, *(r_unprc+i)); fflush(fPtr); } */ i++; } fclose(hydroPtr); //theta from y axis snprintf(hydrofile,sizeof(hydrofile),"%s%s",hydro_prefix,"grid-x2.data" ); hydroPtr=fopen(hydrofile, "r"); //fprintf(fPtr,">> Opening file %s\n", hydrofile); //fflush(fPtr); i=0; while (i<THETA_DIM_2D) { fscanf(hydroPtr, "%lf", (theta_unprc+i)); //read value fgets(buf, 3,hydroPtr); //read comma /* if (i<5) { fprintf(fPtr,"Theta %d: %e\n", i, *(theta_unprc+i)); fflush(fPtr); } */ i++; } fclose(hydroPtr); //limit number of array elements //fill in radius array and find in how many places r > injection radius elem_factor=0; elem=0; while (elem==0) { elem_factor++; elem=0; for (j=0 ;j<(theta_max_index+1-theta_min_index); j++) { for (k=0; k<(r_max_index+1-r_min_index); k++) { i=r_min_index+k; //look at indexes of r that are included in small hydro file //if I have photons do selection differently than if injecting photons if (ph_inj_switch==0) { //if calling this function when propagating photons, choose blocks based on where the photons are if (((ph_rmin - elem_factor*C_LIGHT/fps)<(*(r_unprc+i))) && (*(r_unprc+i) < (ph_rmax + elem_factor*C_LIGHT/fps) )) { // *(pres_unprc+(i*R_DIM*THETA_DIM + j*R_DIM + k ) elem++; } } 
else { //if calling this function to inject photons choose blocks based on injection parameters, r_inj, which is sufficient if (((r_inj - C_LIGHT/fps)<(*(r_unprc+i))) && (*(r_unprc+i) < (r_inj + C_LIGHT/fps) )) { // *(pres_unprc+(i*R_DIM*THETA_DIM + j*R_DIM + k ) elem++; } } } } } fprintf(fPtr, "Number of post restricted Elems: %d %e\n", elem, r_inj); fflush(fPtr); (*pres)=malloc (elem * sizeof (double )); (*velx)=malloc (elem * sizeof (double )); (*vely)=malloc (elem * sizeof (double )); (*dens)=malloc (elem * sizeof (double )); (*x)=malloc (elem * sizeof (double )); (*y)=malloc (elem * sizeof (double )); (*r)=malloc (elem * sizeof (double )); (*theta)=malloc (elem * sizeof (double )); (*gamma)=malloc (elem * sizeof (double )); (*dens_lab)=malloc (elem * sizeof (double )); //szx becomes delta r szy becomes delta theta (*szx)=malloc (elem * sizeof (double )); (*szy)=malloc (elem * sizeof (double )); (*temp)=malloc (elem * sizeof (double )); elem=0; for (j=0 ;j<(theta_max_index+1-theta_min_index); j++) { for (k=0; k<(r_max_index+1-r_min_index); k++) { r_index=r_min_index+k; //look at indexes of r that are included in small hydro file theta_index=theta_min_index+j; if (ph_inj_switch==0) { if (((ph_rmin - elem_factor*C_LIGHT/fps)<(*(r_unprc+r_index))) && (*(r_unprc+r_index) < (ph_rmax + elem_factor*C_LIGHT/fps) )) { (*pres)[elem]=*(pres_unprc+( j*(r_max_index+1-r_min_index)+k )); (*velx)[elem]=(*(vel_r_unprc+( j*(r_max_index+1-r_min_index)+k )))*sin(*(theta_unprc+theta_index))+(*(vel_theta_unprc+( j*(r_max_index+1-r_min_index)+k )))*cos(*(theta_unprc+theta_index)); (*vely)[elem]=(*(vel_r_unprc+( j*(r_max_index+1-r_min_index)+k )))*cos(*(theta_unprc+theta_index))-(*(vel_theta_unprc+( j*(r_max_index+1-r_min_index)+k )))*sin(*(theta_unprc+theta_index)); (*dens)[elem]=*(dens_unprc+( j*(r_max_index+1-r_min_index)+k )); (*x)[elem]=(*(r_unprc+r_index))*sin(*(theta_unprc+theta_index)); (*y)[elem]=(*(r_unprc+r_index))*cos(*(theta_unprc+theta_index)); (*r)[elem]=*(r_unprc+r_index); (*szx)[elem]=(*(r_unprc+r_index))*((M_PI/2)/2000); (*szy)[elem]=(M_PI/2)/2000; (*theta)[elem]=*(theta_unprc+theta_index);//theta in radians in relation to jet axis (*gamma)[elem]=pow(pow(1.0-(pow(*(vel_r_unprc+( j*(r_max_index+1-r_min_index)+k )),2)+pow(*(vel_theta_unprc+( j*(r_max_index+1-r_min_index)+k )),2)),0.5),-1); //v is in units of c (*dens_lab)[elem]= (*(dens_unprc+( j*(r_max_index+1-r_min_index)+k ))) * pow(pow(1.0-(pow(*(vel_r_unprc+( j*(r_max_index+1-r_min_index)+k )),2)+pow(*(vel_theta_unprc+( j*(r_max_index+1-r_min_index)+k )),2)),0.5),-1); (*temp)[elem]=pow(3*(*(pres_unprc+( j*(r_max_index+1-r_min_index)+k )))*pow(C_LIGHT,2.0)/(A_RAD) ,1.0/4.0); elem++; } } else { if (((r_inj - C_LIGHT/fps)<(*(r_unprc+r_index))) && (*(r_unprc+r_index) < (r_inj + C_LIGHT/fps) )) { (*pres)[elem]=*(pres_unprc+( j*(r_max_index+1-r_min_index)+k )); (*velx)[elem]=(*(vel_r_unprc+( j*(r_max_index+1-r_min_index)+k )))*sin(*(theta_unprc+theta_index))+(*(vel_theta_unprc+( j*(r_max_index+1-r_min_index)+k )))*cos(*(theta_unprc+theta_index)); (*vely)[elem]=(*(vel_r_unprc+( j*(r_max_index+1-r_min_index)+k )))*cos(*(theta_unprc+theta_index))-(*(vel_theta_unprc+( j*(r_max_index+1-r_min_index)+k )))*sin(*(theta_unprc+theta_index)); (*dens)[elem]=*(dens_unprc+( j*(r_max_index+1-r_min_index)+k )); (*x)[elem]=(*(r_unprc+r_index))*sin(*(theta_unprc+theta_index)); (*y)[elem]=(*(r_unprc+r_index))*cos(*(theta_unprc+theta_index)); (*r)[elem]=*(r_unprc+r_index); (*szx)[elem]=(*(r_unprc+r_index))*((M_PI/2)/2000); (*szy)[elem]=(M_PI/2)/2000; 
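/* The hard-coded (M_PI/2)/2000 appears to assume a fixed hydro grid of 2000
   uniform theta cells spanning [0, pi/2]: szy holds that angular width and
   szx scales it by r, consistent with the "szx becomes delta r szy becomes
   delta theta" comment above. */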
(*theta)[elem]=*(theta_unprc+theta_index);//theta in radians in relation to jet axis (*gamma)[elem]=pow(pow(1.0-(pow(*(vel_r_unprc+( j*(r_max_index+1-r_min_index)+k )),2)+pow(*(vel_theta_unprc+( j*(r_max_index+1-r_min_index)+k )),2)),0.5),-1); //v is in units of c (*dens_lab)[elem]= (*(dens_unprc+( j*(r_max_index+1-r_min_index)+k ))) * pow(pow(1.0-(pow(*(vel_r_unprc+( j*(r_max_index+1-r_min_index)+k )),2)+pow(*(vel_theta_unprc+( j*(r_max_index+1-r_min_index)+k )),2)),0.5),-1); (*temp)[elem]=pow(3*(*(pres_unprc+( j*(r_max_index+1-r_min_index)+k )))*pow(C_LIGHT,2.0)/(A_RAD) ,1.0/4.0); elem++; } } } } (*number)=elem; //fprintf(fPtr, "Number of post restricted Elems: %d %e\n", elem, r_inj); //fflush(fPtr); free(pres_unprc); //works when not being freed? //fprintf(fPtr, "pres Done\n\n"); //fflush(fPtr); free(vel_r_unprc); //fprintf(fPtr, "vel_r Done\n\n"); //fflush(fPtr); free(vel_theta_unprc); //fprintf(fPtr, "vel_theta Done\n\n"); //fflush(fPtr); free(dens_unprc); //fprintf(fPtr, "dens Done\n\n"); //fflush(fPtr); free(r_unprc); //fprintf(fPtr, "r Done\n\n"); //fflush(fPtr); free(theta_unprc); //fprintf(fPtr, "theta Done\n\n"); //fflush(fPtr); pres_unprc=NULL; vel_r_unprc=NULL; vel_theta_unprc=NULL; dens_unprc=NULL; r_unprc=NULL; theta_unprc=NULL; //fprintf(fPtr, "ALL Done\n\n"); //fflush(fPtr); }
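/*
 * Standalone sketch (not part of MCRaT) of the rejection sampler used in
 * comptonScatter() above: theta follows the dipole-like phase function
 * f(theta) = sin(theta)*(1 + cos^2(theta)), whose maximum is
 * sqrt(2/3)*(4/3) ~= 1.0887, so the 1.09 envelope used above is valid.
 * drand48() stands in for gsl_rng_uniform() here.
 */
#include <math.h>
#include <stdlib.h>

static double sample_thomson_theta(void)
{
    double x, y, f;
    do{
        y = drand48()*1.09; /* envelope: sup of f is ~1.0887 < 1.09 */
        x = drand48()*M_PI; /* candidate theta in [0, pi) */
        f = sin(x)*(1 + pow(cos(x), 2));
    } while(y > f); /* accept x with probability f(x)/1.09 */
    return x;
}

/*
 * Caveat spotted in kleinNishinaScatter() above: phi_max is computed with
 * abs() on a double; with <stdlib.h> in scope that truncates to int, so
 * fabs() is presumably intended: phi_max = fabs(atan2(-u, q))/2.0;
 */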
GB_unaryop__abs_fp32_uint16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_fp32_uint16 // op(A') function: GB_tran__abs_fp32_uint16 // C type: float // A type: uint16_t // cast: float cij = (float) aij // unaryop: cij = fabsf (aij) #define GB_ATYPE \ uint16_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = fabsf (x) ; // casting #define GB_CASTING(z, aij) \ float z = (float) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_fp32_uint16 ( float *Cx, // Cx and Ax may be aliased uint16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_fp32_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
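/*
 * Illustrative sketch (not part of the generated file): the body that one
 * GB_CAST_OP(p,p) iteration above expands to, written out as a plain
 * function. Since aij comes from an unsigned type the fabsf is numerically a
 * no-op here, but the generated template applies the operator uniformly.
 */
#include <math.h>
#include <stdint.h>

static inline void gb_abs_fp32_uint16_one (float *Cx, const uint16_t *Ax,
    int64_t p)
{
    uint16_t aij = Ax [p] ;     /* GB_GETA    */
    float z = (float) aij ;     /* GB_CASTING */
    Cx [p] = fabsf (z) ;        /* GB_OP applied to GB_CX(p) */
}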
train_codebook.h
#pragma once

#include <cstdint>
#include <cstring>
#include <functional>
#include <limits>
#include <string>

namespace sq_hnswlib {

// Builds a per-dimension scalar-quantization codebook: for every dimension a
// 1-D k-means with 256 centroids is trained over all total_n points, and each
// value is replaced by the byte index of its nearest centroid. The data is
// read in two halves (dimensions [0, ndim/2) and [ndim/2, ndim)) to reduce
// peak memory, and pdata is assumed to be dimension-major:
// pdata[dim * total_n + point].
//
// Assumption: the std::function parameter types below were syntactically
// invalid and have been reconstructed from the call sites and from the
// delete[]/re-read pattern (the reader callback is assumed to allocate into
// the T *& it is given).
template <typename T>
uint8_t *train_codebook(
    const std::string &filename, const int hnswM, const int hnswefC,
    MetricType metric_type,
    std::function<void(const std::string &, uint32_t &, uint32_t &)> read_meta,
    std::function<void(const std::string &, T *&, uint32_t &, uint32_t &,
                       const uint32_t &)> read_bin_file_half_dimension,
    std::function<void(int64_t, const T *, int64_t, int64_t, float *&)> kmeans) {
  uint32_t npts = 0, ndim = 0;
  uint32_t total_n = 0;
  read_meta(filename, total_n, ndim);
  uint32_t half_dim = ndim / 2;

  T *pdata = nullptr;                    // allocated/filled by the reader
  float *codes = new float[256 * ndim];  // centroid table: codes[k*ndim + dim]
  uint8_t *codebook = new uint8_t[(uint64_t)total_n * (uint64_t)ndim];
  memset(codebook, 0, sizeof(uint8_t) * (uint64_t)total_n * (uint64_t)ndim);

  // First half of the dimensions.
  read_bin_file_half_dimension(filename, pdata, npts, ndim, 0);
#pragma omp parallel for
  for (uint32_t i = 0; i < half_dim; ++i) {
    float *centers = new float[256];
    kmeans(total_n, pdata + (uint64_t)i * total_n, 1, 256, centers);
    for (int j = 0; j < 256; j++) {
      codes[j * ndim + i] = centers[j];
    }
    // Assign every point the index of its nearest centroid in dimension i.
    for (uint32_t j = 0; j < total_n; ++j) {
      float min_dis = std::numeric_limits<float>::max();
      for (uint32_t k = 0; k < 256; ++k) {
        float diff = codes[k * ndim + i] - pdata[(uint64_t)i * total_n + j];
        float now_dis = diff * diff;
        if (now_dis < min_dis) {
          min_dis = now_dis;
          codebook[(uint64_t)j * ndim + i] = (uint8_t)k;  // k < 256 fits a byte
        }
      }
    }
    delete[] centers;  // freed per dimension to avoid a leak
  }
  delete[] pdata;
  pdata = nullptr;

  // Second half of the dimensions; pdata now holds dimensions
  // [half_dim, ndim), so rows are indexed with (i - half_dim).
  read_bin_file_half_dimension(filename, pdata, npts, ndim, half_dim);
#pragma omp parallel for
  for (uint32_t i = half_dim; i < ndim; ++i) {
    float *centers = new float[256];
    kmeans(total_n, pdata + (uint64_t)(i - half_dim) * total_n, 1, 256, centers);
    for (int j = 0; j < 256; j++) {
      codes[j * ndim + i] = centers[j];
    }
    for (uint32_t j = 0; j < total_n; ++j) {
      float min_dis = std::numeric_limits<float>::max();
      for (uint32_t k = 0; k < 256; ++k) {
        float diff = codes[k * ndim + i] -
                     pdata[(uint64_t)(i - half_dim) * total_n + j];
        float now_dis = diff * diff;
        if (now_dis < min_dis) {
          min_dis = now_dis;
          codebook[(uint64_t)j * ndim + i] = (uint8_t)k;
        }
      }
    }
    delete[] centers;
  }
  delete[] pdata;
  pdata = nullptr;

  // codes is the reconstruction table but is not returned by this function;
  // it is freed here so it does not leak.
  delete[] codes;
  return codebook;
}

}  // namespace sq_hnswlib
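// Hypothetical decode helper (a sketch; names assumed, valid as C or C++):
// reconstructing an approximate value needs both the byte codebook returned
// above and the centroid table `codes`, which train_codebook as written
// builds but does not expose.
#include <stdint.h>
static inline float sq_decode(const float *codes, const uint8_t *codebook,
                              uint32_t ndim, uint64_t point, uint32_t dim)
{
    return codes[(uint32_t)codebook[point*ndim + dim]*ndim + dim];
}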
GB_unaryop__minv_uint16_uint8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_uint16_uint8 // op(A') function: GB_tran__minv_uint16_uint8 // C type: uint16_t // A type: uint8_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 16) #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 16) ; // casting #define GB_CASTING(z, aij) \ uint16_t z = (uint16_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT16 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_uint16_uint8 ( uint16_t *Cx, // Cx and Ax may be aliased uint8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_uint16_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
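/*
 * Usage note (mechanism only): GB_DISABLE above is driven by GB_control.h,
 * so building the library with, e.g.,
 *
 *     #define GxB_NO_MINV 1
 *
 * added to GB_control.h makes both GB_unop__minv_uint16_uint8 and
 * GB_tran__minv_uint16_uint8 compile to stubs that return GrB_NO_VALUE, and
 * the caller falls back to the generic (non-type-specialized) apply path,
 * as the "disable this operator" comment above indicates.
 */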
Dependency.c
#include <math.h>
#include <omp.h>

/* Anti-dependence: x[i] reads x[i+1], which a later iteration overwrites,
 * so the loop cannot be parallelized as written. */
void a(int *x, int *y, int n) {
  for (int i = 0; i < n - 1; i++) {
    x[i] = (y[i] + x[i + 1]) / 7;
  }
}

/* Break the anti-dependence by snapshotting x[i+1] into x2 first; the second
 * pass must write back into x so the result matches a(). */
void a_sol(int *x, int *y, int n) {
  int x2[n];
#pragma omp parallel for
  for (int i = 0; i < n - 1; i++) {
    x2[i] = x[i + 1];
  }
#pragma omp parallel for
  for (int i = 0; i < n - 1; i++) {
    x[i] = (y[i] + x2[i]) / 7;
  }
}

/* The scalar a is live after the loop (its value from the last iteration is
 * used in f), which blocks naive parallelization. */
void b(int *x, int *y, int *z, int n, int k) {
  int a;
  for (int i = 0; i < n; i++) {
    a = (x[i] + y[i]) / (i + 1);
    z[i] = a;
  }
  int f = sqrt(a + k);
}

/* Every value of a is stored in z, so the final one is z[n-1]; note that
 * z[n] would read one past the end of the array. */
void b_sol(int *x, int *y, int *z, int n, int k) {
#pragma omp parallel for
  for (int i = 0; i < n; i++) {
    z[i] = (x[i] + y[i]) / (i + 1);
  }
  int f = sqrt(z[n - 1] + k);
}

/* Flow dependence between the two loops, but y[i] only needs x[i], so the
 * loops can be fused and the fused loop parallelized. */
void c(int *x, int *y, int n, int a, int b) {
  for (int i = 0; i < n; i++) {
    x[i] = y[i] * 2 + b * i;
  }
  for (int i = 0; i < n; i++) {
    y[i] = x[i] + a / (i + 1);
  }
}

void c_sol(int *x, int *y, int n, int a, int b) {
#pragma omp parallel for
  for (int i = 0; i < n; i++) {
    x[i] = y[i] * 2 + b * i;
    y[i] = x[i] + a / (i + 1);
  }
}
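/* Minimal check harness (a sketch, not part of the original file): verifies
 * that a_sol reproduces the serial a on a small array when appended to the
 * functions above. */
#include <stdio.h>
#include <string.h>

int main(void) {
  int n = 8;
  int y[8]  = {1, 2, 3, 4, 5, 6, 7, 8};
  int x1[8] = {10, 20, 30, 40, 50, 60, 70, 80};
  int x2[8];
  memcpy(x2, x1, sizeof x1);
  a(x1, y, n);     /* serial reference */
  a_sol(x2, y, n); /* parallelized version */
  printf("%s\n", memcmp(x1, x2, sizeof x1) == 0 ? "match" : "MISMATCH");
  return 0;
}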
bicg_teams.c
/** * bicg.c: This file was adapted from PolyBench/GPU 1.0 test suite * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <[email protected]> * Rafael Cardoso F Sousa <[email protected]> * Luís Felipe Mattos <[email protected]> */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <sys/time.h> #include <omp.h> #include "../../common/polybenchUtilFuncts.h" //Error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.7 /* Problem size. */ #define NX 8192 #define NY 8192 #ifndef M_PI #define M_PI 3.14159 #endif /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_array(DATA_TYPE *A, DATA_TYPE *p, DATA_TYPE *r) { int i, j; for (i = 0; i < NX; i++) { r[i] = i * M_PI; for (j = 0; j < NY; j++) { A[i*NY + j] = ((DATA_TYPE) i*j) / NX; } } for (i = 0; i < NY; i++) { p[i] = i * M_PI; } } void compareResults(DATA_TYPE* s, DATA_TYPE* s_outputFromGpu, DATA_TYPE* q, DATA_TYPE* q_outputFromGpu) { int i,fail; fail = 0; // Compare s with s_cuda for (i=0; i<NX; i++) { if (percentDiff(q[i], q_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } for (i=0; i<NY; i++) { if (percentDiff(s[i], s_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void bicg_cpu(DATA_TYPE* A, DATA_TYPE* r, DATA_TYPE* s, DATA_TYPE* p, DATA_TYPE* q) { int i,j; for (i = 0; i < NY; i++) { s[i] = 0.0; } for (i = 0; i < NX; i++) { q[i] = 0.0; for (j = 0; j < NY; j++) { s[j] = s[j] + r[i] * A[i*NY + j]; q[i] = q[i] + A[i*NY + j] * p[j]; } } } void bicg_OMP(DATA_TYPE* A, DATA_TYPE* r, DATA_TYPE* s, DATA_TYPE* p, DATA_TYPE* q) { int i, j; for (i = 0; i < NY; i++) { s[i] = 0.0; } #pragma omp target map(to: A[:NX*NY], p[:NY], r[:NX]) map(tofrom: s[:NY], q[:NX]) { #pragma omp teams { #pragma omp distribute parallel for for (j = 0; j < NY; j++) { for (i = 0; i < NX; i++) { s[j] = s[j] + r[i] * A[i*NY + j]; } } #pragma omp distribute parallel for for (i = 0; i < NX; i++) { q[i] = 0.0; for (j = 0; j < NY; j++) { q[i] = q[i] + A[i*NY + j] * p[j]; } } } } } int main(int argc, char** argv) { double t_start, t_end; DATA_TYPE* A; DATA_TYPE* r; DATA_TYPE* s; DATA_TYPE* p; DATA_TYPE* q; DATA_TYPE* s_GPU; DATA_TYPE* q_GPU; A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE)); r = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE)); s = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE)); p = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE)); q = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE)); s_GPU = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE)); q_GPU = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE)); fprintf(stdout, "<< BiCG Sub Kernel of BiCGStab Linear Solver >>\n"); init_array(A, p, r); t_start = rtclock(); bicg_OMP(A, r, s_GPU, p, q_GPU); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); t_start = rtclock(); bicg_cpu(A, r, s, p, q); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(s, s_GPU, q, q_GPU); free(A); free(r); free(s); free(p); free(q); free(s_GPU); free(q_GPU); return 0; }
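/*
 * Design note on bicg_OMP above: the first device loop interchanges the CPU
 * loop order so that j (the index of s) is outermost. Each thread then owns
 * s[j] exclusively and accumulates over i sequentially, so no
 * synchronization is needed. Keeping the CPU order (i outermost) would make
 * concurrent updates of s[j] race and require an atomic, roughly:
 *
 *     #pragma omp distribute parallel for
 *     for (i = 0; i < NX; i++)
 *       for (j = 0; j < NY; j++)
 *       {
 *         #pragma omp atomic
 *         s[j] = s[j] + r[i] * A[i*NY + j];
 *       }
 *
 * The q loop needs no such care because each q[i] is private to its
 * iteration i.
 */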
main.c
#include "common.h" static void print_help(char *argv) { END("%s [-f edge_file] [-W width] [-H height] [-D degree] [-R length] [-o output_file] [-s random_seed]\ [-n calculations] [-w max_temperature] [-c min_temperature] [-g groups] [-C cooling_cycle] [-B] [-d]\ [-F fixed_temperature] [-e deleted_edges] [-Y] [-M] [-h]\n", argv); } static void set_args(const int argc, char **argv, char *infname, int *low_length, char *outfname, int *random_seed, long long *ncalcs, double *max_temp, double *min_temp, int *groups, int *cooling_cycle, bool *enable_hill_climbing, bool *enable_detect_temp, bool *enable_bfs, bool *enable_halfway, double *fixed_temp, int *width, int *height, int *max_degree, int *deleted_edges) { if(argc < 3) print_help(argv[0]); int result; while((result = getopt(argc,argv,"f:W:H:D:R:o:s:n:w:c:g:C:BdF:YMhe:"))!=-1){ switch(result){ case 'f': if(strlen(optarg) > MAX_FILENAME_LENGTH) ERROR("Input filename is long (%s). Please change MAX_FILENAME_LENGTH.\n", optarg); strcpy(infname, optarg); break; case 'e': *deleted_edges = atoi(optarg); if(*deleted_edges != 0 && *deleted_edges != 1 && *deleted_edges != 2) ERROR("-e value == 0 or 1 or 2\n"); break; case 'W': *width = atoi(optarg); if(*width <= 0) ERROR("-W value > 0\n"); break; case 'H': *height = atoi(optarg); if(*height <= 0) ERROR("-H value > 0\n"); break; case 'D': *max_degree = atoi(optarg); if(*max_degree <= 0) ERROR("-D value > 0\n"); break; case 'R': *low_length = atoi(optarg); if(*low_length <= 0) ERROR("-R value > 0\n"); break; case 'o': if(strlen(optarg) > MAX_FILENAME_LENGTH) ERROR("Output filename is long (%s). Please change MAX_FILENAME_LENGTH.\n", optarg); strcpy(outfname, optarg); break; case 's': *random_seed = atoi(optarg); if(*random_seed < 0) ERROR("-s value >= 0\n"); break; case 'n': *ncalcs = atoll(optarg); if(*ncalcs < 0) ERROR("-n value >= 0\n"); break; case 'w': *max_temp = atof(optarg); if(*max_temp <= 0) ERROR("-w value > 0\n"); break; case 'c': *min_temp = atof(optarg); if(*min_temp <= 0) ERROR("-c value > 0\n"); break; case 'g': *groups = atoi(optarg); if(*groups != 1 && *groups != 2 && *groups != 4) ERROR("-g value == 1 or 2 or 4\n"); break; case 'C': *cooling_cycle = atoi(optarg); if(*cooling_cycle <= 0) ERROR("-C value > 0\n"); break; case 'B': *enable_bfs = true; break; case 'd': *enable_detect_temp = true; break; case 'F': *fixed_temp = atof(optarg); if(*fixed_temp <= 0) ERROR("-F value > 0\n"); break; case 'Y': *enable_hill_climbing = true; break; case 'M': *enable_halfway = true; break; case 'h': default: print_help(argv[0]); } } } // The "edge" does not have NO_EDGE static int count_loop(const int lines, const int *edge) { int num = 0; for(int i=0;i<lines;i++) if(edge[i*2] == edge[i*2+1]) num++; return num; } static bool confirm_dist(const int v, const int w, const int height, const int low_length) { return (DISTANCE(v, w, height) <= low_length); } static void simple_exchange_edge(const int height, const int low_length, const int lines, int* edge) { while(1){ int e1, e2, new_e1_v, new_e1_w, new_e2_v, new_e2_w; do{ e1 = getRandom(lines); e2 = getRandom(lines); } while( e1 == e2 ); int e1_v = edge[e1*2]; int e1_w = edge[e1*2+1]; int e2_v = edge[e2*2]; int e2_w = edge[e2*2+1]; if(confirm_dist(e1_v, e2_v, height, low_length) && confirm_dist(e1_w, e2_w, height, low_length)){ new_e1_v = e1_v; new_e1_w = e2_v; new_e2_v = e1_w; new_e2_w = e2_w; } else if(confirm_dist(e1_v, e2_w, height, low_length) && confirm_dist(e1_w, e2_v, height, low_length)){ new_e1_v = e1_v; new_e1_w = e2_w; new_e2_v = e1_w; 
new_e2_w = e2_v; } else{ continue; } edge[2*e1] = new_e1_v; edge[2*e1+1] = new_e1_w; edge[2*e2] = new_e2_v; edge[2*e2+1] = new_e2_w; break; } } #ifdef _OPENMP static int top_down_step(const int nodes, const int num_frontier, const int max_degree, const int* degree, const int* restrict adjacency, int* restrict frontier, int* restrict next, char* restrict bitmap) { int count = 0; int local_frontier[nodes]; #pragma omp parallel private(local_frontier) { int local_count = 0; #pragma omp for nowait for(int i=0;i<num_frontier;i++){ int v = frontier[i]; for(int j=0;j<degree[v];j++){ int n = *(adjacency + v * max_degree + j); // adjacency[v][j]; if(bitmap[n] == NOT_VISITED){ bitmap[n] = VISITED; local_frontier[local_count++] = n; } } } // end for i #pragma omp critical { memcpy(&next[count], local_frontier, local_count*sizeof(int)); count += local_count; } } return count; } #else static int top_down_step(const int nodes, const int num_frontier, const int max_degree, const int *degree, const int* restrict adjacency, int* restrict frontier, int* restrict next, char* restrict bitmap) { int count = 0; for(int i=0;i<num_frontier;i++){ int v = frontier[i]; for(int j=0;j<degree[v];j++){ int n = *(adjacency + v * max_degree + j); // int n = adjacency[v][j]; if(bitmap[n] == NOT_VISITED){ bitmap[n] = VISITED; next[count++] = n; } } } return count; } #endif static int simple_bfs(const int nodes, const int max_degree, const int *degree, int *adjacency) { char *bitmap = malloc(sizeof(char) * nodes); int *frontier = malloc(sizeof(int) * nodes); int *next = malloc(sizeof(int) * nodes); int num_frontier = 1, root = 0, num = 0; for(int i=0;i<nodes;i++) bitmap[i] = NOT_VISITED; frontier[0] = root; bitmap[root] = VISITED; while(1){ num_frontier = top_down_step(nodes, num_frontier, max_degree, degree, adjacency, frontier, next, bitmap); if(num_frontier == 0) break; int *tmp = frontier; frontier = next; next = tmp; } for(int i=0;i<nodes;i++) if(bitmap[i] == NOT_VISITED) num++; free(bitmap); free(frontier); free(next); return num; } // Inherited from http://research.nii.ac.jp/graphgolf/c/create-lattice.c static void create_lattice(const int nodes, const int lines, const int width, const int height, const int max_degree, int *degree, const int low_length, int edge[lines*2]) { int i = 0; for(int x=0;x<width/2;x++){ for(int y=0;y<height;y++){ for(int k=0;k<max_degree;k++){ edge[i*2] = y + 2 * x * height; edge[i*2+1] = edge[2*i] + height; i++; } } } if(width%2 == 1){ for(int y=0;y<height/2;y++){ for(int k=0;k<max_degree;k++){ edge[i*2] = (width - 1) * height + 2 * y; edge[i*2+1] = edge[i*2] + 1; i++; } } /* add self-loop */ if(height%2 == 1){ for(int k=0;k<max_degree/2;k++){ edge[i*2] = edge[i*2+1] = nodes - 1; i++; } } } for(int i=0;i<lines*INITIAL_TIMES;i++) // Give randomness simple_exchange_edge(height, low_length, lines, edge); // Make an unconnected graph a connected graph // Note that the connected graph after this operation may have loops. 
int (*adjacency)[max_degree] = malloc(sizeof(int)*nodes*max_degree); // int adjacency[nodes][max_degree]; create_adjacency(nodes, lines, max_degree, degree, (const int (*)[2])edge, adjacency); int min_num = simple_bfs(nodes, max_degree, degree, (int *)adjacency); int *tmp_edge = malloc(lines*2*sizeof(int)); while(1){ memcpy(tmp_edge, edge, sizeof(int)*lines*2); simple_exchange_edge(height, low_length, lines, tmp_edge); create_adjacency(nodes, lines, max_degree, degree, (const int (*)[2])tmp_edge, adjacency); int tmp_num = simple_bfs(nodes, max_degree, degree, (int *)adjacency); if(tmp_num == 0){ memcpy(edge, tmp_edge, sizeof(int)*lines*2); break; } else{ if(tmp_num <= min_num){ min_num = tmp_num; memcpy(edge, tmp_edge, sizeof(int)*lines*2); } } } // Remove loops min_num = count_loop(lines, edge); if(min_num != 0){ while(1){ memcpy(tmp_edge, edge, sizeof(int)*lines*2); simple_exchange_edge(height, low_length, lines, tmp_edge); int tmp_num = count_loop(lines, tmp_edge); if(tmp_num == 0){ memcpy(edge, tmp_edge, sizeof(int)*lines*2); break; } else{ if(tmp_num <= min_num){ min_num = tmp_num; memcpy(edge, tmp_edge, sizeof(int)*lines*2); } } } } free(tmp_edge); free(adjacency); // for(int i=0;i<lines;i++) // printf("%d,%d %d,%d\n", WIDTH(edge[i*2], height), HEIGHT(edge[i*2], height), // WIDTH(edge[i*2+1], height), HEIGHT(edge[i*2+1], height)); //EXIT(0); } static int count_lines(const char *fname) { FILE *fp = NULL; if((fp = fopen(fname, "r")) == NULL) ERROR("File not found\n"); int lines = 0, c; while((c = fgetc(fp)) != EOF) if(c == '\n') lines++; fclose(fp); return lines; } static void read_file_lattice(int *edge, int *w, int *h, const char *fname) { FILE *fp; if((fp = fopen(fname, "r")) == NULL){ PRINT_R0("File not found\n"); EXIT(1); } int n[4]; *w = 0; *h = 0; while(fscanf(fp, "%d,%d %d,%d", &n[0], &n[1], &n[2], &n[3]) != EOF){ *w = MAX(*w, n[0]); *h = MAX(*h, n[1]); *w = MAX(*w, n[2]); *h = MAX(*h, n[3]); } *w += 1; *h += 1; rewind(fp); int i = 0; while(fscanf(fp, "%d,%d %d,%d", &n[0], &n[1], &n[2], &n[3]) != EOF){ edge[i*2 ] = n[0] * (*h) + n[1]; edge[i*2+1] = n[2] * (*h) + n[3]; i++; } fclose(fp); } static int max_node_num(const int lines, const int edge[lines*2]) { int max = edge[0]; for(int i=1;i<lines*2;i++) max = MAX(max, edge[i]); return max; } static void count_degree(const int nodes, const int lines, const int edge[lines*2], int degree[nodes]) { for(int i=0;i<nodes;i++) degree[i] = 0; for(int i=0;i<lines*2;i++){ int e = edge[i]; if(e != NO_EDGE) degree[e]++; } } static void delete_corner(const int deleted_edges, const int lines, const int based_lines, const int nodes, const int width, const int height, const int groups, int edge[lines][2]) { if(deleted_edges == 1){ // When deleted_edges is 1, groups must be 1 or 2. if(groups == 1){ for(int i=0;i<lines;i++){ if(edge[i][0] == 0 || edge[i][1] == 0){ // left bottom edge[lines][0] = (edge[i][0] == 0)? edge[i][1] : edge[i][0]; edge[i][0] = edge[i][1] = NO_EDGE; break; } } for(int i=0;i<lines;i++){ if(edge[i][0] == nodes-1 || edge[i][1] == nodes-1){ // right top edge[lines][1] = (edge[i][0] == nodes-1)? edge[i][1] : edge[i][0]; edge[i][0] = edge[i][1] = NO_EDGE; break; } } } else if(groups == 2){ for(int i=0;i<lines;i++){ if(edge[i][0] == 0 || edge[i][1] == 0){ // left bottom edge[lines][0] = (edge[i][0] == 0)? edge[i][1] : edge[i][0]; edge[i][0] = edge[i][1] = NO_EDGE; int tmp_line = (i+based_lines < lines)? i+based_lines : i+based_lines-lines; // right top edge[lines][1] = (edge[tmp_line][0] == nodes-1)? 
edge[tmp_line][1] : edge[tmp_line][0]; edge[tmp_line][0] = edge[tmp_line][1] = NO_EDGE; break; } } } } else if(deleted_edges == 2){ if(groups == 1){ for(int i=0;i<lines;i++){ if(edge[i][0] == 0 || edge[i][1] == 0){ // left bottom edge[lines][0] = (edge[i][0] == 0)? edge[i][1] : edge[i][0]; edge[i][0] = edge[i][1] = NO_EDGE; break; } } for(int i=0;i<lines;i++){ if(edge[i][0] == nodes-1 || edge[i][1] == nodes-1){ // right top edge[lines][1] = (edge[i][0] == nodes-1)? edge[i][1] : edge[i][0]; edge[i][0] = edge[i][1] = NO_EDGE; break; } } for(int i=0;i<lines;i++){ if(edge[i][0] == height-1 || edge[i][1] == height-1){ // left top edge[lines+1][0] = (edge[i][0] == height-1)? edge[i][1] : edge[i][0]; edge[i][0] = edge[i][1] = NO_EDGE; break; } } for(int i=0;i<lines;i++){ if(edge[i][0] == nodes-height || edge[i][1] == nodes-height){ // right bottom edge[lines+1][1] = (edge[i][0] == nodes-height)? edge[i][1] : edge[i][0]; edge[i][0] = edge[i][1] = NO_EDGE; break; } } } else if(groups == 2){ for(int i=0;i<lines;i++){ if(edge[i][0] == 0 || edge[i][1] == 0){ // left bottom edge[lines][0] = (edge[i][0] == 0)? edge[i][1] : edge[i][0]; edge[i][0] = edge[i][1] = NO_EDGE; int tmp_line = (i+based_lines < lines)? i+based_lines : i+based_lines-lines; // right top edge[lines][1] = (edge[tmp_line][0] == nodes-1)? edge[tmp_line][1] : edge[tmp_line][0]; edge[tmp_line][0] = edge[tmp_line][1] = NO_EDGE; break; } } for(int i=0;i<lines;i++){ if(edge[i][0] == height-1 || edge[i][1] == height-1){ // left top edge[lines+1][0] = (edge[i][0] == height-1)? edge[i][1] : edge[i][0]; edge[i][0] = edge[i][1] = NO_EDGE; int tmp_line = (i+based_lines < lines)? i+based_lines : i+based_lines-lines; // right bottom edge[lines+1][1] = (edge[tmp_line][0] == nodes-height)? edge[tmp_line][1] : edge[tmp_line][0]; edge[tmp_line][0] = edge[tmp_line][1] = NO_EDGE; break; } } } else if(groups == 4){ for(int i=0;i<lines;i++){ if(edge[i][0] == 0 || edge[i][1] == 0){ // left bottom edge[lines][0] = (edge[i][0] == 0)? edge[i][1] : edge[i][0]; edge[i][0] = edge[i][1] = NO_EDGE; int tmp_line = (i+based_lines*2 < lines)? i+based_lines*2 : i+based_lines*2-lines; // right top edge[lines][1] = (edge[tmp_line][0] == nodes-1)? edge[tmp_line][1] : edge[tmp_line][0]; edge[tmp_line][0] = edge[tmp_line][1] = NO_EDGE; tmp_line = (i+based_lines < lines)? i+based_lines : i+based_lines-lines; // left top edge[lines+1][0] = (edge[tmp_line][0] == height-1)? edge[tmp_line][1] : edge[tmp_line][0]; edge[tmp_line][0] = edge[tmp_line][1] = NO_EDGE; tmp_line = (i+based_lines*3 < lines)? i+based_lines*3 : i+based_lines*3-lines; // right bottom edge[lines+1][1] = (edge[tmp_line][0] == nodes-height)? edge[tmp_line][1] : edge[tmp_line][0]; edge[tmp_line][0] = edge[tmp_line][1] = NO_EDGE; break; } } } } if(deleted_edges == 1){ // printf("%d,%d %d,%d\n", // WIDTH(edge[lines][0],height), HEIGHT(edge[lines][0],height), // WIDTH(edge[lines][1],height), HEIGHT(edge[lines][1],height)); // printf("%d\n", DISTANCE(edge[lines][0], edge[lines][1], height)); int length = DISTANCE(edge[lines][0], edge[lines][1], height); while(length > low_length){ for(int i=0;i<lines;i++){ } } } else if(deleted_edges == 2){ } } static void verfy_graph(const int nodes, const int lines, const int edge[lines*2], const int height, const int low_length, const int max_degree) { PRINT_R0("Verifing a regular graph... 
"); for(int i=0;i<lines;i++){ if(edge[i*2] != NO_EDGE) if(DISTANCE(edge[i*2], edge[i*2+1], height) > low_length) ERROR("Over length in line %d: length = %d, distance = %d\n", i+1, low_length, DISTANCE(edge[i*2], edge[i*2+1], height)); } int degree[nodes]; count_degree(nodes, lines, edge, degree); for(int i=0;i<nodes;i++) if(degree[i] > max_degree) ERROR("Degree is over %d\n", degree[i]); PRINT_R0("OK\n"); } static void create_symmetric_edge(int *edge, const int based_nodes, const int based_lines, const int groups, const int max_degree, int *degree, const int nodes, const int lines, const int height, const int width, const int based_height, const int low_length) { for(int i=0;i<based_lines;i++) for(int j=0;j<2;j++) edge[i*2+j] = WIDTH(edge[i*2+j], based_height) * height + HEIGHT(edge[i*2+j], based_height); if(groups == 2){ for(int i=0;i<based_lines;i++) for(int j=0;j<2;j++) edge[(based_lines+i)*2+j] = ROTATE(edge[i*2+j], height, width, groups, 180); } else if(groups == 4){ for(int i=0;i<based_lines;i++){ for(int j=0;j<2;j++){ edge[(based_lines +i)*2+j] = ROTATE(edge[i*2+j], height, width, groups, 90); edge[(based_lines*2+i)*2+j] = ROTATE(edge[i*2+j], height, width, groups, 180); edge[(based_lines*3+i)*2+j] = ROTATE(edge[i*2+j], height, width, groups, 270); } } } int *tmp_edge = malloc(lines*2*sizeof(int)); int *tmp_degree = malloc(nodes*sizeof(int)); int (*adjacency)[max_degree] = malloc(sizeof(int)*nodes*max_degree); // int adjacency[nodes][max_degree]; for(int i=0;i<lines*INITIAL_TIMES;i++) // Give randomness exchange_edge(nodes, lines, (int (*)[2])edge, height, width, groups, low_length, 0); create_adjacency(nodes, lines, max_degree, degree, (const int (*)[2])edge, adjacency); int min_num = simple_bfs(nodes, max_degree, degree, (int *)adjacency); if(min_num != 0){ while(1){ memcpy(tmp_edge, edge, sizeof(int)*lines*2); memcpy(tmp_degree, degree, sizeof(int)*nodes); exchange_edge(nodes, lines, (int (*)[2])tmp_edge, height, width, groups, low_length, 0); create_adjacency(nodes, lines, max_degree, tmp_degree, (const int (*)[2])tmp_edge, adjacency); int tmp_num = simple_bfs(nodes, max_degree, tmp_degree, (int *)adjacency); if(tmp_num == 0){ memcpy(edge, tmp_edge, sizeof(int)*lines*2); break; } else{ if(tmp_num <= min_num){ min_num = tmp_num; memcpy(edge, tmp_edge, sizeof(int)*lines*2); memcpy(degree, tmp_degree, sizeof(int)*nodes); } } } } free(tmp_edge); free(tmp_degree); free(adjacency); } static int dist(const int x1, const int y1, const int x2, const int y2) { return(abs(x1 - x2) + abs(y1 - y2)); } static void lower_bound_of_diam_aspl(int *low_diam, double *low_ASPL, const int m, const int n, const int max_degree, const int length) { int moore[m*n], hist[m*n], mh[m*n]; int mn = m * n, current = max_degree, ii; double sum = 0; moore[0] = 1; moore[1] = max_degree + 1; for(ii=2;;ii++){ current = current * (max_degree - 1); moore[ii] = moore[ii-1] + current; if(moore[ii] >= mn){ moore[ii] = mn; break; } } int maxhop = MAX((m+n-2+(length-1))/length, ii); for(int i=ii+1;i<=maxhop;i++) moore[i] = mn; for(int i=0;i<m;i++){ for(int j=0;j<n;j++){ for(int k=0;k<=maxhop;k++) hist[k] = 0; for (int i2=0;i2<m;i2++) for(int j2=0;j2<n;j2++) hist[(dist(i,j,i2,j2)+length-1)/length]++; for(int k=1;k<=maxhop;k++) hist[k] += hist[k-1]; for(int k=0;k<=maxhop;k++) mh[k] = MIN(hist[k], moore[k]); for(int k=1;k<=maxhop;k++) sum += (double)(mh[k] - mh[k-1]) * k; } } int dboth = 0; for(dboth=0;;dboth++) if(mh[dboth] == mn) break; *low_diam = dboth; *low_ASPL = sum/((double)mn*(mn-1)); } static void 
output_params(const int max_degree, const int groups, const int low_length,
              const int random_seed, const double max_temp, const double min_temp,
              const long long ncalcs, const int cooling_cycle, const double cooling_rate,
              const char *infname, const char *outfname, const double average_time,
              const int deleted_edges, const bool enable_hill_climbing,
              const int width, const int height, const bool enable_bfs,
              const bool enable_fixed_temp, const double fixed_temp)
{
#ifdef NDEBUG
  PRINT_R0("NO DEBUG MODE\n");
#else
  PRINT_R0("DEBUG MODE\n");
#endif
  PRINT_R0("Seed : %d\n", random_seed);
  PRINT_R0("Processes: %d\n", procs);
#ifdef _OPENMP
  PRINT_R0("Threads : %d\n", omp_get_max_threads());
#endif
  if(enable_bfs) PRINT_R0("APSP : BFS\n");
  else           PRINT_R0("APSP : MATRIX Operation\n");
  if(enable_hill_climbing)
    PRINT_R0("Algorithm: Hill climbing Method\n");
  else{
    if(enable_fixed_temp) PRINT_R0("Algorithm: Fixed Temperature Simulated Annealing : %f\n", fixed_temp);
    else                  PRINT_R0("Algorithm: Simulated Annealing\n");
    PRINT_R0(" MAX Temperature: %f\n", max_temp);
    PRINT_R0(" MIN Temperature: %f\n", min_temp);
    PRINT_R0(" Cooling Cycle: %d\n", cooling_cycle);
    PRINT_R0(" Cooling Rate : %f\n", cooling_rate);
  }
  if(groups != 1)
    PRINT_R0(" Groups : %d\n", groups);
  PRINT_R0("Num. of Calculations: %lld\n", ncalcs);
  PRINT_R0(" Average APSP time : %f sec.\n", average_time);
  PRINT_R0(" Estimated elapsed time: %f sec.\n", average_time * ncalcs);
  if(infname[0] != NOT_C_DEFINED)
    PRINT_R0("Input filename: %s\n", infname);
  PRINT_R0(" (w x h, d, r) = (%d x %d, %d, %d)\n", width, height, max_degree, low_length);
  if(deleted_edges != 0)
    PRINT_R0(" delete_edges = %d\n", deleted_edges);
  if(outfname[0] != NOT_C_DEFINED)
    PRINT_R0("Output filename: %s\n", outfname);
  PRINT_R0("---\n");
}

static void output_file(FILE *fp, const int lines, const int height, const int edge[lines*2])
{
  for(int i=0;i<lines;i++)
    if(edge[i*2] != NO_EDGE)
      fprintf(fp, "%d,%d %d,%d\n",
              WIDTH(edge[i*2],   height), HEIGHT(edge[i*2],   height),
              WIDTH(edge[i*2+1], height), HEIGHT(edge[i*2+1], height));
}

int main(int argc, char *argv[])
{
  bool enable_hill_climbing = false, enable_detect_temp = false, enable_bfs = false, enable_halfway = false;
  char hostname[MPI_MAX_PROCESSOR_NAME];
  char infname[MAX_FILENAME_LENGTH] = {NOT_C_DEFINED}, outfname[MAX_FILENAME_LENGTH] = {NOT_C_DEFINED};
  int random_seed = 0, cooling_cycle = 1, groups = 1, deleted_edges = 0;
  int namelen, based_lines, lines, based_width, based_height, based_nodes, nodes;
  int diam = NOT_N_DEFINED, max_degree = NOT_N_DEFINED, low_diam = NOT_N_DEFINED;
  int width = NOT_N_DEFINED, height = NOT_N_DEFINED, low_length = NOT_N_DEFINED;
  long long ncalcs = DEFAULT_NCALCS, num_accepts = 0;
  double ASPL = NOT_N_DEFINED, low_ASPL = NOT_N_DEFINED, cooling_rate = NOT_N_DEFINED, max_diff_energy = NOT_N_DEFINED;
  double max_temp = NOT_N_DEFINED, min_temp = NOT_N_DEFINED, fixed_temp = NOT_N_DEFINED;
  int *edge = NULL, *degree = NULL;
  FILE *fp = NULL;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &procs);
  MPI_Get_processor_name(hostname, &namelen);
  // PRINT_R0("Run on %s\n", hostname);
  // time_t t = time(NULL);
  // PRINT_R0("%s---\n", ctime(&t));

  // Set arguments
  set_args(argc, argv, infname, &low_length, outfname, &random_seed,
           &ncalcs, &max_temp, &min_temp, &groups, &cooling_cycle,
           &enable_hill_climbing, &enable_detect_temp, &enable_bfs,
           &enable_halfway, &fixed_temp, &width, &height, &max_degree, &deleted_edges);

  // Set other arguments
  bool enable_max_temp = (max_temp != NOT_N_DEFINED);
  bool
enable_min_temp = (min_temp != NOT_N_DEFINED); bool enable_fixed_temp = (fixed_temp != NOT_N_DEFINED); bool enable_infname = (infname[0] != NOT_C_DEFINED); bool enable_outfname = (outfname[0] != NOT_C_DEFINED); bool enable_whd = (width != NOT_N_DEFINED && height != NOT_N_DEFINED && max_degree != NOT_N_DEFINED); // Check arguments if(low_length == NOT_N_DEFINED) ERROR("Must need -R\n"); if(enable_hill_climbing && enable_max_temp) ERROR("Both -Y and -w cannot be used.\n"); if(enable_hill_climbing && enable_min_temp) ERROR("Both -Y and -c cannot be used.\n"); if(enable_hill_climbing && enable_detect_temp) ERROR("Both -Y and -d cannot be used.\n"); if(!enable_infname && !enable_whd) ERROR("Must set -f or \"-W and -H and -D\"\n"); if(enable_halfway && !enable_infname) ERROR("Must set both -M and -f\n"); if(!enable_max_temp) max_temp = 100.0; if(!enable_min_temp) min_temp = 0.217147; if(max_temp == min_temp) ERROR("The same values in -w and -c.\n"); if(deleted_edges == 1 && groups == 4) ERROR("When -e is 1, -g must be 1 or 2.\n"); if(enable_detect_temp) ncalcs = DEFAULT_DETECT_NCALS; srandom(random_seed); if(enable_infname){ ERROR("NOT implement yet\n"); based_lines = count_lines(infname); lines = (enable_halfway)? based_lines : based_lines * groups; edge = malloc(sizeof(int)*lines*2); // int edge[lines][2]; read_file_lattice(edge, &based_width, &based_height, infname); based_nodes = max_node_num(based_lines, (int *)edge) + 1; if(enable_halfway){ based_nodes /= groups; based_lines /= groups; if(groups == 2){ based_height /= 2; } else if(groups == 4){ based_width /= 2; based_height /= 2; } } if(groups == 1){ height = based_height; width = based_width; } else if(groups == 2){ height = based_height * 2; width = based_width; } else{ // groups == 4 height = based_height * 2; width = based_width * 2; } nodes = based_nodes * groups; max_degree = 2 * lines / nodes; } else{ nodes = width * height; based_nodes = nodes / groups; lines = nodes * max_degree / 2; based_lines = lines / groups; edge = malloc(sizeof(int)*(lines+deleted_edges)*2); degree = malloc(sizeof(int)*nodes); // int degree[nodes]; if(groups == 1){ based_width = width; based_height = height; } else if(groups == 2){ based_width = width; based_height = height/2; } else{ // groups == 4 based_width = width/2; based_height = height/2; } } if(groups == 4 && (based_width != based_height)) ERROR("When g = 4, width(%d) must be equal to height(%d).\n", based_width, based_height); else if(groups == 4 && width%2 != 0 && height%2 != 0) ERROR("When g = 4, width(%d) and height(%d) must be divisible by 2.\n", width, height); else if(groups == 2 && height%2 != 0) ERROR("When g = 2, height(%d) must be divisible by 2.\n", height); else if(nodes%groups != 0) ERROR("nodes(%d) must be divisible by groups(%d)\n", nodes, groups); else if(lines%groups != 0) ERROR("(nodes*max_degree/2) must be divisible by groups(%d)\n", groups); else if(based_width*based_height != based_nodes) ERROR("Not grid graph (width %d x height %d != nodes %d).\n", based_width, based_height, based_nodes); if(!enable_infname) create_lattice(based_nodes, based_lines, based_width, based_height, max_degree, degree, low_length, edge); int *rotate_hash = malloc(nodes * sizeof(int)); create_rotate_hash(nodes, height, width, groups, rotate_hash); if(!enable_halfway && groups != 1) create_symmetric_edge(edge, based_nodes, based_lines, groups, max_degree, degree, nodes, lines, height, width, based_height, low_length); if(deleted_edges != 0) delete_corner(deleted_edges, lines, based_lines, nodes, width, 
height, groups, (int (*)[2])edge); lines += deleted_edges; EXIT(0); for(int i=0;i<lines;i++) if(edge[i*2] != NO_EDGE) printf("%d,%d %d,%d\n", WIDTH(edge[i*2 ], height), HEIGHT(edge[i*2 ], height), WIDTH(edge[i*2+1], height), HEIGHT(edge[i*2+1], height)); EXIT(0); verfy_graph(nodes, lines, edge, height, low_length, max_degree); lower_bound_of_diam_aspl(&low_diam, &low_ASPL, width, height, max_degree, low_length); check_current_edge(nodes, lines, max_degree, degree, edge, low_ASPL, low_diam, groups, height, based_height, enable_bfs, rotate_hash); double average_time = estimated_elapse_time(nodes, lines, max_degree, degree, edge, height, width, based_height, groups, low_length, enable_bfs, rotate_hash); if(enable_hill_climbing){ fixed_temp = max_temp = min_temp = 0.0; cooling_rate = 1.0; } else{ cooling_rate = pow(min_temp/max_temp, (double)cooling_cycle/ncalcs); } if(enable_outfname && rank == 0){ struct stat stat_buf; if(stat(outfname, &stat_buf) == 0) ERROR("Output file %s exsits. \n", outfname); if((fp = fopen(outfname, "w")) == NULL) ERROR("Cannot open %s\n", outfname); } output_params(max_degree, groups, low_length, random_seed, max_temp, min_temp, ncalcs, cooling_cycle, cooling_rate, infname, outfname, average_time, deleted_edges, enable_hill_climbing, width, height, enable_bfs, enable_fixed_temp, fixed_temp); // Optimization timer_clear_all(); timer_start(TIMER_SA); long long step = sa(nodes, lines, max_degree, degree, based_nodes, ncalcs, cooling_rate, low_diam, low_ASPL, enable_bfs, enable_hill_climbing, enable_detect_temp, &max_diff_energy, max_temp, min_temp, fixed_temp, edge, &diam, &ASPL, cooling_cycle, &num_accepts, width, based_width, height, based_height, low_length, groups, rotate_hash, enable_fixed_temp); timer_stop(TIMER_SA); if(enable_detect_temp){ // Set max temperature to accept it 50% in maximum diff energy. PRINT_R0("Proposed max temperature is %f\n", (-1.0 * max_diff_energy) / log(0.5)); // Set min temperature to accept it 0.01% in minimum diff energy. END("Proposed min temperature is %f\n", (-2.0) / log(0.0001)); } // Output results PRINT_R0("---\n"); PRINT_R0("Diam. k = %d ASPL l = %f Diam. gap = %d ASPL gap = %f\n", diam, ASPL, diam-low_diam, ASPL-low_ASPL); double time_sa = timer_read(TIMER_SA); double time_apsp = timer_read(TIMER_APSP); double time_check = timer_read(TIMER_CHECK); PRINT_R0("Steps: %lld Elapse time: %f sec. (APSP: %f sec. Check: %f sec. Other: %f sec.)\n", step, time_sa, time_apsp, time_check, time_sa-(time_apsp+time_check)); if(ncalcs > SKIP_ACCEPTS) PRINT_R0("Accept rate: %f (= %lld/%lld)\n", (double)num_accepts/(ncalcs-SKIP_ACCEPTS), num_accepts, ncalcs-SKIP_ACCEPTS); if(rank == 0 && enable_outfname){ output_file(fp, lines, height, edge); fclose(fp); } verfy_graph(nodes, lines, edge, height, low_length, max_degree); MPI_Finalize(); free(edge); free(degree); free(rotate_hash); return 0; }
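/*
 * Added side note on the proposed temperatures printed above, with a small
 * worked example (values are made up).  Under the Metropolis rule a
 * worsening move of size dE is accepted with probability p = exp(-dE / T),
 * so the temperature hitting a target acceptance rate is T = -dE / log(p):
 * the program uses p = 0.5 with the measured max_diff_energy, and p = 0.0001
 * with an assumed minimum diff energy of 2.  Note also that the geometric
 * schedule cooling_rate = pow(min_temp/max_temp, cooling_cycle/ncalcs)
 * decays the temperature from max_temp to exactly min_temp over the run.
 */
#include <math.h>
#include <stdio.h>

static double temp_for_acceptance(double diff_energy, double p)
{
  return -diff_energy / log(p); /* solve p = exp(-dE/T) for T */
}

void temperature_demo(void)
{
  double max_diff_energy = 8.0; /* example value, normally measured by sa() */
  printf("proposed max temp: %f\n", temp_for_acceptance(max_diff_energy, 0.5));
  printf("proposed min temp: %f\n", temp_for_acceptance(2.0, 0.0001));
}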
pi_omp_loop_8.c
/*
This program numerically computes the integral of 4/(1+x*x) from 0 to 1.
The value of this integral is pi -- which is great since it gives us an
easy way to check the answer.  This version parallelizes the loop with an
OpenMP reduction and sweeps the thread count from 1 to 16.  It uses the
timer from the OpenMP runtime library.

History: Written by Tim Mattson, 11/99.
*/
#include <stdio.h>
#include <omp.h>

static long num_steps = 1024 * 1024 * 1024;
double step;

int main ()
{
    int i, t;
    double x, pi, sum;
    double start_time, run_time;

    step = 1.0/(double) num_steps;

    for(t = 1; t <= 16; t*=2) {
        sum = 0.0; /* reset the accumulator for each thread count */
        start_time = omp_get_wtime();
        #pragma omp parallel num_threads(t) private(x)
        {
            #pragma omp for reduction(+:sum)
            for (i = 0; i < num_steps; i++){
                x = (i + 0.5)*step;
                sum = sum + 4.0/(1.0+x*x);
            }
        }
        pi = step * sum;
        run_time = omp_get_wtime() - start_time;
        printf("pi with %d threads: %.16lf in %lf seconds\n", t, pi, run_time);
    }
    return 0;
}
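/*
 * Build/run sketch (assumed toolchain, not part of the original source):
 *
 *     gcc -O2 -fopenmp pi_omp_loop_8.c -o pi && ./pi
 *
 * Every printed line should agree on pi to many digits (~3.14159265358979),
 * and the runtime should shrink with t up to the machine's core count; if
 * the values differed across thread counts, the sum accumulator was not
 * being reset between sweeps.
 */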
46a_so12_itt.c
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "ittnotify.h" #include "xmmintrin.h" #include "pmmintrin.h" #include <stdio.h> #include "omp.h" #define min(a, b) (((a) < (b)) ? (a) : (b)) #define max(a, b) (((a) > (b)) ? (a) : (b)) struct dataobj { void *restrict data; int *size; int *npsize; int *dsize; int *hsize; int *hofs; int *oofs; }; int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict usol_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads) { int (*restrict block_sizes) __attribute__ ((aligned (64))) = (int (*)) block_sizes_vec->data; float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data; int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float(*restrict save_src)[save_src_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_vec->size[1]])save_src_vec->data; int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; float(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (float(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; float(*restrict usol)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]] __attribute__((aligned(64))) = (float(*)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]])usol_vec->data; float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data; /* Flush denormal numbers to zero in hardware */ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); __itt_resume(); int xb_size = block_sizes[0]; int y0_blk0_size = block_sizes[3]; int x0_blk0_size = block_sizes[2]; int yb_size = block_sizes[1]; printf(" Tiles: %d, %d ::: Blocks %d, %d \n", x0_blk0_size, y0_blk0_size, xb_size, yb_size); int sf = 6; int t_blk_size = 2 * sf * (time_M - time_m); for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block { for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1) { for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1) { for (int time = t_blk, t0 = (time + 1) % (3), t1 = (time) % (3), t2 = (time + 2) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3), t1 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3)) { int tw = ((time / sf) % (time_M - time_m + 
1)); /* Begin section0 */ #pragma omp parallel num_threads(nthreads) { #pragma omp for collapse(2) schedule(dynamic, 1) for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size) { for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size) { for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++) { for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++) { #pragma omp simd aligned(damp, usol, vp : 32) for (int z = z_m; z <= z_M; z += 1) { float r7 = -2.98277778F * usol[t1][x - time + 12][y - time + 12][z + 12]; float r6 = 1.0 / dt; float r5 = 1.0 / (dt * dt); float r4 = 1.0 / (vp[x - time + 12][y - time + 12][z + 12] * vp[x - time + 12][y - time + 12][z + 12]); usol[t0][x - time + 12][y - time + 12][z + 12] = (r4 * (-r5 * (-2.0F * usol[t1][x - time + 12][y - time + 12][z + 12] + usol[t2][x - time + 12][y - time + 12][z + 12])) + r6 * (damp[x - time + 1][y - time + 1][z + 1] * usol[t1][x - time + 12][y - time + 12][z + 12]) + (r7 - 6.01250601e-5F * (usol[t1][x - time + 12][y - time + 12][z + 6] + usol[t1][x - time + 12][y - time + 12][z + 18]) + 1.03896104e-3F * (usol[t1][x - time + 12][y - time + 12][z + 7] + usol[t1][x - time + 12][y - time + 12][z + 17]) - 8.92857143e-3F * (usol[t1][x - time + 12][y - time + 12][z + 8] + usol[t1][x - time + 12][y - time + 12][z + 16]) + 5.29100529e-2F * (usol[t1][x - time + 12][y - time + 12][z + 9] + usol[t1][x - time + 12][y - time + 12][z + 15]) - 2.67857143e-1F * (usol[t1][x - time + 12][y - time + 12][z + 10] + usol[t1][x - time + 12][y - time + 12][z + 14]) + 1.71428571F * (usol[t1][x - time + 12][y - time + 12][z + 11] + usol[t1][x - time + 12][y - time + 12][z + 13])) / ((h_z * h_z)) + (r7 - 6.01250601e-5F * (usol[t1][x - time + 12][y - time + 6][z + 12] + usol[t1][x - time + 12][y - time + 18][z + 12]) + 1.03896104e-3F * (usol[t1][x - time + 12][y - time + 7][z + 12] + usol[t1][x - time + 12][y - time + 17][z + 12]) - 8.92857143e-3F * (usol[t1][x - time + 12][y - time + 8][z + 12] + usol[t1][x - time + 12][y - time + 16][z + 12]) + 5.29100529e-2F * (usol[t1][x - time + 12][y - time + 9][z + 12] + usol[t1][x - time + 12][y - time + 15][z + 12]) - 2.67857143e-1F * (usol[t1][x - time + 12][y - time + 10][z + 12] + usol[t1][x - time + 12][y - time + 14][z + 12]) + 1.71428571F * (usol[t1][x - time + 12][y - time + 11][z + 12] + usol[t1][x - time + 12][y - time + 13][z + 12])) / ((h_y * h_y)) + (r7 - 6.01250601e-5F * (usol[t1][x - time + 6][y - time + 12][z + 12] + usol[t1][x - time + 18][y - time + 12][z + 12]) + 1.03896104e-3F * (usol[t1][x - time + 7][y - time + 12][z + 12] + usol[t1][x - time + 17][y - time + 12][z + 12]) - 8.92857143e-3F * (usol[t1][x - time + 8][y - time + 12][z + 12] + usol[t1][x - time + 16][y - time + 12][z + 12]) + 5.29100529e-2F * (usol[t1][x - time + 9][y - time + 12][z + 12] + usol[t1][x - time + 15][y - time + 12][z + 12]) - 2.67857143e-1F * (usol[t1][x - time + 10][y - time + 12][z + 12] + usol[t1][x - time + 14][y - time + 12][z + 12]) + 1.71428571F * (usol[t1][x - time + 11][y - time + 12][z + 12] + usol[t1][x - time + 13][y - time + 12][z + 12])) / ((h_x * h_x))) / (r4 * r5 + r6 * damp[x - time + 1][y - time + 1][z + 1]); } #pragma omp simd aligned(damp, usol, vp : 32) for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1) { int zind = sp_source_mask[x - time][y - time][sp_zi]; float r0 
= save_src[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind]; usol[t0][x - time + 12][y - time + 12][zind + 12] += r0;} } } } } } } } } } /* End section0 */ __itt_pause(); return 0; }
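/*
 * Hedged sketch (added; not produced by the code generator above): the
 * kernel stores only three time levels of usol and rotates them through the
 * t0/t1/t2 indices computed modulo 3 -- t0 is written, t1 is the current
 * level, t2 supplies the second time derivative.  The toy 1D stencil below
 * isolates that rotation pattern; sizes and coefficients are invented.
 */
#include <stdio.h>

enum { DEMO_N = 16 };

void rotation_demo(void)
{
  static double u[3][DEMO_N]; /* three time buffers, like usol[3][...] */
  int time;
  for (int i = 0; i < DEMO_N; i++)
    u[0][i] = (i == DEMO_N / 2) ? 1.0 : 0.0; /* a single hot cell */

  for (time = 0; time < 10; time++) {
    int t1 = time % 3;       /* level being read    */
    int t0 = (time + 1) % 3; /* level being written */
    for (int i = 1; i < DEMO_N - 1; i++) /* 3-point smoothing step */
      u[t0][i] = 0.5 * u[t1][i] + 0.25 * (u[t1][i - 1] + u[t1][i + 1]);
  }
  printf("center value after %d steps: %f\n", time, u[time % 3][DEMO_N / 2]);
}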
omp_dbg.c
#include <stddef.h> #include <assert.h> #include <stdlib.h> #include <stdbool.h> #include <omp.h> #include <stdio.h> #include "list.h" #include "context_descriptor.h" #include "register_context.h" #include "dvmh_omp_interval.h" #include "dvmh_omp_thread_context.h" #include "dvmh_omp_runtime_context.h" #include "omp_dbg.h" #define ROOT_INTERVAL_CONTEXT_DESCRIPTOR "42*type=interval*file=MAIN*line1=0*line2=0**" #define METRICS_FILE_NAME "interval_stats_v2.csv" // We use this threadprivate variable to direct access to own thread_id from each thread. static int thread_id; // There is no guarantee that omp_get_wtime() return values are synced between threads. // We have to 'normalize' it's return values if we want to compare t1 and t2; t1 and t2 are obtained from different threads. static double world_start; #pragma omp threadprivate(thread_id, world_start) // We store runtime context here. static dvmh_omp_runtime_context_t *runtime_context = NULL; // Temporary list. We use this to store context descriptors. static list *registered_descriptors = NULL; #define _UNIX_UNDERSCORE_ #ifdef _WIN_INTEL_FORT_ #define DBG_Init_Handles DBG_INIT_HANDLES #endif #ifdef _UNIX_IBM_FORT_ #define DBG_Init_Handles dbg_init_handles #endif #ifdef _UNIX_UNDERSCORE_ #define DBG_Init_Handles dbg_init_handles_ #endif // It is some dirty trick. // Normally all DBG_Get_Handle functions are called inside of DBG_Init_Handles. // DBG_Init_Handles is defined in dbg_init.h header file. // We need that all DBG_Get_Handle functions be called before DBG_Init. // So, we explicitly call DBG_Get_Handle in DBG_Init. // We use 'handles_has_been_inited' flag in order to prevent // double-call DBG_Get_Handle for same context descriptors. void DBG_Init_Handles(void); static bool handles_has_been_inited = false; void DBG_Init(long *ThreadID) { DBG_Init_Handles(); handles_has_been_inited = true; assert(registered_descriptors != NULL); const int num_context_descriptors = list_size(registered_descriptors); assert(num_context_descriptors > 0); const int num_threads = omp_get_max_threads(); runtime_context = (dvmh_omp_runtime_context_t *) malloc(sizeof(dvmh_omp_runtime_context_t)); assert(runtime_context != NULL); dvmh_omp_runtime_context_init(runtime_context, num_threads, num_context_descriptors); // Copy pointers to registered descriptors of runtime_context. { list_iterator *it = list_iterator_new(registered_descriptors); while (list_iterator_has_next(it)) { context_descriptor *cd = (context_descriptor *) list_iterator_next(it); dvmh_omp_runtime_context_set_context_descriptor(runtime_context, cd, cd->info.id); } list_iterator_destroy(it); } // Set threadprivate variables #pragma omp parallel { thread_id = omp_get_thread_num(); #pragma omp barrier world_start = omp_get_wtime(); // By assigning zero we assume that omp_get_wtime() result is synced between threads. It's temporary solution. // It appears to be so (at least on intel compiler 'icc (ICC) 19.0.0.070 20180524'). // But this behavior is not guaranteed by the OpenMP standard. world_start = 0.0; } // Enter to the top level interval in master thread. 
const int root_interval_id = 0;
    dvmh_omp_thread_context_t *thread_context =
            dvmh_omp_runtime_context_get_thread_context(runtime_context, thread_id);
    dvmh_omp_thread_context_enter_interval(thread_context, root_interval_id);
    // Stats
    dvmh_omp_interval_t *i = dvmh_omp_thread_context_current_interval(thread_context);
    dvmh_omp_interval_set_parent_id(i, DVMH_OMP_INTERVAL_NO_PARENT);
    // We subtract world_start because DBG_Finalize could be called from different threads.
    double now = omp_get_wtime() - world_start;
    dvmh_omp_interval_add_used_time(i, -now);
    dvmh_omp_interval_add_execution_count(i, 1L);
    // Calculate execution time. There is no need for a lock here.
    dvmh_omp_runtime_context_add_execution_time(runtime_context, root_interval_id, -now);
    dvmh_omp_runtime_context_set_interval_non_parallel(runtime_context, root_interval_id);
    return;
};

static void print_interval_tree_csv(
        const char *filename,
        dvmh_omp_runtime_context_t *ctx,
        dvmh_omp_interval_t *tree);

void DBG_Finalize()
{
    list *registered_descriptors_copy;
    const int master_thread_id = 0;
    const int root_interval_id = 0;

    #pragma omp critical (dbg_finalize)
    {
        registered_descriptors_copy = registered_descriptors;
        registered_descriptors = NULL;
    }
    if (registered_descriptors_copy == NULL) return;

    // We have to close the root interval.
    dvmh_omp_thread_context_t *thread_context =
            dvmh_omp_runtime_context_get_thread_context(runtime_context, master_thread_id);
    dvmh_omp_interval_t *i = dvmh_omp_thread_context_current_interval(thread_context);
    // We subtract world_start because DBG_Finalize could be called from different threads.
    double now = omp_get_wtime() - world_start;
    // Stats
    dvmh_omp_interval_add_used_time(i, now);
    // There is no need for a lock here.
    dvmh_omp_runtime_context_add_execution_time(runtime_context, root_interval_id, now);
    // Leave the top level interval in master thread.
    dvmh_omp_thread_context_leave_interval(thread_context);

    dvmh_omp_interval_t *tree = dvmh_omp_runtime_context_integrate(runtime_context);
    // Print out the results.
    print_interval_tree_csv(METRICS_FILE_NAME, runtime_context, tree);
    // Cleanup stage.
    dvmh_omp_runtime_context_integrated_free(runtime_context, tree);
    dvmh_omp_runtime_context_deinit(runtime_context);
    free(runtime_context);
    runtime_context = NULL;
    list_destroy_with(registered_descriptors_copy, (list_element_destroy_t *) unregister_context);
    handles_has_been_inited = false;
};

void DBG_Get_Handle(long *StaticContextHandle, char* ContextString, long StringLength)
{
    if (handles_has_been_inited) {
        return;
    }
    // Make sure that a long is wide enough to hold a pointer.
    assert(sizeof(long) == sizeof(void *));
    // Create the list if it has not been created yet.
    if (registered_descriptors == NULL){
        // Keep the side effect out of assert(): it disappears under NDEBUG.
        registered_descriptors = list_create();
        assert(registered_descriptors != NULL);
        // Create a context descriptor for the top level interval.
        const int id = list_size(registered_descriptors);
        context_descriptor *cd = register_context(ROOT_INTERVAL_CONTEXT_DESCRIPTOR, id);
        list_append_tail(registered_descriptors, cd);
    }
    // Assign the id as the registration sequence number.
    const int id = list_size(registered_descriptors);
    context_descriptor *cd = register_context(ContextString, id);
    list_append_tail(registered_descriptors, cd);
    *StaticContextHandle = (long) cd;
}

// We assume that there is no inner level parallelism.
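/*
 * Added clarifying note on the assumption above: thread_id is threadprivate
 * and the spawner interval id is stored once in the shared runtime context,
 * so a nested (inner) parallel region would overwrite the id set by the
 * outer DBG_BeforeParallel and slave threads would charge their time to the
 * wrong interval.  Running with nesting disabled (e.g. omp_set_nested(0) or
 * OMP_NESTED=false) preserves the single-spawner invariant this relies on.
 */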
void DBG_BeforeParallel (long *StaticContextHandle, long *ThreadID, int *NumThreadsResults, int *IfExprResult) { // We are in master thread now dvmh_omp_thread_context_t *thread_context = dvmh_omp_runtime_context_get_thread_context(runtime_context, thread_id); dvmh_omp_interval_t *i = dvmh_omp_thread_context_current_interval(thread_context); const int spawner_id = dvmh_omp_interval_get_id(i); dvmh_omp_runtime_context_set_threads_spawner_id(runtime_context, spawner_id); dvmh_omp_runtime_context_set_interval_threads_spawner(runtime_context, spawner_id); dvmh_omp_runtime_context_set_parallel_mode(runtime_context); }; void DBG_ParallelEvent (long *StaticContextHandle, long *ThreadID) { const bool is_master_thread = thread_id == 0; if (is_master_thread) return; dvmh_omp_thread_context_t *thread_context = dvmh_omp_runtime_context_get_thread_context(runtime_context, thread_id); // Set inital 'root' interval for current thread in current parallel region const int spawner_id = dvmh_omp_runtime_context_get_threads_spawner_id(runtime_context); dvmh_omp_thread_context_enter_interval(thread_context, spawner_id); dvmh_omp_interval_t *i = dvmh_omp_thread_context_current_interval(thread_context); double now = omp_get_wtime(); dvmh_omp_interval_add_used_time(i, -now); dvmh_omp_interval_set_parent_id(i, DVMH_OMP_INTERVAL_NO_PARENT); }; void DBG_ParallelEventEnd (long *StaticContextHandle, long *ThreadID) { const bool is_master_thread = thread_id == 0; double now = omp_get_wtime(); dvmh_omp_runtime_context_end_parallel(runtime_context, thread_id, now - world_start); if (is_master_thread) return; // Leave thread spawner interval in slave threads. dvmh_omp_thread_context_t *thread_context = dvmh_omp_runtime_context_get_thread_context(runtime_context, thread_id); dvmh_omp_interval_t *i= dvmh_omp_thread_context_current_interval(thread_context); dvmh_omp_interval_add_used_time(i, now); dvmh_omp_thread_context_leave_interval(thread_context); }; void DBG_AfterParallel (long *StaticContextHandle, long *ThreadID) { dvmh_omp_runtime_context_unset_parallel_mode(runtime_context); // We are in master thread now dvmh_omp_thread_context_t *thread_context = dvmh_omp_runtime_context_get_thread_context(runtime_context, thread_id); dvmh_omp_interval_t *i = dvmh_omp_thread_context_current_interval(thread_context); const int interval_id = dvmh_omp_interval_get_id(i); double now = omp_get_wtime(); dvmh_omp_runtime_context_after_parallel(runtime_context, interval_id, now - world_start); dvmh_omp_runtime_context_unset_threads_spawner_id(runtime_context); }; void DBG_BeforeOMPLoop(long *StaticContextHandle, long *ThreadID, long *Init, long *Last, long *Step, int *ChunkSize){}; void DBG_OMPIter(long *StaticContextHandle, long *ThreadID, long *Index){}; void DBG_AfterOMPLoop (long *StaticContextHandle, long *ThreadID){}; void DBG_BeforeSections (long *StaticContextHandle, long *ThreadID){}; void DBG_AfterSections(long *StaticContextHandle, long *ThreadID){}; void DBG_SectionEvent(long *StaticContextHandle1, long *ThreadID){}; void DBG_SectionEventEnd(long *StaticContextHandle1, long *ThreadID){}; void DBG_BeforeSingle (long *StaticContextHandle, long *ThreadID){}; void DBG_SingleEvent(long *StaticContextHandle, long *ThreadID){}; void DBG_SingleEventEnd(long *StaticContextHandle, long *ThreadID){}; void DBG_AfterSingle (long *StaticContextHandle, long *ThreadID){}; void DBG_BeforeWorkshare (long *StaticContextHandle, long *ThreadID){}; void DBG_AfterWorkshare(long *StaticContextHandle, long *ThreadID){}; void DBG_MasterBegin(long 
*StaticContextHandle, long *ThreadID){}; void DBG_MasterEnd(long *StaticContextHandle, long *ThreadID){}; void DBG_BeforeCritical (long *StaticContextHandle, long *ThreadID) { dvmh_omp_thread_context_t *thread_context = dvmh_omp_runtime_context_get_thread_context(runtime_context, thread_id); dvmh_omp_interval_t *i= dvmh_omp_thread_context_current_interval(thread_context); double now = omp_get_wtime(); dvmh_omp_interval_add_critical_time(i, -now); }; void DBG_CriticalEvent(long *StaticContextHandle, long *ThreadID) { dvmh_omp_thread_context_t *thread_context = dvmh_omp_runtime_context_get_thread_context(runtime_context, thread_id); dvmh_omp_interval_t *i= dvmh_omp_thread_context_current_interval(thread_context); double now = omp_get_wtime(); dvmh_omp_interval_add_critical_time(i, now); }; void DBG_CriticalEventEnd(long *StaticContextHandle, long *ThreadID){}; void DBG_AfterCritical(long *StaticContextHandle, long *ThreadID){}; void DBG_BeforeBarrier(long *StaticContextHandle, long *ThreadID) { dvmh_omp_thread_context_t *thread_context = dvmh_omp_runtime_context_get_thread_context(runtime_context, thread_id); dvmh_omp_interval_t *i= dvmh_omp_thread_context_current_interval(thread_context); double now = omp_get_wtime(); dvmh_omp_interval_add_barrier_time(i, -now); }; void DBG_AfterBarrier(long *StaticContextHandle, long *ThreadID) { dvmh_omp_thread_context_t *thread_context = dvmh_omp_runtime_context_get_thread_context(runtime_context, thread_id); dvmh_omp_interval_t *i= dvmh_omp_thread_context_current_interval(thread_context); double now = omp_get_wtime(); dvmh_omp_interval_add_barrier_time(i, now); }; void DBG_FlushEvent(long *StaticContextHandle, long *ThreadID) { dvmh_omp_thread_context_t *thread_context = dvmh_omp_runtime_context_get_thread_context(runtime_context, thread_id); dvmh_omp_interval_t *i= dvmh_omp_thread_context_current_interval(thread_context); double now = omp_get_wtime(); dvmh_omp_interval_add_flush_time(i, -now); }; void DBG_FlushEventEnd(long *StaticContextHandle, long *ThreadID) { dvmh_omp_thread_context_t *thread_context = dvmh_omp_runtime_context_get_thread_context(runtime_context, thread_id); dvmh_omp_interval_t *i= dvmh_omp_thread_context_current_interval(thread_context); double now = omp_get_wtime(); dvmh_omp_interval_add_flush_time(i, now); }; void DBG_BeforeOrdered (long *StaticContextHandle, long *ThreadID){}; void DBG_OrderedEvent(long *StaticContextHandle, long *ThreadID){}; void DBG_AfterOrdered(long *StaticContextHandle, long *ThreadID){}; void DBG_ThreadPrivateEvent(long *StaticContextHandle, long *ThreadID){}; void DBG_RegVar(long *StaticContextHandle, long *ThreadID, void*pAddr){}; void DBG_RegArr(long *StaticContextHandle, long *ThreadID, long *ArrSize, void* pAddr){}; void DBG_RegCommon(long *StaticContextHandle, long *ThreadID){}; void DBG_ReadVar(long* StaticContextHandle, long *ThreadID, void*pAddr, long *var_name){}; void DBG_ReadArr(long* StaticContextHandle, long *ThreadID, void*pAddr, long *var_name, void*pBase){}; void DBG_WriteVarBegin(long *StaticContextHandle, long *ThreadID, void*pAddr, long* var_name){}; void DBG_WriteArrBegin(long *StaticContextHandle, long *ThreadID, void*pAddr, long* var_name, void*pBase){}; void DBG_WriteEnd(long* StaticContextHandle, long *ThreadID, void*pAddr, long* var_name){}; void DBG_BegSL(long *StaticContextHandle, long *ThreadID, long *Init, long *Last, long *Step){}; void DBG_SIter(long *StaticContextHandle, long *ThreadID, long *Index){}; void DBG_EndSL(long *StaticContextHandle, long *ThreadID){}; void 
DBG_BeforeFuncCall(long *StaticContextHandle, long *ThreadID){}; void DBG_FuncParVar(long *StaticContextHandle, long *ThreadID, int *Position, void*pAddr, long *var_name, int *IsRead){}; void DBG_FuncParArr(long *StaticContextHandle, long *ThreadID, int *Position, void*pAddr, long *var_name, void*pBase, int *IsRead){}; void DBG_AfterFuncCall(long *StaticContextHandle, long *ThreadID){}; void DBG_FuncBegin(long *StaticContextHandle, long *ThreadID){}; void DBG_FuncEnd(long *StaticContextHandle, long *ThreadID){}; void DBG_RegParVar(long *StaticContextHandle, long *ThreadID, void*pAddr, int *Position){}; void DBG_RegParArr(long *StaticContextHandle, long *ThreadID, long *ArrSize, void*pAddr, int *Position){}; void DBG_SIfIter(long *StaticContextHandle, long *ThreadID, long *Index, long *IfVar){}; void DBG_OMPIfIter(long *StaticContextHandle, long *ThreadID, long *Index, long *IfVar){}; void DBG_Type_Control(){}; long DBG_Get_Addr(void *VarPtr) { return (long) VarPtr; }; void DBG_BeforeIO(long *StaticContextHandle, long *ThreadID) { dvmh_omp_thread_context_t *thread_context = dvmh_omp_runtime_context_get_thread_context(runtime_context, thread_id); dvmh_omp_interval_t *i= dvmh_omp_thread_context_current_interval(thread_context); double now = omp_get_wtime(); dvmh_omp_interval_add_io_time(i, -now); }; void DBG_AfterIO(long *StaticContextHandle, long *ThreadID) { dvmh_omp_thread_context_t *thread_context = dvmh_omp_runtime_context_get_thread_context(runtime_context, thread_id); dvmh_omp_interval_t *i= dvmh_omp_thread_context_current_interval(thread_context); double now = omp_get_wtime(); dvmh_omp_interval_add_io_time(i, now); }; void DBG_BeforeInterval (long *StaticContextHandle, long *ThreadID, long *IntervalIndex) { context_descriptor *cd = (context_descriptor *) *StaticContextHandle; const int interval_id = cd->info.id; dvmh_omp_thread_context_t *thread_context = dvmh_omp_runtime_context_get_thread_context(runtime_context, thread_id); dvmh_omp_interval_t *parent = dvmh_omp_thread_context_current_interval(thread_context); const int parent_id = dvmh_omp_interval_get_id(parent); dvmh_omp_thread_context_enter_interval(thread_context, interval_id); dvmh_omp_interval_t *i= dvmh_omp_thread_context_current_interval(thread_context); dvmh_omp_interval_set_parent_id(i, parent_id); dvmh_omp_interval_add_execution_count(i, 1L); double now; // calculate execution time. if ( dvmh_omp_runtime_context_is_parallel_mode(runtime_context)) { now = omp_get_wtime(); double normalized_now = now - world_start; dvmh_omp_runtime_context_lock_interval(runtime_context, interval_id); if (dvmh_omp_runtime_context_get_interval_visitors(runtime_context, interval_id) == 0) { dvmh_omp_runtime_context_add_execution_time(runtime_context, interval_id, -normalized_now); } dvmh_omp_runtime_context_inc_interval_visitors(runtime_context, interval_id); dvmh_omp_runtime_context_unlock_interval(runtime_context, interval_id); } else { // Don't use lock if it is non-parallel region. 
now = omp_get_wtime(); dvmh_omp_runtime_context_set_interval_non_parallel(runtime_context, interval_id); dvmh_omp_runtime_context_add_execution_time(runtime_context, interval_id, -now); } dvmh_omp_interval_add_used_time(i, -now); }; void DBG_AfterInterval (long *StaticContextHandle, long *ThreadID, long *IntervalIndex) { dvmh_omp_thread_context_t *thread_context = dvmh_omp_runtime_context_get_thread_context(runtime_context, thread_id); dvmh_omp_interval_t *i= dvmh_omp_thread_context_current_interval(thread_context); const int interval_id = dvmh_omp_interval_get_id(i); double now; // calculate execution time. if (dvmh_omp_runtime_context_is_parallel_mode(runtime_context)) { now = omp_get_wtime(); double normalized_now = now - world_start; dvmh_omp_runtime_context_lock_interval(runtime_context, interval_id); dvmh_omp_runtime_context_dec_interval_visitors(runtime_context, interval_id); if (dvmh_omp_runtime_context_get_interval_visitors(runtime_context, interval_id) == 0) { dvmh_omp_runtime_context_add_execution_time(runtime_context, interval_id, normalized_now); } dvmh_omp_runtime_context_unlock_interval(runtime_context, interval_id); } else { // Don't use lock if it is non-parallel region. now = omp_get_wtime(); dvmh_omp_runtime_context_add_execution_time(runtime_context, interval_id, now); } dvmh_omp_interval_add_used_time(i, now); dvmh_omp_thread_context_leave_interval(thread_context); }; static void print_interval_node( FILE *fd, dvmh_omp_runtime_context_t *ctx, dvmh_omp_interval_t *node ) { assert(node != NULL); if (dvmh_omp_interval_has_subintervals(node)) { dvmh_omp_subintervals_iterator_t *it = dvmh_omp_subintervals_iterator_new(node); while (dvmh_omp_subintervals_iterator_has_next(it)) { dvmh_omp_interval_t *child = dvmh_omp_subintervals_iterator_next(it); print_interval_node(fd, ctx, child); } dvmh_omp_subintervals_iterator_destroy(it); } const int id = dvmh_omp_interval_get_id(node); context_descriptor *cd = dvmh_omp_runtime_context_context_descriptor(ctx, id); /* write row */ fprintf(fd, "%d,", id); fprintf(fd, "%d,", dvmh_omp_interval_execution_count(node)); fprintf(fd, "%lf,", dvmh_omp_interval_io_time(node)); fprintf(fd, "%lf,", dvmh_omp_interval_execution_time(node)); fprintf(fd, "%lf,", dvmh_omp_interval_sync_barrier_time(node)); fprintf(fd, "%lf,", dvmh_omp_interval_used_time(node)); fprintf(fd, "%d,", dvmh_omp_interval_used_threads_num(node)); fprintf(fd, "%lf,", dvmh_omp_interval_idle_critical_time(node)); fprintf(fd, "%lf,", dvmh_omp_interval_sync_flush_time(node)); fprintf(fd, "%lf,", dvmh_omp_interval_idle_parallel_time(node)); fprintf(fd, "%lf,", dvmh_omp_interval_load_imbalance_time(node)); fprintf(fd, "%lf,", dvmh_omp_interval_thread_prod_max(node)); fprintf(fd, "%lf,", dvmh_omp_interval_thread_prod_min(node)); fprintf(fd, "%lf,", dvmh_omp_interval_thread_prod_avg(node)); fprintf(fd, "%lf,", dvmh_omp_interval_total_time(node)); fprintf(fd, "%lf,", dvmh_omp_interval_lost_time(node)); fprintf(fd, "%lf,", dvmh_omp_interval_productive_time(node)); fprintf(fd, "%lf,", dvmh_omp_interval_insufficient_parallelism(node)); fprintf(fd, "%lf,", dvmh_omp_interval_efficiency(node)); fprintf(fd, "%d,", dvmh_omp_interval_is_in_parallel(node)); fprintf(fd, "%d,", cd->info.begin_line); fprintf(fd, "%s,", cd->info.file_name); if (dvmh_omp_interval_get_id(node) == 0) { fprintf(fd, "%d", dvmh_omp_interval_get_parent_id(node)); } else { fprintf(fd, "%d\r\n", dvmh_omp_interval_get_parent_id(node)); } } static void print_interval_tree_csv( const char *filename, dvmh_omp_runtime_context_t 
*ctx, dvmh_omp_interval_t *tree) { assert(tree != NULL); assert(filename != NULL); FILE *fd = fopen(filename, "w"); assert(fd != NULL); /* write header */ fprintf(fd, "id,"); fprintf(fd, "calls count,"); fprintf(fd, "io time,"); fprintf(fd, "execution time,"); fprintf(fd, "idle barrier time,"); fprintf(fd, "used time,"); fprintf(fd, "used threads,"); fprintf(fd, "idle critical time,"); fprintf(fd, "flush time,"); fprintf(fd, "idle parallel,"); fprintf(fd, "load imbalance time,"); fprintf(fd, "thread prod max,"); fprintf(fd, "thread prod min,"); fprintf(fd, "thread prod avg,"); fprintf(fd, "total time,"); fprintf(fd, "lost time,"); fprintf(fd, "productive time,"); fprintf(fd, "insufficient parallelism,"); fprintf(fd, "efficiency,"); fprintf(fd, "in parallel,"); fprintf(fd, "begin line,"); fprintf(fd, "file name,"); fprintf(fd, "parent id\r\n"); /* write rows */ print_interval_node(fd, ctx, tree); fclose(fd); }
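/*
 * Minimal driver sketch (added illustration, not part of the tracer): the
 * DBG_* hooks are normally injected by the instrumenting compiler, but the
 * call protocol can be exercised by hand.  The descriptor string mimics the
 * "<id>*type=...*...**" shape of ROOT_INTERVAL_CONTEXT_DESCRIPTOR and is
 * made up; DBG_Init_Handles is stubbed here because real builds generate it.
 */
void DBG_Init_Handles(void) { /* stand-in for the generated registration code */ }

static long demo_handle;

void driver_sketch(void)
{
    long tid = 0;
    char desc[] = "42*type=interval*file=demo.c*line1=1*line2=9**";
    DBG_Get_Handle(&demo_handle, desc, (long) sizeof(desc));
    DBG_Init(&tid);                               // opens the root interval
    DBG_BeforeInterval(&demo_handle, &tid, NULL); // enter the registered interval
    DBG_AfterInterval(&demo_handle, &tid, NULL);  // and leave it again
    DBG_Finalize();                               // writes interval_stats_v2.csv
}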
transform.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M % % T R R A A NN N SS F O O R R MM MM % % T RRRR AAAAA N N N SSS FFF O O RRRR M M M % % T R R A A N NN SS F O O R R M M % % T R R A A N N SSSSS F OOO R R M M % % % % % % MagickCore Image Transform Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/memory_.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/resource_.h" #include "MagickCore/resize.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o O r i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoOrientImage() adjusts an image so that its orientation is suitable for % viewing (i.e. top-left orientation). % % The format of the AutoOrientImage method is: % % Image *AutoOrientImage(const Image *image, % const OrientationType orientation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o orientation: Current image orientation. % % o exception: Return any errors or warnings in this structure. 
% */ MagickExport Image *AutoOrientImage(const Image *image, const OrientationType orientation,ExceptionInfo *exception) { Image *orient_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); orient_image=(Image *) NULL; switch(orientation) { case UndefinedOrientation: case TopLeftOrientation: default: { orient_image=CloneImage(image,0,0,MagickTrue,exception); break; } case TopRightOrientation: { orient_image=FlopImage(image,exception); break; } case BottomRightOrientation: { orient_image=RotateImage(image,180.0,exception); break; } case BottomLeftOrientation: { orient_image=FlipImage(image,exception); break; } case LeftTopOrientation: { orient_image=TransposeImage(image,exception); break; } case RightTopOrientation: { orient_image=RotateImage(image,90.0,exception); break; } case RightBottomOrientation: { orient_image=TransverseImage(image,exception); break; } case LeftBottomOrientation: { orient_image=RotateImage(image,270.0,exception); break; } } if (orient_image != (Image *) NULL) orient_image->orientation=TopLeftOrientation; return(orient_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ChopImage() removes a region of an image and collapses the image to occupy % the removed portion. % % The format of the ChopImage method is: % % Image *ChopImage(const Image *image,const RectangleInfo *chop_info) % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o chop_info: Define the region of the image to chop. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info, ExceptionInfo *exception) { #define ChopImageTag "Chop/Image" CacheView *chop_view, *image_view; Image *chop_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo extent; ssize_t y; /* Check chop geometry. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(chop_info != (RectangleInfo *) NULL); if (((chop_info->x+(ssize_t) chop_info->width) < 0) || ((chop_info->y+(ssize_t) chop_info->height) < 0) || (chop_info->x > (ssize_t) image->columns) || (chop_info->y > (ssize_t) image->rows)) ThrowImageException(OptionWarning,"GeometryDoesNotContainImage"); extent=(*chop_info); if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns) extent.width=(size_t) ((ssize_t) image->columns-extent.x); if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows) extent.height=(size_t) ((ssize_t) image->rows-extent.y); if (extent.x < 0) { extent.width-=(size_t) (-extent.x); extent.x=0; } if (extent.y < 0) { extent.height-=(size_t) (-extent.y); extent.y=0; } chop_image=CloneImage(image,image->columns-extent.width,image->rows- extent.height,MagickTrue,exception); if (chop_image == (Image *) NULL) return((Image *) NULL); /* Extract chop image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); chop_view=AcquireAuthenticCacheView(chop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,chop_image,extent.y,1) #endif for (y=0; y < (ssize_t) extent.y; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel); if ((traits == UndefinedPixelTrait) || (chop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(chop_image,channel,p[i],q); } q+=GetPixelChannels(chop_image); } p+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ChopImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } /* Extract chop image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,chop_image,image->rows-(extent.y+extent.height),1) #endif for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y, image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel); if ((traits == UndefinedPixelTrait) || (chop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(chop_image,channel,p[i],q); } q+=GetPixelChannels(chop_image); } p+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ChopImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } chop_view=DestroyCacheView(chop_view); image_view=DestroyCacheView(image_view); chop_image->type=image->type; if (status == MagickFalse) chop_image=DestroyImage(chop_image); return(chop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n s o 
l i d a t e C M Y K I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConsolidateCMYKImage() consolidates separate C, M, Y, and K planes into a % single image. % % The format of the ConsolidateCMYKImage method is: % % Image *ConsolidateCMYKImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image sequence. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConsolidateCMYKImages(const Image *images, ExceptionInfo *exception) { CacheView *cmyk_view, *image_view; Image *cmyk_image, *cmyk_images; ssize_t j; ssize_t y; /* Consolidate separate C, M, Y, and K planes into a single image. */ assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cmyk_images=NewImageList(); for (j=0; j < (ssize_t) GetImageListLength(images); j+=4) { ssize_t i; assert(images != (Image *) NULL); cmyk_image=CloneImage(images,0,0,MagickTrue, exception); if (cmyk_image == (Image *) NULL) break; if (SetImageStorageClass(cmyk_image,DirectClass,exception) == MagickFalse) break; (void) SetImageColorspace(cmyk_image,CMYKColorspace,exception); for (i=0; i < 4; i++) { image_view=AcquireVirtualCacheView(images,exception); cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception); for (y=0; y < (ssize_t) images->rows; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception); q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) images->columns; x++) { Quantum pixel; pixel=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)); switch (i) { case 0: SetPixelCyan(cmyk_image,pixel,q); break; case 1: SetPixelMagenta(cmyk_image,pixel,q); break; case 2: SetPixelYellow(cmyk_image,pixel,q); break; case 3: SetPixelBlack(cmyk_image,pixel,q); break; default: break; } p+=GetPixelChannels(images); q+=GetPixelChannels(cmyk_image); } if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse) break; } cmyk_view=DestroyCacheView(cmyk_view); image_view=DestroyCacheView(image_view); images=GetNextImageInList(images); if (images == (Image *) NULL) break; } AppendImageToList(&cmyk_images,cmyk_image); } return(cmyk_images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C r o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CropImage() extracts a region of the image starting at the offset defined % by geometry. Region must be fully defined, and no special handling of % geometry flags is performed. % % The format of the CropImage method is: % % Image *CropImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to crop with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry, ExceptionInfo *exception) { #define CropImageTag "Crop/Image" CacheView *crop_view, *image_view; Image *crop_image; MagickBooleanType status; MagickOffsetType progress; OffsetInfo offset; RectangleInfo bounding_box, page; ssize_t y; /* Check crop geometry. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); bounding_box=image->page; if ((bounding_box.width == 0) || (bounding_box.height == 0)) { bounding_box.width=image->columns; bounding_box.height=image->rows; } page=(*geometry); if (page.width == 0) page.width=bounding_box.width; if (page.height == 0) page.height=bounding_box.height; if (((bounding_box.x-page.x) >= (ssize_t) page.width) || ((bounding_box.y-page.y) >= (ssize_t) page.height) || ((page.x-bounding_box.x) > (ssize_t) image->columns) || ((page.y-bounding_box.y) > (ssize_t) image->rows)) { /* Crop is not within virtual canvas, return 1 pixel transparent image. */ (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); crop_image=CloneImage(image,1,1,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->background_color.alpha_trait=BlendPixelTrait; crop_image->background_color.alpha=(MagickRealType) TransparentAlpha; (void) SetImageBackgroundColor(crop_image,exception); crop_image->page=bounding_box; crop_image->page.x=(-1); crop_image->page.y=(-1); if (crop_image->dispose == BackgroundDispose) crop_image->dispose=NoneDispose; return(crop_image); } if ((page.x < 0) && (bounding_box.x >= 0)) { page.width+=page.x-bounding_box.x; page.x=0; } else { page.width-=bounding_box.x-page.x; page.x-=bounding_box.x; if (page.x < 0) page.x=0; } if ((page.y < 0) && (bounding_box.y >= 0)) { page.height+=page.y-bounding_box.y; page.y=0; } else { page.height-=bounding_box.y-page.y; page.y-=bounding_box.y; if (page.y < 0) page.y=0; } if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns) page.width=image->columns-page.x; if ((geometry->width != 0) && (page.width > geometry->width)) page.width=geometry->width; if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows) page.height=image->rows-page.y; if ((geometry->height != 0) && (page.height > geometry->height)) page.height=geometry->height; bounding_box.x+=page.x; bounding_box.y+=page.y; if ((page.width == 0) || (page.height == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return((Image *) NULL); } /* Initialize crop image attributes. */ crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->page.width=image->page.width; crop_image->page.height=image->page.height; offset.x=(ssize_t) (bounding_box.x+bounding_box.width); offset.y=(ssize_t) (bounding_box.y+bounding_box.height); if ((offset.x > (ssize_t) image->page.width) || (offset.y > (ssize_t) image->page.height)) { crop_image->page.width=bounding_box.width; crop_image->page.height=bounding_box.height; } crop_image->page.x=bounding_box.x; crop_image->page.y=bounding_box.y; /* Crop image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); crop_view=AcquireAuthenticCacheView(crop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,crop_image,crop_image->rows,1) #endif for (y=0; y < (ssize_t) crop_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns, 1,exception); q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) crop_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait crop_traits=GetPixelChannelTraits(crop_image,channel); if ((traits == UndefinedPixelTrait) || (crop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(crop_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(crop_image); } if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CropImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } crop_view=DestroyCacheView(crop_view); image_view=DestroyCacheView(image_view); crop_image->type=image->type; if (status == MagickFalse) crop_image=DestroyImage(crop_image); return(crop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C r o p I m a g e T o T i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CropImageToTiles() crops a single image, into a possible list of tiles. % This may include a single sub-region of the image. This basically applies % all the normal geometry flags for Crop. % % Image *CropImageToTiles(const Image *image, % const RectangleInfo *crop_geometry, ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image The transformed image is returned as this parameter. % % o crop_geometry: A crop geometry string. % % o exception: return any errors or warnings in this structure. % */ static inline double ConstrainPixelOffset(double x) { if (x < (double) -(SSIZE_MAX-512)) return((double) -(SSIZE_MAX-512)); if (x > (double) (SSIZE_MAX-512)) return((double) (SSIZE_MAX-512)); return(x); } static inline ssize_t PixelRoundOffset(double x) { /* Round the fraction to nearest integer. 
*/ if ((x-floor(x)) < (ceil(x)-x)) return((ssize_t) floor(ConstrainPixelOffset(x))); return((ssize_t) ceil(ConstrainPixelOffset(x))); } MagickExport Image *CropImageToTiles(const Image *image, const char *crop_geometry,ExceptionInfo *exception) { Image *next, *crop_image; MagickStatusType flags; RectangleInfo geometry; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); crop_image=NewImageList(); next=NewImageList(); flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception); if ((flags & AreaValue) != 0) { PointInfo delta, offset; RectangleInfo crop; size_t height, width; /* Crop into NxM tiles (@ flag). */ width=image->columns; height=image->rows; if (geometry.width == 0) geometry.width=1; if (geometry.height == 0) geometry.height=1; if ((flags & AspectValue) == 0) { width-=(geometry.x < 0 ? -1 : 1)*geometry.x; height-=(geometry.y < 0 ? -1 : 1)*geometry.y; } else { width+=(geometry.x < 0 ? -1 : 1)*geometry.x; height+=(geometry.y < 0 ? -1 : 1)*geometry.y; } delta.x=(double) width/geometry.width; delta.y=(double) height/geometry.height; if (delta.x < 1.0) delta.x=1.0; if (delta.y < 1.0) delta.y=1.0; for (offset.y=0; offset.y < (double) height; ) { if ((flags & AspectValue) == 0) { crop.y=PixelRoundOffset((double) (offset.y- (geometry.y > 0 ? 0 : geometry.y))); offset.y+=delta.y; /* increment now to find width */ crop.height=(size_t) PixelRoundOffset((double) (offset.y+ (geometry.y < 0 ? 0 : geometry.y))); } else { crop.y=PixelRoundOffset((double) (offset.y- (geometry.y > 0 ? geometry.y : 0))); offset.y+=delta.y; /* increment now to find width */ crop.height=(size_t) PixelRoundOffset((double) (offset.y+(geometry.y < -1 ? geometry.y : 0))); } crop.height-=crop.y; crop.y+=image->page.y; for (offset.x=0; offset.x < (double) width; ) { if ((flags & AspectValue) == 0) { crop.x=PixelRoundOffset((double) (offset.x- (geometry.x > 0 ? 0 : geometry.x))); offset.x+=delta.x; /* increment now to find height */ crop.width=(size_t) PixelRoundOffset((double) (offset.x+ (geometry.x < 0 ? 0 : geometry.x))); } else { crop.x=PixelRoundOffset((double) (offset.x- (geometry.x > 0 ? geometry.x : 0))); offset.x+=delta.x; /* increment now to find height */ crop.width=(size_t) PixelRoundOffset((double) (offset.x+ (geometry.x < 0 ? geometry.x : 0))); } crop.width-=crop.x; crop.x+=image->page.x; next=CropImage(image,&crop,exception); if (next != (Image *) NULL) AppendImageToList(&crop_image,next); } } ClearMagickException(exception); return(crop_image); } if (((geometry.width == 0) && (geometry.height == 0)) || ((flags & XValue) != 0) || ((flags & YValue) != 0)) { /* Crop a single region at +X+Y. */ crop_image=CropImage(image,&geometry,exception); if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0)) { crop_image->page.width=geometry.width; crop_image->page.height=geometry.height; crop_image->page.x-=geometry.x; crop_image->page.y-=geometry.y; } return(crop_image); } if ((image->columns > geometry.width) || (image->rows > geometry.height)) { RectangleInfo page; size_t height, width; ssize_t x, y; /* Crop into tiles of fixed size WxH. 
*/ page=image->page; if (page.width == 0) page.width=image->columns; if (page.height == 0) page.height=image->rows; width=geometry.width; if (width == 0) width=page.width; height=geometry.height; if (height == 0) height=page.height; next=NewImageList(); for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height) { for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width) { geometry.width=width; geometry.height=height; geometry.x=x; geometry.y=y; next=CropImage(image,&geometry,exception); if (next == (Image *) NULL) break; AppendImageToList(&crop_image,next); } if (next == (Image *) NULL) break; } return(crop_image); } return(CloneImage(image,0,0,MagickTrue,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E x c e r p t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExcerptImage() returns a excerpt of the image as defined by the geometry. % % The format of the ExcerptImage method is: % % Image *ExcerptImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to extend with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ExcerptImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define ExcerptImageTag "Excerpt/Image" CacheView *excerpt_view, *image_view; Image *excerpt_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Allocate excerpt image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue, exception); if (excerpt_image == (Image *) NULL) return((Image *) NULL); /* Excerpt each row. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,excerpt_image,excerpt_image->rows,1) #endif for (y=0; y < (ssize_t) excerpt_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y, geometry->width,1,exception); q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) excerpt_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait excerpt_traits=GetPixelChannelTraits(excerpt_image,channel); if ((traits == UndefinedPixelTrait) || (excerpt_traits == UndefinedPixelTrait)) continue; SetPixelChannel(excerpt_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(excerpt_image); } if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ExcerptImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } excerpt_view=DestroyCacheView(excerpt_view); image_view=DestroyCacheView(image_view); excerpt_image->type=image->type; if (status == MagickFalse) excerpt_image=DestroyImage(excerpt_image); return(excerpt_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E x t e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExtentImage() extends the image as defined by the geometry, gravity, and % image background color. Set the (x,y) offset of the geometry to move the % original image relative to the extended image. % % The format of the ExtentImage method is: % % Image *ExtentImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to extend with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ExtentImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { Image *extent_image; MagickBooleanType status; /* Allocate extent image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue, exception); if (extent_image == (Image *) NULL) return((Image *) NULL); status=SetImageBackgroundColor(extent_image,exception); if (status == MagickFalse) { extent_image=DestroyImage(extent_image); return((Image *) NULL); } status=CompositeImage(extent_image,image,image->compose,MagickTrue, -geometry->x,-geometry->y,exception); if (status != MagickFalse) Update8BIMClipPath(extent_image,image->columns,image->rows,geometry); return(extent_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l i p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FlipImage() creates a vertical mirror image by reflecting the pixels % around the central x-axis. % % The format of the FlipImage method is: % % Image *FlipImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception) { #define FlipImageTag "Flip/Image" CacheView *flip_view, *image_view; Image *flip_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); flip_image=CloneImage(image,0,0,MagickTrue,exception); if (flip_image == (Image *) NULL) return((Image *) NULL); /* Flip image. 
*/ status=MagickTrue; progress=0; page=image->page; image_view=AcquireVirtualCacheView(image,exception); flip_view=AcquireAuthenticCacheView(flip_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,flip_image,flip_image->rows,1) #endif for (y=0; y < (ssize_t) flip_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y- 1),flip_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) flip_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait flip_traits=GetPixelChannelTraits(flip_image,channel); if ((traits == UndefinedPixelTrait) || (flip_traits == UndefinedPixelTrait)) continue; SetPixelChannel(flip_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(flip_image); } if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,FlipImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } flip_view=DestroyCacheView(flip_view); image_view=DestroyCacheView(image_view); flip_image->type=image->type; if (page.height != 0) page.y=(ssize_t) (page.height-flip_image->rows-page.y); flip_image->page=page; if (status == MagickFalse) flip_image=DestroyImage(flip_image); return(flip_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FlopImage() creates a horizontal mirror image by reflecting the pixels % around the central y-axis. % % The format of the FlopImage method is: % % Image *FlopImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception) { #define FlopImageTag "Flop/Image" CacheView *flop_view, *image_view; Image *flop_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); flop_image=CloneImage(image,0,0,MagickTrue,exception); if (flop_image == (Image *) NULL) return((Image *) NULL); /* Flop each row. 
*/ status=MagickTrue; progress=0; page=image->page; image_view=AcquireVirtualCacheView(image,exception); flop_view=AcquireAuthenticCacheView(flop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,flop_image,flop_image->rows,1) #endif for (y=0; y < (ssize_t) flop_image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } q+=GetPixelChannels(flop_image)*flop_image->columns; for (x=0; x < (ssize_t) flop_image->columns; x++) { ssize_t i; q-=GetPixelChannels(flop_image); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel); if ((traits == UndefinedPixelTrait) || (flop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(flop_image,channel,p[i],q); } p+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,FlopImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } flop_view=DestroyCacheView(flop_view); image_view=DestroyCacheView(image_view); flop_image->type=image->type; if (page.width != 0) page.x=(ssize_t) (page.width-flop_image->columns-page.x); flop_image->page=page; if (status == MagickFalse) flop_image=DestroyImage(flop_image); return(flop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R o l l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RollImage() offsets an image as defined by x_offset and y_offset. % % The format of the RollImage method is: % % Image *RollImage(const Image *image,const ssize_t x_offset, % const ssize_t y_offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x_offset: the number of columns to roll in the horizontal direction. % % o y_offset: the number of rows to roll in the vertical direction. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType CopyImageRegion(Image *destination,const Image *source, const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy, const ssize_t dx,const ssize_t dy,ExceptionInfo *exception) { CacheView *source_view, *destination_view; MagickBooleanType status; ssize_t y; if (columns == 0) return(MagickTrue); status=MagickTrue; source_view=AcquireVirtualCacheView(source,exception); destination_view=AcquireAuthenticCacheView(destination,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source,destination,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { MagickBooleanType sync; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; /* Transfer scanline. 
*/ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception); q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(source); i++) { PixelChannel channel = GetPixelChannelChannel(source,i); PixelTrait source_traits=GetPixelChannelTraits(source,channel); PixelTrait destination_traits=GetPixelChannelTraits(destination, channel); if ((source_traits == UndefinedPixelTrait) || (destination_traits == UndefinedPixelTrait)) continue; SetPixelChannel(destination,channel,p[i],q); } p+=GetPixelChannels(source); q+=GetPixelChannels(destination); } sync=SyncCacheViewAuthenticPixels(destination_view,exception); if (sync == MagickFalse) status=MagickFalse; } destination_view=DestroyCacheView(destination_view); source_view=DestroyCacheView(source_view); return(status); } MagickExport Image *RollImage(const Image *image,const ssize_t x_offset, const ssize_t y_offset,ExceptionInfo *exception) { #define RollImageTag "Roll/Image" Image *roll_image; MagickStatusType status; RectangleInfo offset; /* Initialize roll image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); roll_image=CloneImage(image,0,0,MagickTrue,exception); if (roll_image == (Image *) NULL) return((Image *) NULL); offset.x=x_offset; offset.y=y_offset; while (offset.x < 0) offset.x+=(ssize_t) image->columns; while (offset.x >= (ssize_t) image->columns) offset.x-=(ssize_t) image->columns; while (offset.y < 0) offset.y+=(ssize_t) image->rows; while (offset.y >= (ssize_t) image->rows) offset.y-=(ssize_t) image->rows; /* Roll image. */ status=CopyImageRegion(roll_image,image,(size_t) offset.x, (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows- offset.y,0,0,exception); (void) SetImageProgress(image,RollImageTag,0,3); status&=CopyImageRegion(roll_image,image,image->columns-offset.x, (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0, exception); (void) SetImageProgress(image,RollImageTag,1,3); status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows- offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception); (void) SetImageProgress(image,RollImageTag,2,3); status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows- offset.y,0,0,offset.x,offset.y,exception); (void) SetImageProgress(image,RollImageTag,3,3); roll_image->type=image->type; if (status == MagickFalse) roll_image=DestroyImage(roll_image); return(roll_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShaveImage() shaves pixels from the image edges. It allocates the memory % necessary for the new Image structure and returns a pointer to the new % image. % % The format of the ShaveImage method is: % % Image *ShaveImage(const Image *image,const RectangleInfo *shave_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o shave_image: Method ShaveImage returns a pointer to the shaved % image. 
A null image is returned if there is a memory shortage or % if the image width or height is zero. % % o image: the image. % % o shave_info: Specifies a pointer to a RectangleInfo which defines the % region of the image to crop. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShaveImage(const Image *image, const RectangleInfo *shave_info,ExceptionInfo *exception) { Image *shave_image; RectangleInfo geometry; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (((2*shave_info->width) >= image->columns) || ((2*shave_info->height) >= image->rows)) ThrowImageException(OptionWarning,"GeometryDoesNotContainImage"); SetGeometry(image,&geometry); geometry.width-=2*shave_info->width; geometry.height-=2*shave_info->height; geometry.x=(ssize_t) shave_info->width+image->page.x; geometry.y=(ssize_t) shave_info->height+image->page.y; shave_image=CropImage(image,&geometry,exception); if (shave_image == (Image *) NULL) return((Image *) NULL); shave_image->page.width-=2*shave_info->width; shave_image->page.height-=2*shave_info->height; shave_image->page.x-=(ssize_t) shave_info->width; shave_image->page.y-=(ssize_t) shave_info->height; return(shave_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S p l i c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SpliceImage() splices a solid color into the image as defined by the % geometry. % % The format of the SpliceImage method is: % % Image *SpliceImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to splice with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SpliceImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define SpliceImageTag "Splice/Image" CacheView *image_view, *splice_view; Image *splice_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo splice_geometry; ssize_t columns, y; /* Allocate splice image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); splice_geometry=(*geometry); splice_image=CloneImage(image,image->columns+splice_geometry.width, image->rows+splice_geometry.height,MagickTrue,exception); if (splice_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse) { splice_image=DestroyImage(splice_image); return((Image *) NULL); } if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) && (IsGrayColorspace(splice_image->colorspace) != MagickFalse)) (void) SetImageColorspace(splice_image,sRGBColorspace,exception); if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) && (splice_image->alpha_trait == UndefinedPixelTrait)) (void) SetImageAlpha(splice_image,OpaqueAlpha,exception); (void) SetImageBackgroundColor(splice_image,exception); /* Respect image geometry. 
*/ switch (image->gravity) { default: case UndefinedGravity: case NorthWestGravity: break; case NorthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; break; } case NorthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; break; } case WestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.width/2; break; } case CenterGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case EastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case SouthWestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } } /* Splice image. */ status=MagickTrue; progress=0; columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns); image_view=AcquireVirtualCacheView(image,exception); splice_view=AcquireAuthenticCacheView(splice_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,splice_image,splice_geometry.y,1) #endif for (y=0; y < (ssize_t) splice_geometry.y; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q+=GetPixelChannels(splice_image); for ( ; x < (ssize_t) splice_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) 
#pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SpliceImageTag,progress, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,splice_image,splice_image->rows,2) #endif for (y=(ssize_t) (splice_geometry.y+splice_geometry.height); y < (ssize_t) splice_image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; if ((y < 0) || (y >= (ssize_t)splice_image->rows)) continue; p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height, splice_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q+=GetPixelChannels(splice_image); for ( ; x < (ssize_t) splice_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SpliceImageTag,progress, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } splice_view=DestroyCacheView(splice_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) splice_image=DestroyImage(splice_image); return(splice_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImage() is a convenience method that behaves like ResizeImage() or % CropImage() but accepts scaling and/or cropping information as a region % geometry specification. If the operation fails, the original image handle % is left as is. % % This should only be used for single images. 
% % This function destroys what it assumes to be a single image list. % If the input image is part of a larger list, all other images in that list % will be simply 'lost', not destroyed. % % Also if the crop generates a list of images only the first image is resized. % And finally if the crop succeeds and the resize failed, you will get a % cropped image, as well as a 'false' or 'failed' report. % % This function and should probably be deprecated in favor of direct calls % to CropImageToTiles() or ResizeImage(), as appropriate. % % The format of the TransformImage method is: % % MagickBooleanType TransformImage(Image **image,const char *crop_geometry, % const char *image_geometry,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image The transformed image is returned as this parameter. % % o crop_geometry: A crop geometry string. This geometry defines a % subregion of the image to crop. % % o image_geometry: An image geometry string. This geometry defines the % final size of the image. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate MagickBooleanType TransformImage(Image **image, const char *crop_geometry,const char *image_geometry,ExceptionInfo *exception) { Image *resize_image, *transform_image; RectangleInfo geometry; assert(image != (Image **) NULL); assert((*image)->signature == MagickCoreSignature); if ((*image)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename); transform_image=(*image); if (crop_geometry != (const char *) NULL) { Image *crop_image; /* Crop image to a user specified size. */ crop_image=CropImageToTiles(*image,crop_geometry,exception); if (crop_image == (Image *) NULL) transform_image=CloneImage(*image,0,0,MagickTrue,exception); else { transform_image=DestroyImage(transform_image); transform_image=GetFirstImageInList(crop_image); } *image=transform_image; } if (image_geometry == (const char *) NULL) return(MagickTrue); /* Scale image to a user specified size. */ (void) ParseRegionGeometry(transform_image,image_geometry,&geometry, exception); if ((transform_image->columns == geometry.width) && (transform_image->rows == geometry.height)) return(MagickTrue); resize_image=ResizeImage(transform_image,geometry.width,geometry.height, transform_image->filter,exception); if (resize_image == (Image *) NULL) return(MagickFalse); transform_image=DestroyImage(transform_image); transform_image=resize_image; *image=transform_image; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p o s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransposeImage() creates a horizontal mirror image by reflecting the pixels % around the central y-axis while rotating them by 90 degrees. % % The format of the TransposeImage method is: % % Image *TransposeImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception) { #define TransposeImageTag "Transpose/Image" CacheView *image_view, *transpose_view; Image *transpose_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue, exception); if (transpose_image == (Image *) NULL) return((Image *) NULL); /* Transpose image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); transpose_view=AcquireAuthenticCacheView(transpose_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,transpose_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1, image->columns,1,exception); q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1), 0,1,transpose_image->rows,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image, channel); if ((traits == UndefinedPixelTrait) || (transpose_traits == UndefinedPixelTrait)) continue; SetPixelChannel(transpose_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(transpose_image); } if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,TransposeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } transpose_view=DestroyCacheView(transpose_view); image_view=DestroyCacheView(image_view); transpose_image->type=image->type; page=transpose_image->page; Swap(page.width,page.height); Swap(page.x,page.y); transpose_image->page=page; if (status == MagickFalse) transpose_image=DestroyImage(transpose_image); return(transpose_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s v e r s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransverseImage() creates a vertical mirror image by reflecting the pixels % around the central x-axis while rotating them by 270 degrees. % % The format of the TransverseImage method is: % % Image *TransverseImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception) { #define TransverseImageTag "Transverse/Image" CacheView *image_view, *transverse_view; Image *transverse_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue, exception); if (transverse_image == (Image *) NULL) return((Image *) NULL); /* Transverse image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); transverse_view=AcquireAuthenticCacheView(transverse_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,transverse_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-1), 0,1,transverse_image->rows,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } q+=GetPixelChannels(transverse_image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; q-=GetPixelChannels(transverse_image); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait transverse_traits=GetPixelChannelTraits(transverse_image, channel); if ((traits == UndefinedPixelTrait) || (transverse_traits == UndefinedPixelTrait)) continue; SetPixelChannel(transverse_image,channel,p[i],q); } p+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(transverse_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,TransverseImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } transverse_view=DestroyCacheView(transverse_view); image_view=DestroyCacheView(image_view); transverse_image->type=image->type; page=transverse_image->page; Swap(page.width,page.height); Swap(page.x,page.y); if (page.width != 0) page.x=(ssize_t) (page.width-transverse_image->columns-page.x); if (page.height != 0) page.y=(ssize_t) (page.height-transverse_image->rows-page.y); transverse_image->page=page; if (status == MagickFalse) transverse_image=DestroyImage(transverse_image); return(transverse_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r i m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TrimImage() trims pixels from the image edges. It allocates the memory % necessary for the new Image structure and returns a pointer to the new % image. 
% % The format of the TrimImage method is: % % Image *TrimImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception) { Image *trim_image; RectangleInfo geometry; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); geometry=GetImageBoundingBox(image,exception); if ((geometry.width == 0) || (geometry.height == 0)) { Image *crop_image; crop_image=CloneImage(image,1,1,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->background_color.alpha_trait=BlendPixelTrait; crop_image->background_color.alpha=(MagickRealType) TransparentAlpha; (void) SetImageBackgroundColor(crop_image,exception); crop_image->page=image->page; crop_image->page.x=(-1); crop_image->page.y=(-1); return(crop_image); } geometry.x+=image->page.x; geometry.y+=image->page.y; trim_image=CropImage(image,&geometry,exception); if (trim_image != (Image *) NULL) Update8BIMClipPath(trim_image,image->columns,image->rows,&geometry); return(trim_image); }
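/*
  A minimal sketch of the scanline template shared by every transform above:
  acquire a virtual (read) cache view and an authentic (write) cache view,
  parallelize over rows with OpenMP, and use a shared status flag with
  `continue` for early exit, because threads cannot break out of an `omp for`.
  The helper below restates that template on a plain packed, interleaved pixel
  buffer outside the MagickCore API; the name flip_rows and the buffer layout
  are assumptions for illustration, not library code. src and dst must be
  distinct buffers, as an in-place flip would clobber unread rows.
*/
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static bool flip_rows(const unsigned char *src, unsigned char *dst,
                      size_t columns, size_t rows, size_t channels)
{
  bool status = true;
  const size_t stride = columns*channels;  /* bytes per scanline */
  long y;
#if defined(_OPENMP)
  #pragma omp parallel for schedule(static) shared(status)
#endif
  for (y = 0; y < (long) rows; y++)
  {
    if (status == false)
      continue;  /* cooperative early exit, as in the loops above */
    const unsigned char *p = src+(size_t) y*stride;
    unsigned char *q = dst+(rows-(size_t) y-1)*stride;
    /* in the library this is the point where pixel-cache acquisition can fail */
    if ((p == (const unsigned char *) NULL) || (q == (unsigned char *) NULL))
      {
        status = false;
        continue;
      }
    memcpy(q,p,stride);  /* row y of the source becomes row rows-y-1 */
  }
  return status;
}
/*
  The same skeleton underlies ChopImage, CropImage, ExcerptImage, FlipImage,
  FlopImage, SpliceImage, TransposeImage, TransverseImage, and CopyImageRegion;
  only the source/destination row mapping and the per-pixel channel loop differ.
*/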
lloyds_par24.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <stdbool.h>
#include <omp.h>
#include "csvparser.h"

void vector_init(double *a, int length) {
  for (int i = 0; i < length; i++) {
    a[i] = 0;
  }
}

void vector_copy(double *dst, double *src, int length) {
  for (int i = 0; i < length; i++) {
    dst[i] = src[i];
  }
}

void vector_add(double *dst, double *a, double *b, int length) {
  for (int i = 0; i < length; i++) {
    dst[i] = a[i] + b[i];
  }
}

void vector_elementwise_avg(double *dst, double *a, int denominator, int length) {
  for (int i = 0; i < length; i++) {
    dst[i] = a[i] / denominator;
  }
}

// Program takes K, a data set (.csv), a delimiter,
// a binary flag data_contains_header, and a binary flag to drop labels
int main(int argc, char *argv[]){
  // Seed for consistent cluster center selection
  // In a working implementation, seeding would be variable (e.g. time(NULL))
  srand(111);

  CsvParser *reader;
  CsvRow *row;
  int i, j;

  if (argc < 6) {
    printf("Incorrect number of args. Should be 5, received %d\n", argc - 1);
    exit(1);
  }

  int K = atoi(argv[1]);
  char *data_fp = argv[2];
  char *delimiter = argv[3];
  int has_header_row = atoi(argv[4]);
  int drop_labels = atoi(argv[5]);

  // Take in data set
  reader = CsvParser_new(data_fp, delimiter, has_header_row);

  // Get number of columns
  row = CsvParser_getRow(reader);
  int num_cols = CsvParser_getNumFields(row);
  CsvParser_destroy_row(row);
  if (drop_labels) {
    num_cols--;
  }

  // Get number of rows like lazy people
  int num_rows = 1;
  while ((row = CsvParser_getRow(reader))) {
    num_rows++;
    CsvParser_destroy_row(row);
  }

  // Torch the CsvParser and start again so we can read data in.
  CsvParser_destroy(reader);
  reader = CsvParser_new(data_fp, delimiter, has_header_row);

  double **data_matrix = malloc(num_rows * sizeof(double *));
  for (int i = 0; i < num_rows; i++) {
    data_matrix[i] = malloc(num_cols * sizeof(double));
  }

  int row_index = 0;
  while ((row = CsvParser_getRow(reader))) {
    const char **row_fields = CsvParser_getFields(row);
    for (int col_index = 0; col_index < num_cols; col_index++) {
      data_matrix[row_index][col_index] = atof(row_fields[col_index]);
    }
    CsvParser_destroy_row(row);
    row_index++;
  }
  CsvParser_destroy(reader);

  // Initialize some cluster centers from random rows in our data.
  // Given the fact that we will usually have way more rows than centers, we can
  // probably just roll a number and reroll if we already rolled it.
  // Collisions should be relatively infrequent.
  bool collided;
  double centers[K][num_cols];
  if (argc == 7) {
    // Debug path: fixed, reproducible center rows (assumes K == 3)
    int center_indices[3] = {12, 67, 106};
    for (i = 0; i < K; i++) {
      vector_copy(centers[i], data_matrix[center_indices[i]], num_cols);
    }
  } else {
    // center_indices must persist across iterations of i so the collision
    // check below compares against previously chosen rows
    int center_indices[K];
    for (i = 0; i < K; i++) {
      collided = true;
      while (collided) {
        center_indices[i] = rand() % num_rows;
        collided = false;
        for (j = 0; j < i; j++) {
          if (center_indices[j] == center_indices[i]) {
            collided = true;
            break;
          }
        }
      }
      // Copy once, after a collision-free index has been found
      vector_copy(centers[i], data_matrix[center_indices[i]], num_cols);
    }
  }

  printf("Initial cluster centers:\n");
  for (int i = 0; i < K; i++) {
    for (int j = 0; j < num_cols; j++) {
      printf("%f ", centers[i][j]);
    }
    printf("\n");
  }
  printf("\n");

  int num_iterations = 0;
  int *clusterings = calloc(num_rows, sizeof(int));
  bool changes;
  omp_set_num_threads(24);  // fixed thread count for this experiment (par24)

  double tstart = omp_get_wtime();
  while (1) {
    // Assign points to cluster centers
    changes = false;
    int center, observation, new_center, col;
    double idx_diff, current_diff, best_diff;
    #pragma omp parallel for \
      private(center, observation, idx_diff, current_diff, best_diff, new_center, col) \
      shared(num_rows, K, data_matrix, centers)
    for (observation = 0; observation < num_rows; observation++) {
      best_diff = INFINITY;
      new_center = clusterings[observation];  // defined even if no distance beats INFINITY
      for (center = 0; center < K; center++) {
        current_diff = 0;
        for (col = 0; col < num_cols; col++) {
          idx_diff = data_matrix[observation][col] - centers[center][col];
          current_diff += idx_diff * idx_diff;
        }
        if (current_diff < best_diff) {
          best_diff = current_diff;
          new_center = center;
        }
      }
      if (clusterings[observation] != new_center) {
        // NOTE: There is an acceptable data race on changes. Threads only ever
        // set it to true; lost updates are inconsequential. No need to slow
        // things down for safety.
        changes = true;
        clusterings[observation] = new_center;
      }
    }

    // If we didn't change any cluster assignments, we're at convergence
    if (!changes) {
      break;
    }
    num_iterations++;

    // Find cluster means and reassign centers
    int cluster_index, element, elements_in_cluster;
    double cluster_means[num_cols];
    #pragma omp parallel for \
      private(cluster_index, element, elements_in_cluster, cluster_means) \
      shared(num_rows, clusterings, data_matrix, K)
    for (cluster_index = 0; cluster_index < K; cluster_index++) {
      elements_in_cluster = 0;
      vector_init(cluster_means, num_cols);
      // Aggregate in-cluster values we can use to take the clusterings mean
      for (element = 0; element < num_rows; element++) {
        if (clusterings[element] == cluster_index) {
          vector_add(cluster_means, cluster_means, data_matrix[element], num_cols);
          elements_in_cluster++;
        }
      }
      // Finish calculating the cluster mean and overwrite the center with the
      // new value; an empty cluster keeps its old center (avoids division by zero)
      if (elements_in_cluster > 0) {
        vector_elementwise_avg(cluster_means, cluster_means, elements_in_cluster, num_cols);
        vector_copy(centers[cluster_index], cluster_means, num_cols);
      }
    }
  }
  double tend = omp_get_wtime();

  printf("\nFinal cluster centers:\n");
  for (int i = 0; i < K; i++) {
    for (int j = 0; j < num_cols; j++) {
      printf("%f ", centers[i][j]);
    }
    printf("\n");
  }
  printf("\nNum iterations: %d\n", num_iterations);
  printf("Time taken for %d clusters: %f seconds\n", K, tend - tstart);

  for (int i = 0; i < num_rows; i++) {
    free(data_matrix[i]);
  }
  free(data_matrix);
  free(clusterings);
  exit(0);
}
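/*
  A sketch of the assignment step above with the benign data race on `changes`
  removed: each thread keeps a private flag that OpenMP ORs into the shared
  result at the join via reduction(||:...). The semantics match the racy
  version without relying on racy stores. The function name and signature are
  illustrative, not part of the program above.
*/
#include <math.h>
#include <stdbool.h>

static bool assign_clusters(int num_rows, int num_cols, int K,
                            double **data_matrix,
                            double centers[K][num_cols],
                            int *clusterings)
{
  bool changes = false;
  int observation;
  #pragma omp parallel for schedule(static) reduction(||:changes)
  for (observation = 0; observation < num_rows; observation++) {
    int new_center = clusterings[observation];
    double best_diff = INFINITY;
    for (int center = 0; center < K; center++) {
      double current_diff = 0;
      for (int col = 0; col < num_cols; col++) {
        double idx_diff = data_matrix[observation][col] - centers[center][col];
        current_diff += idx_diff * idx_diff;
      }
      if (current_diff < best_diff) {
        best_diff = current_diff;
        new_center = center;
      }
    }
    if (clusterings[observation] != new_center) {
      changes = true;  /* thread-private under the reduction; merged at the join */
      clusterings[observation] = new_center;
    }
  }
  return changes;
}
/*
  Usage would mirror the main loop above: call once per iteration and stop
  when it returns false. Each thread writes distinct clusterings[] entries,
  so no further synchronization is needed.
*/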
ncra.c
/* $Header$ */ /* This single source file compiles into one executable that behaves as three different commands depending on invocation name: ncra -- netCDF record averager nces -- netCDF ensemble statistics ncrcat -- netCDF record concatenator */ /* Purpose: Compute averages or extract series of specified hyperslabs of specfied variables of multiple input netCDF files and output them to a single file. */ /* Copyright (C) 1995--present Charlie Zender This file is part of NCO, the netCDF Operators. NCO is free software. You may redistribute and/or modify NCO under the terms of the 3-Clause BSD License. You are permitted to link NCO with the HDF, netCDF, OPeNDAP, and UDUnits libraries and to distribute the resulting executables under the terms of the BSD, but in addition obeying the extra stipulations of the HDF, netCDF, OPeNDAP, and UDUnits licenses. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 3-Clause BSD License for more details. The original author of this software, Charlie Zender, seeks to improve it with your suggestions, contributions, bug-reports, and patches. Please contact the NCO project at http://nco.sf.net or write to Charlie Zender Department of Earth System Science University of California, Irvine Irvine, CA 92697-3100 */ /* URL: https://github.com/nco/nco/tree/master/src/nco/ncra.c Usage: ncra -O -n 3,4,1 -p ${HOME}/nco/data h0001.nc ~/foo.nc ncra -O -n 3,4,1 -p ${HOME}/nco/data -l ${HOME} h0001.nc ~/foo.nc ncra -O -n 3,4,1 -p /ZENDER/tmp -l ${HOME}/nco/data h0001.nc ~/foo.nc ncrcat -O -C -d time,0,5,4,2 -v time -p ~/nco/data in.nc ~/foo.nc ncra -O -C -d time,0,5,4,2 -v time -p ~/nco/data in.nc ~/foo.nc ncra -O -C --mro -d time,0,5,4,2 -v time -p ~/nco/data in.nc ~/foo.nc ncra -O -w 1,2,3 -n 3,4,1 -p ${HOME}/nco/data h0001.nc ~/foo.nc ncra -O -w one_dmn_rec_var -n 3,4,1 -p ${HOME}/nco/data h0001.nc ~/foo.nc scp ~/nco/src/nco/ncra.c esmf.ess.uci.edu:nco/src/nco nces in.nc in.nc ~/foo.nc nces -O -n 3,4,1 -p ${HOME}/nco/data h0001.nc ~/foo.nc nces -O -n 3,4,1 -p ${HOME}/nco/data -l ${HOME} h0001.nc ~/foo.nc nces -O -n 3,4,1 -p /ZENDER/tmp -l ${HOME} h0001.nc ~/foo.nc ncra -Y ncge -O -p ~/nco/data mdl_1.nc ~/foo.nc ncra -Y ncge -O --nsm_sfx=_avg -p ~/nco/data mdl_1.nc ~/foo.nc */ #ifdef HAVE_CONFIG_H # include <config.h> /* Autotools tokens */ #endif /* !HAVE_CONFIG_H */ /* Standard C headers */ #include <math.h> /* sin cos cos sin 3.14159 */ #include <stdio.h> /* stderr, FILE, NULL, etc. 
*/ #include <stdlib.h> /* abs, getopt, malloc, strtol */ #include <string.h> /* strcmp() */ #include <sys/stat.h> /* stat() */ #include <time.h> /* machine time */ #ifndef _MSC_VER # include <unistd.h> /* POSIX stuff */ #endif #ifndef HAVE_GETOPT_LONG # include "nco_getopt.h" #else /* HAVE_GETOPT_LONG */ # ifdef HAVE_GETOPT_H # include <getopt.h> # endif /* !HAVE_GETOPT_H */ #endif /* HAVE_GETOPT_LONG */ #ifdef I18N # include <langinfo.h> /* nl_langinfo() */ # include <libintl.h> /* Internationalization i18n */ # include <locale.h> /* Locale setlocale() */ # define _(sng) gettext (sng) # define gettext_noop(sng) (sng) # define N_(sng) gettext_noop(sng) #endif /* I18N */ /* Supply stub gettext() function in case i18n failed */ #ifndef _LIBINTL_H # define gettext(foo) foo #endif /* _LIBINTL_H */ /* 3rd party vendors */ #include <netcdf.h> /* netCDF definitions and C library */ #ifdef ENABLE_MPI # include <mpi.h> /* MPI definitions */ # include <netcdf_par.h> /* Parallel netCDF definitions */ # include "nco_mpi.h" /* MPI utilities */ #endif /* !ENABLE_MPI */ /* Personal headers */ /* #define MAIN_PROGRAM_FILE MUST precede #include libnco.h */ #define MAIN_PROGRAM_FILE #include "nco.h" /* netCDF Operator (NCO) definitions */ #include "libnco.h" /* netCDF Operator (NCO) library */ /* Define inline'd functions in header so source is visible to calling files C99 only: Declare prototype in exactly one header http://www.drdobbs.com/the-new-c-inline-functions/184401540 */ extern int min_int(int a, int b); extern int max_int(int a, int b); inline int min_int(int a, int b){return (a < b) ? a : b;} inline int max_int(int a, int b){return (a > b) ? a : b;} extern long min_lng(long a, long b); extern long max_lng(long a, long b); inline long min_lng(long a, long b){return (a < b) ? a : b;} inline long max_lng(long a, long b){return (a > b) ? 
a : b;} int main(int argc,char **argv) { char **fl_lst_abb=NULL; /* Option n */ char **fl_lst_in; char **gaa_arg=NULL; /* [sng] Global attribute arguments */ char **grp_lst_in=NULL_CEWI; char **var_lst_in=NULL_CEWI; char **wgt_lst_in=NULL_CEWI; char *aux_arg[NC_MAX_DIMS]; char *cmd_ln; char *clm_nfo_sng=NULL; /* [sng] Climatology information string */ char *cnk_arg[NC_MAX_DIMS]; char *cnk_map_sng=NULL_CEWI; /* [sng] Chunking map */ char *cnk_plc_sng=NULL_CEWI; /* [sng] Chunking policy */ char *fl_in=NULL; char *fl_out=NULL; /* Option o */ char *fl_out_tmp=NULL_CEWI; char *fl_pth=NULL; /* Option p */ char *fl_pth_lcl=NULL; /* Option l */ char *grp_out_fll=NULL; /* [sng] Group name */ char *lmt_arg[NC_MAX_DIMS]; char *nco_op_typ_sng=NULL_CEWI; /* [sng] Operation type Option y */ char *nco_pck_plc_sng=NULL_CEWI; /* [sng] Packing policy Option P */ char *nsm_sfx=NULL; /* [sng] Ensemble suffix */ char *opt_crr=NULL; /* [sng] String representation of current long-option name */ char *optarg_lcl=NULL; /* [sng] Local copy of system optarg */ char *ppc_arg[NC_MAX_VARS]; /* [sng] PPC arguments */ char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */ char *wgt_nm=NULL_CEWI; /* [sng] Weight variable */ char trv_pth[]="/"; /* [sng] Root path of traversal tree */ const char * const CVS_Id="$Id$"; const char * const CVS_Revision="$Revision$"; const char * const opt_sht_lst="34567ACcD:d:FG:g:HhL:l:Nn:Oo:p:P:rRt:v:w:X:xY:y:-:"; clm_bnd_sct *cb=NULL; cnk_sct cnk; /* [sct] Chunking structure */ cnv_sct *cnv; /* [sct] Convention structure */ #if defined(__cplusplus) || defined(PGI_CC) ddra_info_sct ddra_info; ddra_info.flg_ddra=False; #else /* !__cplusplus */ ddra_info_sct ddra_info={.flg_ddra=False}; #endif /* !__cplusplus */ dmn_sct **dim=NULL; /* CEWI */ dmn_sct **dmn_out=NULL; /* CEWI */ double *wgt_arr=NULL; /* Option w */ double wgt_avg_scl=0.0; /* [frc] Scalar version of wgt_avg */ extern char *optarg; extern int optind; /* Using naked stdin/stdout/stderr in parallel region generates warning Copy appropriate filehandle to variable scoped shared in parallel clause */ FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ gpe_sct *gpe=NULL; /* [sng] Group Path Editing (GPE) structure */ int *in_id_arr; const int rec_dmn_idx=0; /* [idx] Assumed index of current record dimension where zero assumes record is leading dimension */ int abb_arg_nbr=0; int aux_nbr=0; /* [nbr] Number of auxiliary coordinate hyperslabs specified */ int cnk_map=nco_cnk_map_nil; /* [enm] Chunking map */ int cnk_nbr=0; /* [nbr] Number of chunk sizes */ int cnk_plc=nco_cnk_plc_nil; /* [enm] Chunking policy */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int dmn_rec_fl; int fl_idx; int fl_in_fmt; /* [enm] Input file format */ int fl_nbr=0; int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int flg_input_complete_nbr=0; /* [nbr] Number of record dimensions completed */ int fll_md_old; /* [enm] Old fill mode */ int gaa_nbr=0; /* [nbr] Number of global attributes to add */ int grp_id; /* [ID] Group ID */ int grp_lst_in_nbr=0; /* [nbr] Number of groups explicitly specified by user */ int grp_out_id; /* [ID] Group ID (output) */ int idx=int_CEWI; int idx_rec=0; /* [idx] Index that iterates over number of record dimensions */ int in_id; int lmt_nbr=0; /* Option d. 
NB: lmt_nbr gets incremented */ int log_lvl=0; /* [enm] netCDF library debugging verbosity [0..5] */ int md_open; /* [enm] Mode flag for nc_open() call */ int nbr_dmn_fl; int nbr_dmn_xtr=0; int nbr_rec; /* [nbr] (ncra) Number of record dimensions */ int nbr_var_fix; /* nbr_var_fix gets incremented */ int nbr_var_fl; int nbr_var_prc; /* nbr_var_prc gets incremented */ int nco_op_typ=nco_op_avg; /* [enm] Default operation is averaging */ int nco_pck_plc=nco_pck_plc_nil; /* [enm] Default packing is none */ int opt; int out_id; int ppc_nbr=0; /* [nbr] Number of PPC arguments */ int rcd=NC_NOERR; /* [rcd] Return code */ int thr_idx; /* [idx] Index of current thread */ int thr_nbr=int_CEWI; /* [nbr] Thread number Option t */ int var_lst_in_nbr=0; int var_out_id; /* [ID] Variable ID (output) */ int wgt_nbr=0; int xtr_nbr=0; /* xtr_nbr won't otherwise be set for -c with no -v */ lmt_sct **lmt_rec=NULL; /* [lst] (ncra) Record dimensions */ long idx_rec_crr_in; /* [idx] Index of current record in current input file */ long *idx_rec_out=NULL; /* [idx] Index of current record in output file (0 is first, ...) */ long ilv_srd; /* [idx] Interleave stride */ long *rec_in_cml=NULL; /* [nbr] Number of records, read or not, in all processed files */ long *rec_usd_cml=NULL; /* [nbr] Cumulative number of input records used (catenated by ncrcat or operated on by ncra) */ long rec_dmn_sz=0L; /* [idx] Size of record dimension, if any, in current file (increments by srd) */ long rec_rmn_prv_ssc=0L; /* [idx] Records remaining to be read in current subcycle group */ long rec_rmn_prv_ilv=0L; /* [idx] Records remaining to be read in current interleaved index */ md5_sct *md5=NULL; /* [sct] MD5 configuration */ nco_bool *REC_LST_DSR=NULL; /* [flg] Record is last desired from all input files */ nco_bool *flg_input_complete=NULL; /* [flg] All requested records in record dimension have been read */ nco_bool CNV_ARM; nco_bool EXCLUDE_INPUT_LIST=False; /* Option c */ nco_bool EXTRACT_ALL_COORDINATES=False; /* Option c */ nco_bool EXTRACT_ASSOCIATED_COORDINATES=True; /* Option C */ nco_bool EXTRACT_CLL_MSR=True; /* [flg] Extract cell_measures variables */ nco_bool EXTRACT_FRM_TRM=True; /* [flg] Extract formula_terms variables */ nco_bool FLG_BFR_NRM=False; /* [flg] Current output buffers need normalization */ nco_bool FLG_ILV=False; /* [flg] Interleave Output */ nco_bool FLG_MRO=False; /* [flg] Multi-Record Output */ nco_bool FLG_MSO=False; /* [flg] Multi-Subcycle Output */ nco_bool FL_LST_IN_APPEND=True; /* Option H */ nco_bool FL_LST_IN_FROM_STDIN=False; /* [flg] fl_lst_in comes from stdin */ nco_bool FL_RTR_RMT_LCN; nco_bool FORCE_APPEND=False; /* Option A */ nco_bool FORCE_OVERWRITE=False; /* Option O */ nco_bool FORTRAN_IDX_CNV=False; /* Option F */ nco_bool GRP_VAR_UNN=False; /* [flg] Select union of specified groups and variables */ nco_bool HISTORY_APPEND=True; /* Option h */ nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool MSA_USR_RDR=False; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ nco_bool NORMALIZE_BY_WEIGHT=True; /* [flg] Normalize by command-line weight */ nco_bool NRM_BY_DNM=True; /* [flg] Normalize by denominator */ nco_bool PROMOTE_INTS=False; /* [flg] Promote integers to floating point in output */ nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_CREATE=False; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */ nco_bool SHARE_OPEN=False; /* 
[flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool REC_APN=False; /* [flg] Append records directly to output file */ nco_bool REC_FRS_GRP=False; /* [flg] Record is first in current group */ nco_bool REC_LST_GRP=False; /* [flg] Record is last in current group */ nco_bool REC_SRD_LST=False; /* [flg] Record belongs to last stride of current file */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ nco_bool WRT_TMP_FL=True; /* [flg] Write output to temporary file */ nco_bool flg_cll_mth=True; /* [flg] Add/modify cell_methods attributes */ nco_bool flg_cb=False; /* [flg] Climatology bounds */ nco_bool flg_mmr_cln=True; /* [flg] Clean memory prior to exit */ nco_bool flg_skp1; /* [flg] Current record is not dimension of this variable */ nco_bool flg_skp2; /* [flg] Current record is not dimension of this variable */ nco_bool flg_wgt_by_rec_not_by_fl=False; /* [flg] Weight each record (not file) by command-line numeric weights, if any */ nco_dmn_dne_t *flg_dne=NULL; /* [lst] Flag to check if input dimension -d "does not exist" */ nco_int base_time_srt=nco_int_CEWI; nco_int base_time_crr=nco_int_CEWI; nc_type var_prc_typ_pre_prm=NC_NAT; /* [enm] Type of variable before promotion */ nc_type var_typ_out=NC_NAT; /* [enm] Type of variable in output file */ scv_sct wgt_scv; scv_sct wgt_avg_scv; size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ size_t cnk_csh_byt=NCO_CNK_CSH_BYT_DFL; /* [B] Chunk cache size */ size_t cnk_min_byt=NCO_CNK_SZ_MIN_BYT_DFL; /* [B] Minimize size of variable to chunk */ size_t cnk_sz_byt=0UL; /* [B] Chunk size in bytes */ size_t cnk_sz_scl=0UL; /* [nbr] Chunk size scalar */ size_t hdr_pad=0UL; /* [B] Pad at end of header section */ trv_sct *var_trv; /* [sct] Variable GTT object */ trv_tbl_sct *trv_tbl; /* [lst] Traversal table */ var_sct **var; var_sct **var_fix; var_sct **var_fix_out; var_sct **var_out=NULL_CEWI; var_sct **var_prc; var_sct **var_prc_out; var_sct *wgt=NULL; /* [sct] Raw weight on disk in input file */ var_sct *wgt_out=NULL; /* [sct] Copy of wgt Tally and val members malloc'd & initialized IDs updated each new file by nco_var_mtd_refresh() in file loop Current record value obtained by nco_msa_var_get_rec_trv() in record loop One copy of wgt_out used for all variables */ var_sct *wgt_avg=NULL; /* [sct] Copy of wgt_out created to mimic var_prc_out processing Holds running total and tally of weight Acts as op2 for wgt_out averaging just before var_prc[nbr_var_prc-1] */ #ifdef ENABLE_MPI /* Declare all MPI-specific variables here */ MPI_Comm mpi_cmm=MPI_COMM_WORLD; /* [prc] Communicator */ int prc_rnk; /* [idx] Process rank */ int prc_nbr=0; /* [nbr] Number of MPI processes */ #endif /* !ENABLE_MPI */ static struct option opt_lng[]={ /* Structure ordered by short option key if possible */ /* Long options with no argument, no short option counterpart */ {"cll_msr",no_argument,0,0}, /* [flg] Extract cell_measures variables */ {"cell_measures",no_argument,0,0}, /* [flg] Extract cell_measures variables */ {"no_cll_msr",no_argument,0,0}, /* [flg] Do not extract cell_measures variables */ {"no_cell_measures",no_argument,0,0}, /* [flg] Do not extract cell_measures variables */ {"frm_trm",no_argument,0,0}, /* [flg] Extract formula_terms variables */ {"formula_terms",no_argument,0,0}, /* [flg] Extract formula_terms variables */ {"no_frm_trm",no_argument,0,0}, /* [flg] Do not extract formula_terms variables */ {"no_formula_terms",no_argument,0,0}, /* [flg] Do not extract formula_terms variables */ {"cll_mth",no_argument,0,0}, /* [flg] Add/modify 
cell_methods attributes */ {"cell_methods",no_argument,0,0}, /* [flg] Add/modify cell_methods attributes */ {"no_cll_mth",no_argument,0,0}, /* [flg] Do not add/modify cell_methods attributes */ {"no_cell_methods",no_argument,0,0}, /* [flg] Do not add/modify cell_methods attributes */ {"clean",no_argument,0,0}, /* [flg] Clean memory prior to exit */ {"mmr_cln",no_argument,0,0}, /* [flg] Clean memory prior to exit */ {"drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"dirty",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"mmr_drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"dbl",no_argument,0,0}, /* [flg] Arithmetic convention: promote float to double */ {"flt",no_argument,0,0}, /* [flg] Arithmetic convention: keep single-precision */ {"rth_dbl",no_argument,0,0}, /* [flg] Arithmetic convention: promote float to double */ {"rth_flt",no_argument,0,0}, /* [flg] Arithmetic convention: keep single-precision */ {"hdf4",no_argument,0,0}, /* [flg] Treat file as HDF4 */ {"hdf_upk",no_argument,0,0}, /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */ {"hdf_unpack",no_argument,0,0}, /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */ {"help",no_argument,0,0}, {"hlp",no_argument,0,0}, {"hpss_try",no_argument,0,0}, /* [flg] Search HPSS for unfound files */ {"md5_dgs",no_argument,0,0}, /* [flg] Perform MD5 digests */ {"md5_digest",no_argument,0,0}, /* [flg] Perform MD5 digests */ {"mro",no_argument,0,0}, /* [flg] Multi-Record Output */ {"mso",no_argument,0,0}, /* [flg] Multi-Subcycle Output */ {"multi_record_output",no_argument,0,0}, /* [flg] Multi-Record Output */ {"multi_subcycle_output",no_argument,0,0}, /* [flg] Multi-Subcycle Output */ {"msa_usr_rdr",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ {"msa_user_order",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ {"nsm_fl",no_argument,0,0}, {"nsm_grp",no_argument,0,0}, {"ram_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */ {"create_ram",no_argument,0,0}, /* [flg] Create file in RAM */ {"open_ram",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) in RAM */ {"diskless_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */ {"per_record_weights",no_argument,0,0}, /* [flg] Weight each record (not file) by command-line numeric weights, if any */ {"prm_ints",no_argument,0,0}, /* [flg] Promote integers to floating point in output */ {"prm_ntg",no_argument,0,0}, /* [flg] Promote integers to floating point in output */ {"promote_integers",no_argument,0,0}, /* [flg] Promote integers to floating point in output */ {"prw",no_argument,0,0}, /* [flg] Weight each record (not file) by command-line numeric weights, if any */ {"share_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"create_share",no_argument,0,0}, /* [flg] Create (netCDF3) file(s) with unbuffered I/O */ {"open_share",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) with unbuffered I/O */ {"unbuffered_io",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"uio",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"rec_apn",no_argument,0,0}, /* [flg] Append records directly to output file */ {"record_append",no_argument,0,0}, /* [flg] Append records directly to output file */ {"wrt_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */ 
{"write_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */ {"no_tmp_fl",no_argument,0,0}, /* [flg] Do not write output to temporary file */ {"version",no_argument,0,0}, {"vrs",no_argument,0,0}, /* Long options with argument, no short option counterpart */ {"bfr_sz_hnt",required_argument,0,0}, /* [B] Buffer size hint */ {"buffer_size_hint",required_argument,0,0}, /* [B] Buffer size hint */ {"cb",required_argument,0,0}, /* [sct] Climatology and bounds information */ {"clm_bnd",required_argument,0,0}, /* [sct] Climatology and bounds information */ {"clm_nfo",required_argument,0,0}, /* [sct] Climatology and bounds information */ {"cnk_byt",required_argument,0,0}, /* [B] Chunk size in bytes */ {"chunk_byte",required_argument,0,0}, /* [B] Chunk size in bytes */ {"cnk_csh",required_argument,0,0}, /* [B] Chunk cache size in bytes */ {"chunk_cache",required_argument,0,0}, /* [B] Chunk cache size in bytes */ {"cnk_dmn",required_argument,0,0}, /* [nbr] Chunk size */ {"chunk_dimension",required_argument,0,0}, /* [nbr] Chunk size */ {"cnk_map",required_argument,0,0}, /* [nbr] Chunking map */ {"chunk_map",required_argument,0,0}, /* [nbr] Chunking map */ {"cnk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */ {"chunk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */ {"cnk_plc",required_argument,0,0}, /* [nbr] Chunking policy */ {"chunk_policy",required_argument,0,0}, /* [nbr] Chunking policy */ {"cnk_scl",required_argument,0,0}, /* [nbr] Chunk size scalar */ {"chunk_scalar",required_argument,0,0}, /* [nbr] Chunk size scalar */ {"fl_fmt",required_argument,0,0}, {"file_format",required_argument,0,0}, {"gaa",required_argument,0,0}, /* [sng] Global attribute add */ {"glb_att_add",required_argument,0,0}, /* [sng] Global attribute add */ {"hdr_pad",required_argument,0,0}, {"header_pad",required_argument,0,0}, {"ilv_srd",required_argument,0,0}, /* [flg] Interleave stride */ {"interleave_srd",required_argument,0,0}, /* [flg] Interleave stride */ {"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */ {"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */ {"ppc",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */ {"precision_preserving_compression",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */ {"quantize",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */ {"nsm_sfx",required_argument,0,0}, {"ensemble_suffix",required_argument,0,0}, /* Long options with short counterparts */ {"3",no_argument,0,'3'}, {"4",no_argument,0,'4'}, {"netcdf4",no_argument,0,'4'}, {"5",no_argument,0,'5'}, {"64bit_data",no_argument,0,'5'}, {"cdf5",no_argument,0,'5'}, {"pnetcdf",no_argument,0,'5'}, {"64bit_offset",no_argument,0,'6'}, {"7",no_argument,0,'7'}, {"append",no_argument,0,'A'}, {"coords",no_argument,0,'c'}, {"crd",no_argument,0,'c'}, {"xtr_ass_var",no_argument,0,'c'}, {"xcl_ass_var",no_argument,0,'C'}, {"no_coords",no_argument,0,'C'}, {"no_crd",no_argument,0,'C'}, {"dbg_lvl",required_argument,0,'D'}, {"debug",required_argument,0,'D'}, {"nco_dbg_lvl",required_argument,0,'D'}, {"dimension",required_argument,0,'d'}, {"dmn",required_argument,0,'d'}, {"fortran",no_argument,0,'F'}, {"ftn",no_argument,0,'F'}, {"fl_lst_in",no_argument,0,'H'}, {"file_list",no_argument,0,'H'}, 
{"history",no_argument,0,'h'}, {"hst",no_argument,0,'h'}, {"dfl_lvl",required_argument,0,'L'}, /* [enm] Deflate level */ {"deflate",required_argument,0,'L'}, /* [enm] Deflate level */ {"local",required_argument,0,'l'}, {"lcl",required_argument,0,'l'}, {"no-normalize-by-weight",no_argument,0,'N',}, {"no_nrm_by_wgt",no_argument,0,'N',}, {"nintap",required_argument,0,'n'}, {"overwrite",no_argument,0,'O'}, {"ovr",no_argument,0,'O'}, {"output",required_argument,0,'o'}, {"fl_out",required_argument,0,'o'}, {"path",required_argument,0,'p'}, {"pack",required_argument,0,'P'}, {"retain",no_argument,0,'R'}, {"rtn",no_argument,0,'R'}, {"revision",no_argument,0,'r'}, {"thr_nbr",required_argument,0,'t'}, {"threads",required_argument,0,'t'}, {"omp_num_threads",required_argument,0,'t'}, {"variable",required_argument,0,'v'}, {"wgt",required_argument,0,'w'}, {"weight",required_argument,0,'w'}, {"auxiliary",required_argument,0,'X'}, {"exclude",no_argument,0,'x'}, {"xcl",no_argument,0,'x'}, {"pseudonym",required_argument,0,'Y'}, {"program",required_argument,0,'Y'}, {"prg_nm",required_argument,0,'Y'}, {"math",required_argument,0,'y'}, {"operation",required_argument,0,'y'}, {"op_typ",required_argument,0,'y'}, {0,0,0,0} }; /* end opt_lng */ int opt_idx=0; /* Index of current long option into opt_lng array */ #ifdef _LIBINTL_H setlocale(LC_ALL,""); /* LC_ALL sets all localization tokens to same value */ bindtextdomain("nco","/home/zender/share/locale"); /* ${LOCALEDIR} is e.g., /usr/share/locale */ /* MO files should be in ${LOCALEDIR}/es/LC_MESSAGES */ textdomain("nco"); /* PACKAGE is name of program or library */ #endif /* not _LIBINTL_H */ /* Start timer and save command line */ ddra_info.tmr_flg=nco_tmr_srt; rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); ddra_info.tmr_flg=nco_tmr_mtd; cmd_ln=nco_cmd_ln_sng(argc,argv); /* Get program name and set program enum (e.g., nco_prg_id=ncra) */ nco_prg_nm=nco_prg_prs(argv[0],&nco_prg_id); #ifdef ENABLE_MPI /* MPI Initialization */ if(False) (void)fprintf(stdout,gettext("%s: WARNING Compiled with MPI\n"),nco_prg_nm); MPI_Init(&argc,&argv); MPI_Comm_size(mpi_cmm,&prc_nbr); MPI_Comm_rank(mpi_cmm,&prc_rnk); #endif /* !ENABLE_MPI */ /* Parse command line arguments */ while(1){ /* getopt_long_only() allows one dash to prefix long options */ opt=getopt_long(argc,argv,opt_sht_lst,opt_lng,&opt_idx); /* NB: access to opt_crr is only valid when long_opt is detected */ if(opt == EOF) break; /* Parse positional arguments once getopt_long() returns EOF */ opt_crr=(char *)strdup(opt_lng[opt_idx].name); /* Process long options without short option counterparts */ if(opt == 0){ if(!strcmp(opt_crr,"baa") || !strcmp(opt_crr,"bit_alg")){ nco_baa_cnv=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif baa */ if(!strcmp(opt_crr,"bfr_sz_hnt") || !strcmp(opt_crr,"buffer_size_hint")){ bfr_sz_hnt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_byt") || !strcmp(opt_crr,"chunk_byte")){ cnk_sz_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_byt */ if(!strcmp(opt_crr,"cnk_csh") || !strcmp(opt_crr,"chunk_cache")){ cnk_csh_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_csh_byt */ if(!strcmp(opt_crr,"cnk_min") || 
!strcmp(opt_crr,"chunk_min")){ cnk_min_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_min */ if(!strcmp(opt_crr,"cnk_dmn") || !strcmp(opt_crr,"chunk_dimension")){ /* Copy limit argument for later processing */ cnk_arg[cnk_nbr]=(char *)strdup(optarg); cnk_nbr++; } /* endif cnk */ if(!strcmp(opt_crr,"cnk_scl") || !strcmp(opt_crr,"chunk_scalar")){ cnk_sz_scl=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_map") || !strcmp(opt_crr,"chunk_map")){ /* Chunking map */ cnk_map_sng=(char *)strdup(optarg); cnk_map=nco_cnk_map_get(cnk_map_sng); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_plc") || !strcmp(opt_crr,"chunk_policy")){ /* Chunking policy */ cnk_plc_sng=(char *)strdup(optarg); cnk_plc=nco_cnk_plc_get(cnk_plc_sng); } /* endif cnk */ if(!strcmp(opt_crr,"cll_msr") || !strcmp(opt_crr,"cell_measures")) EXTRACT_CLL_MSR=True; /* [flg] Extract cell_measures variables */ if(!strcmp(opt_crr,"no_cll_msr") || !strcmp(opt_crr,"no_cell_measures")) EXTRACT_CLL_MSR=False; /* [flg] Do not extract cell_measures variables */ if(!strcmp(opt_crr,"cb") || !strcmp(opt_crr,"clm_bnd") || !strcmp(opt_crr,"clm_nfo") || !strcmp(opt_crr,"climatology_information")){ clm_nfo_sng=(char *)strdup(optarg); flg_cb=True; /* [sct] Process climatology and bounds information */ } /* !clm_nfo */ if(!strcmp(opt_crr,"frm_trm") || !strcmp(opt_crr,"formula_terms")) EXTRACT_FRM_TRM=True; /* [flg] Extract formula_terms variables */ if(!strcmp(opt_crr,"no_frm_trm") || !strcmp(opt_crr,"no_formula_terms")) EXTRACT_FRM_TRM=False; /* [flg] Do not extract formula_terms variables */ if(!strcmp(opt_crr,"cll_mth") || !strcmp(opt_crr,"cell_methods")) flg_cll_mth=True; /* [flg] Add/modify cell_methods attributes */ if(!strcmp(opt_crr,"no_cll_mth") || !strcmp(opt_crr,"no_cell_methods")) flg_cll_mth=False; /* [flg] Add/modify cell_methods attributes */ if(!strcmp(opt_crr,"mmr_cln") || !strcmp(opt_crr,"clean")) flg_mmr_cln=True; /* [flg] Clean memory prior to exit */ if(!strcmp(opt_crr,"drt") || !strcmp(opt_crr,"mmr_drt") || !strcmp(opt_crr,"dirty")) flg_mmr_cln=False; /* [flg] Clean memory prior to exit */ if(!strcmp(opt_crr,"fl_fmt") || !strcmp(opt_crr,"file_format")) rcd=nco_create_mode_prs(optarg,&fl_out_fmt); if(!strcmp(opt_crr,"dbl") || !strcmp(opt_crr,"rth_dbl")) nco_rth_cnv=nco_rth_flt_dbl; /* [flg] Arithmetic convention: promote float to double */ if(!strcmp(opt_crr,"flt") || !strcmp(opt_crr,"rth_flt")) nco_rth_cnv=nco_rth_flt_flt; /* [flg] Arithmetic convention: keep single-precision */ if(!strcmp(opt_crr,"gaa") || !strcmp(opt_crr,"glb_att_add")){ gaa_arg=(char **)nco_realloc(gaa_arg,(gaa_nbr+1)*sizeof(char *)); gaa_arg[gaa_nbr++]=(char *)strdup(optarg); } /* endif gaa */ if(!strcmp(opt_crr,"hdf4")) nco_fmt_xtn=nco_fmt_xtn_hdf4; /* [enm] Treat file as HDF4 */ if(!strcmp(opt_crr,"hdf_upk") || !strcmp(opt_crr,"hdf_unpack")) nco_upk_cnv=nco_upk_HDF_MOD10; /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */ if(!strcmp(opt_crr,"hdr_pad") || !strcmp(opt_crr,"header_pad")){ hdr_pad=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif "hdr_pad" */ if(!strcmp(opt_crr,"help") || !strcmp(opt_crr,"hlp")){ (void)nco_usg_prn(); nco_exit(EXIT_SUCCESS); } /* endif "help" */ if(!strcmp(opt_crr,"hpss_try")) HPSS_TRY=True; /* [flg] Search HPSS for unfound files */ 
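/* NB: Illustrative arithmetic for the interleave option parsed next: an
   interleave stride ilv partitions each subcycle of ssc records into ilv
   interleave classes, so hypothetical values ssc=12 and ilv=3 yield
   ilv_per_ssc=ssc/ilv=4 records per class (cf. the ilv_per_ssc computation in
   the record loop far below). The numbers here are examples, not defaults. */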
if(!strcmp(opt_crr,"ilv_srd") || !strcmp(opt_crr,"interleave_stride")){ ilv_srd=strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); if(ilv_srd < 1L){ (void)fprintf(stdout,"%s: ERROR Interleave stride argument is %li but must be > 0\n",nco_prg_nm_get(),ilv_srd); nco_exit(EXIT_FAILURE); } /* end if */ FLG_ILV=FLG_MRO=True; /* [flg] Interleave stride */ } /* !ilv_srd */ if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){ log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); nc_set_log_level(log_lvl); } /* !log_lvl */ if(!strcmp(opt_crr,"md5_dgs") || !strcmp(opt_crr,"md5_digest")){ if(!md5) md5=nco_md5_ini(); md5->dgs=True; if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO Will perform MD5 digests of input and output hyperslabs\n",nco_prg_nm_get()); } /* endif "md5_dgs" */ if(!strcmp(opt_crr,"mro") || !strcmp(opt_crr,"multi_record_output")) FLG_MRO=True; /* [flg] Multi-Record Output */ if(!strcmp(opt_crr,"mso") || !strcmp(opt_crr,"multi_subcycle_output")) FLG_MSO=True; /* [flg] Multi-Subcycle Output */ if(!strcmp(opt_crr,"msa_usr_rdr") || !strcmp(opt_crr,"msa_user_order")) MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ if(!strcmp(opt_crr,"nsm_fl") || !strcmp(opt_crr,"nsm_file") || !strcmp(opt_crr,"ensemble_file")){ if(nco_prg_nm) nco_prg_nm=(char *)nco_free(nco_prg_nm); nco_prg_nm=nco_prg_prs("ncfe",&nco_prg_id); } /* endif nsm_fl */ if(!strcmp(opt_crr,"nsm_grp") || !strcmp(opt_crr,"nsm_group") || !strcmp(opt_crr,"ensemble_group")){ if(nco_prg_nm) nco_prg_nm=(char *)nco_free(nco_prg_nm); nco_prg_nm=nco_prg_prs("ncge",&nco_prg_id); } /* endif nsm_grp */ if(!strcmp(opt_crr,"nsm_sfx") || !strcmp(opt_crr,"ensemble_suffix")) nsm_sfx=(char *)strdup(optarg); if(!strcmp(opt_crr,"per_record_weights") || !strcmp(opt_crr,"prw")) flg_wgt_by_rec_not_by_fl=True; /* [flg] Weight each record (not file) by command-line numeric weights, if any */ if(!strcmp(opt_crr,"ppc") || !strcmp(opt_crr,"precision_preserving_compression") || !strcmp(opt_crr,"quantize")){ ppc_arg[ppc_nbr]=(char *)strdup(optarg); ppc_nbr++; } /* endif "ppc" */ if(!strcmp(opt_crr,"prm_ints") || !strcmp(opt_crr,"prm_ntg") || !strcmp(opt_crr,"promote_integers")){ PROMOTE_INTS=True; /* [flg] Promote integers to floating point in output */ if(nco_prg_id_get() != ncra){ (void)fprintf(stdout,"%s: ERROR Option --promote_integers to archive arithmetically processed integer-valued variables as floating point values is only supported with ncra\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* end if */ } /* !prm_int */ if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"create_ram") || !strcmp(opt_crr,"diskless_all")) RAM_CREATE=True; /* [flg] Create (netCDF3) file(s) in RAM */ if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"open_ram") || !strcmp(opt_crr,"diskless_all")) RAM_OPEN=True; /* [flg] Open (netCDF3) file(s) in RAM */ if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"create_share")) SHARE_CREATE=True; /* [flg] Create (netCDF3) file(s) with unbuffered I/O */ if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"open_share")) SHARE_OPEN=True; /* [flg] Open (netCDF3) file(s) with unbuffered I/O */ if(!strcmp(opt_crr,"rec_apn") || !strcmp(opt_crr,"record_append")){ REC_APN=True; /* [flg] Append records directly to output file */ 
FORCE_APPEND=True; } /* endif "rec_apn" */ if(!strcmp(opt_crr,"vrs") || !strcmp(opt_crr,"version")){ (void)nco_vrs_prn(CVS_Id,CVS_Revision); nco_exit(EXIT_SUCCESS); } /* endif "vrs" */ if(!strcmp(opt_crr,"wrt_tmp_fl") || !strcmp(opt_crr,"write_tmp_fl")) WRT_TMP_FL=True; if(!strcmp(opt_crr,"no_tmp_fl")) WRT_TMP_FL=False; } /* opt != 0 */ /* Process short options */ switch(opt){ case 0: /* Long options have already been processed, return */ break; case '3': /* Request netCDF3 output storage format */ fl_out_fmt=NC_FORMAT_CLASSIC; break; case '4': /* Request netCDF4 output storage format */ fl_out_fmt=NC_FORMAT_NETCDF4; break; case '5': /* Request netCDF3 64-bit offset+data storage (i.e., pnetCDF) format */ fl_out_fmt=NC_FORMAT_CDF5; break; case '6': /* Request netCDF3 64-bit offset output storage format */ fl_out_fmt=NC_FORMAT_64BIT_OFFSET; break; case '7': /* Request netCDF4-classic output storage format */ fl_out_fmt=NC_FORMAT_NETCDF4_CLASSIC; break; case 'A': /* Toggle FORCE_APPEND */ FORCE_APPEND=True; break; case 'C': /* Extract all coordinates associated with extracted variables? */ EXTRACT_ASSOCIATED_COORDINATES=False; break; case 'c': EXTRACT_ALL_COORDINATES=True; break; case 'D': /* Debugging level. Default is 0. */ nco_dbg_lvl=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); break; case 'd': /* Copy limit argument for later processing */ lmt_arg[lmt_nbr]=(char *)strdup(optarg); lmt_nbr++; break; case 'F': /* Toggle index convention. Default is 0-based arrays (C-style). */ FORTRAN_IDX_CNV=!FORTRAN_IDX_CNV; break; case 'G': /* Apply Group Path Editing (GPE) to output group */ /* NB: GNU getopt() optional argument syntax is ugly (requires "=" sign) so avoid it http://stackoverflow.com/questions/1052746/getopt-does-not-parse-optional-arguments-to-parameters */ gpe=nco_gpe_prs_arg(optarg); fl_out_fmt=NC_FORMAT_NETCDF4; break; case 'g': /* Copy group argument for later processing */ /* Replace commas with hashes when within braces (convert back later) */ optarg_lcl=(char *)strdup(optarg); (void)nco_rx_comma2hash(optarg_lcl); grp_lst_in=nco_lst_prs_2D(optarg_lcl,",",&grp_lst_in_nbr); optarg_lcl=(char *)nco_free(optarg_lcl); break; case 'H': /* Toggle writing input file list attribute */ FL_LST_IN_APPEND=!FL_LST_IN_APPEND; break; case 'h': /* Toggle appending to history global attribute */ HISTORY_APPEND=!HISTORY_APPEND; break; case 'L': /* [enm] Deflate level. Default is 0. */ dfl_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); break; case 'l': /* Local path prefix for files retrieved from remote file system */ fl_pth_lcl=(char *)strdup(optarg); break; case 'N': NRM_BY_DNM=False; NORMALIZE_BY_WEIGHT=False; break; case 'n': /* NINTAP-style abbreviation of files to average */ fl_lst_abb=nco_lst_prs_2D(optarg,",",&abb_arg_nbr); if(abb_arg_nbr < 1 || abb_arg_nbr > 6){ (void)fprintf(stdout,gettext("%s: ERROR Incorrect abbreviation for file list\n"),nco_prg_nm_get()); (void)nco_usg_prn(); nco_exit(EXIT_FAILURE); } /* end if */ break; case 'O': /* Toggle FORCE_OVERWRITE */ FORCE_OVERWRITE=!FORCE_OVERWRITE; break; case 'o': /* Name of output file */ fl_out=(char *)strdup(optarg); break; case 'p': /* Common file path */ fl_pth=(char *)strdup(optarg); break; case 'P': /* Packing policy */ nco_pck_plc_sng=(char *)strdup(optarg); nco_pck_plc=nco_pck_plc_get(nco_pck_plc_sng); break; case 'R': /* Toggle removal of remotely-retrieved-files. 
Default is True. */
  RM_RMT_FL_PST_PRC=!RM_RMT_FL_PST_PRC;
  break;
case 'r': /* Print CVS program information and copyright notice */
  (void)nco_vrs_prn(CVS_Id,CVS_Revision);
  (void)nco_lbr_vrs_prn();
  (void)nco_cpy_prn();
  (void)nco_cnf_prn();
  nco_exit(EXIT_SUCCESS);
  break;
case 't': /* Thread number */
  thr_nbr=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
  if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
  break;
case 'v': /* Variables to extract/exclude */
  /* Replace commas with hashes when within braces (convert back later) */
  optarg_lcl=(char *)strdup(optarg);
  (void)nco_rx_comma2hash(optarg_lcl);
  var_lst_in=nco_lst_prs_2D(optarg_lcl,",",&var_lst_in_nbr);
  optarg_lcl=(char *)nco_free(optarg_lcl);
  xtr_nbr=var_lst_in_nbr;
  break;
case 'w': /* Per-file and per-record weights */
  if(isalpha(optarg[0]) || optarg[0] == '/'){
    wgt_nm=(char *)strdup(optarg);
  }else{ /* !wgt_nm */
    optarg_lcl=(char *)strdup(optarg);
    wgt_lst_in=nco_lst_prs_2D(optarg_lcl,",",&wgt_nbr);
    optarg_lcl=(char *)nco_free(optarg_lcl);
    wgt_arr=(double *)nco_malloc(wgt_nbr*sizeof(double));
    for(idx=0L;idx<wgt_nbr;idx++){
      wgt_arr[idx]=strtod(wgt_lst_in[idx],&sng_cnv_rcd);
      if(*sng_cnv_rcd) nco_sng_cnv_err(wgt_lst_in[idx],"strtod",sng_cnv_rcd);
      wgt_avg_scl+=wgt_arr[idx];
    } /* end loop over elements */
    /* When normalizing, scale weights to unit mean, e.g., command-line
       weights 1,2,3 have mean 2 and therefore become 0.5,1.0,1.5
       (illustrative values) */
    if(NORMALIZE_BY_WEIGHT) wgt_avg_scl/=wgt_nbr; else wgt_avg_scl=1.0/wgt_nbr;
    assert(wgt_avg_scl != 0.0);
    if(NORMALIZE_BY_WEIGHT)
      for(idx=0L;idx<wgt_nbr;idx++)
        wgt_arr[idx]/=wgt_avg_scl;
    if(nco_dbg_lvl >= nco_dbg_std){
      (void)fprintf(stderr,"%s: INFO per-file or (with --prw) per-record weights: ",nco_prg_nm_get());
      for(idx=0L;idx<wgt_nbr;idx++) (void)fprintf(stderr,"wgt_arr[%d]=%g%s",idx,wgt_arr[idx],idx < wgt_nbr-1 ? ", " : "\n");
    } /* !dbg */
  } /* !wgt_nm */
  break;
case 'X': /* Copy auxiliary coordinate argument for later processing */
  aux_arg[aux_nbr]=(char *)strdup(optarg);
  aux_nbr++;
  MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
  break;
case 'x': /* Exclude rather than extract variables specified with -v */
  EXCLUDE_INPUT_LIST=True;
  break;
case 'Y': /* Pseudonym */
  /* Call nco_prg_prs() to reset pseudonym */
  optarg_lcl=(char *)strdup(optarg);
  if(nco_prg_nm) nco_prg_nm=(char *)nco_free(nco_prg_nm);
  nco_prg_nm=nco_prg_prs(optarg_lcl,&nco_prg_id);
  optarg_lcl=(char *)nco_free(optarg_lcl);
  break;
case 'y': /* Operation type */
  nco_op_typ_sng=(char *)strdup(optarg);
  if(nco_prg_id == ncra || nco_prg_id == ncfe) nco_op_typ=nco_op_typ_get(nco_op_typ_sng);
  break;
case '?': /* Question mark means unrecognized option, print proper usage then EXIT_FAILURE */
  (void)fprintf(stdout,"%s: ERROR in command-line syntax/options. Missing or unrecognized option. Please reformulate command accordingly.\n",nco_prg_nm_get());
  (void)nco_usg_prn();
  nco_exit(EXIT_FAILURE);
  break;
case '-': /* Long options are not allowed */
  (void)fprintf(stderr,"%s: ERROR Long options are not available in this build. Use single letter options instead.\n",nco_prg_nm_get());
  nco_exit(EXIT_FAILURE);
  break;
default: /* Print proper usage */
  (void)fprintf(stdout,"%s: ERROR in command-line syntax/options.
Please reformulate command accordingly.\n",nco_prg_nm_get()); (void)nco_usg_prn(); nco_exit(EXIT_FAILURE); break; } /* end switch */ if(opt_crr) opt_crr=(char *)nco_free(opt_crr); } /* end while loop */ /* Set/report global chunk cache */ rcd+=nco_cnk_csh_ini(cnk_csh_byt); /* Process positional arguments and fill-in filenames */ fl_lst_in=nco_fl_lst_mk(argv,argc,optind,&fl_nbr,&fl_out,&FL_LST_IN_FROM_STDIN,FORCE_OVERWRITE); if(flg_wgt_by_rec_not_by_fl && nco_prg_id_get() != ncra){ (void)fprintf(fp_stdout,"%s: ERROR Illegal invocation of flag --per_record_weights (or --prw)\nHINT: Per-record weighting by command-line numeric weights is only available with ncra\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* flg_wgt_by_rec_not_by_fl */ if(wgt_arr){ if(wgt_nbr != fl_nbr && !flg_wgt_by_rec_not_by_fl){ (void)fprintf(fp_stdout,"%s: ERROR User-specified per-file weight array has %d elements but there are %d input files.\nHINT: Specify one weight per input file, or toggle the default behavior by invoking with --per_record_weights (or synonym --prw) which causes command-line weights to be applied per-record not per-file.\n",nco_prg_nm_get(),wgt_nbr,fl_nbr); nco_exit(EXIT_FAILURE); } /* !wgt_nbr */ } /* !wgt_arr */ /* Initialize thread information */ thr_nbr=nco_openmp_ini(thr_nbr); in_id_arr=(int *)nco_malloc(thr_nbr*sizeof(int)); /* Parse filename */ fl_in=nco_fl_nm_prs(fl_in,0,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth); /* Make sure file is on local system and is readable or die trying */ fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id); (void)nco_inq_format(in_id,&fl_in_fmt); /* Initialize traversal table */ trv_tbl_init(&trv_tbl); /* Construct GTT, Group Traversal Table (groups,variables,dimensions, limits) */ (void)nco_bld_trv_tbl(in_id,trv_pth,lmt_nbr,lmt_arg,aux_nbr,aux_arg,MSA_USR_RDR,FORTRAN_IDX_CNV,grp_lst_in,grp_lst_in_nbr,var_lst_in,var_lst_in_nbr,EXTRACT_ALL_COORDINATES,GRP_VAR_UNN,False,EXCLUDE_INPUT_LIST,EXTRACT_ASSOCIATED_COORDINATES,EXTRACT_CLL_MSR,EXTRACT_FRM_TRM,nco_pck_plc_nil,&flg_dne,trv_tbl); /* Were all user-specified dimensions found? 
*/ (void)nco_chk_dmn(lmt_nbr,flg_dne); /* Store ncge ensemble suffix in table */ if(nco_prg_id == ncge && nsm_sfx) trv_tbl->nsm_sfx=nsm_sfx; /* Get number of variables, dimensions, and global attributes in file, file format */ (void)trv_tbl_inq((int *)NULL,(int *)NULL,(int *)NULL,&nbr_dmn_fl,&dmn_rec_fl,(int *)NULL,(int *)NULL,(int *)NULL,&nbr_var_fl,trv_tbl); /* Record handling operators only */ if(nco_prg_id == ncra || nco_prg_id == ncrcat){ /* Build record dimensions array */ (void)nco_bld_rec_dmn(in_id,FORTRAN_IDX_CNV,&lmt_rec,&nbr_rec,trv_tbl); /* Allocate arrays for multi-records cases */ flg_input_complete=(nco_bool *)nco_malloc(nbr_rec*sizeof(nco_bool)); idx_rec_out=(long *)nco_malloc(nbr_rec*sizeof(long)); rec_in_cml=(long *)nco_malloc(nbr_rec*sizeof(long)); rec_usd_cml=(long *)nco_malloc(nbr_rec*sizeof(long)); REC_LST_DSR=(nco_bool *)nco_malloc(nbr_rec*sizeof(nco_bool)); /* Initialize arrays for multi-records cases */ for(idx_rec=0;idx_rec<nbr_rec;idx_rec++){ flg_input_complete[idx_rec]=False; idx_rec_out[idx_rec]=0L; rec_in_cml[idx_rec]=0L; rec_usd_cml[idx_rec]=0L; REC_LST_DSR[idx_rec]=False; } /* Initialize arrays */ } /* Record handling operators only */ /* Is this an ARM-format data file? */ CNV_ARM=nco_cnv_arm_inq(in_id); /* NB: nco_cnv_arm_base_time_get() with same nc_id contains OpenMP critical region */ if(CNV_ARM) base_time_srt=nco_cnv_arm_base_time_get(in_id); /* Fill-in variable structure list for all extracted variables */ var=nco_fll_var_trv(in_id,&xtr_nbr,trv_tbl); /* Duplicate to output array */ var_out=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *)); for(idx=0;idx<xtr_nbr;idx++){ var_out[idx]=nco_var_dpl(var[idx]); (void)nco_xrf_var(var[idx],var_out[idx]); (void)nco_xrf_dmn(var_out[idx]); } /* end loop over xtr */ /* Refresh var_out with dim_out data */ (void)nco_var_dmn_refresh(var_out,xtr_nbr); /* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */ cnv=nco_cnv_ini(in_id); /* Divide variable lists into lists of fixed variables and variables to be processed */ (void)nco_var_lst_dvd(var,var_out,xtr_nbr,cnv,True,nco_pck_plc_nil,nco_pck_map_nil,(dmn_sct **)NULL,0,&var_fix,&var_fix_out,&nbr_var_fix,&var_prc,&var_prc_out,&nbr_var_prc,trv_tbl); /* Store processed and fixed variables info into GTT */ (void)nco_var_prc_fix_trv(nbr_var_prc,var_prc,nbr_var_fix,var_fix,trv_tbl); /* Make output and input files consanguinous */ if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt; /* Initialize, decode, and set PPC information */ if(ppc_nbr > 0) nco_ppc_ini(in_id,&dfl_lvl,fl_out_fmt,ppc_arg,ppc_nbr,trv_tbl); /* Verify output file format supports requested actions */ (void)nco_fl_fmt_vet(fl_out_fmt,cnk_nbr,dfl_lvl); /* Open output file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); /* Initialize chunking from user-specified inputs */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) rcd+=nco_cnk_ini(in_id,fl_out,cnk_arg,cnk_nbr,cnk_map,cnk_plc,cnk_csh_byt,cnk_min_byt,cnk_sz_byt,cnk_sz_scl,&cnk); /* Keep integers promoted to double-precision on output */ // if(PROMOTE_INTS) (void)nco_set_prm_typ_out(xtr_nbr,var,trv_tbl); // (void)nco_set_prm_typ_out(xtr_nbr,var,trv_tbl); if(nco_prg_id_get() == ncra) (void)nco_set_prm_typ_out(PROMOTE_INTS,xtr_nbr,var,trv_tbl); /* Define dimensions, extracted groups, variables, and attributes in output file */ 
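/* NB: From nco_xtr_dfn() below until nco_enddef()/nco__enddef() further down,
   the output file remains in netCDF define mode, so only metadata
   (dimensions, variables, attributes) may be created; bulk data writes can
   begin only after define mode ends. This is standard netCDF semantics,
   summarized here for orientation. */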
(void)nco_xtr_dfn(in_id,out_id,&cnk,dfl_lvl,gpe,md5,!FORCE_APPEND,!REC_APN,False,nco_pck_plc_nil,(char *)NULL,trv_tbl);

/* Define ensemble fixed variables (True parameter) */
if(nco_prg_id_get() == ncge) (void)nco_nsm_dfn_wrt(in_id,out_id,&cnk,dfl_lvl,gpe,True,trv_tbl);

/* Catenate time-stamped command line to "history" global attribute */
if(HISTORY_APPEND) (void)nco_hst_att_cat(out_id,cmd_ln);
if(HISTORY_APPEND && FORCE_APPEND) (void)nco_prv_att_cat(fl_in,in_id,out_id);
if(gaa_nbr > 0) (void)nco_glb_att_add(out_id,gaa_arg,gaa_nbr);
if(HISTORY_APPEND) (void)nco_vrs_att_cat(out_id);
if(thr_nbr > 1 && HISTORY_APPEND) (void)nco_thr_att_cat(out_id,thr_nbr);

/* Add input file list global attribute */
if(FL_LST_IN_APPEND && HISTORY_APPEND && FL_LST_IN_FROM_STDIN) (void)nco_fl_lst_att_cat(out_id,fl_lst_in,fl_nbr);

/* Turn-off default filling behavior to enhance efficiency */
(void)nco_set_fill(out_id,NC_NOFILL,&fll_md_old);

/* Add climatology_bounds attribute to output file (before cell_methods) */
if(flg_cb && (nco_prg_id == ncra || nco_prg_id == ncrcat || nco_prg_id == ncfe)){
  char bnd_sng[]="bounds"; /* CF-standard time-bounds attribute name */
  char clm_sng[]="climatology"; /* CF-standard climatology bounds attribute name */
  char cln_sng[]="calendar"; /* CF-standard calendar attribute name */
  char unt_sng[]="units"; /* NUG-standard units attribute name */
  long att_sz;
  nc_type att_typ;
  cb=(clm_bnd_sct *)nco_malloc(sizeof(clm_bnd_sct));
  cb->bnd2clm=False; /* [flg] Convert time-bounds to climatology bounds */
  cb->bnd_val=NULL; /* [frc] Time coordinate variable values */
  cb->clm2bnd=False; /* [flg] Convert climatology bounds to time-bounds */
  cb->clm2clm=False; /* [flg] Convert climatology bounds to climatology bounds */
  cb->clm_bnd_id_in=NC_MIN_INT; /* [id] Climatology bounds ID */
  cb->clm_bnd_id_out=NC_MIN_INT; /* [id] Climatology bounds ID */
  cb->clm_bnd_in=False; /* [flg] Climatology bounds appear in input */
  cb->clm_bnd_nm=NULL; /* [sng] Climatology bounds name */
  cb->cln_val=NULL; /* [sng] Bounds calendar value */
  cb->dmn_srt_end[0]=0L;cb->dmn_srt_end[1]=1L;
  cb->dmn_srt_srt[0]=0L;cb->dmn_srt_srt[1]=0L;
  cb->mth_end=NC_MIN_INT; /* [mth] Month at climo end [1..12] format */
  cb->mth_srt=NC_MIN_INT; /* [mth] Month at climo start [1..12] format */
  cb->tm_bnd_id_in=NC_MIN_INT; /* [id] Time-bounds ID */
  cb->tm_bnd_in=False; /* [flg] Time-bounds appear in input */
  cb->tm_bnd_nm=NULL; /* [sng] Time-bounds name */
  cb->tm_crd_id_in=NC_MIN_INT; /* [id] Time coordinate ID */
  cb->tm_crd_nm=NULL; /* [sng] Time coordinate name */
  cb->tm_val=NULL; /* [frc] Time (or climatology) bounds variable values */
  cb->tpd=NC_MIN_INT; /* [nbr] Timesteps per day [0=none, 1, 2, 3, 4, 6, 8, 12, 24, ...] */
  cb->type=NC_NAT; /* [enm] Time coordinate type */
  cb->unt_val=NULL; /* [sng] Bounds units value */
  cb->yr_end=NC_MIN_INT; /* [yr] Year at climo end */
  cb->yr_srt=NC_MIN_INT; /* [yr] Year at climo start */
  if((rcd=nco_inq_varid_flg(in_id,"time",&cb->tm_crd_id_in)) == NC_NOERR) cb->tm_crd_nm=strdup("time");
  else if((rcd=nco_inq_varid_flg(in_id,"Time",&cb->tm_crd_id_in)) == NC_NOERR) cb->tm_crd_nm=strdup("Time");
  if(cb->tm_crd_id_in != NC_MIN_INT){
    rcd=nco_inq_vartype(in_id,cb->tm_crd_id_in,&cb->type);
    rcd=nco_inq_att_flg(in_id,cb->tm_crd_id_in,clm_sng,&att_typ,&att_sz);
    if(rcd == NC_NOERR && att_typ == NC_CHAR){
      cb->clm_bnd_nm=(char *)nco_malloc((att_sz+1L)*nco_typ_lng(att_typ));
      rcd+=nco_get_att(in_id,cb->tm_crd_id_in,clm_sng,cb->clm_bnd_nm,att_typ);
      /* NUL-terminate attribute before using strstr() */
cb->clm_bnd_nm[att_sz]='\0'; cb->clm_bnd_in=True; }else{ cb->clm_bnd_nm=strdup("climatology_bounds"); rcd=NC_NOERR; } /* !rcd && att_typ */ rcd=nco_inq_att_flg(in_id,cb->tm_crd_id_in,bnd_sng,&att_typ,&att_sz); if(rcd == NC_NOERR && att_typ == NC_CHAR){ cb->tm_bnd_nm=(char *)nco_malloc((att_sz+1L)*nco_typ_lng(att_typ)); rcd+=nco_get_att(in_id,cb->tm_crd_id_in,bnd_sng,cb->tm_bnd_nm,att_typ); /* NUL-terminate attribute before using strstr() */ cb->tm_bnd_nm[att_sz]='\0'; cb->tm_bnd_in=True; }else{ cb->tm_bnd_nm=strdup("time_bnds"); rcd=NC_NOERR; } /* !rcd && att_typ */ /* Input file must have either (but not both) time bounds or climatology bounds */ if(cb->tm_bnd_in && cb->clm_bnd_in){ (void)fprintf(stderr,"%s: WARNING Climatology bounds invoked on time coordinate with both time bounds attribute \"%s\" (value = \"%s\") and climatology bounds attribute \"%s\" (value = \"%s\"). Results would be ambiguous. Turning-off climatology bounds mode.\n",nco_prg_nm_get(),bnd_sng,cb->tm_bnd_nm,clm_sng,cb->clm_bnd_nm); flg_cb=False; goto skp_cb; } /* !(cb->tm_bnd_in && cb->clm_bnd_in) */ if(!cb->tm_bnd_in && !cb->clm_bnd_in){ (void)fprintf(stderr,"%s: WARNING Climatology bounds invoked on time coordinate with neither time bounds attribute \"%s\" nor climatology bounds attribute \"%s\". No way to obtain bounding time values. Turning-off climatology bounds mode.\n",nco_prg_nm_get(),bnd_sng,clm_sng); flg_cb=False; goto skp_cb; } /* !cb->tm_bnd_in && !cb->clm_bnd_in */ }else{ /* !tm_crd_id_in */ if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(stderr,"%s: WARNING Climatology bounds invoked on dataset with unknown time coordinate. Turning-off climatology bounds mode.\n",nco_prg_nm_get()); flg_cb=False; rcd=NC_NOERR; goto skp_cb; } /* !tm_crd_in */ if(cb->tm_bnd_in){ rcd=nco_inq_varid_flg(in_id,cb->tm_bnd_nm,&cb->tm_bnd_id_in); if(cb->tm_bnd_id_in == NC_MIN_INT){ if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(stderr,"%s: WARNING Climatology bounds invoked on dataset with missing time bounds variable \"%s\". Turning-off climatology bounds mode.\n",nco_prg_nm_get(),cb->tm_bnd_nm); flg_cb=False; rcd=NC_NOERR; goto skp_cb; } /* !tm_bnd_id_in */ } /* !tm_bnd_in */ if(cb->clm_bnd_in){ rcd=nco_inq_varid_flg(in_id,cb->clm_bnd_nm,&cb->clm_bnd_id_in); if(cb->clm_bnd_id_in == NC_MIN_INT){ if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(stderr,"%s: WARNING Climatology bounds invoked on dataset with missing climatology bounds variable \"%s\". 
Turning-off climatology bounds mode.\n",nco_prg_nm_get(),cb->tm_bnd_nm); flg_cb=False; rcd=NC_NOERR; goto skp_cb; } /* !tm_bnd_id_in */ } /* !clm_bnd_in */ rcd=nco_inq_varid_flg(out_id,cb->tm_crd_nm,&cb->tm_crd_id_out); if(rcd != NC_NOERR){ if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(stderr,"%s: ERROR Climatology bounds did not find time coordinate in output file\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* !tm_crd_id_out */ /* Populate cb structure with information from clm_nfo_sng */ if(clm_nfo_sng) rcd=nco_clm_nfo_get(clm_nfo_sng,cb); if(cb->tpd == 0){ /* Monthly mean input */ if(cb->mth_srt == 1 && cb->mth_end == 12){ /* Climatological monthly or seasonal means will be reduced to climatological annual means */ /* DJF seasonal climos in SCD mode present as Y1,Y2,12,2 where DJF seasonal climos in SDD mode present as Y1,Y2,1,12 which is the same as ANN Thus determining clm2clm not clm2bnd for SDD DJF presents special difficulty Hardcode this case as DJF/clm2clm unless fl_nbr = 4 or 12 in which case ANN/clm2bnd */ if(fl_nbr == 3 && cb->clm_bnd_in) cb->clm2clm=True; else if((fl_nbr == 4 || fl_nbr == 12) && cb->clm_bnd_in) cb->clm2bnd=True; else{ (void)fprintf(stderr,"%s: INFO Combination of months and clm_nfo lead to ambiguous determination of clm2bnd or clm2clm. Turning-off climatology bounds mode.\n",nco_prg_nm_get()); flg_cb=False; goto skp_cb; } }else{ /* Climatological monthly or seasonal means will be processed to non-annual means */ if(cb->tm_bnd_in) cb->bnd2clm=True; if(cb->clm_bnd_in) cb->clm2clm=True; } /* !cb->mth */ // }else if(cb->tpd == 1){ /* Daily mean input is currently not handled */ //assert(cb->tpd != 1); }else if(cb->tpd >= 1){ /* Diurnally resolved input */ if(cb->tm_bnd_in) cb->bnd2clm=True; if(cb->clm_bnd_in) cb->clm2clm=True; } /* !cb->tpd */ cb->tm_val=(double *)nco_malloc(max_int(1,cb->tpd)*sizeof(double)); /* [frc] Time coordinate variable values */ cb->bnd_val=(double *)nco_malloc(max_int(1,cb->tpd)*2*sizeof(double)); /* [frc] Time (or climatology) bounds variable values */ if(cb->bnd2clm){ rcd=nco_inq_varid_flg(out_id,cb->tm_bnd_nm,&cb->tm_bnd_id_out); if(rcd != NC_NOERR){ if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(stderr,"%s: ERROR Time-bounds variable %s was not copied to output file\n",nco_prg_nm_get(),cb->tm_bnd_nm); nco_exit(EXIT_FAILURE); } /* !tm_bnd_id_out */ /* Write climatology bounds to time-bounds then rename */ cb->clm_bnd_id_out=cb->tm_bnd_id_out; } /* !bnd2clm */ if(cb->clm2clm || cb->clm2bnd){ rcd=nco_inq_varid_flg(out_id,cb->clm_bnd_nm,&cb->clm_bnd_id_out); if(rcd != NC_NOERR){ if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(stderr,"%s: ERROR Climatology bounds variable %s was not copied to output file\n",nco_prg_nm_get(),cb->clm_bnd_nm); nco_exit(EXIT_FAILURE); } /* !clm_bnd_id_out */ /* clm2bnd writes time-bounds to climatology bounds then renames, and clm2clm uses tm_bnd_id_out */ cb->tm_bnd_id_out=cb->clm_bnd_id_out; } /* !clm2clm */ /* Begin attribute manipulation */ aed_sct aed_mtd; char *att_nm; char *att_val; if(cb->bnd2clm || cb->clm2bnd){ /* Add new bounds attribute */ att_nm = (cb->bnd2clm) ? strdup(clm_sng) : strdup(bnd_sng); att_val= (cb->bnd2clm) ? 
strdup(cb->clm_bnd_nm) : strdup(cb->tm_bnd_nm); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=cb->tm_crd_nm; aed_mtd.id=cb->tm_crd_id_out; aed_mtd.sz=strlen(att_val); aed_mtd.type=NC_CHAR; aed_mtd.val.cp=att_val; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,cb->tm_crd_id_out,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); if(att_val) att_val=(char *)nco_free(att_val); /* Delete old bounds attribute */ att_nm= (cb->bnd2clm) ? strdup(bnd_sng) : strdup(clm_sng); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=cb->tm_crd_nm; aed_mtd.id=cb->tm_crd_id_out; aed_mtd.mode=aed_delete; (void)nco_aed_prc(out_id,cb->tm_crd_id_out,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); } /* !bnd2clm !clm2bnd */ /* Obtain units string */ rcd=nco_inq_att_flg(out_id,cb->tm_crd_id_out,unt_sng,&att_typ,&att_sz); if(rcd == NC_NOERR && att_typ == NC_CHAR){ cb->unt_val=(char *)nco_malloc((att_sz+1L)*nco_typ_lng(att_typ)); rcd+=nco_get_att(out_id,cb->tm_crd_id_out,unt_sng,cb->unt_val,att_typ); /* NUL-terminate attribute before using strstr() */ cb->unt_val[att_sz]='\0'; } /* !rcd */ /* Copy units attribute from coordinate to new bounds if necessary */ if(cb->tm_bnd_in) rcd=nco_inq_att_flg(out_id,cb->tm_bnd_id_out,unt_sng,&att_typ,&att_sz); if(cb->clm_bnd_in) rcd=nco_inq_att_flg(out_id,cb->clm_bnd_id_out,unt_sng,&att_typ,&att_sz); if(rcd != NC_NOERR){ if(cb->bnd2clm || cb->clm2bnd){ /* Add units attribute */ att_nm=strdup(unt_sng); att_val=strdup(cb->unt_val); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=(cb->bnd2clm) ? cb->tm_bnd_nm : cb->clm_bnd_nm; aed_mtd.id=(cb->bnd2clm) ? cb->tm_bnd_id_out : cb->clm_bnd_id_out; aed_mtd.sz=strlen(att_val); aed_mtd.type=NC_CHAR; aed_mtd.val.cp=att_val; aed_mtd.mode=aed_create; if(cb->bnd2clm) (void)nco_aed_prc(out_id,cb->tm_bnd_id_out,aed_mtd); else (void)nco_aed_prc(out_id,cb->clm_bnd_id_out,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); if(att_val) att_val=(char *)nco_free(att_val); } /* !bnd2clm !clm2bnd */ rcd=NC_NOERR; } /* !rcd */ /* Obtain calendar string */ rcd=nco_inq_att_flg(out_id,cb->tm_crd_id_out,cln_sng,&att_typ,&att_sz); if(rcd == NC_NOERR && att_typ == NC_CHAR){ cb->cln_val=(char *)nco_malloc((att_sz+1L)*nco_typ_lng(att_typ)); rcd+=nco_get_att(out_id,cb->tm_crd_id_out,cln_sng,cb->cln_val,att_typ); /* NUL-terminate attribute before using strstr() */ cb->cln_val[att_sz]='\0'; } /* !rcd */ /* Copy calendar attribute from coordinate to new bounds if necessary */ if(cb->tm_bnd_in) rcd=nco_inq_att_flg(out_id,cb->tm_bnd_id_out,cln_sng,&att_typ,&att_sz); if(cb->clm_bnd_in) rcd=nco_inq_att_flg(out_id,cb->clm_bnd_id_out,cln_sng,&att_typ,&att_sz); if(rcd != NC_NOERR){ if(cb->bnd2clm || cb->clm2bnd){ /* Add calendar attribute */ att_nm=strdup(cln_sng); att_val=strdup(cb->cln_val); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=(cb->bnd2clm) ? cb->tm_bnd_nm : cb->clm_bnd_nm; aed_mtd.id=(cb->bnd2clm) ? 
cb->tm_bnd_id_out : cb->clm_bnd_id_out;
      aed_mtd.sz=strlen(att_val);
      aed_mtd.type=NC_CHAR;
      aed_mtd.val.cp=att_val;
      aed_mtd.mode=aed_create;
      if(cb->bnd2clm) (void)nco_aed_prc(out_id,cb->tm_bnd_id_out,aed_mtd); else (void)nco_aed_prc(out_id,cb->clm_bnd_id_out,aed_mtd);
      if(att_nm) att_nm=(char *)nco_free(att_nm);
      if(att_val) att_val=(char *)nco_free(att_val);
    } /* !bnd2clm !clm2bnd */
    rcd=NC_NOERR;
  } /* !rcd */

  /* Combine calendar and units strings with clm_nfo_sng to create climatological time and bounds arrays */
  if(clm_nfo_sng) rcd=nco_clm_nfo_to_tm_bnds(cb->yr_srt,cb->yr_end,cb->mth_srt,cb->mth_end,cb->tpd,cb->unt_val,cb->cln_val,cb->bnd_val,cb->tm_val);
  //assert(rcd != NCO_NOERR);
} /* !flg_cb */

/* goto skp_cb */
skp_cb:

/* free() any abandoned cb structure now or it will be inadvertently used in nco_cnv_cf_cll_mth_add() */
if(!flg_cb)
  if(cb) cb=(clm_bnd_sct *)nco_free(cb);

/* Add cell_methods attributes (before exiting define mode) */
if(nco_prg_id == ncra || nco_prg_id == ncrcat){
  dmn_sct **dmn=NULL_CEWI;
  int nbr_dmn=nbr_rec;
  dmn=(dmn_sct **)nco_malloc(nbr_dmn*sizeof(dmn_sct *));
  /* Make dimension array from limit records array */
  (void)nco_dmn_lmt(lmt_rec,nbr_dmn,&dmn);
  /* Add cell_methods attributes (pass as dimension argument a records-only array) */
  if(flg_cll_mth) rcd+=nco_cnv_cf_cll_mth_add(out_id,var_prc_out,nbr_var_prc,dmn,nbr_dmn,nco_op_typ,gpe,cb,trv_tbl);
  if(nbr_dmn > 0) dmn=nco_dmn_lst_free(dmn,nbr_dmn);
} /* !ncra */

/* Take output file out of define mode */
if(hdr_pad == 0UL){
  (void)nco_enddef(out_id);
}else{
  (void)nco__enddef(out_id,hdr_pad);
  if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO Padding header with %lu extra bytes\n",nco_prg_nm_get(),(unsigned long)hdr_pad);
} /* hdr_pad */

/* Zero start and stride vectors for all output variables */
(void)nco_var_srd_srt_set(var_out,xtr_nbr);

/* Copy variable data for non-processed variables */
(void)nco_cpy_fix_var_trv(in_id,out_id,gpe,trv_tbl);

/* Write ensemble fixed variables (False parameter) */
if(nco_prg_id_get() == ncge) (void)nco_nsm_dfn_wrt(in_id,out_id,&cnk,dfl_lvl,gpe,False,trv_tbl);

/* Allocate and, if necessary, initialize accumulation space for processed variables */
for(idx=0;idx<nbr_var_prc;idx++){
  /* Record operators only need space for one record, not entire variable */
  if(nco_prg_id == ncra || nco_prg_id == ncrcat) var_prc[idx]->sz=var_prc[idx]->sz_rec=var_prc_out[idx]->sz=var_prc_out[idx]->sz_rec;
  if(nco_prg_id == ncra || nco_prg_id == ncfe || nco_prg_id == ncge){
    /* 20200701: Iff has_mss_val then need wgt_sum to track running sum of time-varying (per-record or per-file) weights applied at each grid point in variables that may have spatio-temporally varying missing values */
    if((wgt_arr || wgt_nm) && var_prc[idx]->has_mss_val) var_prc_out[idx]->wgt_sum=var_prc[idx]->wgt_sum=(double *)nco_calloc(var_prc_out[idx]->sz,sizeof(double)); else var_prc_out[idx]->wgt_sum=NULL;
    var_prc_out[idx]->tally=var_prc[idx]->tally=(long *)nco_calloc(var_prc_out[idx]->sz,sizeof(long));
    var_prc_out[idx]->val.vp=(void *)nco_calloc(var_prc_out[idx]->sz,nco_typ_lng(var_prc_out[idx]->type));
  } /* end if */
} /* end loop over idx */

if(wgt_nm && (nco_op_typ == nco_op_avg || nco_op_typ == nco_op_mebs)){
  /* Find weight variable that matches current variable */
  wgt=nco_var_get_wgt_trv(in_id,lmt_nbr,lmt_arg,MSA_USR_RDR,FORTRAN_IDX_CNV,wgt_nm,var_prc[0],trv_tbl);
  /* ncra can handle scalar, 1-D, and degenerate 1-D weights, nces requires scalar weights */
  if(nco_prg_id == ncra) assert(wgt->nbr_dim < 2);
  if(nco_prg_id ==
ncfe || nco_prg_id == ncge){ if(wgt->nbr_dim == 1) assert(wgt->sz_rec == 1L); else assert(wgt->nbr_dim == 0); } /* !ncfe */ /* Change wgt from a normal full (scalar or 1-D) variable to a scalar variable This permits us to weight with scalar arithmetic later, rather than broadcasting the weight This differs from ncwa wgt treatment (where wgt can be N-D and is always broadcast to match variable) 20150708: Unsure why nco_var_dpl() calls below generate valgrind invalid read errors */ /* 20200701: Verified that sz_rec == 1 when wgt is scalar */ // if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(stderr,"%s: DEBUG wgt_nm=%s, wgt->sz_rec=%li\n",nco_prg_nm_get(),wgt_nm,wgt->sz_rec); wgt->val.vp=(void *)nco_realloc(wgt->val.vp,wgt->sz_rec*nco_typ_lng(wgt->type)); wgt->tally=(long *)nco_realloc(wgt->tally,wgt->sz_rec*sizeof(long)); (void)nco_var_zero(wgt->type,wgt->sz_rec,wgt->val); (void)nco_zero_long(wgt->sz_rec,wgt->tally); wgt_out=nco_var_dpl(wgt); wgt_avg=nco_var_dpl(wgt_out); } /* !wgt_nm */ /* Close first input netCDF file */ nco_close(in_id); /* Timestamp end of metadata setup and disk layout */ rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); ddra_info.tmr_flg=nco_tmr_rgl; /* Loop over input files */ for(fl_idx=0;fl_idx<fl_nbr;fl_idx++){ /* Parse filename */ if(fl_idx != 0) fl_in=nco_fl_nm_prs(fl_in,fl_idx,(int *)NULL,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,gettext("%s: INFO Input file %d is %s"),nco_prg_nm_get(),fl_idx,fl_in); /* Make sure file is on local system and is readable or die trying */ if(fl_idx != 0) fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); if(nco_dbg_lvl >= nco_dbg_fl && FL_RTR_RMT_LCN) (void)fprintf(stderr,gettext(", local file is %s"),fl_in); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"\n"); /* Open file once per thread to improve caching */ for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,in_id_arr+thr_idx); in_id=in_id_arr[0]; /* Do ncge ensemble refresh */ if(nco_prg_id == ncge){ /* Refresh ensembles */ if(fl_idx > 0) (void)nco_nsm_ncr(in_id,trv_tbl); /* Check if ensembles are valid */ (void)nco_chk_nsm(in_id,fl_idx,trv_tbl); }else{ /* ! ncge */ /* Variables may have different ID, missing_value, type, in each file */ for(idx=0;idx<nbr_var_prc;idx++){ /* Obtain variable GTT object using full variable name */ trv_sct *trv=trv_tbl_var_nm_fll(var_prc[idx]->nm_fll,trv_tbl); /* Obtain group ID */ (void)nco_inq_grp_full_ncid(in_id,trv->grp_nm_fll,&grp_id); (void)nco_var_mtd_refresh(grp_id,var_prc[idx]); } /* end loop over variables */ } /* ! ncge */ if(wgt_nm && (nco_op_typ == nco_op_avg || nco_op_typ == nco_op_mebs)){ /* Get weight ID in this file */ trv_sct *trv=trv_tbl_var_nm_fll(wgt_out->nm_fll,trv_tbl); (void)nco_inq_grp_full_ncid(in_id,trv->grp_nm_fll,&grp_id); (void)nco_var_mtd_refresh(grp_id,wgt_out); } /* !wgt_nm */ if(FLG_ILV && (nco_prg_id == ncfe || nco_prg_id == ncge)){ (void)fprintf(fp_stderr,"%s: ERROR Interleaving requested for operator %s\nHINT: Interleaving is only valid for ncra and ncrcat\n",nco_prg_nm_get(),nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* ! 
FLG_ILV */ if(nco_prg_id == ncra || nco_prg_id == ncrcat){ /* ncfe and ncge jump to else branch */ /* Loop over all record dimensions in file */ for(idx_rec=0;idx_rec<nbr_rec;idx_rec++){ char *fl_udu_sng=NULL_CEWI; char ***rgd_arr_bnds_lst=NULL_CEWI; char ***rgd_arr_climo_lst=NULL_CEWI; int rgd_arr_bnds_nbr=0; int rgd_arr_climo_nbr=0; int ilv_per_ssc; /* [nbr] Number of interleaves per sub-cycle */ /* Obtain group ID */ (void)nco_inq_grp_full_ncid(in_id,lmt_rec[idx_rec]->grp_nm_fll,&grp_id); /* Fill record array */ //if(FLG_ILV){ //lmt_rec[idx_rec]->flg_ilv=True; //lmt_rec[idx_rec]->ilv=ilv_srd; //} /* !FLG_ILV */ (void)nco_lmt_evl(grp_id,lmt_rec[idx_rec],rec_usd_cml[idx_rec],FORTRAN_IDX_CNV); /* ILV and MRO may be set in nco_lmt_evl(), and MRO may also be set on command-line */ FLG_ILV=lmt_rec[idx_rec]->flg_ilv; if(FLG_ILV) FLG_MRO=lmt_rec[idx_rec]->flg_mro; if(FLG_MRO) lmt_rec[idx_rec]->flg_mro=True; if(FLG_MSO) lmt_rec[idx_rec]->flg_mso=True; ilv_per_ssc=lmt_rec[idx_rec]->ssc/lmt_rec[idx_rec]->ilv; /* Sub-cycles never cross file boundaries in interleave-compliant files */ if(lmt_rec[idx_rec]->is_rec_dmn){ int crd_id; if(nco_inq_varid_flg(grp_id,lmt_rec[idx_rec]->nm,&crd_id) == NC_NOERR){ fl_udu_sng=nco_lmt_get_udu_att(grp_id,crd_id,"units"); rgd_arr_bnds_lst=nco_lst_cf_att(grp_id,"bounds",&rgd_arr_bnds_nbr); rgd_arr_climo_lst=nco_lst_cf_att(grp_id,"climatology",&rgd_arr_climo_nbr); } /* !crd_id */ } /* !is_rec_dmn */ if(REC_APN){ int rec_var_out_id; /* Append records directly to output file */ int rec_dmn_out_id=NCO_REC_DMN_UNDEFINED; /* Get group ID using record group full name */ (void)nco_inq_grp_full_ncid(out_id,lmt_rec[idx_rec]->grp_nm_fll,&grp_out_id); /* Get dimension ID (rec_dmn_out_id) of current record from its name */ (void)nco_inq_dimid(grp_out_id,lmt_rec[idx_rec]->nm,&rec_dmn_out_id); /* Get current size of record dimension */ (void)nco_inq_dimlen(grp_out_id,rec_dmn_out_id,&idx_rec_out[idx_rec]); /* 20181212: Re-base relative to calendar units in output file, not first input file */ if(nco_inq_varid_flg(grp_out_id,lmt_rec[idx_rec]->nm,&rec_var_out_id) == NC_NOERR){ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(fp_stderr,"%s: DEBUG REC_APN mode changing re-base units string of variable \"%s\" from input units \"%s\" ",nco_prg_nm_get(),lmt_rec[idx_rec]->nm,lmt_rec[idx_rec]->rbs_sng); lmt_rec[idx_rec]->rbs_sng=nco_lmt_get_udu_att(grp_out_id,rec_var_out_id,"units"); if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(fp_stderr,"to output units \"%s\"\n",lmt_rec[idx_rec]->rbs_sng); } /* endif record coordinate exists in output file */ } /* !REC_APN */ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(fp_stdout,"%s: DEBUG record %d id %d name %s rec_dmn_sz %ld units=\"%s\"\n",nco_prg_nm_get(),idx_rec,lmt_rec[idx_rec]->id,lmt_rec[idx_rec]->nm_fll,lmt_rec[idx_rec]->rec_dmn_sz,fl_udu_sng); /* NB: nco_cnv_arm_base_time_get() with same nc_id contains OpenMP critical region */ if(CNV_ARM) base_time_crr=nco_cnv_arm_base_time_get(in_id); /* Perform various error-checks on input file */ if(False) (void)nco_fl_cmp_err_chk(); /* This file could be superfluous even though desired data may be found in upcoming files */ if(nco_dbg_lvl >= nco_dbg_std) if((lmt_rec[idx_rec]->srt > lmt_rec[idx_rec]->end) && (lmt_rec[idx_rec]->rec_rmn_prv_ssc == 0L)) (void)fprintf(fp_stdout,"%s: INFO %s (input file %d) is superfluous\n",nco_prg_nm_get(),fl_in,fl_idx); rec_dmn_sz=lmt_rec[idx_rec]->rec_dmn_sz; rec_rmn_prv_ssc=lmt_rec[idx_rec]->rec_rmn_prv_ssc; /* Local copy may be decremented later */ 
idx_rec_crr_in= (rec_rmn_prv_ssc > 0L) ? 0L : lmt_rec[idx_rec]->srt; if(FLG_ILV && nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(fp_stdout,"%s: DEBUG After lmt_evl() for fl_idx=%d ILV=%s MRO=%s, MSO=%s, srt=%ld, end=%ld, srd=%ld, ssc=%ld, ilv=%ld, rec_idx=%ld, rec_rmn_prv_ssc=%ld, rec_rmn_prv_ilv=%ld, idx_rec_out=%ld\n",nco_prg_nm_get(),fl_idx,FLG_ILV ? "YES" : "NO",FLG_MRO ? "YES" : "NO",FLG_MSO ? "YES" : "NO",lmt_rec[idx_rec]->srt,lmt_rec[idx_rec]->end,lmt_rec[idx_rec]->srd,lmt_rec[idx_rec]->ssc,lmt_rec[idx_rec]->ilv,idx_rec_crr_in,rec_rmn_prv_ssc,rec_rmn_prv_ilv,idx_rec_out[idx_rec]); /* Sub-cycles not allowed to cross file boundaries in interleave mode */ if(FLG_ILV && lmt_rec[0]->ilv > 1 && rec_rmn_prv_ilv > 0L){ (void)fprintf(fp_stdout,"%s: ERROR interleaved sub-cycle crosses file boundary between %s (input file %d) and previous file. Diagnostic counters: rec_rmn_prv_ssc = %ld, rec_rmn_prv_ilv = %ld\n",nco_prg_nm_get(),fl_in,fl_idx,rec_rmn_prv_ssc,rec_rmn_prv_ilv); nco_exit(EXIT_FAILURE); } /* !rec_rmn_prv_ilv */ /* Master while loop over records in current file */ while(idx_rec_crr_in >= 0L && idx_rec_crr_in < rec_dmn_sz){ /* Following logic/assumptions built-in to this loop: idx_rec_crr_in points to valid record before loop is entered Loop is never entered if this file has no valid (i.e., desired) records Much conditional logic needed to prescribe group position and next record Index juggling: idx_rec_crr_in: Index of current record in current input file (increments by 1 for ssc then srd-ssc ...) idx_rec_out: Index of record in output file lmt_rec->rec_rmn_prv_ssc: Structure member, at start of this while loop, contains records remaining-to-be-read to complete subcycle group from previous file. Structure member remains constant until next file is read. rec_in_cml: Cumulative number of records, read or not, in all files opened so far. Similar to lmt_rec->rec_in_cml but augmented at end of record loop, rather than prior to record loop. rec_rmn_prv_ssc: Local copy initialized from lmt_rec structure member begins with above, and then is set to and tracks number of records remaining remaining in current group. This means it is decremented from ssc_nbr->0 for each group contained in current file. rec_rmn_prv_ilv: Tracks number of records remaining remaining in current interleaved index. This means it is decremented from ssc/ilv->0 a total of ssc_nbr/ilv_nbr times for each ssc in current file. rec_usd_cml: Cumulative number of input records used (catenated by ncrcat or operated on by ncra) Flag juggling: Groups are the vernacular for a collection of records to output (ncrcat) or reduce (ncra) When introduced in NCO 4.2.1 in 2012, "groups" and sub-cycles (née drn) were synonymous NCO 4.9.4 in 2020 introduced interleaving, which alters the meaning of groups A "group" is now a set of records that ncra reduces/normalizes/outputs as a single record Thus groups and sub-cycles are still synonomous except in ncra in interleave mode In interleave mode, ncra reduces/normalizes/outputs ilv records per ssc (i.e., one output per ssc/ilv records) A non-interleaved group has ssc records, while an interleaved group has ssc/ilv records The relevant group flags REC_FRS_GRP and REC_LST_GRP are now context-sensitive: ncra re-initializes memory at the beginning, and reduces/normalizes/outputs data at the end, respectively, of each group. 
In normal (non-interleave) mode, groups are sub-cycles of ssc records In interleave mode, the ilv groups per sub-cycle each contain ssc/ilv records In both normal and interleaved mode, REC_FRS_GRP/REC_LST_GRP are true for first/last records in a group, respectively, and false otherwise 20200731 To disambiguate the meanings of REC_FRS_GRP and REC_LST_GRP we introduce: REC_FRS_SSC and REC_LST_SSC for the first and last records in a sub-cycle REC_FRS_CRR_GRP_OUT and REC_LST_CRR_GRP_OUT for the first and last records in the current (ncra output group (if any) REC_LST_DSR is "sloppy"---it is only set in last input file. If last file(s) is/are superfluous, REC_LST_DSR is never set and final normalization is done outside file and record loops (along with nces normalization). FLG_BFR_NRM indicates these situations and allow us to be "sloppy" in setting REC_LST_DSR. 20200719: REC_LST_DSR is not used for FLG_ILV, since complete sub-cycles are assumed to be within a single file, and normalization always occurs at a group ending. */ if(FLG_ILV){ /* Even intra-ssc strides commence group beginnings */ if(rec_rmn_prv_ilv == 0L) REC_FRS_GRP=True; else REC_FRS_GRP=False; //if(FLG_MSO && rec_usd_cml[idx_rec]) REC_FRS_GRP=False; }else{ /* Even inter-ssc strides commence group beginnings */ if(rec_rmn_prv_ssc == 0L) REC_FRS_GRP=True; else REC_FRS_GRP=False; } /* !FLG_ILV */ /* Reset interleaved group counter to ssc/ilv records */ if(FLG_ILV && rec_rmn_prv_ilv == 0L) rec_rmn_prv_ilv=ilv_per_ssc; /* Reset sub-cycle counter to ssc records */ if(rec_rmn_prv_ssc == 0L) rec_rmn_prv_ssc=lmt_rec[idx_rec]->ssc; /* Final record triggers normalization regardless of its location within group */ if(fl_idx == fl_nbr-1 && idx_rec_crr_in == min_int(lmt_rec[idx_rec]->end+lmt_rec[idx_rec]->ssc-1L,rec_dmn_sz-1L)) REC_LST_DSR[idx_rec]=True; /* ncra reduction/normalization/writing code must know last record in current group (LRCG) for both MRO and non-MRO */ if(FLG_ILV){ if(rec_rmn_prv_ilv == 1L) REC_LST_GRP=True; else REC_LST_GRP=False; //if(FLG_MSO && !REC_LST_DSR[idx_rec]) REC_LST_GRP=False; }else{ if(rec_rmn_prv_ssc == 1L) REC_LST_GRP=True; else REC_LST_GRP=False; } /* !FLG_ILV */ /* Last stride in file has distinct index-augmenting behavior */ if(idx_rec_crr_in >= lmt_rec[idx_rec]->end) REC_SRD_LST=True; else REC_SRD_LST=False; if(FLG_ILV && nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(fp_stdout,"%s: DEBUG rec_idx=%ld, rec_rmn_prv_ssc=%ld, rec_rmn_prv_ilv=%ld, REC_FRS_GRP=%s, REC_LST_GRP=%s, REC_SRD_LST=%s, REC_LST_DSR=%s, idx_rec_out=%ld\n",nco_prg_nm_get(),idx_rec_crr_in,rec_rmn_prv_ssc,rec_rmn_prv_ilv,REC_FRS_GRP ? "YES" : "NO",REC_LST_GRP ? "YES" : "NO",REC_SRD_LST ? "YES" : "NO",REC_LST_DSR[idx_rec] ? 
"YES" : "NO",idx_rec_out[idx_rec]); /* Retrieve this record of weight variable, if any */ if(wgt_nm && (nco_op_typ == nco_op_avg || nco_op_typ == nco_op_mebs)) (void)nco_msa_var_get_rec_trv(in_id,wgt_out,lmt_rec[idx_rec]->nm_fll,idx_rec_crr_in,trv_tbl); /* Process all variables in current record */ if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(fp_stdout,"%s: INFO Record %ld of %s contributes to output record %ld\n",nco_prg_nm_get(),idx_rec_crr_in,fl_in,idx_rec_out[idx_rec]); #ifdef _OPENMP #pragma omp parallel for private(idx,in_id) shared(CNV_ARM,FLG_BFR_NRM,FLG_ILV,FLG_MRO,FLG_MSO,NORMALIZE_BY_WEIGHT,NRM_BY_DNM,REC_FRS_GRP,REC_LST_DSR,base_time_crr,base_time_srt,fl_idx,fl_in,fl_nbr,fl_out,fl_udu_sng,flg_skp1,flg_skp2,gpe,grp_id,grp_out_fll,grp_out_id,idx_rec,idx_rec_crr_in,idx_rec_out,in_id_arr,lmt_rec,md5,nbr_dmn_fl,nbr_rec,nbr_var_prc,nco_dbg_lvl,nco_op_typ,nco_prg_id,out_id,rcd,rec_usd_cml,rgd_arr_bnds_lst,rgd_arr_bnds_nbr,rgd_arr_climo_lst,rgd_arr_climo_nbr,thr_nbr,trv_tbl,var_out_id,var_prc,var_prc_out,var_prc_typ_pre_prm,var_trv,wgt_arr,wgt_avg,wgt_nbr,wgt_nm,wgt_out,wgt_scv) #endif /* !_OPENMP */ for(idx=0;idx<nbr_var_prc;idx++){ /* Skip variable if does not relate to current record */ flg_skp1=nco_skp_var(var_prc[idx],lmt_rec[idx_rec]->nm_fll,trv_tbl); if(flg_skp1) continue; if(thr_nbr > 1) in_id=in_id_arr[omp_get_thread_num()]; else in_id=in_id_arr[0]; if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm_fll); if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr); /* Obtain variable GTT object using full variable name */ var_trv=trv_tbl_var_nm_fll(var_prc[idx]->nm_fll,trv_tbl); /* Obtain group ID */ (void)nco_inq_grp_full_ncid(in_id,var_trv->grp_nm_fll,&grp_id); /* Edit group name for output */ grp_out_fll=NULL; if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=var_trv->grp_nm_fll; /* Obtain output group ID */ (void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id); /* Get variable ID */ (void)nco_inq_varid(grp_out_id,var_trv->nm,&var_out_id); /* Memory management after current extracted group */ if(gpe && grp_out_fll) grp_out_fll=(char *)nco_free(grp_out_fll); /* Store output variable ID */ var_prc_out[idx]->id=var_out_id; /* Retrieve this record of this variable. 
NB: Updates hyperslab start indices to idx_rec_crr_in */ (void)nco_msa_var_get_rec_trv(in_id,var_prc[idx],lmt_rec[idx_rec]->nm_fll,idx_rec_crr_in,trv_tbl); if(nco_prg_id == ncra) FLG_BFR_NRM=True; /* [flg] Current output buffers need normalization */ /* Re-base record coordinate and bounds if necessary (e.g., time, time_bnds) */ /* if(var_prc[idx]->is_crd_var|| nco_is_spc_in_cf_att(grp_id,"bounds",var_prc[idx]->id) || nco_is_spc_in_cf_att(grp_id,"climatology",var_prc[idx]->id)) */ /* Re-base coordinate variable to units of coordinate in the first input file If record hyperslab indice(s) are double or strings then coordinate variable and limits are (re)-read earlier by nco_lmt_evl() and if units between files are incompatible then ncra will die in that call and not in nco_cln_clc_dbl_var_dff() below */ if(var_prc[idx]->is_crd_var){ nco_bool do_rebase=False; if(!strcmp(var_prc[idx]->nm,lmt_rec[idx_rec]->nm) || nco_rgd_arr_lst_chk(rgd_arr_bnds_lst,rgd_arr_bnds_nbr,lmt_rec[idx_rec]->nm,var_prc[idx]->nm) || nco_rgd_arr_lst_chk(rgd_arr_climo_lst,rgd_arr_climo_nbr,lmt_rec[idx_rec]->nm,var_prc[idx]->nm)) do_rebase=True; if(do_rebase && fl_udu_sng && lmt_rec[idx_rec]->rbs_sng){ if(nco_cln_clc_dbl_var_dff(fl_udu_sng,lmt_rec[idx_rec]->rbs_sng,lmt_rec[idx_rec]->cln_typ,(double*)NULL,var_prc[idx]) != NCO_NOERR){ (void)fprintf(fp_stderr,"%s: ERROR in nco_cln_clc_dbl_var_dff() when attempting to re-base variable \"%s\" from units \"%s\" to \"%s\"\n",nco_prg_nm_get(),var_prc[idx]->nm,fl_udu_sng,lmt_rec[idx_rec]->rbs_sng); nco_exit(EXIT_FAILURE); } /* !nco_cln_clc_dbl_var_dff() */ //nco_free(fl_udu_sng); } /* end !do_rebase */ } /* !crd_var */ if(nco_prg_id == ncra){ nco_bool flg_rth_ntl; if(!rec_usd_cml[idx_rec] || (FLG_MRO && REC_FRS_GRP)) flg_rth_ntl=True; else flg_rth_ntl=False; /* Initialize tally and accumulation arrays when appropriate */ if(flg_rth_ntl){ (void)nco_zero_long(var_prc_out[idx]->sz,var_prc_out[idx]->tally); (void)nco_var_zero(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->val); if(var_prc_out[idx]->wgt_sum) (void)memset(var_prc_out[idx]->wgt_sum,0,var_prc_out[idx]->sz*sizeof(double)); } /* end if flg_rth_ntl */ if(var_prc[idx]->type == NC_CHAR || var_prc[idx]->type == NC_STRING){ /* Do not promote un-averagable types (NC_CHAR, NC_STRING) Stuff their first record into output buffer regardless of nco_op_typ, and ignore later records (rec_usd_cml > 1) Temporarily fixes TODO nco941 */ if(flg_rth_ntl) nco_opr_drv((long)0L,nco_op_min,var_prc[idx],var_prc_out[idx]); }else{ /* Convert char, short, long, int, and float types to doubles before arithmetic Output variable type is "sticky" so only convert on first record in group */ if(flg_rth_ntl) var_prc_out[idx]=nco_typ_cnv_rth(var_prc_out[idx],nco_op_typ); var_prc_typ_pre_prm=var_prc[idx]->type; /* [enm] Type of variable before promotion */ var_prc[idx]=nco_var_cnf_typ(var_prc_out[idx]->type,var_prc[idx]); /* Weight current record */ if((wgt_arr || wgt_nm) && (nco_op_typ == nco_op_avg || nco_op_typ == nco_op_mebs) && !var_prc[idx]->is_crd_var){ if(wgt_arr){ wgt_scv.type=NC_DOUBLE; if(flg_wgt_by_rec_not_by_fl) wgt_scv.val.d=wgt_arr[idx_rec_crr_in % wgt_nbr]; else wgt_scv.val.d=wgt_arr[fl_idx]; } /* !wgt_arr */ if(wgt_nm){ wgt_scv.type=wgt_out->type; wgt_scv.val.d=wgt_out->val.dp[0]; /* Per-record weight */ } /* !wgt_nm */ if(var_prc[idx]->wgt_sum) var_prc[idx]->wgt_crr=wgt_scv.val.d; nco_scv_cnf_typ(var_prc[idx]->type,&wgt_scv); if(nco_dbg_lvl >= nco_dbg_grp && (wgt_nm || wgt_arr)) (void)fprintf(fp_stdout,"wgt_nm = %s, var_nm = 
%s, idx = %li, typ = %s, wgt_val = %g, wgt_crr = %g, var_val = %g, ttl = %g, tally = %ld\n",wgt_nm ? wgt_out->nm_fll : "NULL",var_prc[idx]->nm,idx_rec_crr_in,nco_typ_sng(wgt_scv.type),wgt_scv.val.d,var_prc[idx]->wgt_crr,var_prc[idx]->val.dp[0],var_prc_out[idx]->val.dp[0],var_prc_out[idx]->tally[0]); (void)nco_var_scv_mlt(var_prc[idx]->type,var_prc[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->val,&wgt_scv); if(wgt_nm && var_prc[idx]->has_mss_val){ (void)fprintf(fp_stdout,"%s: ERROR %s -w wgt_nm does not yet work on variables that contain missing values and variable %s contains a missing value attribute. This is TODO nco1124. %s will now quit rather than compute possibly erroneous values. HINT: Restrict the %s -w wgt_nm operation to variables with no missing value attributes.\n",nco_prg_nm_get(),nco_prg_nm_get(),nco_prg_nm_get(),var_prc[idx]->nm,nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* !wgt_nm */ /* Increment running total of wgt_out after its application to last processed variable for this record */ if(wgt_nm && (idx == nbr_var_prc-1)){ if(flg_rth_ntl) nco_opr_drv((long)0L,nco_op_typ,wgt_out,wgt_avg); else nco_opr_drv((long)1L,nco_op_typ,wgt_out,wgt_avg); } /* !wgt_nm */ } /* !wgt */ /* Perform arithmetic operations: avg, min, max, ttl, ... */ if(flg_rth_ntl) nco_opr_drv((long)0L,nco_op_typ,var_prc[idx],var_prc_out[idx]); else nco_opr_drv((long)1L,nco_op_typ,var_prc[idx],var_prc_out[idx]); } /* end else */ } /* end if ncra */ /* All processed variables contain record dimension and both ncrcat and ncra write records singly */ var_prc_out[idx]->srt[rec_dmn_idx]=var_prc_out[idx]->end[rec_dmn_idx]=idx_rec_out[idx_rec]; var_prc_out[idx]->cnt[rec_dmn_idx]=1L; /* Append current record to output file */ if(nco_prg_id == ncrcat){ /* Replace this time_offset value with time_offset from initial file base_time */ if(CNV_ARM && !strcmp(var_prc[idx]->nm,"time_offset")) var_prc[idx]->val.dp[0]+=(base_time_crr-base_time_srt); if(var_trv->ppc != NC_MAX_INT){ if(var_trv->flg_nsd) (void)nco_ppc_bitmask(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc[idx]->val); else (void)nco_ppc_around(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc[idx]->val); } /* endif ppc */ if(nco_is_xcp(var_trv->nm)) nco_xcp_prc(var_trv->nm,var_prc_out[idx]->type,var_prc_out[idx]->sz,(char *)var_prc[idx]->val.vp); #ifdef _OPENMP #pragma omp critical #endif /* _OPENMP */ if(var_prc_out[idx]->sz_rec > 1L) (void)nco_put_vara(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc[idx]->val.vp,var_prc_out[idx]->type); else (void)nco_put_var1(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc[idx]->val.vp,var_prc_out[idx]->type); /* Perform MD5 digest of input and output data if requested */ if(md5) (void)nco_md5_chk(md5,var_prc_out[idx]->nm,var_prc_out[idx]->sz*nco_typ_lng(var_prc_out[idx]->type),grp_out_id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc[idx]->val.vp); } /* end if ncrcat */ /* Warn if record coordinate, if any, is not monotonic (unless interleaved) */ if(!FLG_ILV && nco_prg_id == ncrcat && var_prc[idx]->is_crd_var) (void)rec_crd_chk(var_prc[idx],fl_in,fl_out,idx_rec_crr_in,idx_rec_out[idx_rec]); /* Convert missing_value, if any, back to unpacked or unpromoted type Otherwise missing_value will be double-promoted when next record read in nco_msa_var_get_trv() Do not convert after last record otherwise 
normalization fails due to wrong missing_value type (needs promoted type, not unpacked type) 20140930: This is (too?) confusing and hard-to-follow, a better solution is to add a field mss_val_typ to var_sct and then separately and explicitly track types of both val and mss_val members. */ if(var_prc[idx]->has_mss_val && /* If there is a missing value and... */ !REC_LST_DSR[idx_rec] && /* ...More records will be read (more calls to nco_msa_var_get_trv()) and... */ !(var_prc[idx]->pck_dsk && var_prc_typ_pre_prm != var_prc_out[idx]->type) && /* Exclude conversion on situations like regression test ncra #32 */ var_prc[idx]->type != var_prc[idx]->typ_upk) /* ...variable was auto-promoted (e.g., --dbl) then */ var_prc[idx]=nco_cnv_mss_val_typ(var_prc[idx],var_prc[idx]->typ_upk); /* Demote missing value */ /* Free current input buffer */ var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp); } /* end (OpenMP parallel for) loop over variables */ if(nco_prg_id == ncra && ((FLG_MRO && REC_LST_GRP) || REC_LST_DSR[idx_rec])){ /* Normalize, multiply, etc where necessary: ncra and nces normalization blocks are identical, except ncra normalizes after every ssc records, while nces normalizes once, after files loop. 20131210: nco_cnv_mss_val_typ() can cause type of var_prc to be out-of-sync with var_prc_out nco_cnv_mss_val_typ() above works correctly for case of packing/unpacking, not for rth_dbl Options: 1. Avoid nco_cnv_mss_val_typ() above if rth_dbl is invoked. Keep it for packing. 2. In nco_opr_nrm() below, use mss_val from var_prc_out not var_prc Problem is var_prc[idx]->mss_val is typ_upk while var_prc_out is type, so normalization sets missing var_prc_out value to var_prc[idx]->mss_val read as type */ /* First, divide accumulated (not yet weighted) values by tally to obtain (non-weighted) time-means */ if(NRM_BY_DNM) (void)nco_opr_nrm(nco_op_typ,nbr_var_prc,var_prc,var_prc_out,lmt_rec[idx_rec]->nm_fll,trv_tbl); FLG_BFR_NRM=False; /* [flg] Current output buffers need normalization */ /* Second, multiply unweighted time-mean values by time-mean weights */ for(idx=0;idx<nbr_var_prc;idx++){ if(var_prc[idx]->wgt_sum){ // 20201002: fxm Condition this on if(NORMALIZE_BY_WEIGHT) as is done for ncea below? //if(NORMALIZE_BY_WEIGHT) (void)nco_var_nrm_wgt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->wgt_sum,var_prc_out[idx]->val); (void)nco_var_nrm_wgt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->wgt_sum,var_prc_out[idx]->val); } /* !wgt_sum */ } /* !idx */ if(wgt_nm && (nco_op_typ == nco_op_avg || nco_op_typ == nco_op_mebs)){ /* Third, and only if the weight comes from a record variable in the file ... Compute mean of per-record weight, by normalizing running sum of weight by tally Then normalize all numerical record variables by mean of per-record weight Still ill-defined when MRO is invoked with --wgt Same logic applies in two locations in this code: 1. During SSC normalization inside record loop when REC_LST_DSR is true 2. 
After file loop for nces, and for ncra with superfluous trailing files */ wgt_avg_scv.type=NC_DOUBLE; wgt_avg->val.dp[0]/=wgt_out->tally[0]; /* NB: wgt_avg tally is kept in wgt_out */ wgt_avg_scv.val.d=wgt_avg->val.dp[0]; for(idx=0;idx<nbr_var_prc;idx++){ if(var_prc_out[idx]->is_crd_var || var_prc[idx]->type == NC_CHAR || var_prc[idx]->type == NC_STRING) continue; nco_scv_cnf_typ(var_prc_out[idx]->type,&wgt_avg_scv); if(NORMALIZE_BY_WEIGHT) (void)nco_var_scv_dvd(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val,&wgt_avg_scv); } /* end loop over var */ } /* !wgt_nm */ /* Copy averages to output file */ for(idx=0;idx<nbr_var_prc;idx++){ /* Skip variables that do not contain current record dimension */ flg_skp2=nco_skp_var(var_prc[idx],lmt_rec[idx_rec]->nm_fll,trv_tbl); if(flg_skp2) continue; /* Obtain variable GTT object using full variable name */ var_trv=trv_tbl_var_nm_fll(var_prc_out[idx]->nm_fll,trv_tbl); /* Edit group name for output */ if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=(char *)strdup(var_trv->grp_nm_fll); /* Obtain output group ID */ (void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id); /* Memory management after current extracted group */ if(grp_out_fll) grp_out_fll=(char *)nco_free(grp_out_fll); // 20200831: var_typ_out may differ from typ_upk when PROMOTE_INTS is invoked var_typ_out= PROMOTE_INTS ? var_trv->var_typ_out : var_prc_out[idx]->typ_upk; var_prc_out[idx]=nco_var_cnf_typ(var_typ_out,var_prc_out[idx]); /* Packing/Unpacking */ if(nco_pck_plc == nco_pck_plc_all_new_att) var_prc_out[idx]=nco_put_var_pck(grp_out_id,var_prc_out[idx],nco_pck_plc); if(var_trv->ppc != NC_MAX_INT){ if(var_trv->flg_nsd) (void)nco_ppc_bitmask(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val); else (void)nco_ppc_around(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val); } /* endif ppc */ if(nco_is_xcp(var_trv->nm)) nco_xcp_prc(var_trv->nm,var_prc_out[idx]->type,var_prc_out[idx]->sz,(char *)var_prc_out[idx]->val.vp); if(var_prc_out[idx]->nbr_dim == 0) (void)nco_put_var1(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->val.vp,var_typ_out); else (void)nco_put_vara(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_out[idx]->val.vp,var_typ_out); } /* end loop over idx */ idx_rec_out[idx_rec]++; /* [idx] Index of current record in output file (0 is first, ...) */ } /* end if normalize and write */ /* Prepare indices and flags for next iteration */ if(nco_prg_id == ncrcat) idx_rec_out[idx_rec]++; /* [idx] Index of current record in output file (0 is first, ...) 
*/ rec_usd_cml[idx_rec]++; /* [nbr] Cumulative number of input records used (catenated by ncrcat or operated on by ncra) */ if(nco_dbg_lvl >= nco_dbg_var) (void)fprintf(fp_stderr,"\n"); /* Finally, set index for next record or get outta' Dodge */ /* Decrement both counters for next record */ rec_rmn_prv_ssc--; if(FLG_ILV) rec_rmn_prv_ilv--; if(REC_SRD_LST){ /* Next stride or sub-cycle is not within current file */ if(FLG_ILV){ if(rec_rmn_prv_ssc > 0L){ /* Next record is within current sub-cycle */ if(rec_rmn_prv_ilv > 0L){ /* Next record is within current interleave so augment record index by interleave stride */ idx_rec_crr_in+=lmt_rec[idx_rec]->ilv; }else{ /* Otherwise set record index to start next interleave */ idx_rec_crr_in+=1L-(ilv_per_ssc-1L)*lmt_rec[idx_rec]->ilv; } /* !rec_rmn_prv_ilv */ }else{ /* !rec_rmn_prv_ssc */ /* Finished current sub-cycle so break current while loop and jump to next file */ break; } /* !rec_rmn_prv_ssc */ }else{ /* !FLG_ILV */ /* Last index depends on whether user-specified end was exact, sloppy, or caused truncation */ long end_max_crr; end_max_crr=min_lng(lmt_rec[idx_rec]->idx_end_max_abs-rec_in_cml[idx_rec],min_lng(lmt_rec[idx_rec]->end+lmt_rec[idx_rec]->ssc-1L,rec_dmn_sz-1L)); if(rec_rmn_prv_ssc > 0L && idx_rec_crr_in < end_max_crr) idx_rec_crr_in++; else break; } /* !FLG_ILV */ }else{ /* !REC_SRD_LST */ /* Next stride or sub-cycle is within current file */ if(FLG_ILV){ if(rec_rmn_prv_ssc > 0L){ /* Next record is within current sub-cycle */ if(rec_rmn_prv_ilv > 0L){ /* Next record is within current interleave so augment record index by interleave stride */ idx_rec_crr_in+=lmt_rec[idx_rec]->ilv; }else{ /* Otherwise set record index to start next interleave */ idx_rec_crr_in+=1L-(ilv_per_ssc-1L)*lmt_rec[idx_rec]->ilv; } /* !rec_rmn_prv_ilv */ }else{ /* Finished current sub-cycle so hop to next sub-cycle within file */ idx_rec_crr_in+=lmt_rec[idx_rec]->srd-lmt_rec[idx_rec]->ssc+1L; } /* !rec_rmn_prv_ssc */ }else{ /* !FLG_ILV */ /* Augment index by one within sub-cycles or hop to next sub-cycle within file */ if(rec_rmn_prv_ssc > 0L) idx_rec_crr_in++; else idx_rec_crr_in+=lmt_rec[idx_rec]->srd-lmt_rec[idx_rec]->ssc+1L; } /* !FLG_ILV */ } /* !REC_SRD_LST */ } /* end idx_rec_crr_in master while loop over records in current file */ rec_in_cml[idx_rec]+=rec_dmn_sz; /* [nbr] Cumulative number of records in all files opened so far */ lmt_rec[idx_rec]->rec_rmn_prv_ssc=rec_rmn_prv_ssc; if(fl_idx == fl_nbr-1){ /* Warn if other than number of requested records were read */ if(lmt_rec[idx_rec]->lmt_typ == lmt_dmn_idx && lmt_rec[idx_rec]->is_usr_spc_min && lmt_rec[idx_rec]->is_usr_spc_max){ long ssc_grp_nbr_max; /* [nbr] Subcycle groups that start within range */ long rec_nbr_rqs; /* Number of records user requested */ long rec_nbr_rqs_max; /* [nbr] Records that would be used by ssc_grp_nbr_max groups */ long rec_nbr_spn_act; /* [nbr] Records available within user-specified range */ long rec_nbr_spn_max; /* [nbr] Minimum record number spanned by ssc_grp_nbr_max groups */ long rec_nbr_trn; /* [nbr] Records truncated in last group */ long srd_nbr_flr; /* [nbr] Whole strides that fit within specified range */ /* Number of whole strides that fit within specified range */ srd_nbr_flr=(lmt_rec[idx_rec]->max_idx-lmt_rec[idx_rec]->min_idx)/lmt_rec[idx_rec]->srd; ssc_grp_nbr_max=1L+srd_nbr_flr; /* Number of records that would be used by N groups */ rec_nbr_rqs_max=ssc_grp_nbr_max*lmt_rec[idx_rec]->ssc; /* Minimum record number spanned by N groups of size D is N-1 strides, 
plus D-1 trailing members of last group */ rec_nbr_spn_max=lmt_rec[idx_rec]->srd*(ssc_grp_nbr_max-1L)+lmt_rec[idx_rec]->ssc; /* Actual number of records available within range */ rec_nbr_spn_act=1L+lmt_rec[idx_rec]->max_idx-lmt_rec[idx_rec]->min_idx; /* Number truncated in last group */ rec_nbr_trn=max_int(rec_nbr_spn_max-rec_nbr_spn_act,0L); /* Records requested is maximum minus any truncated in last group */ rec_nbr_rqs=rec_nbr_rqs_max-rec_nbr_trn; if(rec_nbr_rqs != rec_usd_cml[idx_rec]) (void)fprintf(fp_stdout,"%s: WARNING User requested %li records but %s%li were found and used\n",nco_prg_nm_get(),rec_nbr_rqs,(rec_usd_cml[idx_rec] < rec_nbr_rqs) ? "only " : "",rec_usd_cml[idx_rec]); } /* end if */ /* ... and die if no records were read ... */ if(rec_usd_cml[idx_rec] <= 0){ (void)fprintf(fp_stdout,"%s: ERROR No records lay within specified hyperslab\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* end if */ } /* end if */ if(fl_udu_sng) fl_udu_sng=(char*)nco_free(fl_udu_sng); nco_rgd_arr_lst_free(rgd_arr_bnds_lst,rgd_arr_bnds_nbr); nco_rgd_arr_lst_free(rgd_arr_climo_lst,rgd_arr_climo_nbr); } /* end idx_rec loop over different record variables to process */ if(!clm_nfo_sng && flg_cb && (nco_prg_id == ncra || nco_prg_id == ncrcat)){ /* Obtain climatology bounds from input file 20200822: Deprecate this original method to obtain bounds 20160824: Currently dmn_srt_srt and dmn_srt_end indices are 0 and 1, respectively This means values are always/only taken for first record in input file Thus climatology_bounds are only correct for input files with single timestep To fix this requires updating dmn_srt_srt and dmn_srt_end with correct indices Correct indices must account for multiple input records per file and hyperslabbing (e.g., -d time,3,5) */ int var_id_in; double val_dbl; var_id_in= cb->tm_bnd_in ? 
cb->tm_bnd_id_in : cb->clm_bnd_id_in; rcd=nco_get_var1(in_id,var_id_in,cb->dmn_srt_srt,&val_dbl,(nc_type)NC_DOUBLE); if(fl_idx == 0) cb->tm_val[0]=val_dbl; if(val_dbl < cb->bnd_val[0]) cb->bnd_val[0]=val_dbl; rcd=nco_get_var1(in_id,var_id_in,cb->dmn_srt_end,&val_dbl,(nc_type)NC_DOUBLE); if(val_dbl > cb->bnd_val[1]) cb->bnd_val[1]=val_dbl; } /* !flg_cb */ /* End ncra, ncrcat section */ }else if(nco_prg_id == ncfe){ /* ncfe */ if(wgt_nm && (nco_op_typ == nco_op_avg || nco_op_typ == nco_op_mebs)) (void)nco_msa_var_get_trv(in_id,wgt_out,trv_tbl); #ifdef _OPENMP #pragma omp parallel for private(idx,in_id) shared(FLG_BFR_NRM,fl_idx,gpe,grp_id,grp_out_fll,grp_out_id,in_id_arr,nbr_dmn_fl,nbr_var_prc,nco_dbg_lvl,nco_op_typ,out_id,rcd,thr_nbr,trv_tbl,var_out_id,var_prc,var_prc_out,var_trv,wgt_arr,wgt_avg,wgt_nbr,wgt_nm,wgt_out,wgt_scv) #endif /* !_OPENMP */ for(idx=0;idx<nbr_var_prc;idx++){ /* Process all variables in current file */ if(thr_nbr > 1) in_id=in_id_arr[omp_get_thread_num()]; else in_id=in_id_arr[0]; if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm); if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr); /* Obtain variable GTT object using full variable name */ var_trv=trv_tbl_var_nm_fll(var_prc[idx]->nm_fll,trv_tbl); /* Obtain group ID */ (void)nco_inq_grp_full_ncid(in_id,var_trv->grp_nm_fll,&grp_id); /* Edit group name for output */ if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=(char *)strdup(var_trv->grp_nm_fll); /* Obtain output group ID */ (void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id); /* Memory management after current extracted group */ if(grp_out_fll) grp_out_fll=(char *)nco_free(grp_out_fll); /* Get variable ID */ (void)nco_inq_varid(grp_out_id,var_trv->nm,&var_out_id); /* Store the output variable ID */ var_prc_out[idx]->id=var_out_id; /* Retrieve variable from disk into memory */ (void)nco_msa_var_get_trv(in_id,var_prc[idx],trv_tbl); /* Convert char, short, long, int, and float types to doubles before arithmetic Output variable type is "sticky" so only convert on first record */ if(fl_idx == 0) var_prc_out[idx]=nco_typ_cnv_rth(var_prc_out[idx],nco_op_typ); var_prc[idx]=nco_var_cnf_typ(var_prc_out[idx]->type,var_prc[idx]); /* Weight current variable (modified from per-record weighting code above) */ nco_bool flg_rth_ntl; if(fl_idx == 0) flg_rth_ntl=True; else flg_rth_ntl=False; if((wgt_arr || wgt_nm) && (nco_op_typ == nco_op_avg || nco_op_typ == nco_op_mebs) && !var_prc[idx]->is_crd_var){ if(wgt_arr){ wgt_scv.type=NC_DOUBLE; wgt_scv.val.d=wgt_arr[fl_idx]; /* Per-file weight */ } /* !wgt_arr */ if(wgt_nm){ wgt_scv.type=wgt_out->type; wgt_scv.val.d=wgt_out->val.dp[0]; /* Per-file weight */ } /* !wgt_nm */ if(var_prc[idx]->wgt_sum) var_prc[idx]->wgt_crr=wgt_scv.val.d; nco_scv_cnf_typ(var_prc[idx]->type,&wgt_scv); if(nco_dbg_lvl >= nco_dbg_std && (wgt_nm || wgt_arr)) (void)fprintf(fp_stdout,"wgt_nm = %s, var_nm = %s, fl_idx = %i, typ = %s, wgt_val = %g, wgt_crr = %g, var_val = %g\n",wgt_nm ? wgt_out->nm_fll : "NULL",var_prc[idx]->nm,fl_idx,nco_typ_sng(wgt_scv.type),wgt_scv.val.d,var_prc[idx]->wgt_crr,var_prc[idx]->val.dp[0]); (void)nco_var_scv_mlt(var_prc[idx]->type,var_prc[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->val,&wgt_scv); if(wgt_nm && var_prc[idx]->has_mss_val){ (void)fprintf(fp_stdout,"%s: ERROR %s -w wgt_nm does not yet work on variables that contain missing values and variable %s contains a missing value attribute. This is TODO nco1124. 
%s will now quit rather than compute possibly erroneous values. HINT: Restrict the %s -w wgt_nm operation to variables with no missing value attributes.\n",nco_prg_nm_get(),nco_prg_nm_get(),nco_prg_nm_get(),var_prc[idx]->nm,nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* !wgt_nm */ /* Increment running total of wgt_out after its application to last processed variable for this record */ if(wgt_nm && (idx == nbr_var_prc-1)){ if(flg_rth_ntl) nco_opr_drv((long)0L,nco_op_typ,wgt_out,wgt_avg); else nco_opr_drv((long)1L,nco_op_typ,wgt_out,wgt_avg); } /* !wgt_nm */ } /* !wgt */ /* Perform arithmetic operations: avg, min, max, ttl, ... */ /* Note: fl_idx not rec_usd_cml! */ nco_opr_drv(fl_idx,nco_op_typ,var_prc[idx],var_prc_out[idx]); FLG_BFR_NRM=True; /* [flg] Current output buffers need normalization */ /* Free current input buffer */ var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp); } /* end (OpenMP parallel for) loop over idx */ /* End ncfe section */ }else if(nco_prg_id == ncge){ /* ncge */ trv_tbl_sct *trv_tbl1; /* [lst] Traversal table (needed for multi-file cases) */ /* Initialize traversal table */ trv_tbl_init(&trv_tbl1); /* Construct GTT using current file ID */ (void)nco_bld_trv_tbl(in_id,trv_pth,lmt_nbr,lmt_arg,aux_nbr,aux_arg,MSA_USR_RDR,FORTRAN_IDX_CNV,grp_lst_in,grp_lst_in_nbr,var_lst_in,var_lst_in_nbr,EXTRACT_ALL_COORDINATES,GRP_VAR_UNN,False,EXCLUDE_INPUT_LIST,EXTRACT_ASSOCIATED_COORDINATES,EXTRACT_CLL_MSR,EXTRACT_FRM_TRM,nco_pck_plc_nil,&flg_dne,trv_tbl1); /* Were all user-specified dimensions found? */ (void)nco_chk_dmn(lmt_nbr,flg_dne); /* Loop over ensembles in current file */ for(int idx_nsm=0;idx_nsm<trv_tbl->nsm_nbr;idx_nsm++){ if(nco_dbg_lvl > nco_dbg_std) (void)fprintf(stdout,"%s: ensemble %d: %s\n",nco_prg_nm_get(),idx_nsm,trv_tbl->nsm[idx_nsm].grp_nm_fll_prn); int mbr_srt=trv_tbl->nsm[idx_nsm].mbr_srt; int mbr_end=trv_tbl->nsm[idx_nsm].mbr_end; /* Loop over ensemble members in current file (use start and end members, multi-file cases) */ for(int idx_mbr=mbr_srt;idx_mbr<mbr_end;idx_mbr++){ /* Loop over all variables */ for(int idx_prc=0;idx_prc<nbr_var_prc;idx_prc++){ /* Obtain variable GTT object for member variable in ensemble */ var_trv=trv_tbl_var_nm_fll(var_prc[idx_prc]->nm_fll,trv_tbl); assert(var_trv); /* Skip if from different ensembles */ if(strcmp(var_trv->nsm_nm,trv_tbl->nsm[idx_nsm].grp_nm_fll_prn)) continue; /* Build new variable name */ char *grp_nm_fll=trv_tbl->nsm[idx_nsm].mbr[idx_mbr].mbr_nm_fll; char *var_nm_fll=nco_bld_nm_fll(grp_nm_fll,var_prc[idx_prc]->nm);; char *nm_fll=strdup(var_prc[idx_prc]->nm_fll); var_prc[idx_prc]->nm_fll=(char *)nco_free(var_prc[idx_prc]->nm_fll); var_prc[idx_prc]->nm_fll=nco_bld_nm_fll(grp_nm_fll,var_prc[idx_prc]->nm); if(nco_dbg_lvl > nco_dbg_std) (void)fprintf(fp_stdout,"%s:\t variable <%s>\n",nco_prg_nm_get(),var_prc[idx_prc]->nm_fll); /* Obtain group ID */ (void)nco_inq_grp_full_ncid(in_id,grp_nm_fll,&grp_id); (void)nco_var_mtd_refresh(grp_id,var_prc[idx_prc]); /* Retrieve variable from disk into memory. NB: Using table in file loop */ (void)nco_msa_var_get_trv(in_id,var_prc[idx_prc],trv_tbl1); /* Convert char, short, long, int, and float types to doubles before arithmetic Output variable type is "sticky" so only convert on first member */ if(fl_idx == 0 && idx_mbr == 0) var_prc_out[idx_prc]=nco_typ_cnv_rth(var_prc_out[idx_prc],nco_op_typ); var_prc[idx_prc]=nco_var_cnf_typ(var_prc_out[idx_prc]->type,var_prc[idx_prc]); /* Perform arithmetic operations: avg, min, max, ttl, ... 
*/ nco_opr_drv(fl_idx+idx_mbr,nco_op_typ,var_prc[idx_prc],var_prc_out[idx_prc]); FLG_BFR_NRM=True; /* [flg] Current output buffers need normalization */ /* Put old name back */ var_prc[idx_prc]->nm_fll=(char *)nco_free(var_prc[idx_prc]->nm_fll); var_prc[idx_prc]->nm_fll=strdup(nm_fll); /* Free current input buffer */ var_prc[idx_prc]->val.vp=nco_free(var_prc[idx_prc]->val.vp); /* Free built variable name */ var_nm_fll=(char *)nco_free(var_nm_fll); nm_fll=(char *)nco_free(nm_fll); } /* end loop over var_prc */ } /* end loop over mbr */ } /* !idx_mbr */ (void)trv_tbl_free(trv_tbl1); } /* End ncge section */ /* For ncge, save helpful metadata for later handling by ncbo */ if(nco_prg_id == ncge && fl_idx == 0) (void)nco_nsm_wrt_att(in_id,out_id,gpe,trv_tbl); if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(fp_stderr,"\n"); /* Close input netCDF file */ for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_arr[thr_idx]); /* Dispose local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in); /* Are all our data tanks already full? */ if(nco_prg_id == ncra || nco_prg_id == ncrcat){ for(idx_rec=0;idx_rec<nbr_rec;idx_rec++){ if(!flg_input_complete[idx_rec]){ if((flg_input_complete[idx_rec]=lmt_rec[idx_rec]->flg_input_complete)){ /* NB: TODO nco1066 move input_complete break to precede record loop but remember to close open filehandles */ /* 20131209: Rewritten so file skipped only once all record dimensions have flg_input_complete Warnings about superfluous files printed only once per dimension fxm: use flg_input_complete[idx_rec] to skip completed entries in main record dimension loop above */ if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(fp_stderr,"%s: INFO All requested records for record dimension #%d (%s) were found within the first %d input file%s, next file was opened then skipped, and remaining %d input file%s need not be opened\n",nco_prg_nm_get(),idx_rec,lmt_rec[idx_rec]->nm_fll,fl_idx,(fl_idx == 1) ? "" : "s",fl_nbr-fl_idx-1,(fl_nbr-fl_idx-1 == 1) ? "" : "s"); flg_input_complete_nbr++; } /* endif superfluous */ } /* endif not already known to be complete */ } /* end loop over record dimensions */ /* Once all record dimensions are complete, break-out of file loop */ if(flg_input_complete_nbr == nbr_rec) break; } /* endif ncra || ncrcat */ } /* end loop over fl_idx */ if(FLG_ILV && lmt_rec[0]->ilv > 1 && rec_rmn_prv_ilv != 0) (void)fprintf(stderr,"%s: WARNING input ended while last interleaved sub-cycle was incomplete. This means the interleaved dimension in the last sub-cycle will contain a non-uniform number of records contributing to different indices. Consider re-defining hyperslab or input data length to ensure output all based on complete sub-cycles. Diagnostics: full sub-cycle length = %ld, stride between first elements of consecutive sub-cycles = %ld, records needed for completion of last sub-cycle and of last interleaved index are, respectively, rec_rmn_prv_ssc = %ld, and rec_rmn_prv_ilv = %ld\n",nco_prg_nm_get(),lmt_rec[0]->ssc,lmt_rec[0]->srd,rec_rmn_prv_ssc,rec_rmn_prv_ilv); /* Subcycle argument warning */ if(nco_prg_id == ncra || nco_prg_id == ncrcat){ /* fxm: Remove this or make DBG when crd_val SSC/MRO is predictable? 
*/ for(idx_rec=0;idx_rec<nbr_rec;idx_rec++){ /* Check subcycle for each record */ if(lmt_rec[idx_rec]->ssc > 1L && (lmt_rec[idx_rec]->lmt_typ == lmt_crd_val || lmt_rec[idx_rec]->lmt_typ == lmt_udu_sng)){ if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"\n%s: INFO Subcycle argument SSC used in hyperslab specification for %s which will be determined based on coordinate values rather than dimension indices. The behavior of the subcycle hyperslab argument is ambiguous for coordinate-based hyperslabs---it could mean select the first SSC elements that are within the min and max coordinate values beginning with each strided point, or it could mean always select the first _consecutive_ SSC elements beginning with each strided point (regardless of their values relative to min and max). For such hyperslabs, NCO adopts the latter definition and always selects the group of SSC records beginning with each strided point. Strided points are themselves guaranteed to be within the min and max coordinates, though the subsequent members of each group are not. This is only the case when the record coordinate is not monotonic. The record coordinate is usually monotonic, so unpleasant surprises are only expected in corner cases unlikely to affect the majority of users.\n",nco_prg_nm_get(),lmt_rec[idx_rec]->nm); } /* Check subcycle for each record */ } /* !idx_rec */ } /* Subcycle argument warning */ /* Normalize, multiply, etc where necessary: ncra and nces normalization blocks are identical, except ncra normalizes after every SSC records, while nces normalizes once, after all files. Occassionally last input file(s) is/are superfluous so REC_LST_DSR never set In such cases FLG_BFR_NRM is still true, indicating ncra still needs normalization FLG_BFR_NRM is always true here for ncfe and ncge */ if(FLG_BFR_NRM){ /* First, divide accumulated (not yet weighted) values by tally to obtain (non-weighted) time-means */ if(NRM_BY_DNM) (void)nco_opr_nrm(nco_op_typ,nbr_var_prc,var_prc,var_prc_out,(char *)NULL,(trv_tbl_sct *)NULL); /* Second, multiply unweighted time-mean values by time-mean weights */ for(idx=0;idx<nbr_var_prc;idx++){ if(var_prc[idx]->wgt_sum){ //(void)nco_var_nrm_wgt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->wgt_sum,var_prc_out[idx]->val); if(NORMALIZE_BY_WEIGHT) (void)nco_var_nrm_wgt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->wgt_sum,var_prc_out[idx]->val); // original code } /* !wgt_sum */ } /* !idx */ if(wgt_nm && (nco_op_typ == nco_op_avg || nco_op_typ == nco_op_mebs)){ /* Third, and only if the weight comes from a record variable in the file ... Compute mean of per-record weight, by normalizing running sum of weight by tally Then normalize all numerical record variables by mean of per-record weight Still ill-defined when MRO is invoked with --wgt Same logic applies in two locations in this code: 1. During SSC normalization inside record loop when REC_LST_DSR is true 2. 
After file loop for nces, and for ncra with superfluous trailing files */ wgt_avg_scv.type=NC_DOUBLE; wgt_avg->val.dp[0]/=wgt_out->tally[0]; /* NB: wgt_avg tally is kept in wgt_out */ wgt_avg_scv.val.d=wgt_avg->val.dp[0]; for(idx=0;idx<nbr_var_prc;idx++){ if(var_prc_out[idx]->is_crd_var || var_prc[idx]->type == NC_CHAR || var_prc[idx]->type == NC_STRING) continue; nco_scv_cnf_typ(var_prc_out[idx]->type,&wgt_avg_scv); if(NORMALIZE_BY_WEIGHT) (void)nco_var_scv_dvd(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val,&wgt_avg_scv); } /* end loop over var */ } /* !wgt_nm */ } /* !FLG_BFR_NRM */ /* Manually fix YYMMDD date which was mangled by averaging */ if(cnv->CCM_CCSM_CF && nco_prg_id == ncra) (void)nco_cnv_ccm_ccsm_cf_date(grp_out_id,var_out,xtr_nbr); /* Add time variable to output file NB: nco_cnv_arm_time_install() contains OpenMP critical region */ if(CNV_ARM && nco_prg_id == ncrcat) (void)nco_cnv_arm_time_install(grp_out_id,base_time_srt,dfl_lvl); /* Copy averages to output file for ncfe and ncge always and for ncra when trailing file(s) was/were superfluous */ if(FLG_BFR_NRM){ for(idx=0;idx<nbr_var_prc;idx++){ /* Obtain variable GTT object using full variable name */ var_trv=trv_tbl_var_nm_fll(var_prc_out[idx]->nm_fll,trv_tbl); /* For ncge, group to save is ensemble parent group */ if(nco_prg_id == ncge){ /* Check if suffix needed. Appends to default name */ if(trv_tbl->nsm_sfx){ /* Define (append) then use and forget new name */ char *nm_fll_sfx=nco_bld_nsm_sfx(var_trv->grp_nm_fll_prn,trv_tbl); /* Use new name */ if(gpe) grp_out_fll=nco_gpe_evl(gpe,nm_fll_sfx); else grp_out_fll=(char *)strdup(nm_fll_sfx); nm_fll_sfx=(char *)nco_free(nm_fll_sfx); }else{ /* Non suffix case */ if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->nsm_nm); else grp_out_fll=(char *)strdup(var_trv->nsm_nm); } /* !trv_tbl->nsm_sfx */ }else if(nco_prg_id == ncfe){ /* Edit group name for output */ if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=(char *)strdup(var_trv->grp_nm_fll); } /* end else */ /* Obtain output group ID */ (void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id); /* Get output variable ID */ (void)nco_inq_varid(grp_out_id,var_prc_out[idx]->nm,&var_out_id); /* Store the output variable ID */ var_prc_out[idx]->id=var_out_id; var_prc_out[idx]=nco_var_cnf_typ(var_prc_out[idx]->typ_upk,var_prc_out[idx]); /* Packing/Unpacking */ if(nco_pck_plc == nco_pck_plc_all_new_att) var_prc_out[idx]=nco_put_var_pck(grp_out_id,var_prc_out[idx],nco_pck_plc); if(var_trv->ppc != NC_MAX_INT){ if(var_trv->flg_nsd) (void)nco_ppc_bitmask(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val); else (void)nco_ppc_around(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val); } /* endif ppc */ if(var_prc_out[idx]->nbr_dim == 0) (void)nco_put_var1(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type); else (void)nco_put_vara(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type); } /* end loop over idx */ } /* end if ncfe and ncge */ /* Free averaging, tally, and weight buffers */ if(nco_prg_id == ncra || nco_prg_id == ncfe || nco_prg_id == ncge){ for(idx=0;idx<nbr_var_prc;idx++){ if((wgt_arr || wgt_nm) && var_prc[idx]->has_mss_val) 
var_prc_out[idx]->wgt_sum=var_prc[idx]->wgt_sum=(double *)nco_free(var_prc[idx]->wgt_sum); var_prc_out[idx]->tally=var_prc[idx]->tally=(long *)nco_free(var_prc[idx]->tally); var_prc_out[idx]->val.vp=nco_free(var_prc_out[idx]->val.vp); } /* end loop over idx */ } /* endif ncra || nces */ if(flg_cb && (nco_prg_id == ncra || nco_prg_id == ncrcat || nco_prg_id == ncfe)){ rcd=nco_put_var(out_id,cb->tm_crd_id_out,cb->tm_val,(nc_type)NC_DOUBLE); rcd=nco_put_var(out_id,cb->clm_bnd_id_out,cb->bnd_val,(nc_type)NC_DOUBLE); } /* !flg_cb */ if(flg_cb && (cb->bnd2clm || cb->clm2bnd)){ /* Rename time-bounds as climatology bounds, or visa-versa Otherwise wrong bounds will remain orphaned in output file Also, this ensures same dimensions are used Rename at end of procedure so that traversal table does not get out-of-sync Avoiding renaming would mean creating the new and deleting the old bounds variable That would entail significant modifications to traversal table logic Renaming seems simpler and less error prone */ rcd+=nco_redef(out_id); if(cb->bnd2clm) rcd+=nco_rename_var(out_id,cb->tm_bnd_id_out,cb->clm_bnd_nm); if(cb->clm2bnd) rcd+=nco_rename_var(out_id,cb->clm_bnd_id_out,cb->tm_bnd_nm); rcd+=nco_enddef(out_id); } /* !flg_cb */ /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); /* Clean memory unless dirty memory allowed */ if(flg_mmr_cln){ /* NCO-generic clean-up */ /* Free individual strings/arrays */ //if(nco_dbg_lvl_get() >= nco_dbg_quiet) (void)fprintf(stdout,"%s: free quark3\n",nco_prg_nm_get()); if(cmd_ln) cmd_ln=(char *)nco_free(cmd_ln); if(clm_nfo_sng) clm_nfo_sng=(char *)nco_free(clm_nfo_sng); if(cnk_map_sng) cnk_map_sng=(char *)nco_free(cnk_map_sng); if(cnk_plc_sng) cnk_plc_sng=(char *)nco_free(cnk_plc_sng); if(fl_in) fl_in=(char *)nco_free(fl_in); if(fl_out) fl_out=(char *)nco_free(fl_out); if(fl_out_tmp) fl_out_tmp=(char *)nco_free(fl_out_tmp); if(fl_pth) fl_pth=(char *)nco_free(fl_pth); if(fl_pth_lcl) fl_pth_lcl=(char *)nco_free(fl_pth_lcl); if(in_id_arr) in_id_arr=(int *)nco_free(in_id_arr); if(wgt_arr) wgt_arr=(double *)nco_free(wgt_arr); if(wgt_nm) wgt_nm=(char *)nco_free(wgt_nm); /* Free lists of strings */ if(fl_lst_in && !fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,fl_nbr); if(fl_lst_in && fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,1); if(fl_lst_abb) fl_lst_abb=nco_sng_lst_free(fl_lst_abb,abb_arg_nbr); if(gaa_nbr > 0) gaa_arg=nco_sng_lst_free(gaa_arg,gaa_nbr); if(var_lst_in_nbr > 0) var_lst_in=nco_sng_lst_free(var_lst_in,var_lst_in_nbr); if(wgt_nbr > 0) wgt_lst_in=nco_sng_lst_free(wgt_lst_in,wgt_nbr); /* Free limits */ for(idx=0;idx<aux_nbr;idx++) aux_arg[idx]=(char *)nco_free(aux_arg[idx]); for(idx=0;idx<lmt_nbr;idx++) lmt_arg[idx]=(char *)nco_free(lmt_arg[idx]); for(idx=0;idx<ppc_nbr;idx++) ppc_arg[idx]=(char *)nco_free(ppc_arg[idx]); /* Free chunking information */ for(idx=0;idx<cnk_nbr;idx++) cnk_arg[idx]=(char *)nco_free(cnk_arg[idx]); if(cnk_nbr > 0 && (fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC)) cnk.cnk_dmn=(cnk_dmn_sct **)nco_cnk_lst_free(cnk.cnk_dmn,cnk_nbr); /* Free dimension lists */ if(nbr_dmn_xtr > 0) dim=nco_dmn_lst_free(dim,nbr_dmn_xtr); if(nbr_dmn_xtr > 0) dmn_out=nco_dmn_lst_free(dmn_out,nbr_dmn_xtr); /* Free variable lists */ if(xtr_nbr > 0) var=nco_var_lst_free(var,xtr_nbr); if(xtr_nbr > 0) var_out=nco_var_lst_free(var_out,xtr_nbr); var_prc=(var_sct **)nco_free(var_prc); var_prc_out=(var_sct **)nco_free(var_prc_out); var_fix=(var_sct 
**)nco_free(var_fix); var_fix_out=(var_sct **)nco_free(var_fix_out); if(md5) md5=(md5_sct *)nco_md5_free(md5); if(wgt) wgt=(var_sct *)nco_var_free(wgt); if(wgt_out) wgt_out=(var_sct *)nco_var_free(wgt_out); if(wgt_avg) wgt_avg=(var_sct *)nco_var_free(wgt_avg); /* Free climatology bounds */ if(cb){ if(cb->bnd_val) cb->bnd_val=(double *)nco_free(cb->bnd_val); if(cb->clm_bnd_nm) cb->clm_bnd_nm=(char *)nco_free(cb->clm_bnd_nm); if(cb->cln_val) cb->cln_val=(char *)nco_free(cb->cln_val); if(cb->tm_bnd_nm) cb->tm_bnd_nm=(char *)nco_free(cb->tm_bnd_nm); if(cb->tm_crd_nm) cb->tm_crd_nm=(char *)nco_free(cb->tm_crd_nm); if(cb->tm_val) cb->tm_val=(double *)nco_free(cb->tm_val); if(cb->unt_val) cb->unt_val=(char *)nco_free(cb->unt_val); if(cb) cb=(clm_bnd_sct *)nco_free(cb); } /* !cb */ (void)trv_tbl_free(trv_tbl); for(idx=0;idx<lmt_nbr;idx++) flg_dne[idx].dim_nm=(char *)nco_free(flg_dne[idx].dim_nm); if(flg_dne) flg_dne=(nco_dmn_dne_t *)nco_free(flg_dne); if(flg_input_complete) flg_input_complete=(nco_bool *)nco_free(flg_input_complete); if(idx_rec_out) idx_rec_out=(long *)nco_free(idx_rec_out); if(rec_in_cml) rec_in_cml=(long *)nco_free(rec_in_cml); if(rec_usd_cml) rec_usd_cml=(long *)nco_free(rec_usd_cml); if(REC_LST_DSR) REC_LST_DSR=(nco_bool *)nco_free(REC_LST_DSR); } /* !flg_mmr_cln */ #ifdef ENABLE_MPI MPI_Finalize(); #endif /* !ENABLE_MPI */ /* End timer */ ddra_info.tmr_flg=nco_tmr_end; /* [enm] Timer flag */ rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); if(rcd != NC_NOERR) nco_err_exit(rcd,"main"); nco_exit_gracefully(); return EXIT_SUCCESS; } /* end main() */
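/* Standalone illustration (compile as a separate file), not NCO source: the
   weighted-normalization logic above accumulates wgt*val, a per-element tally,
   and a running wgt_sum, then divides first by tally and then by the mean
   weight (wgt_sum/tally), which collapses to acc/wgt_sum; elements that are
   always missing stay missing. The names here (acc_rec, nrm, MSS_VAL, SZ) are
   hypothetical stand-ins for the corresponding var_prc_out fields, not NCO's
   actual API. */
#include <stdio.h>

#define SZ 4
#define MSS_VAL (-999.0)

/* Accumulate one weighted record, skipping missing values */
static void acc_rec(const double *val, double wgt, double *acc, double *wgt_sum, long *tally)
{
  int i;
  for(i=0;i<SZ;i++){
    if(val[i] != MSS_VAL){
      acc[i]+=wgt*val[i];
      wgt_sum[i]+=wgt;
      tally[i]++;
    }
  }
}

/* Normalize: (acc/tally)/(wgt_sum/tally) == acc/wgt_sum */
static void nrm(double *acc, const double *wgt_sum, const long *tally)
{
  int i;
  for(i=0;i<SZ;i++)
    acc[i]= tally[i] ? acc[i]/wgt_sum[i] : MSS_VAL;
}

int main(void)
{
  double rec1[SZ]={1.0,2.0,MSS_VAL,4.0};
  double rec2[SZ]={3.0,MSS_VAL,MSS_VAL,8.0};
  double acc[SZ]={0.0}, wgt_sum[SZ]={0.0};
  long tally[SZ]={0L};
  int i;
  acc_rec(rec1,1.0,acc,wgt_sum,tally);
  acc_rec(rec2,3.0,acc,wgt_sum,tally);
  nrm(acc,wgt_sum,tally);
  /* prints: 2.5 2 -999 7 */
  for(i=0;i<SZ;i++) printf("%g ",acc[i]);
  printf("\n");
  return 0;
}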
3d25pt.c
/*
 * Order-2, 3D 25 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif

#include "print_utils.h"

#define TESTS 2
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#ifndef min
#define min(x,y) ((x) < (y) ? (x) : (y))
#endif

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* The original parsed the sizes behind separate argc checks, leaving
   * Nx..Nt uninitialized on short command lines; require all four. */
  if (argc > 4) {
    Nx = atoi(argv[1]) + 8;
    Ny = atoi(argv[2]) + 8;
    Nz = atoi(argv[3]) + 8;
    Nt = atoi(argv[4]);
  } else {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }

  double ****A = (double ****) malloc(sizeof(double ***) * 2);
  double ***roc2; /* allocated below; the original leaked a first malloc here */
  A[0] = (double ***) malloc(sizeof(double **) * Nz);
  A[1] = (double ***) malloc(sizeof(double **) * Nz);
  roc2 = (double ***) malloc(sizeof(double **) * Nz);
  for (i = 0; i < Nz; i++) {
    A[0][i] = (double **) malloc(sizeof(double *) * Ny);
    A[1][i] = (double **) malloc(sizeof(double *) * Ny);
    roc2[i] = (double **) malloc(sizeof(double *) * Ny);
    for (j = 0; j < Ny; j++) {
      A[0][i][j] = (double *) malloc(sizeof(double) * Nx);
      A[1][i][j] = (double *) malloc(sizeof(double) * Nx);
      roc2[i][j] = (double *) malloc(sizeof(double) * Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int *) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int *) realloc((void *) tile_size, sizeof(int) * 5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 16;
  tile_size[3] = 128;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  // initialize variables
  // srand(42);
  /* Start at 0 (the original started at 1, leaving plane 0 uninitialized
   * although the stencil reads it), and give A[1] a defined value since
   * the update reads both time levels. */
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  const double coef0 = -0.28472;
  const double coef1 =  0.16000;
  const double coef2 = -0.02000;
  const double coef3 =  0.00254;
  const double coef4 = -0.00018;

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);

    // serial execution of the radius-4 star stencil
    // (7 multiplications and 26 additions/subtractions per point)
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz - 4; i++) {
        for (j = 4; j < Ny - 4; j++) {
          for (k = 4; k < Nx - 4; k++) {
            A[(t+1)%2][i][j][k] = 2.0 * A[t%2][i][j][k] - A[(t+1)%2][i][j][k]
              + roc2[i][j][k] * (
                  coef0 *  A[t%2][i  ][j  ][k  ]
                + coef1 * (A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ]
                         + A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ]
                         + A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1])
                + coef2 * (A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ]
                         + A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ]
                         + A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2])
                + coef3 * (A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ]
                         + A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ]
                         + A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3])
                + coef4 * (A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ]
                         + A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ]
                         + A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]));
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  free(A);          /* the original leaked the outer array */
  free(tile_size);  /* and the tile-size list */
  return 0;
}
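A note on interpreting the timings above: each sweep updates the (Nx-8) x (Ny-8) x (Nz-8) interior with 7 multiplications and 26 additions/subtractions per point, so a sustained GFLOP/s figure can be derived from min_tdiff. A minimal sketch; the helper name and the 33-FLOP count are ours (counted from the update formula), not part of the benchmark or of PRINT_RESULTS:

static double stencil_gflops(int Nx, int Ny, int Nz, int Nt, double seconds)
{
    /* 33 = 7 multiplies + 26 adds/subtracts per updated point;
       the interior excludes the 4-point halo on every face. */
    double points = (double)(Nx - 8) * (double)(Ny - 8)
                  * (double)(Nz - 8) * (double)Nt;
    return points * 33.0 / seconds / 1e9;
}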
simd-clones-3.c
/* { dg-options "-fopenmp -fdump-tree-optimized -O2" } */ /* Test that if there is no *inbranch clauses, that both the masked and the unmasked version are created. */ #pragma omp declare simd int addit(int a, int b, int c) { return a + b; } /* { dg-final { scan-tree-dump "_ZGVbN4vvv_addit" "optimized" { target i?86-*-* x86_64-*-* } } } */ /* { dg-final { scan-tree-dump "_ZGVbM4vvv_addit" "optimized" { target i?86-*-* x86_64-*-* } } } */ /* { dg-final { scan-tree-dump "_ZGVcN4vvv_addit" "optimized" { target i?86-*-* x86_64-*-* } } } */ /* { dg-final { scan-tree-dump "_ZGVcM4vvv_addit" "optimized" { target i?86-*-* x86_64-*-* } } } */ /* { dg-final { scan-tree-dump "_ZGVdN8vvv_addit" "optimized" { target i?86-*-* x86_64-*-* } } } */ /* { dg-final { scan-tree-dump "_ZGVdM8vvv_addit" "optimized" { target i?86-*-* x86_64-*-* } } } */ /* { dg-final { scan-tree-dump "_ZGVeN16vvv_addit" "optimized" { target i?86-*-* x86_64-*-* } } } */ /* { dg-final { scan-tree-dump "_ZGVeM16vvv_addit" "optimized" { target i?86-*-* x86_64-*-* } } } */
sparseAsyncJacobi.h
//
// Created by mbarb on 17/02/2018.
//

#ifndef PARALLELITERATIVE_SPARSEASYNCJACOBI_H
#define PARALLELITERATIVE_SPARSEASYNCJACOBI_H

#include <omp.h>
#include <Eigen>
#include <algorithm>  // std::sort
#include <cmath>      // std::abs on floating-point scalars
#include <iostream>
#include "utils.h"

namespace Iterative {

    template <typename Scalar>
    class sparseAsyncJacobi {

    public:
        /**
         * @param A linear system matrix of max rank
         * @param b known terms vector
         * @param iterations max number of iterations
         * @param tolerance min error tolerated
         * @param workers number of threads
         */
        explicit sparseAsyncJacobi(
                const Eigen::SparseMatrix<Scalar>& A,
                const Eigen::ColumnVector<Scalar, Eigen::Dynamic>& b,
                const ulonglong iterations,
                const Scalar tolerance,
                const ulong workers = 0L) :
                A(A), b(b), iterations(iterations), tolerance(tolerance),
                workers(workers), solution(b) {

            solution.fill((Scalar) 1 / solution.size());
            if (workers > 0)  // omp_set_num_threads requires a positive count
                omp_set_num_threads(workers);
        }

        const Eigen::ColumnVector<Scalar, Eigen::Dynamic> solve() {

            std::vector<ulonglong> index(solution.size());
            for (ulonglong i = 0; i < solution.size(); ++i) index[i] = i;
            std::vector<ulonglong> remove;

            for (iteration = 0; iteration < iterations; ++iteration) {
                // calculate solutions parallelizing on rows
                #pragma omp parallel
                #pragma omp for schedule(static) nowait
                for (long long i = 0; i < (long long) index.size(); ++i) {
                    auto el = index[i];
                    Scalar oldElement = solution[el];
                    solution[el] = solution_find(b[el], el);
                    Scalar error = std::abs(solution[el] - oldElement);
                    if (error <= tolerance) {
                        #pragma omp critical
                        remove.emplace_back(i);
                    }
                }

                // drop the components that have already converged
                if (!remove.empty()) {
                    #pragma omp barrier
                    std::sort(remove.rbegin(), remove.rend());
                    for (auto i : remove) {
                        index.erase(index.begin() + i);
                    }
                    remove.clear();
                    if (index.empty()) break;
                }
            }
            std::cout << iteration << std::endl;
            return this->solution;
        }

        const long getIteration() const {
            return iteration;
        }

        const Eigen::ColumnVector<Scalar, -1>& getSolution() const {
            return solution;
        }

    protected:
        const Eigen::SparseMatrix<Scalar>& A;
        const Eigen::ColumnVector<Scalar, Eigen::Dynamic>& b;
        const ulonglong iterations;
        const Scalar tolerance;
        const ulong workers;
        Eigen::ColumnVector<Scalar, Eigen::Dynamic> solution;
        long iteration = 0L;

    private:
        /**
         * utility function implementing the Jacobi method in order to find one solution component
         * @param term right-hand-side component
         * @param index index of the solution component
         * @return solution component
         */
        inline Scalar solution_find(Scalar term, const ulonglong index) {
            term -= A.row(index) * solution;
            return (term + A.coeff(index, index) * solution[index]) / A.coeff(index, index);
        }
    };
};

#endif //PARALLELITERATIVE_SPARSEASYNCJACOBI_H
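A hypothetical usage sketch of the header above. Eigen::ColumnVector and the ulonglong/ulong aliases are assumed to come from the project's utils.h; Jacobi iteration is only guaranteed to converge for matrices such as the diagonally dominant one built here:

#include "sparseAsyncJacobi.h"

int main() {
    Eigen::SparseMatrix<double> A(4, 4);
    for (int i = 0; i < 4; ++i) A.insert(i, i) = 4.0;  // dominant diagonal
    A.insert(0, 1) = 1.0;
    A.insert(1, 0) = 1.0;                              // weak coupling
    A.makeCompressed();

    Eigen::ColumnVector<double, Eigen::Dynamic> b(4);
    b << 1.0, 2.0, 3.0, 4.0;

    Iterative::sparseAsyncJacobi<double> solver(A, b, 1000, 1e-9, 4);
    std::cout << solver.solve().transpose() << std::endl;
    return 0;
}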
callback.h
#ifndef _BSD_SOURCE #define _BSD_SOURCE #endif #define _DEFAULT_SOURCE #include <stdio.h> #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif #include <inttypes.h> #include <omp.h> #include <omp-tools.h> #include "ompt-signal.h" // Used to detect architecture #include "../../src/kmp_platform.h" static const char* ompt_thread_t_values[] = { NULL, "ompt_thread_initial", "ompt_thread_worker", "ompt_thread_other" }; static const char* ompt_task_status_t_values[] = { NULL, "ompt_task_complete", // 1 "ompt_task_yield", // 2 "ompt_task_cancel", // 3 "ompt_task_detach", // 4 "ompt_task_early_fulfill", // 5 "ompt_task_late_fulfill", // 6 "ompt_task_switch" // 7 }; static const char* ompt_cancel_flag_t_values[] = { "ompt_cancel_parallel", "ompt_cancel_sections", "ompt_cancel_loop", "ompt_cancel_taskgroup", "ompt_cancel_activated", "ompt_cancel_detected", "ompt_cancel_discarded_task" }; static void format_task_type(int type, char *buffer) { char *progress = buffer; if (type & ompt_task_initial) progress += sprintf(progress, "ompt_task_initial"); if (type & ompt_task_implicit) progress += sprintf(progress, "ompt_task_implicit"); if (type & ompt_task_explicit) progress += sprintf(progress, "ompt_task_explicit"); if (type & ompt_task_target) progress += sprintf(progress, "ompt_task_target"); if (type & ompt_task_undeferred) progress += sprintf(progress, "|ompt_task_undeferred"); if (type & ompt_task_untied) progress += sprintf(progress, "|ompt_task_untied"); if (type & ompt_task_final) progress += sprintf(progress, "|ompt_task_final"); if (type & ompt_task_mergeable) progress += sprintf(progress, "|ompt_task_mergeable"); if (type & ompt_task_merged) progress += sprintf(progress, "|ompt_task_merged"); } static ompt_set_callback_t ompt_set_callback; static ompt_get_callback_t ompt_get_callback; static ompt_get_state_t ompt_get_state; static ompt_get_task_info_t ompt_get_task_info; static ompt_get_thread_data_t ompt_get_thread_data; static ompt_get_parallel_info_t ompt_get_parallel_info; static ompt_get_unique_id_t ompt_get_unique_id; static ompt_get_num_procs_t ompt_get_num_procs; static ompt_get_num_places_t ompt_get_num_places; static ompt_get_place_proc_ids_t ompt_get_place_proc_ids; static ompt_get_place_num_t ompt_get_place_num; static ompt_get_partition_place_nums_t ompt_get_partition_place_nums; static ompt_get_proc_id_t ompt_get_proc_id; static ompt_enumerate_states_t ompt_enumerate_states; static ompt_enumerate_mutex_impls_t ompt_enumerate_mutex_impls; static void print_ids(int level) { int task_type, thread_num; ompt_frame_t *frame; ompt_data_t *task_parallel_data; ompt_data_t *task_data; int exists_task = ompt_get_task_info(level, &task_type, &task_data, &frame, &task_parallel_data, &thread_num); char buffer[2048]; format_task_type(task_type, buffer); if (frame) printf("%" PRIu64 ": task level %d: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", exit_frame=%p, reenter_frame=%p, " "task_type=%s=%d, thread_num=%d\n", ompt_get_thread_data()->value, level, exists_task ? task_parallel_data->value : 0, exists_task ? 
task_data->value : 0, frame->exit_frame.ptr, frame->enter_frame.ptr, buffer, task_type, thread_num); } #define get_frame_address(level) __builtin_frame_address(level) #define print_frame(level) \ printf("%" PRIu64 ": __builtin_frame_address(%d)=%p\n", \ ompt_get_thread_data()->value, level, get_frame_address(level)) // clang (version 5.0 and above) adds an intermediate function call with debug flag (-g) #if defined(TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN) #if defined(DEBUG) && defined(__clang__) && __clang_major__ >= 5 #define print_frame_from_outlined_fn(level) print_frame(level+1) #else #define print_frame_from_outlined_fn(level) print_frame(level) #endif #if defined(__clang__) && __clang_major__ >= 5 #warning "Clang 5.0 and later add an additional wrapper for outlined functions when compiling with debug information." #warning "Please define -DDEBUG iff you manually pass in -g to make the tests succeed!" #endif #endif // This macro helps to define a label at the current position that can be used // to get the current address in the code. // // For print_current_address(): // To reliably determine the offset between the address of the label and the // actual return address, we insert a NOP instruction as a jump target as the // compiler would otherwise insert an instruction that we can't control. The // instruction length is target dependent and is explained below. // // (The empty block between "#pragma omp ..." and the __asm__ statement is a // workaround for a bug in the Intel Compiler.) #define define_ompt_label(id) \ {} \ __asm__("nop"); \ ompt_label_##id: // This macro helps to get the address of a label that is inserted by the above // macro define_ompt_label(). The address is obtained with a GNU extension // (&&label) that has been tested with gcc, clang and icc. #define get_ompt_label_address(id) (&& ompt_label_##id) // This macro prints the exact address that a previously called runtime function // returns to. #define print_current_address(id) \ define_ompt_label(id) \ print_possible_return_addresses(get_ompt_label_address(id)) #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // On X86 the NOP instruction is 1 byte long. In addition, the comiler inserts // a MOV instruction for non-void runtime functions which is 3 bytes long. #define print_possible_return_addresses(addr) \ printf("%" PRIu64 ": current_address=%p or %p for non-void functions\n", \ ompt_get_thread_data()->value, ((char *)addr) - 1, ((char *)addr) - 4) #elif KMP_ARCH_PPC64 // On Power the NOP instruction is 4 bytes long. In addition, the compiler // inserts a second NOP instruction (another 4 bytes). For non-void runtime // functions Clang inserts a STW instruction (but only if compiling under // -fno-PIC which will be the default with Clang 8.0, another 4 bytes). #define print_possible_return_addresses(addr) \ printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \ ((char *)addr) - 8, ((char *)addr) - 12) #elif KMP_ARCH_AARCH64 // On AArch64 the NOP instruction is 4 bytes long, can be followed by inserted // store instruction (another 4 bytes long). #define print_possible_return_addresses(addr) \ printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \ ((char *)addr) - 4, ((char *)addr) - 8) #else #error Unsupported target architecture, cannot determine address offset! #endif // This macro performs a somewhat similar job to print_current_address(), except // that it discards a certain number of nibbles from the address and only prints // the most significant bits / nibbles. 
This can be used for cases where the // return address can only be approximated. // // To account for overflows (ie the most significant bits / nibbles have just // changed as we are a few bytes above the relevant power of two) the addresses // of the "current" and of the "previous block" are printed. #define print_fuzzy_address(id) \ define_ompt_label(id) \ print_fuzzy_address_blocks(get_ompt_label_address(id)) // If you change this define you need to adapt all capture patterns in the tests // to include or discard the new number of nibbles! #define FUZZY_ADDRESS_DISCARD_NIBBLES 2 #define FUZZY_ADDRESS_DISCARD_BYTES (1 << ((FUZZY_ADDRESS_DISCARD_NIBBLES) * 4)) #define print_fuzzy_address_blocks(addr) \ printf("%" PRIu64 ": fuzzy_address=0x%" PRIx64 " or 0x%" PRIx64 \ " or 0x%" PRIx64 " or 0x%" PRIx64 " (%p)\n", \ ompt_get_thread_data()->value, \ ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES - 1, \ ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES, \ ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 1, \ ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 2, addr) static void on_ompt_callback_mutex_acquire( ompt_mutex_t kind, unsigned int hint, unsigned int impl, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": ompt_event_wait_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ": ompt_event_wait_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_critical: printf("%" PRIu64 ": ompt_event_wait_critical: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_atomic: printf("%" PRIu64 ": ompt_event_wait_atomic: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_ordered: printf("%" PRIu64 ": ompt_event_wait_ordered: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; default: break; } } static void on_ompt_callback_mutex_acquired( ompt_mutex_t kind, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": ompt_event_acquired_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ": ompt_event_acquired_nest_lock_first: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_critical: printf("%" PRIu64 ": ompt_event_acquired_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_atomic: printf("%" PRIu64 ": ompt_event_acquired_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_ordered: printf("%" PRIu64 ": ompt_event_acquired_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; default: break; } } static void on_ompt_callback_mutex_released( ompt_mutex_t kind, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": 
ompt_event_release_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ": ompt_event_release_nest_lock_last: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_critical: printf("%" PRIu64 ": ompt_event_release_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_atomic: printf("%" PRIu64 ": ompt_event_release_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_ordered: printf("%" PRIu64 ": ompt_event_release_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; default: break; } } static void on_ompt_callback_nest_lock( ompt_scope_endpoint_t endpoint, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: printf("%" PRIu64 ": ompt_event_acquired_nest_lock_next: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_scope_end: printf("%" PRIu64 ": ompt_event_release_nest_lock_prev: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; } } static void on_ompt_callback_sync_region( ompt_sync_region_t kind, ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: switch(kind) { case ompt_sync_region_barrier: case ompt_sync_region_barrier_implicit: case ompt_sync_region_barrier_explicit: case ompt_sync_region_barrier_implementation: printf("%" PRIu64 ": ompt_event_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); print_ids(0); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ": ompt_event_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ": ompt_event_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_reduction: break; } break; case ompt_scope_end: switch(kind) { case ompt_sync_region_barrier: case ompt_sync_region_barrier_implicit: case ompt_sync_region_barrier_explicit: case ompt_sync_region_barrier_implementation: printf("%" PRIu64 ": ompt_event_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ": ompt_event_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ": ompt_event_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_reduction: break; } break; } } static void on_ompt_callback_sync_region_wait( ompt_sync_region_t kind, ompt_scope_endpoint_t endpoint, 
ompt_data_t *parallel_data, ompt_data_t *task_data, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: switch(kind) { case ompt_sync_region_barrier: case ompt_sync_region_barrier_implicit: case ompt_sync_region_barrier_explicit: case ompt_sync_region_barrier_implementation: printf("%" PRIu64 ": ompt_event_wait_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ": ompt_event_wait_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ": ompt_event_wait_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_reduction: break; } break; case ompt_scope_end: switch(kind) { case ompt_sync_region_barrier: case ompt_sync_region_barrier_implicit: case ompt_sync_region_barrier_explicit: case ompt_sync_region_barrier_implementation: printf("%" PRIu64 ": ompt_event_wait_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ": ompt_event_wait_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ": ompt_event_wait_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_reduction: break; } break; } } static void on_ompt_callback_flush( ompt_data_t *thread_data, const void *codeptr_ra) { printf("%" PRIu64 ": ompt_event_flush: codeptr_ra=%p\n", thread_data->value, codeptr_ra); } static void on_ompt_callback_cancel( ompt_data_t *task_data, int flags, const void *codeptr_ra) { const char* first_flag_value; const char* second_flag_value; if(flags & ompt_cancel_parallel) first_flag_value = ompt_cancel_flag_t_values[0]; else if(flags & ompt_cancel_sections) first_flag_value = ompt_cancel_flag_t_values[1]; else if(flags & ompt_cancel_loop) first_flag_value = ompt_cancel_flag_t_values[2]; else if(flags & ompt_cancel_taskgroup) first_flag_value = ompt_cancel_flag_t_values[3]; if(flags & ompt_cancel_activated) second_flag_value = ompt_cancel_flag_t_values[4]; else if(flags & ompt_cancel_detected) second_flag_value = ompt_cancel_flag_t_values[5]; else if(flags & ompt_cancel_discarded_task) second_flag_value = ompt_cancel_flag_t_values[6]; printf("%" PRIu64 ": ompt_event_cancel: task_data=%" PRIu64 ", flags=%s|%s=%" PRIu32 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, task_data->value, first_flag_value, second_flag_value, flags, codeptr_ra); } static void on_ompt_callback_implicit_task( ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, unsigned int team_size, unsigned int thread_num, int flags) { switch(endpoint) { case ompt_scope_begin: if(task_data->ptr) printf("%s\n", "0: task_data initially not null"); task_data->value = ompt_get_unique_id(); printf("%" PRIu64 ": 
ompt_event_implicit_task_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, team_size, thread_num); break; case ompt_scope_end: printf("%" PRIu64 ": ompt_event_implicit_task_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, team_size, thread_num); break; } } static void on_ompt_callback_lock_init( ompt_mutex_t kind, unsigned int hint, unsigned int impl, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": ompt_event_init_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ": ompt_event_init_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; default: break; } } static void on_ompt_callback_lock_destroy( ompt_mutex_t kind, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": ompt_event_destroy_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ": ompt_event_destroy_nest_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; default: break; } } static void on_ompt_callback_work( ompt_work_t wstype, ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, uint64_t count, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: switch(wstype) { case ompt_work_loop: printf("%" PRIu64 ": ompt_event_loop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_sections: printf("%" PRIu64 ": ompt_event_sections_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_single_executor: printf("%" PRIu64 ": ompt_event_single_in_block_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_single_other: printf("%" PRIu64 ": ompt_event_single_others_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_workshare: //impl break; case ompt_work_distribute: printf("%" PRIu64 ": ompt_event_distribute_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_taskloop: //impl printf("%" PRIu64 ": ompt_event_taskloop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; } break; case ompt_scope_end: switch(wstype) { case 
ompt_work_loop: printf("%" PRIu64 ": ompt_event_loop_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_sections: printf("%" PRIu64 ": ompt_event_sections_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_single_executor: printf("%" PRIu64 ": ompt_event_single_in_block_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_single_other: printf("%" PRIu64 ": ompt_event_single_others_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_workshare: //impl break; case ompt_work_distribute: printf("%" PRIu64 ": ompt_event_distribute_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_taskloop: //impl printf("%" PRIu64 ": ompt_event_taskloop_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; } break; } } static void on_ompt_callback_master( ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: printf("%" PRIu64 ": ompt_event_master_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_scope_end: printf("%" PRIu64 ": ompt_event_master_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; } } static void on_ompt_callback_parallel_begin( ompt_data_t *encountering_task_data, const ompt_frame_t *encountering_task_frame, ompt_data_t *parallel_data, uint32_t requested_team_size, int flag, const void *codeptr_ra) { if(parallel_data->ptr) printf("0: parallel_data initially not null\n"); parallel_data->value = ompt_get_unique_id(); printf("%" PRIu64 ": ompt_event_parallel_begin: parent_task_id=%" PRIu64 ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, " "parallel_id=%" PRIu64 ", requested_team_size=%" PRIu32 ", codeptr_ra=%p, invoker=%d\n", ompt_get_thread_data()->value, encountering_task_data->value, encountering_task_frame->exit_frame.ptr, encountering_task_frame->enter_frame.ptr, parallel_data->value, requested_team_size, codeptr_ra, flag); } static void on_ompt_callback_parallel_end(ompt_data_t *parallel_data, ompt_data_t *encountering_task_data, int flag, const void *codeptr_ra) { printf("%" PRIu64 ": ompt_event_parallel_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", invoker=%d, codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, encountering_task_data->value, flag, codeptr_ra); } static void on_ompt_callback_task_create( ompt_data_t *encountering_task_data, const ompt_frame_t *encountering_task_frame, ompt_data_t* new_task_data, int type, int has_dependences, const void 
*codeptr_ra) { if(new_task_data->ptr) printf("0: new_task_data initially not null\n"); new_task_data->value = ompt_get_unique_id(); char buffer[2048]; format_task_type(type, buffer); //there is no parallel_begin callback for implicit parallel region //thus it is initialized in initial task if(type & ompt_task_initial) { ompt_data_t *parallel_data; ompt_get_parallel_info(0, &parallel_data, NULL); if(parallel_data->ptr) printf("%s\n", "0: parallel_data initially not null"); parallel_data->value = ompt_get_unique_id(); } printf("%" PRIu64 ": ompt_event_task_create: parent_task_id=%" PRIu64 ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, new_task_id=%" PRIu64 ", codeptr_ra=%p, task_type=%s=%d, has_dependences=%s\n", ompt_get_thread_data()->value, encountering_task_data ? encountering_task_data->value : 0, encountering_task_frame ? encountering_task_frame->exit_frame.ptr : NULL, encountering_task_frame ? encountering_task_frame->enter_frame.ptr : NULL, new_task_data->value, codeptr_ra, buffer, type, has_dependences ? "yes" : "no"); } static void on_ompt_callback_task_schedule( ompt_data_t *first_task_data, ompt_task_status_t prior_task_status, ompt_data_t *second_task_data) { printf("%" PRIu64 ": ompt_event_task_schedule: first_task_id=%" PRIu64 ", second_task_id=%" PRIu64 ", prior_task_status=%s=%d\n", ompt_get_thread_data()->value, first_task_data->value, second_task_data->value, ompt_task_status_t_values[prior_task_status], prior_task_status); if(prior_task_status == ompt_task_complete) { printf("%" PRIu64 ": ompt_event_task_end: task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value); } } static void on_ompt_callback_dependences( ompt_data_t *task_data, const ompt_dependence_t *deps, int ndeps) { printf("%" PRIu64 ": ompt_event_task_dependences: task_id=%" PRIu64 ", deps=%p, ndeps=%d\n", ompt_get_thread_data()->value, task_data->value, (void *)deps, ndeps); } static void on_ompt_callback_task_dependence( ompt_data_t *first_task_data, ompt_data_t *second_task_data) { printf("%" PRIu64 ": ompt_event_task_dependence_pair: first_task_id=%" PRIu64 ", second_task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value, second_task_data->value); } static void on_ompt_callback_thread_begin( ompt_thread_t thread_type, ompt_data_t *thread_data) { if(thread_data->ptr) printf("%s\n", "0: thread_data initially not null"); thread_data->value = ompt_get_unique_id(); printf("%" PRIu64 ": ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, ompt_thread_t_values[thread_type], thread_type, thread_data->value); } static void on_ompt_callback_thread_end( ompt_data_t *thread_data) { printf("%" PRIu64 ": ompt_event_thread_end: thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, thread_data->value); } static int on_ompt_callback_control_tool( uint64_t command, uint64_t modifier, void *arg, const void *codeptr_ra) { ompt_frame_t* omptTaskFrame; ompt_get_task_info(0, NULL, (ompt_data_t**) NULL, &omptTaskFrame, NULL, NULL); printf("%" PRIu64 ": ompt_event_control_tool: command=%" PRIu64 ", modifier=%" PRIu64 ", arg=%p, codeptr_ra=%p, current_task_frame.exit=%p, current_task_frame.reenter=%p \n", ompt_get_thread_data()->value, command, modifier, arg, codeptr_ra, omptTaskFrame->exit_frame.ptr, omptTaskFrame->enter_frame.ptr); return 0; //success } #define register_callback_t(name, type) \ do{ \ type f_##name = &on_##name; \ if (ompt_set_callback(name, (ompt_callback_t)f_##name) == \ ompt_set_never) \ printf("0: 
Could not register callback '" #name "'\n"); \ }while(0) #define register_callback(name) register_callback_t(name, name##_t) int ompt_initialize( ompt_function_lookup_t lookup, int initial_device_num, ompt_data_t *tool_data) { ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback"); ompt_get_callback = (ompt_get_callback_t) lookup("ompt_get_callback"); ompt_get_state = (ompt_get_state_t) lookup("ompt_get_state"); ompt_get_task_info = (ompt_get_task_info_t) lookup("ompt_get_task_info"); ompt_get_thread_data = (ompt_get_thread_data_t) lookup("ompt_get_thread_data"); ompt_get_parallel_info = (ompt_get_parallel_info_t) lookup("ompt_get_parallel_info"); ompt_get_unique_id = (ompt_get_unique_id_t) lookup("ompt_get_unique_id"); ompt_get_num_procs = (ompt_get_num_procs_t) lookup("ompt_get_num_procs"); ompt_get_num_places = (ompt_get_num_places_t) lookup("ompt_get_num_places"); ompt_get_place_proc_ids = (ompt_get_place_proc_ids_t) lookup("ompt_get_place_proc_ids"); ompt_get_place_num = (ompt_get_place_num_t) lookup("ompt_get_place_num"); ompt_get_partition_place_nums = (ompt_get_partition_place_nums_t) lookup("ompt_get_partition_place_nums"); ompt_get_proc_id = (ompt_get_proc_id_t) lookup("ompt_get_proc_id"); ompt_enumerate_states = (ompt_enumerate_states_t) lookup("ompt_enumerate_states"); ompt_enumerate_mutex_impls = (ompt_enumerate_mutex_impls_t) lookup("ompt_enumerate_mutex_impls"); register_callback(ompt_callback_mutex_acquire); register_callback_t(ompt_callback_mutex_acquired, ompt_callback_mutex_t); register_callback_t(ompt_callback_mutex_released, ompt_callback_mutex_t); register_callback(ompt_callback_nest_lock); register_callback(ompt_callback_sync_region); register_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t); register_callback(ompt_callback_control_tool); register_callback(ompt_callback_flush); register_callback(ompt_callback_cancel); register_callback(ompt_callback_implicit_task); register_callback_t(ompt_callback_lock_init, ompt_callback_mutex_acquire_t); register_callback_t(ompt_callback_lock_destroy, ompt_callback_mutex_t); register_callback(ompt_callback_work); register_callback(ompt_callback_master); register_callback(ompt_callback_parallel_begin); register_callback(ompt_callback_parallel_end); register_callback(ompt_callback_task_create); register_callback(ompt_callback_task_schedule); register_callback(ompt_callback_dependences); register_callback(ompt_callback_task_dependence); register_callback(ompt_callback_thread_begin); register_callback(ompt_callback_thread_end); printf("0: NULL_POINTER=%p\n", (void*)NULL); return 1; //success } void ompt_finalize(ompt_data_t *tool_data) { printf("0: ompt_event_runtime_shutdown\n"); } #ifdef __cplusplus extern "C" { #endif ompt_start_tool_result_t* ompt_start_tool( unsigned int omp_version, const char *runtime_version) { static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0}; return &ompt_start_tool_result; } #ifdef __cplusplus } #endif
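Because callback.h defines ompt_start_tool() itself, any translation unit that includes it becomes an OMPT tool that the OpenMP runtime activates at startup. A minimal driver sketch (ours, not from the test suite) that exercises a few of the registered callbacks:

#include "callback.h"

int main(void)
{
  /* parallel_begin, implicit_task, and parallel_end fire here ... */
  #pragma omp parallel num_threads(2)
  {
    /* ... and the critical construct triggers the mutex
       acquire/acquired/released callbacks registered above. */
    #pragma omp critical
    { }
  }
  return 0; /* ompt_finalize then prints ompt_event_runtime_shutdown */
}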
GB_unop__identity_int16_uint16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__identity_int16_uint16
// op(A') function: GB_unop_tran__identity_int16_uint16

// C type:  int16_t
// A type:  uint16_t
// cast:    int16_t cij = (int16_t) aij
// unaryop: cij = aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)               \
{                                       \
    /* aij = Ax [pA] */                 \
    uint16_t aij = Ax [pA] ;            \
    /* Cx [pC] = op (cast (aij)) */     \
    int16_t z = (int16_t) aij ;         \
    Cx [pC] = z ;                       \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_int16_uint16
(
    int16_t *Cx,        // Cx and Ax may be aliased
    const uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint16_t aij = Ax [p] ;
        int16_t z = (int16_t) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_int16_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
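In the library this kernel is reached through GrB_apply dispatch, but its signature can also be exercised directly. A small sketch (ours) showing the typecast it performs; the wrap of 65535 to -1 assumes the usual two's-complement conversion behavior:

uint16_t Ax[3] = { 1, 2, 65535 };
int16_t  Cx[3];
GrB_Info info = GB_unop_apply__identity_int16_uint16(Cx, Ax, 3, 1);
/* Cx is now { 1, 2, -1 }: each uint16_t entry is cast to int16_t. */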
draw.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD RRRR AAA W W % % D D R R A A W W % % D D RRRR AAAAA W W W % % D D R RN A A WW WW % % DDDD R R A A W W % % % % % % MagickCore Image Drawing Methods % % % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon % rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion", % Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent % (www.appligent.com) contributed the dash pattern, linecap stroking % algorithm, and minor rendering improvements. % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/annotate.h" #include "magick/artifact.h" #include "magick/blob.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory-private.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resource_.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/transform.h" #include "magick/utility.h" /* Define declarations. */ #define BezierQuantum 200 #define PrimitiveExtentPad 2048 #define MaxBezierCoordinates 4194304 #define ThrowPointExpectedException(image,token) \ { \ (void) ThrowMagickException(&(image)->exception,GetMagickModule(),DrawError, \ "NonconformingDrawingPrimitiveDefinition","`%s'",token); \ status=MagickFalse; \ break; \ } /* Typedef declarations. 
*/ typedef struct _EdgeInfo { SegmentInfo bounds; double scanline; PointInfo *points; size_t number_points; ssize_t direction; MagickBooleanType ghostline; size_t highwater; } EdgeInfo; typedef struct _ElementInfo { double cx, cy, major, minor, angle; } ElementInfo; typedef struct _MVGInfo { PrimitiveInfo **primitive_info; size_t *extent; ssize_t offset; PointInfo point; ExceptionInfo *exception; } MVGInfo; typedef struct _PolygonInfo { EdgeInfo *edges; size_t number_edges; } PolygonInfo; typedef enum { MoveToCode, OpenCode, GhostlineCode, LineToCode, EndCode } PathInfoCode; typedef struct _PathInfo { PointInfo point; PathInfoCode code; } PathInfo; /* Forward declarations. */ static Image *DrawClippingMask(Image *,const DrawInfo *,const char *,const char *, ExceptionInfo *); static MagickBooleanType DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *), RenderMVGContent(Image *,const DrawInfo *,const size_t), TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo), TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo, const double,const MagickBooleanType,const MagickBooleanType), TraceBezier(MVGInfo *,const size_t), TraceCircle(MVGInfo *,const PointInfo,const PointInfo), TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo), TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo), TraceSquareLinecap(PrimitiveInfo *,const size_t,const double); static PrimitiveInfo *TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *); static size_t TracePath(Image *,MVGInfo *,const char *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireDrawInfo() returns a DrawInfo structure properly initialized. % % The format of the AcquireDrawInfo method is: % % DrawInfo *AcquireDrawInfo(void) % */ MagickExport DrawInfo *AcquireDrawInfo(void) { DrawInfo *draw_info; draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info)); GetDrawInfo((ImageInfo *) NULL,draw_info); return(draw_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneDrawInfo() makes a copy of the given draw_info structure. If NULL % is specified, a new DrawInfo structure is created initialized to default % values. % % The format of the CloneDrawInfo method is: % % DrawInfo *CloneDrawInfo(const ImageInfo *image_info, % const DrawInfo *draw_info) % % A description of each parameter follows: % % o image_info: the image info. % % o draw_info: the draw info. 
% */ MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info, const DrawInfo *draw_info) { DrawInfo *clone_info; clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info)); GetDrawInfo(image_info,clone_info); if (draw_info == (DrawInfo *) NULL) return(clone_info); if (draw_info->primitive != (char *) NULL) (void) CloneString(&clone_info->primitive,draw_info->primitive); if (draw_info->geometry != (char *) NULL) (void) CloneString(&clone_info->geometry,draw_info->geometry); clone_info->compliance=draw_info->compliance; clone_info->viewbox=draw_info->viewbox; clone_info->affine=draw_info->affine; clone_info->gravity=draw_info->gravity; clone_info->fill=draw_info->fill; clone_info->stroke=draw_info->stroke; clone_info->stroke_width=draw_info->stroke_width; if (draw_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue, &draw_info->fill_pattern->exception); else if (draw_info->tile != (Image *) NULL) clone_info->fill_pattern=CloneImage(draw_info->tile,0,0,MagickTrue, &draw_info->tile->exception); clone_info->tile=NewImageList(); /* tile is deprecated */ if (draw_info->stroke_pattern != (Image *) NULL) clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0, MagickTrue,&draw_info->stroke_pattern->exception); clone_info->stroke_antialias=draw_info->stroke_antialias; clone_info->text_antialias=draw_info->text_antialias; clone_info->fill_rule=draw_info->fill_rule; clone_info->linecap=draw_info->linecap; clone_info->linejoin=draw_info->linejoin; clone_info->miterlimit=draw_info->miterlimit; clone_info->dash_offset=draw_info->dash_offset; clone_info->decorate=draw_info->decorate; clone_info->compose=draw_info->compose; if (draw_info->text != (char *) NULL) (void) CloneString(&clone_info->text,draw_info->text); if (draw_info->font != (char *) NULL) (void) CloneString(&clone_info->font,draw_info->font); if (draw_info->metrics != (char *) NULL) (void) CloneString(&clone_info->metrics,draw_info->metrics); if (draw_info->family != (char *) NULL) (void) CloneString(&clone_info->family,draw_info->family); clone_info->style=draw_info->style; clone_info->stretch=draw_info->stretch; clone_info->weight=draw_info->weight; if (draw_info->encoding != (char *) NULL) (void) CloneString(&clone_info->encoding,draw_info->encoding); clone_info->pointsize=draw_info->pointsize; clone_info->kerning=draw_info->kerning; clone_info->interline_spacing=draw_info->interline_spacing; clone_info->interword_spacing=draw_info->interword_spacing; clone_info->direction=draw_info->direction; if (draw_info->density != (char *) NULL) (void) CloneString(&clone_info->density,draw_info->density); clone_info->align=draw_info->align; clone_info->undercolor=draw_info->undercolor; clone_info->border_color=draw_info->border_color; if (draw_info->server_name != (char *) NULL) (void) CloneString(&clone_info->server_name,draw_info->server_name); if (draw_info->dash_pattern != (double *) NULL) { register ssize_t x; for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ; clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2), sizeof(*clone_info->dash_pattern)); if (clone_info->dash_pattern == (double *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)* sizeof(*clone_info->dash_pattern)); (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t) (x+1)*sizeof(*clone_info->dash_pattern)); } clone_info->gradient=draw_info->gradient; 
if (draw_info->gradient.stops != (StopInfo *) NULL) { size_t number_stops; number_stops=clone_info->gradient.number_stops; clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t) number_stops,sizeof(*clone_info->gradient.stops)); if (clone_info->gradient.stops == (StopInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops, (size_t) number_stops*sizeof(*clone_info->gradient.stops)); } clone_info->bounds=draw_info->bounds; clone_info->fill_opacity=draw_info->fill_opacity; clone_info->stroke_opacity=draw_info->stroke_opacity; clone_info->element_reference=draw_info->element_reference; clone_info->clip_path=draw_info->clip_path; clone_info->clip_units=draw_info->clip_units; if (draw_info->clip_mask != (char *) NULL) (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask); if (draw_info->clipping_mask != (Image *) NULL) clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0, MagickTrue,&draw_info->clipping_mask->exception); if (draw_info->composite_mask != (Image *) NULL) clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0, MagickTrue,&draw_info->composite_mask->exception); clone_info->render=draw_info->render; clone_info->debug=IsEventLogging(); return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n v e r t P a t h T o P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvertPathToPolygon() converts a path to the more efficient sorted % rendering form. % % The format of the ConvertPathToPolygon method is: % % PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info) % % A description of each parameter follows: % % o Method ConvertPathToPolygon returns the path in a more efficient sorted % rendering form of type PolygonInfo. % % o draw_info: Specifies a pointer to an DrawInfo structure. % % o path_info: Specifies a pointer to an PathInfo structure. % % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int DrawCompareEdges(const void *p_edge,const void *q_edge) { #define DrawCompareEdge(p,q) \ { \ if (((p)-(q)) < 0.0) \ return(-1); \ if (((p)-(q)) > 0.0) \ return(1); \ } register const PointInfo *p, *q; /* Edge sorting for right-handed coordinate system. */ p=((const EdgeInfo *) p_edge)->points; q=((const EdgeInfo *) q_edge)->points; DrawCompareEdge(p[0].y,q[0].y); DrawCompareEdge(p[0].x,q[0].x); DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)* (q[1].x-q[0].x)); DrawCompareEdge(p[1].y,q[1].y); DrawCompareEdge(p[1].x,q[1].x); return(0); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static void LogPolygonInfo(const PolygonInfo *polygon_info) { register EdgeInfo *p; register ssize_t i, j; (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge"); p=polygon_info->edges; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) { (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:", (double) i); (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s", p->direction != MagickFalse ? "down" : "up"); (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s", p->ghostline != MagickFalse ? 
"transparent" : "opaque"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1, p->bounds.x2,p->bounds.y2); for (j=0; j < (ssize_t) p->number_points; j++) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g", p->points[j].x,p->points[j].y); p++; } (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge"); } static void ReversePoints(PointInfo *points,const size_t number_points) { PointInfo point; register ssize_t i; for (i=0; i < (ssize_t) (number_points >> 1); i++) { point=points[i]; points[i]=points[number_points-(i+1)]; points[number_points-(i+1)]=point; } } static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info) { long direction, next_direction; PointInfo point, *points; PolygonInfo *polygon_info; SegmentInfo bounds; register ssize_t i, n; MagickBooleanType ghostline; size_t edge, number_edges, number_points; /* Convert a path to the more efficient sorted rendering form. */ polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info)); if (polygon_info == (PolygonInfo *) NULL) return((PolygonInfo *) NULL); number_edges=16; polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); (void) memset(polygon_info->edges,0,number_edges* sizeof(*polygon_info->edges)); direction=0; edge=0; ghostline=MagickFalse; n=0; number_points=0; points=(PointInfo *) NULL; (void) memset(&point,0,sizeof(point)); (void) memset(&bounds,0,sizeof(bounds)); polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=0.0; polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) direction; polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->number_edges=0; for (i=0; path_info[i].code != EndCode; i++) { if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) || (path_info[i].code == GhostlineCode)) { /* Move to. */ if ((points != (PointInfo *) NULL) && (n >= 2)) { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; points=(PointInfo *) NULL; ghostline=MagickFalse; edge++; } if (points == (PointInfo *) NULL) { number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); } ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse; point=path_info[i].point; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; direction=0; n=1; continue; } /* Line to. */ next_direction=((path_info[i].point.y > point.y) || ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) && (path_info[i].point.x > point.x))) ? 
1 : -1; if ((points != (PointInfo *) NULL) && (direction != 0) && (direction != next_direction)) { /* New edge. */ point=points[n-1]; if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); n=1; ghostline=MagickFalse; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; edge++; } direction=next_direction; if (points == (PointInfo *) NULL) continue; if (n == (ssize_t) number_points) { number_points<<=1; points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); } point=path_info[i].point; points[n]=point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.x > bounds.x2) bounds.x2=point.x; n++; } if (points != (PointInfo *) NULL) { if (n < 2) points=(PointInfo *) RelinquishMagickMemory(points); else { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; ghostline=MagickFalse; edge++; } } polygon_info->number_edges=edge; qsort(polygon_info->edges,(size_t) polygon_info->number_edges, sizeof(*polygon_info->edges),DrawCompareEdges); if (IsEventLogging() != MagickFalse) LogPolygonInfo(polygon_info); return(polygon_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n v e r t P r i m i t i v e T o P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector % path structure. % % The format of the ConvertPrimitiveToPath method is: % % PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info, % const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o Method ConvertPrimitiveToPath returns a vector path structure of type % PathInfo. % % o draw_info: a structure of type DrawInfo. % % o primitive_info: Specifies a pointer to an PrimitiveInfo structure. 
%
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ?
      "lineto" : "?");
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}

static PathInfo *ConvertPrimitiveToPath(
  const DrawInfo *magick_unused(draw_info),const PrimitiveInfo *primitive_info)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,
    q;

  register ssize_t
    i,
    n;

  ssize_t
    coordinates,
    start;

  magick_unreferenced(draw_info);

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    case PointPrimitive:
    case ColorPrimitive:
    case MattePrimitive:
    case TextPrimitive:
    case ImagePrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      The subpath is open: mark its first point with OpenCode and append a
      ghostline back to the starting point p.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y D r a w I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyDrawInfo() deallocates memory associated with a DrawInfo structure.
%
%  The format of the DestroyDrawInfo method is:
%
%      DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
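%
%  A minimal usage sketch (editorial illustration, not part of the original
%  documentation; assumes a valid Image *image and ImageInfo *image_info
%  from the caller):
%
%      DrawInfo
%        *draw_info;
%
%      draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL);
%      (void) CloneString(&draw_info->primitive,"circle 50,50 50,80");
%      (void) DrawImage(image,draw_info);
%      draw_info=DestroyDrawInfo(draw_info);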
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->tile != (Image *) NULL)
    draw_info->tile=DestroyImage(draw_info->tile);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y E d g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyEdge() destroys the specified polygon edge.
%
%  The format of the DestroyEdge method is:
%
%      size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
%
%  A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
%    o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
{
  assert(edge < polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  polygon_info->number_edges--;
  if (edge < polygon_info->number_edges)
    (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
      (size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
  return(polygon_info->number_edges);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y P o l y g o n I n f o                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPolygonInfo() destroys the PolygonInfo data structure.
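%  Each edge's point list is relinquished before the edge array and the
%  structure itself are freed.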
% % The format of the DestroyPolygonInfo method is: % % PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info) % % A description of each parameter follows: % % o polygon_info: Specifies a pointer to an PolygonInfo structure. % */ static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info) { register ssize_t i; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) polygon_info->edges[i].points=(PointInfo *) RelinquishMagickMemory(polygon_info->edges[i].points); polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges); return((PolygonInfo *) RelinquishMagickMemory(polygon_info)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w A f f i n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawAffineImage() composites the source over the destination image as % dictated by the affine transform. % % The format of the DrawAffineImage method is: % % MagickBooleanType DrawAffineImage(Image *image,const Image *source, % const AffineMatrix *affine) % % A description of each parameter follows: % % o image: the image. % % o source: the source image. % % o affine: the affine transform. % */ static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine, const double y,const SegmentInfo *edge) { double intercept, z; register double x; SegmentInfo inverse_edge; /* Determine left and right edges. */ inverse_edge.x1=edge->x1; inverse_edge.y1=edge->y1; inverse_edge.x2=edge->x2; inverse_edge.y2=edge->y2; z=affine->ry*y+affine->tx; if (affine->sx >= MagickEpsilon) { intercept=(-z/affine->sx); x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z+(double) image->columns)/affine->sx; x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if (affine->sx < -MagickEpsilon) { intercept=(-z+(double) image->columns)/affine->sx; x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z/affine->sx); x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns)) { inverse_edge.x2=edge->x1; return(inverse_edge); } /* Determine top and bottom edges. 
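    Added note (editorial): as with the left/right pass above, this solves
    rx*x+sy*y+ty in [0,rows) for x along the current scanline and narrows
    the span that was already clipped against the image columns.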
*/ z=affine->sy*y+affine->ty; if (affine->rx >= MagickEpsilon) { intercept=(-z/affine->rx); x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z+(double) image->rows)/affine->rx; x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if (affine->rx < -MagickEpsilon) { intercept=(-z+(double) image->rows)/affine->rx; x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z/affine->rx); x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows)) { inverse_edge.x2=edge->x2; return(inverse_edge); } return(inverse_edge); } static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine) { AffineMatrix inverse_affine; double determinant; determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx* affine->ry); inverse_affine.sx=determinant*affine->sy; inverse_affine.rx=determinant*(-affine->rx); inverse_affine.ry=determinant*(-affine->ry); inverse_affine.sy=determinant*affine->sx; inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty* inverse_affine.ry; inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty* inverse_affine.sy; return(inverse_affine); } MagickExport MagickBooleanType DrawAffineImage(Image *image, const Image *source,const AffineMatrix *affine) { AffineMatrix inverse_affine; CacheView *image_view, *source_view; ExceptionInfo *exception; MagickBooleanType status; MagickPixelPacket zero; PointInfo extent[4], min, max, point; register ssize_t i; SegmentInfo edge; ssize_t start, stop, y; /* Determine bounding box. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(source != (const Image *) NULL); assert(source->signature == MagickCoreSignature); assert(affine != (AffineMatrix *) NULL); extent[0].x=0.0; extent[0].y=0.0; extent[1].x=(double) source->columns-1.0; extent[1].y=0.0; extent[2].x=(double) source->columns-1.0; extent[2].y=(double) source->rows-1.0; extent[3].x=0.0; extent[3].y=(double) source->rows-1.0; for (i=0; i < 4; i++) { point=extent[i]; extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx; extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty; } min=extent[0]; max=extent[0]; for (i=1; i < 4; i++) { if (min.x > extent[i].x) min.x=extent[i].x; if (min.y > extent[i].y) min.y=extent[i].y; if (max.x < extent[i].x) max.x=extent[i].x; if (max.y < extent[i].y) max.y=extent[i].y; } /* Affine transform image. 
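    Added note (editorial): the loop below visits each destination scanline
    inside the forward-transformed bounding box, maps pixels back through
    the inverse affine, interpolates the source image at that point, and
    composites the sample over the destination pixel.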
*/ if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; edge.x1=MagickMax(min.x,0.0); edge.y1=MagickMax(min.y,0.0); edge.x2=MagickMin(max.x,(double) image->columns-1.0); edge.y2=MagickMin(max.y,(double) image->rows-1.0); inverse_affine=InverseAffineMatrix(affine); GetMagickPixelPacket(image,&zero); exception=(&image->exception); start=(ssize_t) ceil(edge.y1-0.5); stop=(ssize_t) floor(edge.y2+0.5); source_view=AcquireVirtualCacheView(source,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source,image,stop-start,1) #endif for (y=start; y <= stop; y++) { MagickPixelPacket composite, pixel; PointInfo point; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; SegmentInfo inverse_edge; ssize_t x_offset; inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge); if (inverse_edge.x2 < inverse_edge.x1) continue; q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1- 0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1), 1,exception); if (q == (PixelPacket *) NULL) continue; indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; composite=zero; x_offset=0; for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++) { point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+ inverse_affine.tx; point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+ inverse_affine.ty; status=InterpolateMagickPixelPacket(source,source_view, UndefinedInterpolatePixel,point.x,point.y,&pixel,exception); if (status == MagickFalse) break; SetMagickPixelPacket(image,q,indexes+x_offset,&composite); MagickPixelCompositeOver(&pixel,pixel.opacity,&composite, composite.opacity,&composite); SetPixelPacket(image,&composite,q,indexes+x_offset); x_offset++; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w B o u n d i n g R e c t a n g l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawBoundingRectangles() draws the bounding rectangles on the image. This % is only useful for developers debugging the rendering algorithm. % % The format of the DrawBoundingRectangles method is: % % MagickBooleanType DrawBoundingRectangles(Image *image, % const DrawInfo *draw_info,PolygonInfo *polygon_info) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o polygon_info: Specifies a pointer to a PolygonInfo structure. 
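%
%  Added note (editorial): edges with a nonzero direction are outlined in
%  red (#f00), the remaining edges in green (#0f0), and the overall bounds
%  in blue (#00f); see the QueryColorDatabase() calls below.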
% */ static inline double SaneStrokeWidth(const Image *image, const DrawInfo *draw_info) { return(MagickMin((double) draw_info->stroke_width, (2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,image->rows))); } static MagickBooleanType DrawBoundingRectangles(Image *image, const DrawInfo *draw_info,const PolygonInfo *polygon_info) { double mid; DrawInfo *clone_info; MagickStatusType status; PointInfo end, resolution, start; PrimitiveInfo primitive_info[6]; register ssize_t i; SegmentInfo bounds; ssize_t coordinates; (void) memset(primitive_info,0,sizeof(primitive_info)); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); status=QueryColorDatabase("#0000",&clone_info->fill,&image->exception); if (status == MagickFalse) { clone_info=DestroyDrawInfo(clone_info); return(MagickFalse); } resolution.x=96.0; resolution.y=96.0; if (clone_info->density != (char *) NULL) { GeometryInfo geometry_info; MagickStatusType flags; flags=ParseGeometry(clone_info->density,&geometry_info); resolution.x=geometry_info.rho; resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == MagickFalse) resolution.y=resolution.x; } mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)* SaneStrokeWidth(image,clone_info)/2.0; bounds.x1=0.0; bounds.y1=0.0; bounds.x2=0.0; bounds.y2=0.0; if (polygon_info != (PolygonInfo *) NULL) { bounds=polygon_info->edges[0].bounds; for (i=1; i < (ssize_t) polygon_info->number_edges; i++) { if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1) bounds.x1=polygon_info->edges[i].bounds.x1; if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1) bounds.y1=polygon_info->edges[i].bounds.y1; if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2) bounds.x2=polygon_info->edges[i].bounds.x2; if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2) bounds.y2=polygon_info->edges[i].bounds.y2; } bounds.x1-=mid; bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns ? (double) image->columns-1 : bounds.x1; bounds.y1-=mid; bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows ? (double) image->rows-1 : bounds.y1; bounds.x2+=mid; bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns ? (double) image->columns-1 : bounds.x2; bounds.y2+=mid; bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows ? (double) image->rows-1 : bounds.y2; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) { if (polygon_info->edges[i].direction != 0) status=QueryColorDatabase("#f00",&clone_info->stroke, &image->exception); else status=QueryColorDatabase("#0f0",&clone_info->stroke, &image->exception); if (status == MagickFalse) break; start.x=(double) (polygon_info->edges[i].bounds.x1-mid); start.y=(double) (polygon_info->edges[i].bounds.y1-mid); end.x=(double) (polygon_info->edges[i].bounds.x2+mid); end.y=(double) (polygon_info->edges[i].bounds.y2+mid); primitive_info[0].primitive=RectanglePrimitive; status&=TraceRectangle(primitive_info,start,end); primitive_info[0].method=ReplaceMethod; coordinates=(ssize_t) primitive_info[0].coordinates; primitive_info[coordinates].primitive=UndefinedPrimitive; status=DrawPrimitive(image,clone_info,primitive_info); if (status == MagickFalse) break; } if (i < (ssize_t) polygon_info->number_edges) { clone_info=DestroyDrawInfo(clone_info); return(status == 0 ? 
MagickFalse : MagickTrue); } } status=QueryColorDatabase("#00f",&clone_info->stroke,&image->exception); if (status == MagickFalse) { clone_info=DestroyDrawInfo(clone_info); return(MagickFalse); } start.x=(double) (bounds.x1-mid); start.y=(double) (bounds.y1-mid); end.x=(double) (bounds.x2+mid); end.y=(double) (bounds.y2+mid); primitive_info[0].primitive=RectanglePrimitive; status&=TraceRectangle(primitive_info,start,end); primitive_info[0].method=ReplaceMethod; coordinates=(ssize_t) primitive_info[0].coordinates; primitive_info[coordinates].primitive=UndefinedPrimitive; status=DrawPrimitive(image,clone_info,primitive_info); clone_info=DestroyDrawInfo(clone_info); return(status == 0 ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w C l i p P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawClipPath() draws the clip path on the image mask. % % The format of the DrawClipPath method is: % % MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info, % const char *id) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o id: the clip path id. % */ MagickExport MagickBooleanType DrawClipPath(Image *image, const DrawInfo *draw_info,const char *id) { const char *clip_path; Image *clipping_mask; MagickBooleanType status; clip_path=GetImageArtifact(image,id); if (clip_path == (const char *) NULL) return(MagickFalse); clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,clip_path, &image->exception); if (clipping_mask == (Image *) NULL) return(MagickFalse); status=SetImageClipMask(image,clipping_mask); clipping_mask=DestroyImage(clipping_mask); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w C l i p p i n g M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawClippingMask() draws the clip path and returns it as an image clipping % mask. % % The format of the DrawClippingMask method is: % % Image *DrawClippingMask(Image *image,const DrawInfo *draw_info, % const char *id,const char *clip_path,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o id: the clip path id. % % o clip_path: the clip path. % % o exception: return any errors or warnings in this structure. % */ static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info, const char *id,const char *clip_path,ExceptionInfo *exception) { DrawInfo *clone_info; Image *clip_mask; MagickStatusType status; /* Draw a clip path. 
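    Added sketch of the approach (editorial): render the clip-path MVG onto
    a transparent canvas with an opaque white fill and a fully transparent
    stroke, lift the coverage out of the alpha channel with
    SeparateImageChannel(), and negate it unless SVG compliance is in
    effect.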
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); clip_mask=AcquireImage((const ImageInfo *) NULL); status=SetImageExtent(clip_mask,image->columns,image->rows); if (status == MagickFalse) return(DestroyImage(clip_mask)); status=SetImageClipMask(image,(Image *) NULL); status=QueryColorCompliance("#0000",AllCompliance, &clip_mask->background_color,exception); clip_mask->background_color.opacity=(Quantum) TransparentOpacity; status=SetImageBackgroundColor(clip_mask); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s", id); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->primitive,clip_path); status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill, exception); if (clone_info->clip_mask != (char *) NULL) clone_info->clip_mask=DestroyString(clone_info->clip_mask); (void) QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke, exception); clone_info->stroke_width=0.0; clone_info->opacity=OpaqueOpacity; clone_info->clip_path=MagickTrue; status=RenderMVGContent(clip_mask,clone_info,0); clone_info=DestroyDrawInfo(clone_info); status&=SeparateImageChannel(clip_mask,TrueAlphaChannel); if (draw_info->compliance != SVGCompliance) status&=NegateImage(clip_mask,MagickFalse); if (status == MagickFalse) clip_mask=DestroyImage(clip_mask); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path"); return(clip_mask); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w C o m p o s i t e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawCompositeMask() draws the mask path and returns it as an image mask. % % The format of the DrawCompositeMask method is: % % Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info, % const char *id,const char *mask_path,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o id: the mask path id. % % o mask_path: the mask path. % % o exception: return any errors or warnings in this structure. % */ static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info, const char *id,const char *mask_path,ExceptionInfo *exception) { Image *composite_mask; DrawInfo *clone_info; MagickStatusType status; /* Draw a mask path. 
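    Added note (editorial): a mask path is rendered the same way as a clip
    path, except the alpha channel is always negated; the caller installs
    the returned image with SetImageMask().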
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); composite_mask=AcquireImage((const ImageInfo *) NULL); status=SetImageExtent(composite_mask,image->columns,image->rows); if (status == MagickFalse) return(DestroyImage(composite_mask)); status=SetImageMask(image,(Image *) NULL); status=QueryColorCompliance("#0000",AllCompliance, &composite_mask->background_color,exception); composite_mask->background_color.opacity=(Quantum) TransparentOpacity; (void) SetImageBackgroundColor(composite_mask); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s", id); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->primitive,mask_path); status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill, exception); status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke, exception); clone_info->stroke_width=0.0; clone_info->opacity=OpaqueOpacity; status=RenderMVGContent(composite_mask,clone_info,0); clone_info=DestroyDrawInfo(clone_info); status&=SeparateImageChannel(composite_mask,TrueAlphaChannel); status&=NegateImage(composite_mask,MagickFalse); if (status == MagickFalse) composite_mask=DestroyImage(composite_mask); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path"); return(composite_mask); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w D a s h P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the % image while respecting the dash offset and dash pattern attributes. % % The format of the DrawDashPolygon method is: % % MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info, % const PrimitiveInfo *primitive_info,Image *image) % % A description of each parameter follows: % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o image: the image. % % */ static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info, const PrimitiveInfo *primitive_info,Image *image) { double length, maximum_length, offset, scale, total_length; DrawInfo *clone_info; MagickStatusType status; PrimitiveInfo *dash_polygon; register double dx, dy; register ssize_t i; size_t number_vertices; ssize_t j, n; assert(draw_info != (const DrawInfo *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash"); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ; number_vertices=(size_t) i; dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (2UL*number_vertices+32UL),sizeof(*dash_polygon)); if (dash_polygon == (PrimitiveInfo *) NULL) return(MagickFalse); (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)* sizeof(*dash_polygon)); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->miterlimit=0; dash_polygon[0]=primitive_info[0]; scale=ExpandAffine(&draw_info->affine); length=scale*draw_info->dash_pattern[0]; offset=fabs(draw_info->dash_offset) >= MagickEpsilon ? scale*draw_info->dash_offset : 0.0; j=1; for (n=0; offset > 0.0; j=0) { if (draw_info->dash_pattern[n] <= 0.0) break; length=scale*(draw_info->dash_pattern[n]+(n == 0 ? 
-0.5 : 0.5)); if (offset > length) { offset-=length; n++; length=scale*draw_info->dash_pattern[n]; continue; } if (offset < length) { length-=offset; offset=0.0; break; } offset=0.0; n++; } status=MagickTrue; maximum_length=0.0; total_length=0.0; for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++) { dx=primitive_info[i].point.x-primitive_info[i-1].point.x; dy=primitive_info[i].point.y-primitive_info[i-1].point.y; maximum_length=hypot(dx,dy); if (maximum_length > MaxBezierCoordinates) break; if (fabs(length) < MagickEpsilon) { if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon) n++; if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon) n=0; length=scale*draw_info->dash_pattern[n]; } for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); ) { total_length+=length; if ((n & 0x01) != 0) { dash_polygon[0]=primitive_info[0]; dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx* total_length*PerceptibleReciprocal(maximum_length)); dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy* total_length*PerceptibleReciprocal(maximum_length)); j=1; } else { if ((j+1) > (ssize_t) number_vertices) break; dash_polygon[j]=primitive_info[i-1]; dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx* total_length*PerceptibleReciprocal(maximum_length)); dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy* total_length*PerceptibleReciprocal(maximum_length)); dash_polygon[j].coordinates=1; j++; dash_polygon[0].coordinates=(size_t) j; dash_polygon[j].primitive=UndefinedPrimitive; status&=DrawStrokePolygon(image,clone_info,dash_polygon); } if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon) n++; if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon) n=0; length=scale*draw_info->dash_pattern[n]; } length-=(maximum_length-total_length); if ((n & 0x01) != 0) continue; dash_polygon[j]=primitive_info[i]; dash_polygon[j].coordinates=1; j++; } if ((total_length < maximum_length) && ((n & 0x01) == 0) && (j > 1)) { dash_polygon[j]=primitive_info[i-1]; dash_polygon[j].point.x+=MagickEpsilon; dash_polygon[j].point.y+=MagickEpsilon; dash_polygon[j].coordinates=1; j++; dash_polygon[0].coordinates=(size_t) j; dash_polygon[j].primitive=UndefinedPrimitive; status&=DrawStrokePolygon(image,clone_info,dash_polygon); } dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G r a d i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGradientImage() draws a linear gradient on the image. % % The format of the DrawGradientImage method is: % % MagickBooleanType DrawGradientImage(Image *image, % const DrawInfo *draw_info) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. 
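%
%  Added derivation note (editorial): for a linear gradient the stop offset
%  at (x,y) follows from GetStopColorOffset() below combined with the
%  1/length scaling in the scanline loop: with p=v2-v1 and q=(x,y)-v1 the
%  composed value is (p.q)/|p|^2, i.e. the normalized projection of q onto
%  the gradient vector, 0.0 at v1 and 1.0 at v2.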
% */ static inline double GetStopColorOffset(const GradientInfo *gradient, const ssize_t x,const ssize_t y) { switch (gradient->type) { case UndefinedGradient: case LinearGradient: { double gamma, length, offset, scale; PointInfo p, q; const SegmentInfo *gradient_vector; gradient_vector=(&gradient->gradient_vector); p.x=gradient_vector->x2-gradient_vector->x1; p.y=gradient_vector->y2-gradient_vector->y1; q.x=(double) x-gradient_vector->x1; q.y=(double) y-gradient_vector->y1; length=sqrt(q.x*q.x+q.y*q.y); gamma=sqrt(p.x*p.x+p.y*p.y)*length; gamma=PerceptibleReciprocal(gamma); scale=p.x*q.x+p.y*q.y; offset=gamma*scale*length; return(offset); } case RadialGradient: { PointInfo v; if (gradient->spread == RepeatSpread) { v.x=(double) x-gradient->center.x; v.y=(double) y-gradient->center.y; return(sqrt(v.x*v.x+v.y*v.y)); } v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians( gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians( gradient->angle))))*PerceptibleReciprocal(gradient->radii.x); v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians( gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians( gradient->angle))))*PerceptibleReciprocal(gradient->radii.y); return(sqrt(v.x*v.x+v.y*v.y)); } } return(0.0); } MagickExport MagickBooleanType DrawGradientImage(Image *image, const DrawInfo *draw_info) { CacheView *image_view; const GradientInfo *gradient; const SegmentInfo *gradient_vector; double length; ExceptionInfo *exception; MagickBooleanType status; MagickPixelPacket zero; PointInfo point; RectangleInfo bounding_box; ssize_t y; /* Draw linear or radial gradient on image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); gradient=(&draw_info->gradient); gradient_vector=(&gradient->gradient_vector); point.x=gradient_vector->x2-gradient_vector->x1; point.y=gradient_vector->y2-gradient_vector->y1; length=sqrt(point.x*point.x+point.y*point.y); bounding_box=gradient->bounding_box; status=MagickTrue; exception=(&image->exception); GetMagickPixelPacket(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,bounding_box.height-bounding_box.y,1) #endif for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++) { double alpha, offset; MagickPixelPacket composite, pixel; register IndexPacket *magick_restrict indexes; register ssize_t i, x; register PixelPacket *magick_restrict q; ssize_t j; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; composite=zero; offset=GetStopColorOffset(gradient,0,y); if (gradient->type != RadialGradient) offset*=PerceptibleReciprocal(length); for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); switch (gradient->spread) { case UndefinedSpread: case PadSpread: { if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) || (y != (ssize_t) ceil(gradient_vector->y1-0.5))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type != RadialGradient) offset*=PerceptibleReciprocal(length); } for (i=0; i < (ssize_t) gradient->number_stops; i++) if (offset < 
gradient->stops[i].offset) break; if ((offset < 0.0) || (i == 0)) composite=gradient->stops[0].color; else if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops)) composite=gradient->stops[gradient->number_stops-1].color; else { j=i; i--; alpha=(offset-gradient->stops[i].offset)/ (gradient->stops[j].offset-gradient->stops[i].offset); MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha, &gradient->stops[j].color,alpha,&composite); } break; } case ReflectSpread: { if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) || (y != (ssize_t) ceil(gradient_vector->y1-0.5))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type != RadialGradient) offset*=PerceptibleReciprocal(length); } if (offset < 0.0) offset=(-offset); if ((ssize_t) fmod(offset,2.0) == 0) offset=fmod(offset,1.0); else offset=1.0-fmod(offset,1.0); for (i=0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite=gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite=gradient->stops[gradient->number_stops-1].color; else { j=i; i--; alpha=(offset-gradient->stops[i].offset)/ (gradient->stops[j].offset-gradient->stops[i].offset); MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha, &gradient->stops[j].color,alpha,&composite); } break; } case RepeatSpread: { double repeat; MagickBooleanType antialias; antialias=MagickFalse; repeat=0.0; if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) || (y != (ssize_t) ceil(gradient_vector->y1-0.5))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type == LinearGradient) { repeat=fmod(offset,length); if (repeat < 0.0) repeat=length-fmod(-repeat,length); else repeat=fmod(offset,length); antialias=(repeat < length) && ((repeat+1.0) > length) ? MagickTrue : MagickFalse; offset=PerceptibleReciprocal(length)*repeat; } else { repeat=fmod(offset,(double) gradient->radius); if (repeat < 0.0) repeat=gradient->radius-fmod(-repeat, (double) gradient->radius); else repeat=fmod(offset,(double) gradient->radius); antialias=repeat+1.0 > gradient->radius ? MagickTrue : MagickFalse; offset=repeat/gradient->radius; } } for (i=0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite=gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite=gradient->stops[gradient->number_stops-1].color; else { j=i; i--; alpha=(offset-gradient->stops[i].offset)/ (gradient->stops[j].offset-gradient->stops[i].offset); if (antialias != MagickFalse) { if (gradient->type == LinearGradient) alpha=length-repeat; else alpha=gradient->radius-repeat; i=0; j=(ssize_t) gradient->number_stops-1L; } MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha, &gradient->stops[j].color,alpha,&composite); } break; } } MagickPixelCompositeOver(&composite,composite.opacity,&pixel, pixel.opacity,&pixel); SetPixelPacket(image,&pixel,q,indexes+x); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawImage() draws a graphic primitive on your image. The primitive % may be represented as a string or filename. Precede the filename with an % "at" sign (@) and the contents of the file are drawn on the image. 
You
%  can affect how text is drawn by setting one or more members of the draw
%  info structure.
%
%  The format of the DrawImage method is:
%
%      MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
*/

static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  double
    extent;

  size_t
    quantum;

  /*
    Check if there is enough storage for drawing primitives.
  */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
  quantum=sizeof(**mvg_info->primitive_info);
  if (((extent*quantum) < (double) SSIZE_MAX) &&
      ((extent*quantum) < (double) GetMaxMemoryRequest()))
    {
      if (extent <= (double) *mvg_info->extent)
        return(MagickTrue);
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,(size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          register ssize_t
            i;

          *mvg_info->extent=(size_t) extent;
          for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
            (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
    PrimitiveExtentPad*quantum);
  (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
  *mvg_info->extent=1;
  return(MagickFalse);
}

MagickExport int MVGMacroCompare(const void *target,const void *source)
{
  const char
    *p,
    *q;

  p=(const char *) target;
  q=(const char *) source;
  return(strcmp(p,q));
}

static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *macro,
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    GetNextToken(q,&q,extent,token);
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;

        GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            n=1;
            for (p=q; *p != '\0'; )
            {
              GetNextToken(p,&p,extent,token);
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  GetNextToken(p,&p,extent,token);
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}

static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *p;

  double
    value;

  value=StringToDouble(point,&p);
  return((fabs(value) < MagickEpsilon) && (p == point) ?
MagickFalse : MagickTrue); } static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info, const PointInfo point) { primitive_info->coordinates=1; primitive_info->closed_subpath=MagickFalse; primitive_info->point=point; return(MagickTrue); } static MagickBooleanType RenderMVGContent(Image *image, const DrawInfo *draw_info,const size_t depth) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char key[2*MaxTextExtent], keyword[MaxTextExtent], geometry[MaxTextExtent], name[MaxTextExtent], *next_token, pattern[MaxTextExtent], *primitive, *token; const char *q; double angle, coordinates, cursor, factor, primitive_extent; DrawInfo *clone_info, **graphic_context; MagickBooleanType proceed; MagickStatusType status; MVGInfo mvg_info; PointInfo point; PixelPacket start_color; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; register const char *p; register ssize_t i, x; SegmentInfo bounds; size_t extent, number_points; SplayTreeInfo *macros; ssize_t defsDepth, j, k, n, symbolDepth; TypeMetric metrics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (depth > MagickMaxRecursionDepth) ThrowBinaryImageException(DrawError,"VectorGraphicsNestedTooDeeply", image->filename); if ((draw_info->primitive == (char *) NULL) || (*draw_info->primitive == '\0')) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image"); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (image->matte == MagickFalse) { status=SetImageAlphaChannel(image,OpaqueAlphaChannel); if (status == MagickFalse) return(MagickFalse); } primitive=(char *) NULL; if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) && (*(draw_info->primitive+1) != '-') && (depth == 0)) primitive=FileToString(draw_info->primitive+1,~0UL,&image->exception); else primitive=AcquireString(draw_info->primitive); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"mvg:vector-graphics",primitive); n=0; /* Allocate primitive info memory. 
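    Added note (editorial): primitive_info starts at PrimitiveExtentPad
    entries; mvg_info keeps pointers to the array and its extent so that
    CheckPrimitiveExtent() can grow it while the MVG stream is parsed.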
*/ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=PrimitiveExtentPad; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(primitive_info,0,(size_t) number_points* sizeof(*primitive_info)); (void) memset(&mvg_info,0,sizeof(mvg_info)); mvg_info.primitive_info=(&primitive_info); mvg_info.extent=(&number_points); mvg_info.exception=(&image->exception); graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MaxTextExtent; cursor=0.0; defsDepth=0; symbolDepth=0; macros=GetMVGMacros(primitive); status=QueryColorDatabase("#000000",&start_color,&image->exception); for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. */ GetNextToken(q,&q,MaxTextExtent,keyword); if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. */ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); *token='\0'; switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.rx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ry=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { GetNextToken(q,&q,extent,token); status&=QueryColorDatabase(token,&graphic_context[n]->border_color, &image->exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("class",keyword) == 0) { const char *mvg_class; 
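          /*
            Added note (editorial): a "class" reference is looked up in the
            macro table built by GetMVGMacros(); the macro body is spliced
            back into the primitive stream at the current offset and parsing
            resumes from the injected text.
          */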
GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } mvg_class=(const char *) GetValueFromSplayTree(macros,token); if (mvg_class != (const char *) NULL) { char *elements; ssize_t offset; /* Inject class elements in stream. */ offset=(ssize_t) (p-primitive); elements=AcquireString(primitive); elements[offset]='\0'; (void) ConcatenateString(&elements,mvg_class); (void) ConcatenateString(&elements,"\n"); (void) ConcatenateString(&elements,q); primitive=DestroyString(primitive); primitive=elements; q=primitive+offset; } break; } if (LocaleCompare("clip-path",keyword) == 0) { const char *clip_path; /* Take a node from within the MVG document, and duplicate it here. */ GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } (void) CloneString(&graphic_context[n]->clip_mask,token); clip_path=(const char *) GetValueFromSplayTree(macros,token); if (clip_path != (const char *) NULL) { if (graphic_context[n]->clipping_mask != (Image *) NULL) graphic_context[n]->clipping_mask= DestroyImage(graphic_context[n]->clipping_mask); graphic_context[n]->clipping_mask=DrawClippingMask(image, graphic_context[n],token,clip_path,&image->exception); if (graphic_context[n]->compliance != SVGCompliance) { const char *clip_path; clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image, graphic_context[n]->clip_mask,clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask); } } break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } if (LocaleCompare("compliance",keyword) == 0) { /* MVG compliance associates a clipping mask with an image; SVG compliance associates a clipping mask with a graphics context. 
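    Added note (editorial): under SVG compliance the mask therefore lives
    and dies with its graphic context; the clip-path and mask handlers in
    this switch only call DrawClipPath() or SetImageMask() immediately when
    the compliance is not SVGCompliance.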
*/ GetNextToken(q,&q,extent,token); graphic_context[n]->compliance=(ComplianceType) ParseCommandOption( MagickComplianceOptions,MagickFalse,token); break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) { status=MagickFalse; break; } graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MaxTextExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern); else { status&=QueryColorDatabase(token,&graphic_context[n]->fill, &image->exception); if (graphic_context[n]->fill_opacity != OpaqueOpacity) graphic_context[n]->fill.opacity=ClampToQuantum( graphic_context[n]->fill_opacity); } break; } if (LocaleCompare("fill-opacity",keyword) == 0) { double opacity; GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
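/* a trailing '%' marks the value as a percentage, so scale it into [0,1] */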
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(image,token); graphic_context[n]->fill_opacity=(QuantumRange- graphic_context[n]->fill_opacity)*(1.0-opacity); if (graphic_context[n]->fill.opacity != TransparentOpacity) graphic_context[n]->fill.opacity=(Quantum) graphic_context[n]->fill_opacity; else graphic_context[n]->fill.opacity=ClampToQuantum(QuantumRange* opacity); break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) { status=MagickFalse; break; } graphic_context[n]->stretch=(StretchType) stretch; break; } if (LocaleCompare("font-style",keyword) == 0) { ssize_t style; GetNextToken(q,&q,extent,token); style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) { status=MagickFalse; break; } graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units",keyword) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) { status=MagickFalse; break; } graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) { status=MagickFalse; break; } graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("interword-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) 
== 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->kerning=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("letter-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); clone_info->text=AcquireString(" "); status&=GetTypeMetrics(image,clone_info,&metrics); graphic_context[n]->kerning=metrics.width* StringToDouble(token,&next_token); clone_info=DestroyDrawInfo(clone_info); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("line",keyword) == 0) { primitive_type=LinePrimitive; break; } status=MagickFalse; break; } case 'm': case 'M': { if (LocaleCompare("mask",keyword) == 0) { const char *mask_path; /* Take a node from within the MVG document, and duplicate it here. */ GetNextToken(q,&q,extent,token); mask_path=(const char *) GetValueFromSplayTree(macros,token); if (mask_path != (const char *) NULL) { if (graphic_context[n]->composite_mask != (Image *) NULL) graphic_context[n]->composite_mask= DestroyImage(graphic_context[n]->composite_mask); graphic_context[n]->composite_mask=DrawCompositeMask(image, graphic_context[n],token,mask_path,&image->exception); if (graphic_context[n]->compliance != SVGCompliance) status=SetImageMask(image,graphic_context[n]->composite_mask); } break; } if (LocaleCompare("matte",keyword) == 0) { primitive_type=MattePrimitive; break; } status=MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { double opacity; GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(image,token); graphic_context[n]->fill_opacity=(QuantumRange- graphic_context[n]->fill_opacity)*(1.0-opacity); graphic_context[n]->stroke_opacity=(QuantumRange- graphic_context[n]->stroke_opacity)*(1.0-opacity); break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("class",token) == 0) break; if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) { defsDepth--; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(&image->exception, GetMagickModule(),DrawError, "UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if ((graphic_context[n]->clip_mask != (char *) NULL) && (graphic_context[n]->compliance != SVGCompliance)) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) status=SetImageClipMask(image,(Image *) NULL); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("mask",token) == 0) break; if (LocaleCompare("pattern",token) == 0) break; if (LocaleCompare("symbol",token) == 0) { symbolDepth--; graphic_context[n]->render=symbolDepth > 0 ? MagickFalse : MagickTrue; break; } status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("class",token) == 0) { /* Class context. */ for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"class") != 0) continue; break; } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("clip-path",token) == 0) { GetNextToken(q,&q,extent,token); for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"clip-path") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("defs",token) == 0) { defsDepth++; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MaxTextExtent], name[MaxTextExtent], type[MaxTextExtent]; SegmentInfo segment; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MaxTextExtent); GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MaxTextExtent); GetNextToken(q,&q,extent,token); segment.x1=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y1=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.x2=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y2=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); if (LocaleCompare(type,"radial") == 0) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); } for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; bounds.x2=graphic_context[n]->affine.sx*segment.x2+ graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MaxTextExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MaxTextExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name); (void) FormatLocaleString(geometry,MaxTextExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(&image->exception, GetMagickModule(),ResourceLimitError, "MemoryAllocationFailed","`%s'",image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); if (*q == '"') GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("mask",token) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { RectangleInfo bounds; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MaxTextExtent); GetNextToken(q,&q,extent,token); bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') 
GetNextToken(q,&q,extent,token); bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); bounds.width=(size_t) floor(StringToDouble(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); bounds.height=(size_t) floor(StringToDouble(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(image,token); for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) FormatLocaleString(key,MaxTextExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name); (void) FormatLocaleString(geometry,MaxTextExtent, "%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double) bounds.height,(double) bounds.x,(double) bounds.y); (void) SetImageArtifact(image,key,geometry); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("symbol",token) == 0) { symbolDepth++; graphic_context[n]->render=symbolDepth > 0 ? MagickFalse : MagickTrue; break; } status=MagickFalse; break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("skewX",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { GradientType type; PixelPacket stop_color; GetNextToken(q,&q,extent,token); status&=QueryColorDatabase(token,&stop_color,&image->exception); type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) GradientImage(image,type,PadSpread,&start_color,&stop_color); start_color=stop_color; GetNextToken(q,&q,extent,token); 
break; } if (LocaleCompare("stroke",keyword) == 0) { GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MaxTextExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern); else { status&=QueryColorDatabase(token,&graphic_context[n]->stroke, &image->exception); if (graphic_context[n]->stroke_opacity != OpaqueOpacity) graphic_context[n]->stroke.opacity=ClampToQuantum( graphic_context[n]->stroke_opacity); } break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *p; p=q; GetNextToken(p,&p,extent,token); if (*token == ',') GetNextToken(p,&p,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { GetNextToken(p,&p,extent,token); if (*token == ',') GetNextToken(p,&p,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(&image->exception, GetMagickModule(),ResourceLimitError, "MemoryAllocationFailed","`%s'",image->filename); status=MagickFalse; break; } (void) memset(graphic_context[n]->dash_pattern,0,(size_t) (2*x+2)*sizeof(*graphic_context[n]->dash_pattern)); for (j=0; j < x; j++) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->dash_pattern[j]=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(image,token); if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) { status=MagickFalse; break; } graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) { status=MagickFalse; break; } graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { double opacity; GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(image,token); graphic_context[n]->stroke_opacity=(QuantumRange- graphic_context[n]->stroke_opacity)*(1.0-opacity); if (graphic_context[n]->stroke.opacity != TransparentOpacity) graphic_context[n]->stroke.opacity=(Quantum) graphic_context[n]->stroke_opacity; else graphic_context[n]->stroke.opacity=ClampToQuantum(QuantumRange* opacity); break; } if (LocaleCompare("stroke-width",keyword) == 0) { GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; graphic_context[n]->stroke_width=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { GetNextToken(q,&q,extent,token); status&=QueryColorDatabase(token,&graphic_context[n]->undercolor, &image->exception); break; } if (LocaleCompare("translate",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } case 'u': case 'U': { if (LocaleCompare("use",keyword) == 0) { const char *use; /* Get a macro from the MVG document, and "use" it here. 
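         The name that follows is looked up in the "macros" splay-tree of
         MVG snippets captured when the document was first scanned, and the
         snippet is rendered recursively via RenderMVGContent() at depth+1.
         A hypothetical fragment that would exercise this path (the class
         construct here is only an assumed way of defining a macro):

           push class "wheel"
             fill red
             circle 50,50 50,90
           pop class
           use "wheel"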
*/ GetNextToken(q,&q,extent,token); use=(const char *) GetValueFromSplayTree(macros,token); if (use != (const char *) NULL) { clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); (void) CloneString(&clone_info->primitive,use); status=RenderMVGContent(image,clone_info,depth+1); clone_info=DestroyDrawInfo(clone_info); } break; } break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } case 'w': case 'W': { if (LocaleCompare("word-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= MagickEpsilon) || (fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) || (fabs(affine.sy-1.0) >= MagickEpsilon) || (fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),p); continue; } /* Parse the primitive attributes. */ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *) NULL) primitive_info[i].text=DestroyString(primitive_info[i].text); i=0; mvg_info.offset=i; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; primitive_info[0].coordinates=0; primitive_info[0].method=FloodfillMethod; primitive_info[0].closed_subpath=MagickFalse; for (x=0; *q != '\0'; x++) { /* Define points. 
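          Each pass consumes one x,y pair (a comma between numbers is an
          optional separator), stores it as the next PrimitiveInfo entry,
          and, once the running index reaches number_points, grows the
          buffer through CheckPrimitiveExtent() before continuing.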
*/ if (IsPoint(q) == MagickFalse) break; GetNextToken(q,&q,extent,token); point.x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); point.y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; primitive_info[i].closed_subpath=MagickFalse; i++; mvg_info.offset=i; if (i < (ssize_t) number_points) continue; status&=CheckPrimitiveExtent(&mvg_info,number_points); } if (status == MagickFalse) break; primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; primitive_info[j].method=FloodfillMethod; primitive_info[j].closed_subpath=MagickFalse; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. */ coordinates=(double) primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { coordinates*=5.0; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot((double) alpha,(double) beta); coordinates*=5.0; coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0* BezierQuantum+360.0; break; } case BezierPrimitive: { coordinates=(double) (BezierQuantum*primitive_info[j].coordinates); if (primitive_info[j].coordinates > (107*BezierQuantum)) { (void) ThrowMagickException(&image->exception,GetMagickModule(), DrawError,"TooManyBezierCoordinates","`%s'",token); status=MagickFalse; break; } break; } case PathPrimitive: { char *s, *t; GetNextToken(q,&q,extent,token); coordinates=1.0; t=token; for (s=token; *s != '\0'; s=t) { double value; value=StringToDouble(s,&t); (void) value; if (s == t) { t++; continue; } coordinates++; } for (s=token; *s != '\0'; s++) if (strspn(s,"AaCcQqSsTt") != 0) coordinates+=(20.0*BezierQuantum)+360.0; break; } case CirclePrimitive: case ArcPrimitive: case EllipsePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot(alpha,beta); coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0; break; } default: break; } if (coordinates > MaxBezierCoordinates) { (void) ThrowMagickException(&image->exception,GetMagickModule(), DrawError,"TooManyBezierCoordinates","`%s'",token); status=MagickFalse; } if (status == MagickFalse) break; if (((size_t) (i+coordinates)) >= number_points) { /* Resize based on speculative points required by primitive. 
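        number_points grows by the speculative coordinate count plus one
        slot for the UndefinedPrimitive terminator; the follow-up test
        number_points < coordinates catches size_t wrap-around before the
        buffer is re-extended.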
*/ number_points+=coordinates+1; if (number_points < (size_t) coordinates) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } mvg_info.offset=i; status&=CheckPrimitiveExtent(&mvg_info,number_points); } status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad); if (status == MagickFalse) break; mvg_info.offset=j; switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } status&=TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+2].point.x < 0.0) || (primitive_info[j+2].point.y < 0.0)) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0) { status=MagickFalse; break; } if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0) { status=MagickFalse; break; } status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { primitive_type=UndefinedPrimitive; break; } status&=TraceArc(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x < 0.0) || (primitive_info[j+1].point.y < 0.0)) { status=MagickFalse; break; } status&=TraceEllipse(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceCircle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: { if (primitive_info[j].coordinates < 1) { status=MagickFalse; break; } break; } case PolygonPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; primitive_info[j].closed_subpath=MagickTrue; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } status&=TraceBezier(&mvg_info,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { coordinates=(double) TracePath(image,&mvg_info,token); if (coordinates == 0) { status=MagickFalse; break; } i=(ssize_t) (j+coordinates); break; } case ColorPrimitive: case MattePrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); 
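      /*
        The token after the coordinate selects the paint method: point,
        replace, floodfill, filltoborder, or reset.
      */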
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) { status=MagickFalse; break; } primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { char geometry[MagickPathExtent]; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); /* Compute text cursor offset. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) && (fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon)) { mvg_info.point=primitive_info->point; primitive_info->point.x+=cursor; } else { mvg_info.point=primitive_info->point; cursor=0.0; } (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); clone_info->render=MagickFalse; clone_info->text=AcquireString(token); status&=GetTypeMetrics(image,clone_info,&metrics); clone_info=DestroyDrawInfo(clone_info); cursor+=metrics.width; if (graphic_context[n]->compliance != SVGCompliance) cursor=0.0; break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); break; } } mvg_info.offset=i; if (primitive_info == (PrimitiveInfo *) NULL) break; if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1), p); if (status == MagickFalse) break; primitive_info[i].primitive=UndefinedPrimitive; if (i == 0) continue; /* Transform points. */ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) { const char *clip_path; clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image,graphic_context[n]->clip_mask, clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask); } status&=DrawPrimitive(image,graphic_context[n],primitive_info); } proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. 
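    Teardown runs in reverse order of acquisition: the macro splay-tree,
    the parse token, any text payloads still attached to primitive_info,
    the primitive string, and finally every level of the graphic-context
    stack.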
*/
  macros=DestroySplayTree(macros);
  token=DestroyString(token);
  if (primitive_info != (PrimitiveInfo *) NULL)
    {
      for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
        if ((primitive_info[i].primitive == TextPrimitive) ||
            (primitive_info[i].primitive == ImagePrimitive))
          if (primitive_info[i].text != (char *) NULL)
            primitive_info[i].text=DestroyString(primitive_info[i].text);
      primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
    }
  primitive=DestroyString(primitive);
  for ( ; n >= 0; n--)
    graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
  graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
  if (status == MagickFalse)
    ThrowBinaryImageException(DrawError,
      "NonconformingDrawingPrimitiveDefinition",keyword);
  return(status != 0 ? MagickTrue : MagickFalse);
}

MagickExport MagickBooleanType DrawImage(Image *image,
  const DrawInfo *draw_info)
{
  return(RenderMVGContent(image,draw_info,0));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P a t t e r n P a t h                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPatternPath() draws a pattern.
%
%  The format of the DrawPatternPath method is:
%
%      MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
%        const char *name,Image **pattern)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o name: the pattern name.
%
%    o pattern: on return, the rendered pattern image.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern)
{
  char
    property[MaxTextExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  (void) FormatLocaleString(property,MaxTextExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MaxTextExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info);
  image_info=DestroyImageInfo(image_info);
  (void) QueryColorDatabase("#00000000",&(*pattern)->background_color,
    &image->exception);
  (void) SetImageBackgroundColor(*pattern);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill_pattern=NewImageList();
  clone_info->stroke_pattern=NewImageList();
  (void) FormatLocaleString(property,MaxTextExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=RenderMVGContent(*pattern,clone_info,0);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w P o l y g o n P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPolygonPrimitive() draws a polygon on the image. % % The format of the DrawPolygonPrimitive method is: % % MagickBooleanType DrawPolygonPrimitive(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % */ static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info) { register ssize_t i; assert(polygon_info != (PolygonInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (polygon_info[i] != (PolygonInfo *) NULL) polygon_info[i]=DestroyPolygonInfo(polygon_info[i]); polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info); return(polygon_info); } static PolygonInfo **AcquirePolygonThreadSet(const DrawInfo *draw_info, const PrimitiveInfo *primitive_info) { PathInfo *magick_restrict path_info; PolygonInfo **polygon_info; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads, sizeof(*polygon_info)); if (polygon_info == (PolygonInfo **) NULL) return((PolygonInfo **) NULL); (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info)); path_info=ConvertPrimitiveToPath(draw_info,primitive_info); if (path_info == (PathInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); for (i=0; i < (ssize_t) number_threads; i++) { polygon_info[i]=ConvertPathToPolygon(path_info); if (polygon_info[i] == (PolygonInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); } path_info=(PathInfo *) RelinquishMagickMemory(path_info); return(polygon_info); } static double GetOpacityPixel(PolygonInfo *polygon_info,const double mid, const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x, const ssize_t y,double *stroke_opacity) { double alpha, beta, distance, subpath_opacity; PointInfo delta; register EdgeInfo *p; register const PointInfo *q; register ssize_t i; ssize_t j, winding_number; /* Compute fill & stroke opacity for this (x,y) point. */ *stroke_opacity=0.0; subpath_opacity=0.0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= (p->bounds.y1-mid-0.5)) break; if ((double) y > (p->bounds.y2+mid+0.5)) { (void) DestroyEdge(polygon_info,(size_t) j); continue; } if (((double) x <= (p->bounds.x1-mid-0.5)) || ((double) x > (p->bounds.x2+mid+0.5))) continue; i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) p->number_points; i++) { if ((double) y <= (p->points[i-1].y-mid-0.5)) break; if ((double) y > (p->points[i].y+mid+0.5)) continue; if (p->scanline != (double) y) { p->scanline=(double) y; p->highwater=(size_t) i; } /* Compute distance between a point and an edge. 
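          With Q=points[i-1], R=points[i], D=R-Q, and P=(x,y), beta=D.(P-Q)
          projects P onto the segment.  If beta <= 0 the nearest point is
          Q; if beta >= |D|^2 it is R; otherwise the squared perpendicular
          distance is (D x (P-Q))^2 over |D|^2, which the alpha*beta*beta
          branch below evaluates with alpha holding 1/|D|^2 and beta the
          2-D cross product.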
*/ q=p->points+i-1; delta.x=(q+1)->x-q->x; delta.y=(q+1)->y-q->y; beta=delta.x*(x-q->x)+delta.y*(y-q->y); if (beta <= 0.0) { delta.x=(double) x-q->x; delta.y=(double) y-q->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=delta.x*delta.x+delta.y*delta.y; if (beta >= alpha) { delta.x=(double) x-(q+1)->x; delta.y=(double) y-(q+1)->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=PerceptibleReciprocal(alpha); beta=delta.x*(y-q->y)-delta.y*(x-q->x); distance=alpha*beta*beta; } } /* Compute stroke & subpath opacity. */ beta=0.0; if (p->ghostline == MagickFalse) { alpha=mid+0.5; if ((*stroke_opacity < 1.0) && (distance <= ((alpha+0.25)*(alpha+0.25)))) { alpha=mid-0.5; if (distance <= ((alpha+0.25)*(alpha+0.25))) *stroke_opacity=1.0; else { beta=1.0; if (fabs(distance-1.0) >= MagickEpsilon) beta=sqrt((double) distance); alpha=beta-mid-0.5; if (*stroke_opacity < ((alpha-0.25)*(alpha-0.25))) *stroke_opacity=(alpha-0.25)*(alpha-0.25); } } } if ((fill == MagickFalse) || (distance > 1.0) || (subpath_opacity >= 1.0)) continue; if (distance <= 0.0) { subpath_opacity=1.0; continue; } if (distance > 1.0) continue; if (fabs(beta) < MagickEpsilon) { beta=1.0; if (fabs(distance-1.0) >= MagickEpsilon) beta=sqrt(distance); } alpha=beta-1.0; if (subpath_opacity < (alpha*alpha)) subpath_opacity=alpha*alpha; } } /* Compute fill opacity. */ if (fill == MagickFalse) return(0.0); if (subpath_opacity >= 1.0) return(1.0); /* Determine winding number. */ winding_number=0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= p->bounds.y1) break; if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1)) continue; if ((double) x > p->bounds.x2) { winding_number+=p->direction ? 1 : -1; continue; } i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) (p->number_points-1); i++) if ((double) y <= p->points[i].y) break; q=p->points+i-1; if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x))) winding_number+=p->direction ? 1 : -1; } if (fill_rule != NonZeroRule) { if ((MagickAbsoluteValue(winding_number) & 0x01) != 0) return(1.0); } else if (MagickAbsoluteValue(winding_number) != 0) return(1.0); return(subpath_opacity); } static MagickBooleanType DrawPolygonPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) { CacheView *image_view; double mid; ExceptionInfo *exception; MagickBooleanType fill, status; PolygonInfo **magick_restrict polygon_info; register EdgeInfo *p; register ssize_t i; SegmentInfo bounds; ssize_t start_y, stop_y, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); assert(primitive_info != (PrimitiveInfo *) NULL); if (primitive_info->coordinates <= 1) return(MagickTrue); /* Compute bounding box. */ polygon_info=AcquirePolygonThreadSet(draw_info,primitive_info); if (polygon_info == (PolygonInfo **) NULL) return(MagickFalse); DisableMSCWarning(4127) if (0) { status=DrawBoundingRectangles(image,draw_info,polygon_info[0]); if (status == MagickFalse) { polygon_info=DestroyPolygonThreadSet(polygon_info); return(status); } } RestoreMSCWarning if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon"); fill=(primitive_info->method == FillToBorderMethod) || (primitive_info->method == FloodfillMethod) ? 
MagickTrue : MagickFalse; mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0; bounds=polygon_info[0]->edges[0].bounds; for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++) { p=polygon_info[0]->edges+i; if (p->bounds.x1 < bounds.x1) bounds.x1=p->bounds.x1; if (p->bounds.y1 < bounds.y1) bounds.y1=p->bounds.y1; if (p->bounds.x2 > bounds.x2) bounds.x2=p->bounds.x2; if (p->bounds.y2 > bounds.y2) bounds.y2=p->bounds.y2; } bounds.x1-=(mid+1.0); bounds.y1-=(mid+1.0); bounds.x2+=(mid+1.0); bounds.y2+=(mid+1.0); if ((bounds.x1 >= (double) image->columns) || (bounds.y1 >= (double) image->rows) || (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0)) { polygon_info=DestroyPolygonThreadSet(polygon_info); return(MagickTrue); /* virtual polygon */ } bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x1; bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ? (double) image->rows-1.0 : bounds.y1; bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x2; bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ? (double) image->rows-1.0 : bounds.y2; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); if ((primitive_info->coordinates == 1) || (polygon_info[0]->number_edges == 0)) { /* Draw point. */ start_y=(ssize_t) ceil(bounds.y1-0.5); stop_y=(ssize_t) floor(bounds.y2+0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y=start_y; y <= stop_y; y++) { MagickBooleanType sync; register PixelPacket *magick_restrict q; register ssize_t x; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=(ssize_t) ceil(bounds.x1-0.5); stop_x=(ssize_t) floor(bounds.x2+0.5); x=start_x; q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for ( ; x <= stop_x; x++) { if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) && (y == (ssize_t) ceil(primitive_info->point.y-0.5))) (void) GetFillColor(draw_info,x-start_x,y-start_y,q); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-polygon"); return(status); } /* Draw polygon or line. */ start_y=(ssize_t) ceil(bounds.y1-0.5); stop_y=(ssize_t) floor(bounds.y2+0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y=start_y; y <= stop_y; y++) { const int id = GetOpenMPThreadId(); double fill_opacity, stroke_opacity; PixelPacket fill_color, stroke_color; register PixelPacket *magick_restrict q; register ssize_t x; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=(ssize_t) ceil(bounds.x1-0.5); stop_x=(ssize_t) floor(bounds.x2+0.5); q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+ 1),1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=start_x; x <= stop_x; x++) { /* Fill and/or stroke. 
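        GetOpacityPixel() returns coverage values in [0,1]; each one is
        folded with the corresponding color's own opacity and composited
        onto the destination pixel, fill first, then stroke on top.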
*/ fill_opacity=GetOpacityPixel(polygon_info[id],mid,fill, draw_info->fill_rule,x,y,&stroke_opacity); if (draw_info->stroke_antialias == MagickFalse) { fill_opacity=fill_opacity > 0.25 ? 1.0 : 0.0; stroke_opacity=stroke_opacity > 0.25 ? 1.0 : 0.0; } (void) GetFillColor(draw_info,x-start_x,y-start_y,&fill_color); fill_opacity=(double) (QuantumRange-fill_opacity*(QuantumRange- fill_color.opacity)); MagickCompositeOver(&fill_color,(MagickRealType) fill_opacity,q, (MagickRealType) q->opacity,q); (void) GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color); stroke_opacity=(double) (QuantumRange-stroke_opacity*(QuantumRange- stroke_color.opacity)); MagickCompositeOver(&stroke_color,(MagickRealType) stroke_opacity,q, (MagickRealType) q->opacity,q); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image. % % The format of the DrawPrimitive method is: % % MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info, % PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % */ static inline double ConstrainCoordinate(double x) { if (x < (double) -SSIZE_MAX) return((double) -SSIZE_MAX); if (x > (double) SSIZE_MAX) return((double) SSIZE_MAX); return(x); } static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info) { const char *methods[] = { "point", "replace", "floodfill", "filltoborder", "reset", "?" 
}; PointInfo p, q, point; register ssize_t i, x; ssize_t coordinates, y; x=(ssize_t) ceil(primitive_info->point.x-0.5); y=(ssize_t) ceil(primitive_info->point.y-0.5); switch (primitive_info->primitive) { case PointPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "PointPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ColorPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case MattePrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "MattePrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case TextPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "TextPrimitive %.20g,%.20g",(double) x,(double) y); return; } case ImagePrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ImagePrimitive %.20g,%.20g",(double) x,(double) y); return; } default: break; } coordinates=0; p=primitive_info[0].point; q.x=(-1.0); q.y=(-1.0); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; if (coordinates <= 0) { coordinates=(ssize_t) primitive_info[i].coordinates; (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin open (%.20g)",(double) coordinates); p=point; } point=primitive_info[i].point; if ((fabs(q.x-point.x) >= MagickEpsilon) || (fabs(q.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y); else (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y); q=point; coordinates--; if (coordinates > 0) continue; if ((fabs(p.x-point.x) >= MagickEpsilon) || (fabs(p.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)", (double) coordinates); else (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)", (double) coordinates); } } MagickExport MagickBooleanType DrawPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) { CacheView *image_view; ExceptionInfo *exception; MagickStatusType status; register ssize_t i, x; ssize_t y; if (image->debug != MagickFalse) { (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-primitive"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx, draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy, draw_info->affine.tx,draw_info->affine.ty); } exception=(&image->exception); status=MagickTrue; if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsPixelGray(&draw_info->fill) == MagickFalse) || (IsPixelGray(&draw_info->stroke) == MagickFalse))) status=SetImageColorspace(image,sRGBColorspace); if (draw_info->compliance == SVGCompliance) { status&=SetImageClipMask(image,draw_info->clipping_mask); status&=SetImageMask(image,draw_info->composite_mask); } x=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.x-0.5)); y=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.y-0.5)); image_view=AcquireAuthenticCacheView(image,exception); switch (primitive_info->primitive) { case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelPacket *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (PixelPacket *) NULL) break; (void) GetFillColor(draw_info,x,y,q); status&=SyncCacheViewAuthenticPixels(image_view,exception); 
break; } case ReplaceMethod: { MagickBooleanType sync; PixelPacket target; status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsColorSimilar(image,q,&target) == MagickFalse) { q++; continue; } (void) GetFillColor(draw_info,x,y,q); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { MagickPixelPacket target; (void) GetOneVirtualMagickPixel(image,x,y,&target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(MagickRealType) draw_info->border_color.red; target.green=(MagickRealType) draw_info->border_color.green; target.blue=(MagickRealType) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,DefaultChannels,draw_info,&target,x, y,primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue); break; } case ResetMethod: { MagickBooleanType sync; for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) GetFillColor(draw_info,x,y,q); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } } break; } case MattePrimitive: { if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); switch (primitive_info->method) { case PointMethod: default: { PixelPacket pixel; PixelPacket *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (PixelPacket *) NULL) break; (void) GetFillColor(draw_info,x,y,&pixel); SetPixelOpacity(q,pixel.opacity); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelPacket pixel, target; status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsColorSimilar(image,q,&target) == MagickFalse) { q++; continue; } (void) GetFillColor(draw_info,x,y,&pixel); SetPixelOpacity(q,pixel.opacity); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { MagickPixelPacket target; (void) GetOneVirtualMagickPixel(image,x,y,&target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(MagickRealType) draw_info->border_color.red; target.green=(MagickRealType) draw_info->border_color.green; target.blue=(MagickRealType) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,OpacityChannel,draw_info,&target,x, y,primitive_info->method == FloodfillMethod ? 
MagickFalse : MagickTrue); break; } case ResetMethod: { MagickBooleanType sync; PixelPacket pixel; for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) GetFillColor(draw_info,x,y,&pixel); SetPixelOpacity(q,pixel.opacity); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MaxTextExtent]; Image *composite_image, *composite_images; ImageInfo *clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *) NULL) break; clone_info=AcquireImageInfo(); if (LocaleNCompare(primitive_info->text,"data:",5) == 0) composite_images=ReadInlineImage(clone_info,primitive_info->text, &image->exception); else { (void) CopyMagickString(clone_info->filename,primitive_info->text, MaxTextExtent); composite_images=ReadImage(clone_info,&image->exception); } clone_info=DestroyImageInfo(clone_info); if (composite_images == (Image *) NULL) { status=0; break; } composite_image=RemoveFirstImageFromList(&composite_images); composite_images=DestroyImageList(composite_images); (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor) NULL,(void *) NULL); x1=(ssize_t) ceil(primitive_info[1].point.x-0.5); y1=(ssize_t) ceil(primitive_info[1].point.y-0.5); if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) || ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows))) { char geometry[MaxTextExtent]; /* Resize image. */ (void) FormatLocaleString(geometry,MaxTextExtent,"%gx%g!", primitive_info[1].point.x,primitive_info[1].point.y); composite_image->filter=image->filter; (void) TransformImage(&composite_image,(char *) NULL,geometry); } if (composite_image->matte == MagickFalse) (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel); if (draw_info->opacity != OpaqueOpacity) (void) SetImageOpacity(composite_image,draw_info->opacity); SetGeometry(image,&geometry); image->gravity=draw_info->gravity; geometry.x=x; geometry.y=y; (void) FormatLocaleString(composite_geometry,MaxTextExtent, "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double) composite_image->rows,(double) geometry.x,(double) geometry.y); (void) ParseGravityGeometry(image,composite_geometry,&geometry, &image->exception); affine=draw_info->affine; affine.tx=(double) geometry.x; affine.ty=(double) geometry.y; composite_image->interpolate=image->interpolate; if ((draw_info->compose == OverCompositeOp) || (draw_info->compose == SrcOverCompositeOp)) (void) DrawAffineImage(image,composite_image,&affine); else (void) CompositeImage(image,draw_info->compose,composite_image, geometry.x,geometry.y); composite_image=DestroyImage(composite_image); break; } case PointPrimitive: { PixelPacket fill_color; PixelPacket *q; if ((y < 0) || (y >= (ssize_t) image->rows)) break; if ((x < 0) || (x >= (ssize_t) image->columns)) break; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (PixelPacket *) NULL) break; (void) GetFillColor(draw_info,x,y,&fill_color); MagickCompositeOver(&fill_color,(MagickRealType) fill_color.opacity,q, (MagickRealType) q->opacity,q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case TextPrimitive: { char geometry[MaxTextExtent]; DrawInfo *clone_info; if (primitive_info->text == (char *) NULL) break; 
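      /*
        Render the text through AnnotateImage() at the primitive's origin,
        using a clone so the caller's draw_info is left untouched.
      */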
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->text,primitive_info->text); (void) FormatLocaleString(geometry,MaxTextExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); (void) CloneString(&clone_info->geometry,geometry); status&=AnnotateImage(image,clone_info); clone_info=DestroyDrawInfo(clone_info); break; } default: { double mid, scale; DrawInfo *clone_info; if (IsEventLogging() != MagickFalse) LogPrimitiveInfo(primitive_info); scale=ExpandAffine(&draw_info->affine); if ((draw_info->dash_pattern != (double *) NULL) && (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) && (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) && (draw_info->stroke.opacity != (Quantum) TransparentOpacity)) { /* Draw dash polygon. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.opacity=(Quantum) TransparentOpacity; status&=DrawPolygonPrimitive(image,clone_info,primitive_info); clone_info=DestroyDrawInfo(clone_info); (void) DrawDashPolygon(draw_info,primitive_info,image); break; } mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0; if ((mid > 1.0) && ((draw_info->stroke.opacity != (Quantum) TransparentOpacity) || (draw_info->stroke_pattern != (Image *) NULL))) { double x, y; MagickBooleanType closed_path; /* Draw strokes while respecting line cap/join attributes. */ closed_path=primitive_info[0].closed_subpath; i=(ssize_t) primitive_info[0].coordinates; x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x); y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y); if ((x < MagickEpsilon) && (y < MagickEpsilon)) closed_path=MagickTrue; if ((((draw_info->linecap == RoundCap) || (closed_path != MagickFalse)) && (draw_info->linejoin == RoundJoin)) || (primitive_info[i].primitive != UndefinedPrimitive)) { (void) DrawPolygonPrimitive(image,draw_info,primitive_info); break; } clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.opacity=(Quantum) TransparentOpacity; status&=DrawPolygonPrimitive(image,clone_info,primitive_info); clone_info=DestroyDrawInfo(clone_info); status&=DrawStrokePolygon(image,draw_info,primitive_info); break; } status&=DrawPolygonPrimitive(image,draw_info,primitive_info); break; } } image_view=DestroyCacheView(image_view); if (draw_info->compliance == SVGCompliance) { status&=SetImageClipMask(image,(Image *) NULL); status&=SetImageMask(image,(Image *) NULL); } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w S t r o k e P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on % the image while respecting the line cap and join attributes. % % The format of the DrawStrokePolygon method is: % % MagickBooleanType DrawStrokePolygon(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. 
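%
%  Internally the path is converted to a filled outline with
%  TraceStrokePolygon() and rendered by DrawPolygonPrimitive() through a
%  clone whose fill is swapped to the stroke color; round line caps are
%  added at the ends of each open subpath when requested.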
% % */ static MagickBooleanType DrawRoundLinecap(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) { PrimitiveInfo linecap[5]; register ssize_t i; for (i=0; i < 4; i++) linecap[i]=(*primitive_info); linecap[0].coordinates=4; linecap[1].point.x+=2.0*MagickEpsilon; linecap[2].point.x+=2.0*MagickEpsilon; linecap[2].point.y+=2.0*MagickEpsilon; linecap[3].point.y+=2.0*MagickEpsilon; linecap[4].primitive=UndefinedPrimitive; return(DrawPolygonPrimitive(image,draw_info,linecap)); } static MagickBooleanType DrawStrokePolygon(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) { DrawInfo *clone_info; MagickBooleanType closed_path; MagickStatusType status; PrimitiveInfo *stroke_polygon; register const PrimitiveInfo *p, *q; /* Draw stroked polygon. */ if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-stroke-polygon"); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->fill=draw_info->stroke; if (clone_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern); if (clone_info->stroke_pattern != (Image *) NULL) clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0, MagickTrue,&clone_info->stroke_pattern->exception); clone_info->stroke.opacity=(Quantum) TransparentOpacity; clone_info->stroke_width=0.0; clone_info->fill_rule=NonZeroRule; status=MagickTrue; for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates) { if (p->coordinates == 1) continue; stroke_polygon=TraceStrokePolygon(image,draw_info,p); if (stroke_polygon == (PrimitiveInfo *) NULL) { status=0; stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon); break; } status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon); stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon); if (status == 0) break; q=p+p->coordinates-1; closed_path=p->closed_subpath; if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse)) { status&=DrawRoundLinecap(image,draw_info,p); status&=DrawRoundLinecap(image,draw_info,q); } } clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-stroke-polygon"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A f f i n e M a t r i x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAffineMatrix() returns an AffineMatrix initialized to the identity % matrix. % % The format of the GetAffineMatrix method is: % % void GetAffineMatrix(AffineMatrix *affine_matrix) % % A description of each parameter follows: % % o affine_matrix: the affine matrix. % */ MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(affine_matrix != (AffineMatrix *) NULL); (void) memset(affine_matrix,0,sizeof(*affine_matrix)); affine_matrix->sx=1.0; affine_matrix->sy=1.0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetDrawInfo() initializes draw_info to default values from image_info. 
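%
%  For example, a caller can initialize a DrawInfo on the stack and then
%  adjust individual attributes (a minimal sketch):
%
%      DrawInfo
%        draw_info;
%
%      GetDrawInfo(image_info,&draw_info);
%      draw_info.stroke_width=2.0;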
% % The format of the GetDrawInfo method is: % % void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info) % % A description of each parameter follows: % % o image_info: the image info.. % % o draw_info: the draw info. % */ MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info) { char *next_token; const char *option; ExceptionInfo *exception; ImageInfo *clone_info; /* Initialize draw attributes. */ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(draw_info != (DrawInfo *) NULL); (void) memset(draw_info,0,sizeof(*draw_info)); clone_info=CloneImageInfo(image_info); GetAffineMatrix(&draw_info->affine); exception=AcquireExceptionInfo(); (void) QueryColorDatabase("#000F",&draw_info->fill,exception); (void) QueryColorDatabase("#FFF0",&draw_info->stroke,exception); draw_info->stroke_antialias=clone_info->antialias; draw_info->stroke_width=1.0; draw_info->fill_rule=EvenOddRule; draw_info->opacity=OpaqueOpacity; draw_info->fill_opacity=OpaqueOpacity; draw_info->stroke_opacity=OpaqueOpacity; draw_info->linecap=ButtCap; draw_info->linejoin=MiterJoin; draw_info->miterlimit=10; draw_info->decorate=NoDecoration; if (clone_info->font != (char *) NULL) draw_info->font=AcquireString(clone_info->font); if (clone_info->density != (char *) NULL) draw_info->density=AcquireString(clone_info->density); draw_info->text_antialias=clone_info->antialias; draw_info->pointsize=12.0; if (fabs(clone_info->pointsize) >= MagickEpsilon) draw_info->pointsize=clone_info->pointsize; draw_info->undercolor.opacity=(Quantum) TransparentOpacity; draw_info->border_color=clone_info->border_color; draw_info->compose=OverCompositeOp; if (clone_info->server_name != (char *) NULL) draw_info->server_name=AcquireString(clone_info->server_name); draw_info->render=MagickTrue; draw_info->clip_path=MagickFalse; draw_info->debug=IsEventLogging(); option=GetImageOption(clone_info,"direction"); if (option != (const char *) NULL) draw_info->direction=(DirectionType) ParseCommandOption( MagickDirectionOptions,MagickFalse,option); else draw_info->direction=UndefinedDirection; option=GetImageOption(clone_info,"encoding"); if (option != (const char *) NULL) (void) CloneString(&draw_info->encoding,option); option=GetImageOption(clone_info,"family"); if (option != (const char *) NULL) (void) CloneString(&draw_info->family,option); option=GetImageOption(clone_info,"fill"); if (option != (const char *) NULL) (void) QueryColorDatabase(option,&draw_info->fill,exception); option=GetImageOption(clone_info,"gravity"); if (option != (const char *) NULL) draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,option); option=GetImageOption(clone_info,"interline-spacing"); if (option != (const char *) NULL) draw_info->interline_spacing=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"interword-spacing"); if (option != (const char *) NULL) draw_info->interword_spacing=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"kerning"); if (option != (const char *) NULL) draw_info->kerning=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"stroke"); if (option != (const char *) NULL) (void) QueryColorDatabase(option,&draw_info->stroke,exception); option=GetImageOption(clone_info,"strokewidth"); if (option != (const char *) NULL) draw_info->stroke_width=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"style"); if (option != (const char *) NULL) draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions, 
MagickFalse,option); option=GetImageOption(clone_info,"undercolor"); if (option != (const char *) NULL) (void) QueryColorDatabase(option,&draw_info->undercolor,exception); option=GetImageOption(clone_info,"weight"); if (option != (const char *) NULL) { ssize_t weight; weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(option); draw_info->weight=(size_t) weight; } exception=DestroyExceptionInfo(exception); draw_info->signature=MagickCoreSignature; clone_info=DestroyImageInfo(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P e r m u t a t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Permutate() returns the permuation of the (n,k). % % The format of the Permutate method is: % % void Permutate(ssize_t n,ssize_t k) % % A description of each parameter follows: % % o n: % % o k: % % */ static inline double Permutate(const ssize_t n,const ssize_t k) { double r; register ssize_t i; r=1.0; for (i=k+1; i <= n; i++) r*=i; for (i=1; i <= (n-k); i++) r/=i; return(r); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + T r a c e P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TracePrimitive is a collection of methods for generating graphic % primitives such as arcs, ellipses, paths, etc. % */ static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start, const PointInfo end,const PointInfo degrees) { PointInfo center, radius; center.x=0.5*(end.x+start.x); center.y=0.5*(end.y+start.y); radius.x=fabs(center.x-start.x); radius.y=fabs(center.y-start.y); return(TraceEllipse(mvg_info,center,radius,degrees)); } static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start, const PointInfo end,const PointInfo arc,const double angle, const MagickBooleanType large_arc,const MagickBooleanType sweep) { double alpha, beta, delta, factor, gamma, theta; MagickStatusType status; PointInfo center, points[3], radii; register double cosine, sine; PrimitiveInfo *primitive_info; register PrimitiveInfo *p; register ssize_t i; size_t arc_segments; ssize_t offset; offset=mvg_info->offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=0; if ((fabs(start.x-end.x) < MagickEpsilon) && (fabs(start.y-end.y) < MagickEpsilon)) return(TracePoint(primitive_info,end)); radii.x=fabs(arc.x); radii.y=fabs(arc.y); if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon)) return(TraceLine(primitive_info,start,end)); cosine=cos(DegreesToRadians(fmod((double) angle,360.0))); sine=sin(DegreesToRadians(fmod((double) angle,360.0))); center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2); center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2); delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/ (radii.y*radii.y); if (delta < MagickEpsilon) return(TraceLine(primitive_info,start,end)); if (delta > 1.0) { radii.x*=sqrt((double) delta); radii.y*=sqrt((double) delta); } points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x); points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y); points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x); points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y); alpha=points[1].x-points[0].x; beta=points[1].y-points[0].y; 
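  /*
    Endpoint-to-center conversion: with the endpoints rotated into the
    ellipse frame and scaled by the radii, the arc center lies at distance
    sqrt(1/(alpha^2+beta^2)-1/4) along the perpendicular of the chord; the
    sign chosen below (from sweep and large_arc) selects which of the two
    candidate centers is used.
  */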
factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25; if (factor <= 0.0) factor=0.0; else { factor=sqrt((double) factor); if (sweep == large_arc) factor=(-factor); } center.x=(double) ((points[0].x+points[1].x)/2-factor*beta); center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha); alpha=atan2(points[0].y-center.y,points[0].x-center.x); theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha; if ((theta < 0.0) && (sweep != MagickFalse)) theta+=2.0*MagickPI; else if ((theta > 0.0) && (sweep == MagickFalse)) theta-=2.0*MagickPI; arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+ MagickEpsilon)))); p=primitive_info; status=MagickTrue; for (i=0; i < (ssize_t) arc_segments; i++) { beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments)); gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))* sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/ sin(fmod((double) beta,DegreesToRadians(360.0))); points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x; p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y; (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y* points[0].y); (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y* points[0].y); (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y* points[1].y); (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y* points[1].y); (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y* points[2].y); (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y* points[2].y); if (i == (ssize_t) (arc_segments-1)) (p+3)->point=end; status&=TraceBezier(mvg_info,4); if (status == MagickFalse) break; p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; p+=p->coordinates; } mvg_info->offset=offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(status == 0 ? MagickFalse : MagickTrue); } static MagickBooleanType TraceBezier(MVGInfo *mvg_info, const size_t number_coordinates) { double alpha, *coefficients, weight; PointInfo end, point, *points; PrimitiveInfo *primitive_info; register PrimitiveInfo *p; register ssize_t i, j; size_t control_points, quantum; /* Allocate coefficients. 
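    Each control point contributes `quantum' sample points to the rendered
    curve; quantum grows with the largest coordinate span between control
    points but is capped at BezierQuantum, so control_points bounds the
    polyline that approximates the curve.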
*/ primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; quantum=number_coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { for (j=i+1; j < (ssize_t) number_coordinates; j++) { alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x); if (alpha > (double) quantum) quantum=(size_t) alpha; alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y); if (alpha > (double) quantum) quantum=(size_t) alpha; } } quantum=(size_t) MagickMin((double) quantum/number_coordinates, (double) BezierQuantum); control_points=quantum*number_coordinates; if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse) return(MagickFalse); primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; coefficients=(double *) AcquireQuantumMemory((size_t) number_coordinates,sizeof(*coefficients)); points=(PointInfo *) AcquireQuantumMemory((size_t) control_points, sizeof(*points)); if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL)) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); /* Compute bezier points. */ end=primitive_info[number_coordinates-1].point; for (i=0; i < (ssize_t) number_coordinates; i++) coefficients[i]=Permutate((ssize_t) number_coordinates-1,i); weight=0.0; for (i=0; i < (ssize_t) control_points; i++) { p=primitive_info; point.x=0.0; point.y=0.0; alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0); for (j=0; j < (ssize_t) number_coordinates; j++) { point.x+=alpha*coefficients[j]*p->point.x; point.y+=alpha*coefficients[j]*p->point.y; alpha*=weight/(1.0-weight); p++; } points[i]=point; weight+=1.0/control_points; } /* Bezier curves are just short segmented polys. */ p=primitive_info; for (i=0; i < (ssize_t) control_points; i++) { if (TracePoint(p,points[i]) == MagickFalse) return(MagickFalse); p+=p->coordinates; } if (TracePoint(p,end) == MagickFalse) return(MagickFalse); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); return(MagickTrue); } static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start, const PointInfo end) { double alpha, beta, radius; PointInfo offset, degrees; alpha=end.x-start.x; beta=end.y-start.y; radius=hypot((double) alpha,(double) beta); offset.x=(double) radius; offset.y=(double) radius; degrees.x=0.0; degrees.y=360.0; return(TraceEllipse(mvg_info,start,offset,degrees)); } static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center, const PointInfo radii,const PointInfo arc) { double coordinates, delta, step, x, y; PointInfo angle, point; PrimitiveInfo *primitive_info; register PrimitiveInfo *p; register ssize_t i; /* Ellipses are just short segmented polys. 
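    The angular step adapts to the size of the ellipse: delta is
    proportional to the reciprocal of the larger radius, so bigger ellipses
    get more (and therefore shorter) segments and stay visually smooth.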
*/ primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=0; if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon)) return(MagickTrue); delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y)); step=MagickPI/8.0; if ((delta >= 0.0) && (delta < (MagickPI/8.0))) step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0); angle.x=DegreesToRadians(arc.x); y=arc.y; while (y < arc.x) y+=360.0; angle.y=DegreesToRadians(y); coordinates=ceil((angle.y-angle.x)/step+1.0); if ((coordinates > (double) SSIZE_MAX) || (coordinates > (double) GetMaxMemoryRequest())) { (void) ThrowMagickException(mvg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(MagickFalse); } if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse) return(MagickFalse); primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; for (p=primitive_info; angle.x < angle.y; angle.x+=step) { point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x; point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; } point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x; point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; x=fabs(primitive_info[0].point.x- primitive_info[primitive_info->coordinates-1].point.x); y=fabs(primitive_info[0].point.y- primitive_info[primitive_info->coordinates-1].point.y); if ((x < MagickEpsilon) && (y < MagickEpsilon)) primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info, const PointInfo start,const PointInfo end) { if (TracePoint(primitive_info,start) == MagickFalse) return(MagickFalse); if ((fabs(start.x-end.x) < MagickEpsilon) && (fabs(start.y-end.y) < MagickEpsilon)) { primitive_info->primitive=PointPrimitive; primitive_info->coordinates=1; return(MagickTrue); } if (TracePoint(primitive_info+1,end) == MagickFalse) return(MagickFalse); (primitive_info+1)->primitive=primitive_info->primitive; primitive_info->coordinates=2; primitive_info->closed_subpath=MagickFalse; return(MagickTrue); } static size_t TracePath(Image *image,MVGInfo *mvg_info,const char *path) { char *next_token, token[MaxTextExtent]; const char *p; double x, y; int attribute, last_attribute; MagickStatusType status; PointInfo end = {0.0, 0.0}, points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} }, point = {0.0, 0.0}, start = {0.0, 0.0}; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; register PrimitiveInfo *q; register ssize_t i; size_t number_coordinates, z_count; ssize_t subpath_offset; subpath_offset=mvg_info->offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; status=MagickTrue; attribute=0; number_coordinates=0; z_count=0; primitive_type=primitive_info->primitive; q=primitive_info; for (p=path; *p != '\0'; ) { if (status == MagickFalse) break; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == '\0') break; last_attribute=attribute; attribute=(int) (*p++); switch (attribute) { case 'a': case 'A': { double angle = 0.0; MagickBooleanType large_arc = MagickFalse, sweep = MagickFalse; PointInfo 
arc = {0.0, 0.0}; /* Elliptical arc. */ do { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); arc.x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); arc.y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse; GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse; if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); end.x=(double) (attribute == (int) 'A' ? x : point.x+x); end.y=(double) (attribute == (int) 'A' ? y : point.y+y); status&=TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'c': case 'C': { /* Cubic Bézier curve. */ do { points[0]=point; for (i=1; i < 4; i++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); end.x=(double) (attribute == (int) 'C' ? x : point.x+x); end.y=(double) (attribute == (int) 'C' ? y : point.y+y); points[i]=end; } for (i=0; i < 4; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,4) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'H': case 'h': { do { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); point.x=(double) (attribute == (int) 'H' ? x: point.x+x); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(0); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'l': case 'L': { /* Line to. 
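    An upper-case command takes absolute coordinates; its lower-case
    variant is relative to the current point (this holds for every command
    in the path grammar).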
*/ do { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); point.x=(double) (attribute == (int) 'L' ? x : point.x+x); point.y=(double) (attribute == (int) 'L' ? y : point.y+y); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(0); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'M': case 'm': { /* Move to. */ if (mvg_info->offset != subpath_offset) { primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; primitive_info=q; subpath_offset=mvg_info->offset; } i=0; do { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); point.x=(double) (attribute == (int) 'M' ? x : point.x+x); point.y=(double) (attribute == (int) 'M' ? y : point.y+y); if (i == 0) start=point; i++; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(0); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'q': case 'Q': { /* Quadratic Bézier curve. */ do { points[0]=point; for (i=1; i < 3; i++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); if (*p == ',') p++; end.x=(double) (attribute == (int) 'Q' ? x : point.x+x); end.y=(double) (attribute == (int) 'Q' ? y : point.y+y); points[i]=end; } for (i=0; i < 3; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,3) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 's': case 'S': { /* Cubic Bézier curve. 
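    Smooth variant: the first control point is the reflection of the
    previous segment's second control point about the current point,
    points[1] = 2*points[3] - points[2]; if the previous command was not a
    cubic (C/c/S/s), the current point itself is used instead.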
*/ do { points[0]=points[3]; points[1].x=2.0*points[3].x-points[2].x; points[1].y=2.0*points[3].y-points[2].y; for (i=2; i < 4; i++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); if (*p == ',') p++; end.x=(double) (attribute == (int) 'S' ? x : point.x+x); end.y=(double) (attribute == (int) 'S' ? y : point.y+y); points[i]=end; } if (strchr("CcSs",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 4; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,4) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; last_attribute=attribute; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 't': case 'T': { /* Quadratic Bézier curve. */ do { points[0]=points[2]; points[1].x=2.0*points[2].x-points[1].x; points[1].y=2.0*points[2].y-points[1].y; for (i=2; i < 3; i++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); end.x=(double) (attribute == (int) 'T' ? x : point.x+x); end.y=(double) (attribute == (int) 'T' ? y : point.y+y); points[i]=end; } if (status == MagickFalse) break; if (strchr("QqTt",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 3; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,3) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; last_attribute=attribute; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'v': case 'V': { /* Line to. */ do { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); point.y=(double) (attribute == (int) 'V' ? y : point.y+y); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(0); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'z': case 'Z': { /* Close path. 
*/ point=start; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(0); mvg_info->offset+=q->coordinates; q+=q->coordinates; primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); primitive_info->closed_subpath=MagickTrue; number_coordinates+=primitive_info->coordinates; primitive_info=q; subpath_offset=mvg_info->offset; z_count++; break; } default: { ThrowPointExpectedException(image,token); break; } } } if (status == MagickFalse) return(0); primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { q--; q->primitive=primitive_type; if (z_count > 1) q->method=FillToBorderMethod; } q=primitive_info; return(number_coordinates); } static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info, const PointInfo start,const PointInfo end) { PointInfo point; register PrimitiveInfo *p; register ssize_t i; p=primitive_info; if (TracePoint(p,start) == MagickFalse) return(MagickFalse); p+=p->coordinates; point.x=start.x; point.y=end.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; if (TracePoint(p,end) == MagickFalse) return(MagickFalse); p+=p->coordinates; point.x=end.x; point.y=start.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; if (TracePoint(p,start) == MagickFalse) return(MagickFalse); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info, const PointInfo start,const PointInfo end,PointInfo arc) { PointInfo degrees, point, segment; PrimitiveInfo *primitive_info; register PrimitiveInfo *p; register ssize_t i; ssize_t offset; offset=mvg_info->offset; segment.x=fabs(end.x-start.x); segment.y=fabs(end.y-start.y); if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon)) { (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0; return(MagickTrue); } if (arc.x > (0.5*segment.x)) arc.x=0.5*segment.x; if (arc.y > (0.5*segment.y)) arc.y=0.5*segment.y; point.x=start.x+segment.x-arc.x; point.y=start.y+arc.y; degrees.x=270.0; degrees.y=360.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+segment.x-arc.x; point.y=start.y+segment.y-arc.y; degrees.x=0.0; degrees.y=90.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+segment.y-arc.y; degrees.x=90.0; degrees.y=180.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+arc.y; degrees.x=180.0; degrees.y=270.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) 
return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse) return(MagickFalse); p+=p->coordinates; mvg_info->offset=offset; primitive_info=(*mvg_info->primitive_info)+offset; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info, const size_t number_vertices,const double offset) { double distance; register double dx, dy; register ssize_t i; ssize_t j; dx=0.0; dy=0.0; for (i=1; i < (ssize_t) number_vertices; i++) { dx=primitive_info[0].point.x-primitive_info[i].point.x; dy=primitive_info[0].point.y-primitive_info[i].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } if (i == (ssize_t) number_vertices) i=(ssize_t) number_vertices-1L; distance=hypot((double) dx,(double) dy); primitive_info[0].point.x=(double) (primitive_info[i].point.x+ dx*(distance+offset)/distance); primitive_info[0].point.y=(double) (primitive_info[i].point.y+ dy*(distance+offset)/distance); for (j=(ssize_t) number_vertices-2; j >= 0; j--) { dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x; dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } distance=hypot((double) dx,(double) dy); primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+ dx*(distance+offset)/distance); primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+ dy*(distance+offset)/distance); return(MagickTrue); } static PrimitiveInfo *TraceStrokePolygon(const Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) { #define CheckPathExtent(pad) \ if ((ssize_t) (q+(pad)) >= (ssize_t) max_strokes) \ { \ if (~max_strokes < (pad)) \ { \ path_p=(PointInfo *) RelinquishMagickMemory(path_p); \ path_q=(PointInfo *) RelinquishMagickMemory(path_q); \ } \ else \ { \ max_strokes+=(pad); \ path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, \ sizeof(*path_p)); \ path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, \ sizeof(*path_q)); \ } \ if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) \ { \ if (path_p != (PointInfo *) NULL) \ path_p=(PointInfo *) RelinquishMagickMemory(path_p); \ if (path_q != (PointInfo *) NULL) \ path_q=(PointInfo *) RelinquishMagickMemory(path_q); \ polygon_primitive=(PrimitiveInfo *) \ RelinquishMagickMemory(polygon_primitive); \ return((PrimitiveInfo *) NULL); \ } \ } typedef struct _LineSegment { double p, q; } LineSegment; double delta_theta, dot_product, mid, miterlimit; LineSegment dx = {0,0}, dy = {0,0}, inverse_slope = {0,0}, slope = {0,0}, theta = {0,0}; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *path_p, *path_q; PrimitiveInfo *polygon_primitive, *stroke_polygon; register ssize_t i; size_t arc_segments, max_strokes, number_vertices; ssize_t j, n, p, q; /* Allocate paths. 
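    Two point buffers accumulate the two offset sides of the stroke:
    path_p follows one edge and path_q the other; max_strokes reserves room
    for every vertex on both sides plus worst-case round-join arcs.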
*/ number_vertices=primitive_info->coordinates; max_strokes=2*number_vertices+6*BezierQuantum+360; polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices+2UL,sizeof(*polygon_primitive)); if (polygon_primitive == (PrimitiveInfo *) NULL) return((PrimitiveInfo *) NULL); (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices* sizeof(*polygon_primitive)); closed_path=primitive_info[0].closed_subpath; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices]=primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive=UndefinedPrimitive; /* Compute the slope for the first line segment, p. */ dx.p=0.0; dy.p=0.0; for (n=1; n < (ssize_t) number_vertices; n++) { dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x; dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y; if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon)) break; } if (n == (ssize_t) number_vertices) { if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse)) { /* Zero length subpath. */ stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory( sizeof(*stroke_polygon)); stroke_polygon[0]=polygon_primitive[0]; stroke_polygon[0].coordinates=0; polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return(stroke_polygon); } n=(ssize_t) number_vertices-1L; } path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_p)); if (path_p == (PointInfo *) NULL) { polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return((PrimitiveInfo *) NULL); } path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_q)); if (path_q == (PointInfo *) NULL) { path_p=(PointInfo *) RelinquishMagickMemory(path_p); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return((PrimitiveInfo *) NULL); } slope.p=0.0; inverse_slope.p=0.0; if (fabs(dx.p) < MagickEpsilon) { if (dx.p >= 0.0) slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.p) < MagickEpsilon) { if (dy.p >= 0.0) inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.p=dx.p < 0.0 ? 
1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.p=dy.p/dx.p; inverse_slope.p=(-1.0/slope.p); } mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0; miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid); offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0))); offset.y=(double) (offset.x*inverse_slope.p); if ((dy.p*offset.x-dx.p*offset.y) > 0.0) { box_p[0].x=polygon_primitive[0].point.x-offset.x; box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p; box_p[1].x=polygon_primitive[n].point.x-offset.x; box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p; box_q[0].x=polygon_primitive[0].point.x+offset.x; box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p; box_q[1].x=polygon_primitive[n].point.x+offset.x; box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p; } else { box_p[0].x=polygon_primitive[0].point.x+offset.x; box_p[0].y=polygon_primitive[0].point.y+offset.y; box_p[1].x=polygon_primitive[n].point.x+offset.x; box_p[1].y=polygon_primitive[n].point.y+offset.y; box_q[0].x=polygon_primitive[0].point.x-offset.x; box_q[0].y=polygon_primitive[0].point.y-offset.y; box_q[1].x=polygon_primitive[n].point.x-offset.x; box_q[1].y=polygon_primitive[n].point.y-offset.y; } /* Create strokes for the line join attribute: bevel, miter, round. */ p=0; q=0; path_q[p++]=box_q[0]; path_p[q++]=box_p[0]; for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++) { /* Compute the slope for this line segment, q. */ dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x; dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y; dot_product=dx.q*dx.q+dy.q*dy.q; if (dot_product < 0.25) continue; slope.q=0.0; inverse_slope.q=0.0; if (fabs(dx.q) < MagickEpsilon) { if (dx.q >= 0.0) slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.q) < MagickEpsilon) { if (dy.q >= 0.0) inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.q=dx.q < 0.0 ? 
1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.q=dy.q/dx.q; inverse_slope.q=(-1.0/slope.q); } offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0))); offset.y=(double) (offset.x*inverse_slope.q); dot_product=dy.q*offset.x-dx.q*offset.y; if (dot_product > 0.0) { box_p[2].x=polygon_primitive[n].point.x-offset.x; box_p[2].y=polygon_primitive[n].point.y-offset.y; box_p[3].x=polygon_primitive[i].point.x-offset.x; box_p[3].y=polygon_primitive[i].point.y-offset.y; box_q[2].x=polygon_primitive[n].point.x+offset.x; box_q[2].y=polygon_primitive[n].point.y+offset.y; box_q[3].x=polygon_primitive[i].point.x+offset.x; box_q[3].y=polygon_primitive[i].point.y+offset.y; } else { box_p[2].x=polygon_primitive[n].point.x+offset.x; box_p[2].y=polygon_primitive[n].point.y+offset.y; box_p[3].x=polygon_primitive[i].point.x+offset.x; box_p[3].y=polygon_primitive[i].point.y+offset.y; box_q[2].x=polygon_primitive[n].point.x-offset.x; box_q[2].y=polygon_primitive[n].point.y-offset.y; box_q[3].x=polygon_primitive[i].point.x-offset.x; box_q[3].y=polygon_primitive[i].point.y-offset.y; } if (fabs((double) (slope.p-slope.q)) < MagickEpsilon) { box_p[4]=box_p[1]; box_q[4]=box_q[1]; } else { box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+ box_p[3].y)/(slope.p-slope.q)); box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y); box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+ box_q[3].y)/(slope.p-slope.q)); box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y); } CheckPathExtent(6*BezierQuantum+360); dot_product=dx.q*dy.p-dx.p*dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_p[p++]=box_p[4]; else { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { path_q[q++]=box_q[4]; path_p[p++]=box_p[4]; } else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_p[p++]=box_p[4]; else { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x); theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x); if (theta.q < theta.p) theta.q+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/ (2.0*sqrt((double) (1.0/mid))))); CheckPathExtent(arc_segments+6*BezierQuantum+360); path_q[q].x=box_q[1].x; path_q[q].y=box_q[1].y; q++; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); path_q[q].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); path_q[q].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); q++; } path_q[q++]=box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_q[q++]=box_q[4]; else { path_q[q++]=box_q[1]; 
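            /* the single intersection point box_q[4] would overshoot the
               miter limit, so the join is closed with the two corner
               points instead */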
path_q[q++]=box_q[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { path_q[q++]=box_q[4]; path_p[p++]=box_p[4]; } else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_q[q++]=box_q[4]; else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x); theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x); if (theta.p < theta.q) theta.p+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/ (2.0*sqrt((double) (1.0/mid))))); CheckPathExtent(arc_segments+6*BezierQuantum+360); path_p[p++]=box_p[1]; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); path_p[p].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); path_p[p].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); p++; } path_p[p++]=box_p[2]; break; } default: break; } slope.p=slope.q; inverse_slope.p=inverse_slope.q; box_p[0]=box_p[2]; box_p[1]=box_p[3]; box_q[0]=box_q[2]; box_q[1]=box_q[3]; dx.p=dx.q; dy.p=dy.q; n=i; } path_p[p++]=box_p[1]; path_q[q++]=box_q[1]; /* Trace stroked polygon. */ stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon)); if (stroke_polygon != (PrimitiveInfo *) NULL) { for (i=0; i < (ssize_t) p; i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=path_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; } for ( ; i < (ssize_t) (p+q+closed_path); i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[p+closed_path].point; i++; } stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; stroke_polygon[i].primitive=UndefinedPrimitive; stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1); } path_p=(PointInfo *) RelinquishMagickMemory(path_p); path_q=(PointInfo *) RelinquishMagickMemory(path_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return(stroke_polygon); }
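/*
  For reference, the stroking machinery above is normally reached through
  the public drawing API; a minimal sketch (error handling omitted, image
  and image_info acquired elsewhere):

    DrawInfo
      *draw_info;

    draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL);
    (void) CloneString(&draw_info->primitive,
      "stroke red stroke-width 5 line 10,10 120,90");
    (void) DrawImage(image,draw_info);
    draw_info=DestroyDrawInfo(draw_info);
*/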
deltaprocessing.h
#ifndef DELTAPROCESSING_H #define DELTAPROCESSING_H #include <QDebug> #include <QList> #include <opencv2/core/core.hpp> #include <opencv2/imgproc.hpp> #include <opencv2/imgcodecs.hpp> #include <memory> #include "headers/cpp_interface/fpsoptions.h" //! TODO class DeltaProcessing { //! constructors public: //! TODO DeltaProcessing() : _received_first_frames(false) , _max_video_count(3) { _init_member(); } //! methods public: //! TODO QList<cv::Mat> check_for_difference(const QList<cv::Mat> & cv_frame_list , std::shared_ptr<QList<FPSOptions>> shared_fps_options_list) { //! TODO Q_UNUSED(shared_fps_options_list); QList<cv::Mat> difference_frames; for (int i = 0; i < cv_frame_list.size(); ++i) { difference_frames.push_back(cv::Mat()); } if (!_received_first_frames) { _received_first_frames = true; // save the current frame list _cache_framelist(cv_frame_list); // return the normal frames as we can't calculate a difference return cv_frame_list; } else { // if multiple videos are loaded, the cache list has not all frames loaded, wait for next iteration // refactored this from the loop to allow omp bool all_cached_frames_filled = true; for (int i = 0; i < cv_frame_list.size(); ++i) { all_cached_frames_filled = all_cached_frames_filled && !_cached_frames[i].empty(); } // TODO test this for performance if (all_cached_frames_filled) { #pragma omp parallel for for (int i = 0; i < cv_frame_list.size(); ++i) { const quint32 pixel_difference = (*shared_fps_options_list)[i].pixel_difference.value(); difference_frames[i] = _get_difference(_cached_frames[i], cv_frame_list[i], pixel_difference).clone(); } } else { // we could try to calculate the difference for each frame which we have a difference for // but it might be a hassle if the second video got deleted live, because the indices get moved // it's easier to simply wait until the new frames arrive at the correct index. I should probably // disable removing files while exporting } } // save the current frame list _cache_framelist(cv_frame_list); return difference_frames; } //! TODO void reset_state() { _cached_frames.clear(); _init_member(); } //! methods private: //! TODO void _init_member() { // prepare buffer for each video for (int i = 0; i < _max_video_count; ++i) { _cached_frames.push_back(cv::Mat()); } // first frames can't be compared _received_first_frames = false; } //! make this chooseable? cv::Mat _get_difference(const cv::Mat & first_frame, const cv::Mat & second_frame, const quint32 pixel_difference) const { cv::Mat difference; //cv::absdiff(first_frame, second_frame, difference); _are_equal_with_draw(first_frame, second_frame, static_cast<int>(pixel_difference), difference); return difference; } //! TODO rethink this //! 
take a look at https://stackoverflow.com/questions/18464710/how-to-do-per-element-comparison-and-do-different-operation-according-to-result void _are_equal_with_draw(const cv::Mat & frame_a, const cv::Mat & frame_b, const int pixel_difference, cv::Mat & output) const { cv::Mat black_white_frame_a; cv::Mat black_white_frame_b; cv::cvtColor(frame_a, black_white_frame_a, cv::COLOR_BGRA2GRAY); cv::cvtColor(frame_b, black_white_frame_b, cv::COLOR_BGRA2GRAY); output = frame_a.clone(); for (int i = 0; i < black_white_frame_a.rows; i += 1) { for (int j = 0; j < black_white_frame_a.cols; j += 1) { int ac(std::max(black_white_frame_a.at<uchar>(i, j) , black_white_frame_b.at<uchar>(i, j))); int bc(std::min(black_white_frame_a.at<uchar>(i, j) , black_white_frame_b.at<uchar>(i, j))); if (ac - bc > pixel_difference) { // on difference, set to white output.at<cv::Vec3b>(i,j)[0] = 255; output.at<cv::Vec3b>(i,j)[1] = 255; output.at<cv::Vec3b>(i,j)[2] = 255; } else { // on "same" pixel, set to black output.at<cv::Vec3b>(i,j)[0] = 0; output.at<cv::Vec3b>(i,j)[1] = 0; output.at<cv::Vec3b>(i,j)[2] = 0; } } } } //! TODO void _cache_framelist(const QList<cv::Mat> _other) { for (int i = 0; i < _other.size(); ++i) { _cached_frames[i] = _other[i].clone(); } } //! member private: //! TODO bool _received_first_frames; //! TODO QList<cv::Mat> _cached_frames; //! TODO const quint8 _max_video_count; }; #endif // DELTAPROCESSING_H
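//! The commented-out cv::absdiff above hints at a vectorized alternative to
//! the per-pixel loop in _are_equal_with_draw. An untested sketch of the
//! same white/black difference mask built from OpenCV primitives (assumes
//! 8-bit BGRA input, like the loop):
//!
//!   cv::Mat ga, gb, mask, out;
//!   cv::cvtColor(frame_a, ga, cv::COLOR_BGRA2GRAY);
//!   cv::cvtColor(frame_b, gb, cv::COLOR_BGRA2GRAY);
//!   cv::absdiff(ga, gb, mask);                // |a - b| per pixel
//!   cv::threshold(mask, mask, pixel_difference, 255, cv::THRESH_BINARY);
//!   cv::cvtColor(mask, out, cv::COLOR_GRAY2BGR); // 3 channels, as the loop writes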
VertexStep.h
#ifndef VERTEX_STEP_H #define VERTEX_STEP_H #include <string> #include <vector> #include <set> #include <algorithm> #include "step/TraversalStep.h" #include "structure/Direction.h" #include "step/graph/GraphStep.h" #include "traversal/Traverser.h" #include "traversal/GraphTraversal.h" #include <boost/lockfree/stack.hpp> #define VERTEX_STEP 0x80 class VertexStep : public TraversalStep { private: Direction direction; std::set<std::string> edge_labels; GraphStepType gsType; public: VertexStep(Direction dir, std::vector<std::string> edge_labels_arg, GraphStepType gsType_arg) : TraversalStep(MAP, VERTEX_STEP) { direction = dir; std::for_each(edge_labels_arg.begin(), edge_labels_arg.end(), [&](std::string str){ this->edge_labels.insert(str); }); this->gsType = gsType_arg; } VertexStep(Direction dir, GraphStepType gsType_arg) : TraversalStep(MAP, VERTEX_STEP) { direction = dir; this->gsType = gsType_arg; } Direction get_direction() { return this->direction; } std::set<std::string> get_labels() { return this->edge_labels; } virtual std::string getInfo() { std::string info = "VertexStep("; info += (direction == IN ? "IN" : direction == OUT ? "OUT" : "BOTH"); info += ", "; if(!edge_labels.empty()) { info += "{"; auto p = edge_labels.begin(); for(int k = 0; k < edge_labels.size() - 1; k++) info = info + *(p++) + ", "; info = info + *p + "}"; } else info += "{}"; info = info + ", " + (gsType == VERTEX ? "VERTEX" : "EDGE"); return info + ")"; } virtual void apply(GraphTraversal* traversal, TraverserSet& traversers) { bool label_required = !this->edge_labels.empty(); std::vector<Traverser*> new_traversers; //boost::lockfree::stack<Traverser*> new_traversers(8); std::for_each(traversers.begin(), traversers.end(), [&, this](Traverser* trv) { Vertex* v = boost::any_cast<Vertex*>(trv->get()); std::vector<Edge*> edges = v->edges(direction); //#pragma omp for for(size_t k = 0; k < edges.size(); ++k) { Edge* e = edges[k]; if(label_required && this->edge_labels.count(e->label()) == 0) continue; switch(direction) { case IN: { Vertex* w = e->outV(); new_traversers.push_back(new Traverser(w)); break; } case OUT: { Vertex* w = e->inV(); new_traversers.push_back(new Traverser(w)); break; } case BOTH: { Vertex* u = e->outV(); Vertex* w = u == v ? e->inV() : u; new_traversers.push_back(new Traverser(w)); break; } } } }); traversers.swap(new_traversers); std::for_each(new_traversers.begin(), new_traversers.end(), [](Traverser* trav){delete trav;}); //traversers.clear(); //new_traversers.consume_all([&](Traverser* trv){ traversers.push_back(trv); }); } }; #endif
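/*
  Note on the BOTH branch in apply(): e->outV() is fetched first and the
  opposite endpoint is picked with `u == v ? e->inV() : u`, so each edge
  yields the far vertex and self-loops resolve to the vertex itself. The
  trailing swap/for_each pair moves the new traversers into place and then
  frees the previous generation (after the swap, new_traversers holds the
  old pointers).
*/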
sharpen.c
// Sam Siewert, July 16, 2020 // // Based on basic PSF convolution as documented in DSP Engineer's Handbook // // http://www.dspguide.com/pdfbook.htm // #include <stdlib.h> #include <stdio.h> #include <sys/types.h> #include <unistd.h> #include <fcntl.h> #include <time.h> //#define IMG_HEIGHT (240) //#define IMG_WIDTH (320) #define IMG_HEIGHT (960) #define IMG_WIDTH (1280) #define ITERATIONS (90) #define FAST_IO typedef double FLOAT; typedef unsigned int UINT32; typedef unsigned long long int UINT64; typedef unsigned char UINT8; // PPM Edge Enhancement Code // UINT8 header[22]; UINT8 R[IMG_HEIGHT*IMG_WIDTH]; UINT8 G[IMG_HEIGHT*IMG_WIDTH]; UINT8 B[IMG_HEIGHT*IMG_WIDTH]; UINT8 convR[IMG_HEIGHT*IMG_WIDTH]; UINT8 convG[IMG_HEIGHT*IMG_WIDTH]; UINT8 convB[IMG_HEIGHT*IMG_WIDTH]; // PPM image array with channels UINT8 RGB[IMG_HEIGHT*IMG_WIDTH*3]; // controls sharpness // increase from K=4.0 and F=8.0 for sharper edges #define K 4.0 #define F 8.0 //#define F 80.0 FLOAT PSF[9] = {-K/F, -K/F, -K/F, -K/F, K+1.0, -K/F, -K/F, -K/F, -K/F}; int main(int argc, char *argv[]) { int fdin, fdout, bytesRead=0, bytesWritten=0, bytesLeft, i, j, iter, rc, pixel, readcnt=0, writecnt=0; UINT64 microsecs=0, millisecs=0; FLOAT temp, fstart, fnow; struct timespec start, now; int thread_count=4; clock_gettime(CLOCK_MONOTONIC, &start); fstart = (FLOAT)start.tv_sec + (FLOAT)start.tv_nsec / 1000000000.0; if(argc < 3) { printf("Usage: sharpen input_file.ppm output_file.ppm\n"); exit(-1); } else { if((fdin = open(argv[1], O_RDONLY, 0644)) < 0) { printf("Error opening %s\n", argv[1]); } //else // printf("File opened successfully\n"); if((fdout = open(argv[2], (O_RDWR | O_CREAT), 0666)) < 0) { printf("Error opening %s\n", argv[1]); } //else // printf("Output file=%s opened successfully\n", "sharpen.ppm"); } bytesLeft=21; //printf("Reading header\n"); // read in all data do { //printf("bytesRead=%d, bytesLeft=%d\n", bytesRead, bytesLeft); bytesRead=read(fdin, (void *)header, bytesLeft); bytesLeft -= bytesRead; } while(bytesLeft > 0); header[21]='\0'; printf("header = %s\n", header); #ifdef FAST_IO bytesRead=0; bytesLeft=IMG_HEIGHT*IMG_WIDTH*3; readcnt=0; printf("START: read %d, bytesRead=%d, bytesLeft=%d\n", readcnt, bytesRead, bytesLeft); // Read in RGB data in large chunks, requesting all and reading residual do { bytesRead=read(fdin, (void *)&RGB[bytesRead], bytesLeft); bytesLeft -= bytesRead; readcnt++; printf("read %d, bytesRead=%d, bytesLeft=%d\n", readcnt, bytesRead, bytesLeft); } while((bytesLeft > 0) && (readcnt < 3)); printf("END: read %d, bytesRead=%d, bytesLeft=%d\n", readcnt, bytesRead, bytesLeft); // create in memory copy from input by channel for(i=0, pixel=0; i<IMG_HEIGHT*IMG_WIDTH; i++, pixel+=3) { R[i]=RGB[pixel+0]; convR[i]=R[i]; G[i]=RGB[pixel+1]; convG[i]=G[i]; B[i]=RGB[pixel+2]; convB[i]=B[i]; } #else // Read RGB data - Very slow one byte at time! 
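// (Each read() below is a full syscall for a single byte; the FAST_IO path
//  above pulls the whole raster in at most a few large reads instead.)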
for(i=0; i<IMG_HEIGHT*IMG_WIDTH; i++) { rc=read(fdin, (void *)&R[i], 1); convR[i]=R[i]; rc=read(fdin, (void *)&G[i], 1); convG[i]=G[i]; rc=read(fdin, (void *)&B[i], 1); convB[i]=B[i]; } #endif clock_gettime(CLOCK_MONOTONIC, &now); fnow = (FLOAT)now.tv_sec + (FLOAT)now.tv_nsec / 1000000000.0; printf("\nstart test at %lf\n", fnow-fstart); clock_gettime(CLOCK_MONOTONIC, &start); fstart = (FLOAT)start.tv_sec + (FLOAT)start.tv_nsec / 1000000000.0; #pragma omp parallel for num_threads(thread_count) for(iter=0; iter < ITERATIONS; iter++) { // Skip first and last row, no neighbors to convolve with for(i=1; i<((IMG_HEIGHT)-1); i++) { // Skip first and last column, no neighbors to convolve with for(j=1; j<((IMG_WIDTH)-1); j++) { temp=0; temp += (PSF[0] * (FLOAT)R[((i-1)*IMG_WIDTH)+j-1]); temp += (PSF[1] * (FLOAT)R[((i-1)*IMG_WIDTH)+j]); temp += (PSF[2] * (FLOAT)R[((i-1)*IMG_WIDTH)+j+1]); temp += (PSF[3] * (FLOAT)R[((i)*IMG_WIDTH)+j-1]); temp += (PSF[4] * (FLOAT)R[((i)*IMG_WIDTH)+j]); temp += (PSF[5] * (FLOAT)R[((i)*IMG_WIDTH)+j+1]); temp += (PSF[6] * (FLOAT)R[((i+1)*IMG_WIDTH)+j-1]); temp += (PSF[7] * (FLOAT)R[((i+1)*IMG_WIDTH)+j]); temp += (PSF[8] * (FLOAT)R[((i+1)*IMG_WIDTH)+j+1]); if(temp<0.0) temp=0.0; if(temp>255.0) temp=255.0; convR[(i*IMG_WIDTH)+j]=(UINT8)temp; temp=0; temp += (PSF[0] * (FLOAT)G[((i-1)*IMG_WIDTH)+j-1]); temp += (PSF[1] * (FLOAT)G[((i-1)*IMG_WIDTH)+j]); temp += (PSF[2] * (FLOAT)G[((i-1)*IMG_WIDTH)+j+1]); temp += (PSF[3] * (FLOAT)G[((i)*IMG_WIDTH)+j-1]); temp += (PSF[4] * (FLOAT)G[((i)*IMG_WIDTH)+j]); temp += (PSF[5] * (FLOAT)G[((i)*IMG_WIDTH)+j+1]); temp += (PSF[6] * (FLOAT)G[((i+1)*IMG_WIDTH)+j-1]); temp += (PSF[7] * (FLOAT)G[((i+1)*IMG_WIDTH)+j]); temp += (PSF[8] * (FLOAT)G[((i+1)*IMG_WIDTH)+j+1]); if(temp<0.0) temp=0.0; if(temp>255.0) temp=255.0; convG[(i*IMG_WIDTH)+j]=(UINT8)temp; temp=0; temp += (PSF[0] * (FLOAT)B[((i-1)*IMG_WIDTH)+j-1]); temp += (PSF[1] * (FLOAT)B[((i-1)*IMG_WIDTH)+j]); temp += (PSF[2] * (FLOAT)B[((i-1)*IMG_WIDTH)+j+1]); temp += (PSF[3] * (FLOAT)B[((i)*IMG_WIDTH)+j-1]); temp += (PSF[4] * (FLOAT)B[((i)*IMG_WIDTH)+j]); temp += (PSF[5] * (FLOAT)B[((i)*IMG_WIDTH)+j+1]); temp += (PSF[6] * (FLOAT)B[((i+1)*IMG_WIDTH)+j-1]); temp += (PSF[7] * (FLOAT)B[((i+1)*IMG_WIDTH)+j]); temp += (PSF[8] * (FLOAT)B[((i+1)*IMG_WIDTH)+j+1]); if(temp<0.0) temp=0.0; if(temp>255.0) temp=255.0; convB[(i*IMG_WIDTH)+j]=(UINT8)temp; } } } clock_gettime(CLOCK_MONOTONIC, &now); fnow = (FLOAT)now.tv_sec + (FLOAT)now.tv_nsec / 1000000000.0; printf("stop test at %lf for %d frames, fps=%lf, pps=%lf\n\n", fnow-fstart, ITERATIONS, ITERATIONS/(fnow-fstart), (ITERATIONS*IMG_HEIGHT*IMG_WIDTH)/(fnow-fstart)); rc=write(fdout, (void *)header, 21); #ifdef FAST_IO // create in memory copy from input by channel for(i=0, pixel=0; i<IMG_HEIGHT*IMG_WIDTH; i++, pixel+=3) { RGB[pixel+0]=convR[i]; RGB[pixel+1]=convG[i]; RGB[pixel+2]=convB[i]; } bytesWritten=0; bytesLeft=IMG_HEIGHT*IMG_WIDTH*3; writecnt=0; printf("START: write %d, bytesWritten=%d, bytesLeft=%d\n", writecnt, bytesWritten, bytesLeft); // Write RGB data in large chunks, requesting all at once and writing residual do { bytesWritten=write(fdout, (void *)&RGB[bytesWritten], bytesLeft); bytesLeft -= bytesWritten; writecnt++; printf("write %d, bytesWritten=%d, bytesLeft=%d\n", writecnt, bytesWritten, bytesLeft); } while((bytesLeft > 0) && (writecnt < 3)); printf("END: write %d, bytesWritten=%d, bytesLeft=%d\n", writecnt, bytesWritten, bytesLeft); #else // Write RGB data - very slow 1 byte at a time! 
for(i=0; i<IMG_HEIGHT*IMG_WIDTH; i++) { rc=write(fdout, (void *)&convR[i], 1); rc=write(fdout, (void *)&convG[i], 1); rc=write(fdout, (void *)&convB[i], 1); } #endif close(fdin); close(fdout); }
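A note on the chunked I/O above: read() and write() may transfer fewer bytes than requested, so each retry must resume at the cumulative offset, not at the last return value. A small hedged sketch of the same pattern as a reusable helper (the name read_all and its error handling are illustrative, not from the original):

// illustrative helper, same retry-at-cumulative-offset pattern as the FAST_IO loops
#include <unistd.h>
#include <cerrno>

static ssize_t read_all(int fd, unsigned char *buf, size_t total)
{
    size_t done = 0;
    while(done < total)
    {
        ssize_t n = read(fd, buf + done, total - done);
        if(n < 0) { if(errno == EINTR) continue; return -1; } // real error
        if(n == 0) break;                                     // EOF
        done += (size_t)n;
    }
    return (ssize_t)done;
}

Unlike the loops above, this helper has no fixed retry cap, so it also handles inputs delivered in many small chunks (pipes, network filesystems).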
GB_unop__exp_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__exp_fc64_fc64)
// op(A') function:  GB (_unop_tran__exp_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = cexp (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = cexp (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC64_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ;            \
    Cx [pC] = cexp (z) ;            \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EXP || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__exp_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = cexp (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = cexp (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__exp_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
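For orientation (not part of the generated file): the apply kernel above is an elementwise complex exponential over a dense value array, with an optional bitmap mask. A standalone sketch of the same operation, assuming GxB_FC64_t corresponds to a double-precision complex type as in GraphBLAS:

// illustrative only: elementwise complex exp, the operation the kernel hard-codes
#include <complex>
#include <cstdio>

int main()
{
    std::complex<double> x[2] = { {0.0, 0.0}, {0.0, 3.141592653589793} };
    for (int p = 0; p < 2; p++)
    {
        std::complex<double> z = std::exp(x[p]); // same math as cexp(aij) above
        std::printf("%g%+gi\n", z.real(), z.imag()); // 1+0i, then ~-1+0i (Euler)
    }
    return 0;
}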
GB_binop__lor_int64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__lor_int64
// A.*B function (eWiseMult):       GB_AemultB__lor_int64
// A*D function (colscale):         GB_AxD__lor_int64
// D*A function (rowscale):         GB_DxB__lor_int64
// C+=B function (dense accum):     GB_Cdense_accumB__lor_int64
// C+=b function (dense accum):     GB_Cdense_accumb__lor_int64
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__lor_int64
// C=scalar+B                       GB_bind1st__lor_int64
// C=scalar+B'                      GB_bind1st_tran__lor_int64
// C=A+scalar                       GB_bind2nd__lor_int64
// C=A'+scalar                      GB_bind2nd_tran__lor_int64

// C type:   int64_t
// A type:   int64_t
// B,b type: int64_t
// BinaryOp: cij = ((aij != 0) || (bij != 0))

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = ((x != 0) || (y != 0)) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOR || GxB_NO_INT64 || GxB_NO_LOR_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__lor_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__lor_int64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__lor_int64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__lor_int64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__lor_int64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                         \
{                                                                           \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;      \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;      \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;      \
}

GrB_Info GB_AaddB__lor_int64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__lor_int64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__lor_int64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int64_t bij = Bx [p] ;
        Cx [p] = ((x != 0) || (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__lor_int64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int64_t aij = Ax [p] ;
        Cx [p] = ((aij != 0) || (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = Ax [pA] ;                     \
    Cx [pC] = ((x != 0) || (aij != 0)) ;        \
}

GrB_Info GB_bind1st_tran__lor_int64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = Ax [pA] ;                     \
    Cx [pC] = ((aij != 0) || (y != 0)) ;        \
}

GrB_Info GB_bind2nd_tran__lor_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
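For orientation (not part of the generated file): every kernel in this file instantiates the single scalar operator GB_BINOP, which maps any nonzero int64 operand to logical true and yields 0 or 1. A standalone sketch of that semantics:

// illustrative only: the LOR_INT64 scalar operator, as a plain function
#include <cstdint>
#include <cstdio>

static inline int64_t lor_int64(int64_t x, int64_t y)
{
    return ((x != 0) || (y != 0)); // same expression as GB_BINOP above
}

int main()
{
    std::printf("%lld %lld %lld\n",
        (long long) lor_int64(0, 0),    // 0
        (long long) lor_int64(-7, 0),   // 1
        (long long) lor_int64(0, 42));  // 1
    return 0;
}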
likelihoods.h
/*!
* This file is part of GPBoost a C++ library for combining
* boosting with Gaussian process and mixed effects models
*
* Copyright (c) 2020 Fabio Sigrist. All rights reserved.
*
* Licensed under the Apache License Version 2.0. See LICENSE file in the project root for license information.
*/
#ifndef GPB_LIKELIHOODS_
#define GPB_LIKELIHOODS_

#define _USE_MATH_DEFINES // for M_SQRT1_2 and M_PI
#include <cmath>

#include <GPBoost/type_defs.h>
#include <GPBoost/sparse_matrix_utils.h>
#include <GPBoost/DF_utils.h>

#include <string>
#include <set>
#include <vector>

#include <LightGBM/utils/log.h>
using LightGBM::Log;

// Mathematical constants usually defined in cmath
#ifndef M_SQRT2
#define M_SQRT2 1.414213562373095048801688724209698079 // sqrt(2)
#endif

#include <chrono>  // only for debugging
#include <thread>  // only for debugging
//std::chrono::steady_clock::time_point beginall = std::chrono::steady_clock::now();// only for debugging
//std::chrono::steady_clock::time_point begin, end;// only for debugging
//double el_time;
//end = std::chrono::steady_clock::now();// only for debugging
//el_time = (double)(std::chrono::duration_cast<std::chrono::microseconds>(end - beginall).count()) / 1000000.;// Only for debugging
//Log::REInfo("TOTAL TIME for mode calculation: %g", el_time);// Only for debugging

namespace GPBoost {

  /*!
  * \brief This class implements the likelihoods for the Gaussian processes
  *   The template parameters <T_mat, T_chol> can be either <den_mat_t, chol_den_mat_t> or <sp_mat_t, chol_sp_mat_t>
  */
  template<typename T_mat, typename T_chol>
  class Likelihood {
  public:
    /*! \brief Constructor */
    Likelihood();

    /*!
    * \brief Constructor
    * \param type Type of likelihood
    * \param num_data Number of data points
    * \param num_re Number of random effects
    * \param has_a_vec Indicates whether the vector a_vec_ / a=ZSigmaZt^-1 is used or not
    */
    Likelihood(string_t type, data_size_t num_data, data_size_t num_re, bool has_a_vec) {
      string_t likelihood = ParseLikelihoodAlias(type);
      if (SUPPORTED_LIKELIHOODS_.find(likelihood) == SUPPORTED_LIKELIHOODS_.end()) {
        Log::REFatal("Likelihood of type '%s' is not supported.", likelihood.c_str());
      }
      likelihood_type_ = likelihood;
      num_data_ = num_data;
      num_re_ = num_re;
      if (likelihood_type_ == "gamma") {
        aux_pars_ = { 1. };//shape parameter, TODO: also estimate this parameter
      }
      chol_fact_pattern_analyzed_ = false;
      has_a_vec_ = has_a_vec;
    }

    /*!
    * \brief Initialize the mode vector mode_ (used in the Laplace approximation for non-Gaussian data)
    */
    void InitializeModeAvec() {
      mode_ = vec_t::Zero(num_re_);
      mode_previous_value_ = vec_t::Zero(num_re_);
      if (has_a_vec_) {
        a_vec_ = vec_t::Zero(num_re_);
        a_vec_previous_value_ = vec_t::Zero(num_re_);
      }
      mode_initialized_ = true;
      first_deriv_ll_ = vec_t(num_data_);
      second_deriv_neg_ll_ = vec_t(num_data_);
    }

    /*!
    * \brief Reset the mode to its previous value. This is used if overly large step sizes lead to an increase
    *   in the objective function: the proposed values (covariance parameters and linear coefficients) are then
    *   discarded, and consequently the mode is also reset to its previous value.
    */
    void ResetModeToPreviousValue() {
      CHECK(mode_initialized_);
      mode_ = mode_previous_value_;
      if (has_a_vec_) {
        a_vec_ = a_vec_previous_value_;
      }
    }

    /*! \brief Destructor */
    ~Likelihood() {
    }

    /*!
    * \brief Returns the type of likelihood
    */
    string_t GetLikelihood() const {
      return(likelihood_type_);
    }

    /*!
    * \brief Set the type of likelihood
    * \param type Likelihood name
    */
    void SetLikelihood(const string_t& type) {
      string_t likelihood = ParseLikelihoodAlias(type);
      if (SUPPORTED_LIKELIHOODS_.find(likelihood) == SUPPORTED_LIKELIHOODS_.end()) {
        Log::REFatal("Likelihood of type '%s' is not supported.", likelihood.c_str());
      }
      likelihood_type_ = likelihood;
      chol_fact_pattern_analyzed_ = false;
    }

    /*!
    * \brief Returns the type of the response variable (label). Either "double" or "int"
    */
    string_t label_type() const {
      if (likelihood_type_ == "bernoulli_probit" || likelihood_type_ == "bernoulli_logit" ||
        likelihood_type_ == "poisson") {
        return("int");
      }
      else {
        return("double");
      }
    }

    /*!
    * \brief Checks whether the response variables (labels) have the correct values
    * \param y_data Response variable data
    * \param num_data Number of data points
    */
    template <typename T>//T can be double or float
    void CheckY(const T* y_data, const data_size_t num_data) const {
      if (likelihood_type_ == "bernoulli_probit" || likelihood_type_ == "bernoulli_logit") {
        //#pragma omp parallel for schedule(static)//problematic with error message below...
        for (data_size_t i = 0; i < num_data; ++i) {
          if (fabs(y_data[i]) >= EPSILON_ && !AreSame<T>(y_data[i], 1.)) {
            Log::REFatal("Response variable (label) data needs to be 0 or 1 for likelihood of type '%s'.", likelihood_type_.c_str());
          }
        }
      }
      else if (likelihood_type_ == "poisson") {
        for (data_size_t i = 0; i < num_data; ++i) {
          if (y_data[i] < 0) {
            Log::REFatal("Found negative response variable. Response variable cannot be negative for likelihood of type '%s'.", likelihood_type_.c_str());
          }
          else {
            double intpart;
            if (std::modf(y_data[i], &intpart) != 0.0) {
              Log::REFatal("Found non-integer response variable. Response variable can only be integer valued for likelihood of type '%s'.", likelihood_type_.c_str());
            }
          }
        }
      }
      else if (likelihood_type_ == "gamma") {
        for (data_size_t i = 0; i < num_data; ++i) {
          if (y_data[i] < 0) {
            Log::REFatal("Found negative response variable. Response variable cannot be negative for likelihood of type '%s'.", likelihood_type_.c_str());
          }
        }
      }
    }

    /*!
    * \brief Calculate the normalizing constant for (log-)likelihood calculation
    * \param y_data Response variable data
    * \param num_data Number of data points
    */
    template <typename T>//T can be double or int
    void CalculateNormalizingConstant(const T* y_data, const data_size_t num_data) {
      if (likelihood_type_ == "poisson") {
        double log_normalizing_constant = 0.;
#pragma omp parallel for schedule(static) reduction(+:log_normalizing_constant)
        for (data_size_t i = 0; i < num_data; ++i) {
          if (y_data[i] > 1) {
            double log_factorial = 0.;
            for (int k = 2; k <= y_data[i]; ++k) {
              log_factorial += std::log(k);
            }
            log_normalizing_constant += log_factorial;
          }
        }
        log_normalizing_constant_ = log_normalizing_constant;
      }
      else if (likelihood_type_ == "gamma") {
        ////Currently not used since aux_pars_[0]==1 and thus log_normalizing_constant_==0
        //double log_normalizing_constant = 0.;
//#pragma omp parallel for schedule(static) reduction(+:log_normalizing_constant)
        //for (data_size_t i = 0; i < num_data; ++i) {
        //  log_normalizing_constant += -(aux_pars_[0] - 1.) * std::log(y_data[i]) - aux_pars_[0] * std::log(aux_pars_[0]) + std::tgamma(aux_pars_[0]);
        //}
        //log_normalizing_constant_ = log_normalizing_constant;
        log_normalizing_constant_ = 0. * y_data[0];//y_data[0] is just a trick to avoid compiler warnings complaining about unreferenced parameters...
      }
      normalizing_constant_has_been_calculated_ = true;
    }

    /*!
    * \brief Evaluate the log-likelihood conditional on the latent variable (=location_par)
    * \param y_data Response variable data if response variable is continuous
    * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
    * \param location_par Location parameter (random plus fixed effects)
    * \param num_data Number of data points
    */
    double LogLikelihood(const double* y_data, const int* y_data_int,
      const double* location_par, const data_size_t num_data) {
      if (!normalizing_constant_has_been_calculated_) {
        Log::REFatal("The normalizing constant has not been calculated. Call 'CalculateNormalizingConstant' first.");
      }
      double ll = 0.;
      if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static) reduction(+:ll)
        for (data_size_t i = 0; i < num_data; ++i) {
          if (y_data_int[i] == 0) {
            ll += std::log(1 - normalCDF(location_par[i]));
          }
          else {
            ll += std::log(normalCDF(location_par[i]));
          }
        }
      }
      else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static) reduction(+:ll)
        for (data_size_t i = 0; i < num_data; ++i) {
          ll += y_data_int[i] * location_par[i] - std::log(1 + std::exp(location_par[i]));
          //Alternative version:
          //if (y_data_int[i] == 0) {
          //  ll += std::log(1 - CondMeanLikelihood(location_par[i]));//CondMeanLikelihood = logistic function
          //}
          //else {
          //  ll += std::log(CondMeanLikelihood(location_par[i]));
          //}
        }
      }
      else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static) reduction(+:ll)
        for (data_size_t i = 0; i < num_data; ++i) {
          ll += y_data_int[i] * location_par[i] - std::exp(location_par[i]);
        }
        ll -= log_normalizing_constant_;
      }
      else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static) reduction(+:ll)
        for (data_size_t i = 0; i < num_data; ++i) {
          ll += -aux_pars_[0] * (location_par[i] + y_data[i] * std::exp(-location_par[i]));
        }
        ll -= log_normalizing_constant_;
      }
      return(ll);
    }

    /*!
    * \brief Calculate the first derivative of the log-likelihood with respect to the location parameter
    * \param y_data Response variable data if response variable is continuous
    * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
    * \param location_par Location parameter (random plus fixed effects)
    * \param num_data Number of data points
    */
    void CalcFirstDerivLogLik(const double* y_data, const int* y_data_int,
      const double* location_par, const data_size_t num_data) {
      if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static)
        for (data_size_t i = 0; i < num_data; ++i) {
          if (y_data_int[i] == 0) {
            first_deriv_ll_[i] = -normalPDF(location_par[i]) / (1 - normalCDF(location_par[i]));
          }
          else {
            first_deriv_ll_[i] = normalPDF(location_par[i]) / normalCDF(location_par[i]);
          }
        }
      }
      else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static)
        for (data_size_t i = 0; i < num_data; ++i) {
          first_deriv_ll_[i] = y_data_int[i] - CondMeanLikelihood(location_par[i]);//CondMeanLikelihood = logistic(x)
        }
      }
      else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static)
        for (data_size_t i = 0; i < num_data; ++i) {
          first_deriv_ll_[i] = y_data_int[i] - std::exp(location_par[i]);
        }
      }
      else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static)
        for (data_size_t i = 0; i < num_data; ++i) {
          first_deriv_ll_[i] = aux_pars_[0] * (y_data[i] * std::exp(-location_par[i]) - 1.);
        }
      }
    }

    /*!
    * \brief Calculate the second derivative of the negative (!) log-likelihood with respect to the location parameter
    * \param y_data Response variable data if response variable is continuous
    * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
    * \param location_par Location parameter (random plus fixed effects)
    * \param num_data Number of data points
    */
    void CalcSecondDerivNegLogLik(const double* y_data, const int* y_data_int,
      const double* location_par, const data_size_t num_data) {
      if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static)
        for (data_size_t i = 0; i < num_data; ++i) {
          double dnorm = normalPDF(location_par[i]);
          double pnorm = normalCDF(location_par[i]);
          if (y_data_int[i] == 0) {
            double dnorm_frac_one_min_pnorm = dnorm / (1. - pnorm);
            second_deriv_neg_ll_[i] = -dnorm_frac_one_min_pnorm * (location_par[i] - dnorm_frac_one_min_pnorm);
          }
          else {
            double dnorm_frac_pnorm = dnorm / pnorm;
            second_deriv_neg_ll_[i] = dnorm_frac_pnorm * (location_par[i] + dnorm_frac_pnorm);
          }
        }
      }
      else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static)
        for (data_size_t i = 0; i < num_data; ++i) {
          double exp_loc_i = std::exp(location_par[i]);
          second_deriv_neg_ll_[i] = exp_loc_i * std::pow(1. + exp_loc_i, -2);
        }
      }
      else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static)
        for (data_size_t i = 0; i < num_data; ++i) {
          second_deriv_neg_ll_[i] = std::exp(location_par[i]);
        }
      }
      else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static)
        for (data_size_t i = 0; i < num_data; ++i) {
          second_deriv_neg_ll_[i] = aux_pars_[0] * y_data[i] * std::exp(-location_par[i]);
        }
      }
    }

    /*!
    * \brief Calculate the third derivative of the log-likelihood with respect to the location parameter
    * \param y_data Response variable data if response variable is continuous
    * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
    * \param location_par Location parameter (random plus fixed effects)
    * \param num_data Number of data points
    * \param[out] third_deriv Third derivative of the log-likelihood with respect to the location parameter. Need to pre-allocate memory of size num_data
    */
    void CalcThirdDerivLogLik(const double* y_data, const int* y_data_int,
      const double* location_par, const data_size_t num_data, double* third_deriv) {
      if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static)
        for (data_size_t i = 0; i < num_data; ++i) {
          double dnorm = normalPDF(location_par[i]);
          double pnorm = normalCDF(location_par[i]);
          if (y_data_int[i] == 0) {
            double dnorm_frac_one_min_pnorm = dnorm / (1. - pnorm);
            third_deriv[i] = dnorm_frac_one_min_pnorm * (1 - location_par[i] * location_par[i] +
              dnorm_frac_one_min_pnorm * (3 * location_par[i] - 2 * dnorm_frac_one_min_pnorm));
          }
          else {
            double dnorm_frac_pnorm = dnorm / pnorm;
            third_deriv[i] = dnorm_frac_pnorm * (location_par[i] * location_par[i] - 1 +
              dnorm_frac_pnorm * (3 * location_par[i] + 2 * dnorm_frac_pnorm));
          }
        }
      }
      else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static)
        for (data_size_t i = 0; i < num_data; ++i) {
          double exp_loc_i = std::exp(location_par[i]);
          third_deriv[i] = -exp_loc_i * (1. - exp_loc_i) * std::pow(1 + exp_loc_i, -3);
        }
      }
      else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static)
        for (data_size_t i = 0; i < num_data; ++i) {
          third_deriv[i] = -std::exp(location_par[i]);
        }
      }
      else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static)
        for (data_size_t i = 0; i < num_data; ++i) {
          third_deriv[i] = aux_pars_[0] * y_data[i] * std::exp(-location_par[i]);
        }
      }
    }

    /*!
    * \brief Calculate the mean of the likelihood conditional on the (predicted) latent variable
    *   Used for adaptive Gauss-Hermite quadrature for the prediction of the response variable
    */
    inline double CondMeanLikelihood(const double value) const {
      if (likelihood_type_ == "gaussian") {
        return value;
      }
      else if (likelihood_type_ == "bernoulli_probit") {
        return normalCDF(value);
      }
      else if (likelihood_type_ == "bernoulli_logit") {
        return 1. / (1. + std::exp(-value));
      }
      else if (likelihood_type_ == "poisson") {
        return std::exp(value);
      }
      else if (likelihood_type_ == "gamma") {
        return std::exp(value);
      }
      else {
        Log::REFatal("CondMeanLikelihood: Likelihood of type '%s' is not supported.", likelihood_type_.c_str());
        return 0.;
      }
    }

    /*!
    * \brief Calculate the first derivative of the logarithm of the mean of the likelihood conditional on the (predicted) latent variable
    *   Used for adaptive Gauss-Hermite quadrature for the prediction of the response variable
    */
    inline double FirstDerivLogCondMeanLikelihood(const double value) const {
      if (likelihood_type_ == "bernoulli_logit") {
        return 1. / (1. + std::exp(value));
      }
      else if (likelihood_type_ == "poisson") {
        return 1.;
      }
      else if (likelihood_type_ == "gamma") {
        return 1.;
      }
      else {
        Log::REFatal("FirstDerivLogCondMeanLikelihood: Likelihood of type '%s' is not supported.", likelihood_type_.c_str());
        return 0.;
      }
    }

    /*!
    * \brief Calculate the second derivative of the logarithm of the mean of the likelihood conditional on the (predicted) latent variable
    *   Used for adaptive Gauss-Hermite quadrature for the prediction of the response variable
    */
    inline double SecondDerivLogCondMeanLikelihood(const double value) const {
      if (likelihood_type_ == "bernoulli_logit") {
        double exp_x = std::exp(value);
        return -exp_x / ((1. + exp_x) * (1. + exp_x));
      }
      else if (likelihood_type_ == "poisson") {
        return 0.;
      }
      else if (likelihood_type_ == "gamma") {
        return 0.;
      }
      else {
        Log::REFatal("SecondDerivLogCondMeanLikelihood: Likelihood of type '%s' is not supported.", likelihood_type_.c_str());
        return 0.;
      }
    }

    /*!
    * \brief Do Cholesky decomposition
    * \param[out] chol_fact Cholesky factor
    * \param psi Matrix for which the Cholesky decomposition should be done
    */
    template <class T_mat_1, typename std::enable_if< std::is_same<sp_mat_t, T_mat_1>::value>::type* = nullptr >
    void CalcChol(T_chol& chol_fact, const T_mat_1& psi) {
      if (!chol_fact_pattern_analyzed_) {
        chol_fact.analyzePattern(psi);
        chol_fact_pattern_analyzed_ = true;
      }
      chol_fact.factorize(psi);
    }
    template <class T_mat_1, typename std::enable_if< std::is_same<den_mat_t, T_mat_1>::value>::type* = nullptr >
    void CalcChol(T_chol& chol_fact, const T_mat_1& psi) {
      chol_fact.compute(psi);
    }

    /*!
    * \brief Apply permutation matrix of Cholesky factor (if it exists)
    * \param chol_fact Cholesky factor
    * \param[out] M Matrix to which the permutation is applied
    */
    template <class T_mat_1, typename std::enable_if< std::is_same<sp_mat_t, T_mat_1>::value>::type* = nullptr >
    void ApplyPermutationCholeskyFactor(const T_chol& chol_fact, T_mat_1& M) {
      if (chol_fact.permutationP().size() > 0) {//Apply permutation if an ordering is used
        M = chol_fact.permutationP() * M;
      }
    }
    template <class T_mat_1, typename std::enable_if< std::is_same<den_mat_t, T_mat_1>::value>::type* = nullptr >
    void ApplyPermutationCholeskyFactor(const T_chol&, T_mat_1&) {
    }

    /*!
    * \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximate marginal log-likelihood.
    *   Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + Wsqrt * Z*Sigma*Zt * Wsqrt).
    *   In the notation of the paper: "Sigma = Z*Sigma*Z^T" and "Z = Id".
    *   This version is used for the Laplace approximation when dense matrices are used (e.g. GP models).
    * \param y_data Response variable data if response variable is continuous
    * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
    * \param fixed_effects Fixed effects component of location parameter
    * \param num_data Number of data points
    * \param ZSigmaZt Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
    * \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode
    */
    void FindModePostRandEffCalcMLLStable(const double* y_data, const int* y_data_int,
      const double* fixed_effects, const data_size_t num_data,
      const std::shared_ptr<T_mat> ZSigmaZt, double& approx_marginal_ll) {
      // Initialize variables
      if (!mode_initialized_) {
        InitializeModeAvec();
      }
      else {
        mode_previous_value_ = mode_;
        a_vec_previous_value_ = a_vec_;
      }
      bool no_fixed_effects = (fixed_effects == nullptr);
      vec_t location_par;
      // Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion
      if (no_fixed_effects) {
        approx_marginal_ll = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, mode_.data(), num_data);
      }
      else {
        location_par = vec_t(num_data);
#pragma omp parallel for schedule(static)
        for (data_size_t i = 0; i < num_data; ++i) {
          location_par[i] = mode_[i] + fixed_effects[i];
        }
        approx_marginal_ll = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
      }
      double approx_marginal_ll_new;
      vec_t rhs, v_aux;//auxiliary variables
      sp_mat_t Wsqrt(num_data, num_data);//diagonal matrix with square root of negative second derivatives on the diagonal (sqrt of negative Hessian of log-likelihood)
      Wsqrt.setIdentity();
      T_mat Id(num_data, num_data);
      Id.setIdentity();
      T_mat Id_plus_Wsqrt_ZSigmaZt_Wsqrt;
      // Start finding mode
      int it;
      bool terminate_optim = false;
      for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) {
        // Calculate first and second derivative of log-likelihood
        if (no_fixed_effects) {
          CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data);
          CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data);
        }
        else {
          CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);
          CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
        }
        // Calculate Cholesky factor of matrix B = Id + Wsqrt * Z*Sigma*Zt * Wsqrt
        Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt();
        Id_plus_Wsqrt_ZSigmaZt_Wsqrt = Id + Wsqrt * (*ZSigmaZt) * Wsqrt;
        CalcChol<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Id_plus_Wsqrt_ZSigmaZt_Wsqrt);
        // Update mode and a_vec_
        rhs.array() = second_deriv_neg_ll_.array() * mode_.array() + first_deriv_ll_.array();
        v_aux = Wsqrt * (*ZSigmaZt) * rhs;
        a_vec_ = rhs - Wsqrt * (chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.solve(v_aux));
        mode_ = (*ZSigmaZt) * a_vec_;
        // Calculate new objective function
        if (no_fixed_effects) {
          approx_marginal_ll_new = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, mode_.data(), num_data);
        }
        else {
          // Update location parameter of log-likelihood for calculation of approx. marginal log-likelihood (objective function)
#pragma omp parallel for schedule(static)
          for (data_size_t i = 0; i < num_data; ++i) {
            location_par[i] = mode_[i] + fixed_effects[i];
          }
          approx_marginal_ll_new = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
        }
        if (std::isnan(approx_marginal_ll_new) || std::isinf(approx_marginal_ll_new)) {
          Log::REDebug(NA_OR_INF_WARNING_);
          break;
        }
        if (it == 0) {
          if (std::abs(approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) { // allow for decreases in first iteration
            terminate_optim = true;
          }
        }
        else {
          if ((approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) {
            terminate_optim = true;
          }
        }
        if (terminate_optim) {
          if (approx_marginal_ll_new < approx_marginal_ll) {
            Log::REDebug(NO_INCREASE_IN_MLL_WARNING_);
          }
          approx_marginal_ll = approx_marginal_ll_new;
          break;
        }
        else {
          approx_marginal_ll = approx_marginal_ll_new;
        }
      }
      if (it == MAXIT_MODE_NEWTON_) {
        Log::REDebug(NO_CONVERGENCE_WARNING_);
      }
      if (no_fixed_effects) {
        CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
        CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data);
      }
      else {
        CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
        CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
      }
      Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt();
      Id_plus_Wsqrt_ZSigmaZt_Wsqrt = Id + Wsqrt * (*ZSigmaZt) * Wsqrt;
      CalcChol<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Id_plus_Wsqrt_ZSigmaZt_Wsqrt);
      approx_marginal_ll -= ((T_mat)chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL()).diagonal().array().log().sum();
      mode_has_been_calculated_ = true;
      ////Only for debugging
      //Log::REInfo("FindModePostRandEffCalcMLLStable");
      //Log::REInfo("Number of iterations: %d", it);
      //Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll);
      //Log::REInfo("Mode");
      //for (int i = 0; i < 10; ++i) {
      //  Log::REInfo("mode_[%d]: %g", i, mode_[i]);
      //}
      //Log::REInfo("a");
      //for (int i = 0; i < 5; ++i) {
      //  Log::REInfo("a[%d]: %g", i, a_vec_[i]);
      //}
    }//end FindModePostRandEffCalcMLLStable

    /*!
    * \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximate marginal log-likelihood.
    *   Calculations are done on the random effects (b) scale and not the "data scale" (Zb) using
    *   a numerically stable variant based on factorizing ("inverting") B = (Id + ZtWZsqrt * Sigma * ZtWZsqrt).
    *   This version is used for the Laplace approximation when there is only one Gaussian process and
    *   there are a lot of multiple observations at the same location, i.e., the dimension of the random effects b is much smaller than Zb
    * \param y_data Response variable data if response variable is continuous
    * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
    * \param fixed_effects Fixed effects component of location parameter
    * \param num_data Number of data points
    * \param Sigma Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
    * \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
    * \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode
    */
    void FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale(const double* y_data, const int* y_data_int,
      const double* fixed_effects, const data_size_t num_data,
      const std::shared_ptr<T_mat> Sigma, const data_size_t* const random_effects_indices_of_data,
      double& approx_marginal_ll) {
      //std::chrono::steady_clock::time_point beginall = std::chrono::steady_clock::now();// only for debugging
      //std::chrono::steady_clock::time_point begin, end;// only for debugging
      //double el_time;
      // Initialize variables
      if (!mode_initialized_) {
        InitializeModeAvec();
      }
      else {
        mode_previous_value_ = mode_;
        a_vec_previous_value_ = a_vec_;
      }
      vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects
      if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
        for (data_size_t i = 0; i < num_data; ++i) {
          location_par[i] = mode_[random_effects_indices_of_data[i]];
        }
      }
      else {
#pragma omp parallel for schedule(static)
        for (data_size_t i = 0; i < num_data; ++i) {
          location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
        }
      }
      // Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion
      approx_marginal_ll = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
      double approx_marginal_ll_new;
      vec_t diag_sqrt_ZtWZ(num_re_);//sqrt of diagonal matrix ZtWZ
      T_mat Id(num_re_, num_re_);
      Id.setIdentity();
      T_mat Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt;
      vec_t rhs, v_aux;
      int it;
      bool terminate_optim = false;
      for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) {
        // Calculate first and second derivative of log-likelihood
        CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);
        CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
        // Calculate right hand side for mode update
        diag_sqrt_ZtWZ.setZero();
#pragma omp parallel
        {
          vec_t diag_sqrt_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
          for (data_size_t i = 0; i < num_data; ++i) {
            diag_sqrt_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
          }
#pragma omp critical
          {
            for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
              diag_sqrt_ZtWZ[i_re] += diag_sqrt_ZtWZ_private[i_re];
            }
          }//end omp critical
        }//end omp parallel
        //Non-parallel version
        //for (data_size_t i = 0; i < num_data; ++i) {
        //  diag_sqrt_ZtWZ[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
        //}
        rhs = (diag_sqrt_ZtWZ.array() * mode_.array()).matrix();//rhs = ZtWZ * mode_ + Zt * first_deriv_ll_ for updating mode
#pragma omp parallel
        {
          vec_t rhs_private = vec_t::Zero(num_re_);
#pragma omp for
          for (data_size_t i = 0; i < num_data; ++i) {
            rhs_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
          }
#pragma omp critical
          {
            for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
              rhs[i_re] += rhs_private[i_re];
            }
          }//end omp critical
        }//end omp parallel
        // Calculate Cholesky factor of matrix B = Id + ZtWZsqrt * Sigma * ZtWZsqrt
        diag_sqrt_ZtWZ.array() = diag_sqrt_ZtWZ.array().sqrt();
        Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt = Id + diag_sqrt_ZtWZ.asDiagonal() * (*Sigma) * diag_sqrt_ZtWZ.asDiagonal();
        CalcChol<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt);//this is the bottleneck (for large data and sparse matrices)
        ////only for debugging
        //Log::REInfo("FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale: Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt: number non zeros = %d", GetNumberNonZeros<T_mat>(Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt));//only for debugging
        //T_mat chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt = chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL();//only for debugging
        //Log::REInfo("FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale: chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_: number non zeros = %d", GetNumberNonZeros<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt));//only for debugging
        // Update mode and a_vec_
        v_aux = (*Sigma) * rhs;
        v_aux.array() *= diag_sqrt_ZtWZ.array();
        a_vec_ = -chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.solve(v_aux);
        a_vec_.array() *= diag_sqrt_ZtWZ.array();
        a_vec_.array() += rhs.array();
        mode_ = (*Sigma) * a_vec_;
        // Update location parameter of log-likelihood for calculation of approx. marginal log-likelihood (objective function)
        if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
          for (data_size_t i = 0; i < num_data; ++i) {
            location_par[i] = mode_[random_effects_indices_of_data[i]];
          }
        }
        else {
#pragma omp parallel for schedule(static)
          for (data_size_t i = 0; i < num_data; ++i) {
            location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
          }
        }
        // Calculate new objective function
        approx_marginal_ll_new = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
        if (std::isnan(approx_marginal_ll_new) || std::isinf(approx_marginal_ll_new)) {
          Log::REDebug(NA_OR_INF_WARNING_);
          break;
        }
        //Log::REInfo("it = %d, approx_marginal_ll = %g, approx_marginal_ll_new = %g", it, approx_marginal_ll, approx_marginal_ll_new);//Only for debugging
        if (it == 0) {
          if (std::abs(approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) { // allow for decreases in first iteration
            terminate_optim = true;
          }
        }
        else {
          if ((approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) {
            terminate_optim = true;
          }
        }
        if (terminate_optim) {
          if (approx_marginal_ll_new < approx_marginal_ll) {
            Log::REDebug(NO_INCREASE_IN_MLL_WARNING_);
          }
          approx_marginal_ll = approx_marginal_ll_new;
          break;
        }
        else {
          approx_marginal_ll = approx_marginal_ll_new;
        }
      }//end loop for finding mode
      if (it == MAXIT_MODE_NEWTON_) {
        Log::REDebug(NO_CONVERGENCE_WARNING_);
      }
      CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
      CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
      diag_sqrt_ZtWZ.setZero();
#pragma omp parallel
      {
        vec_t diag_sqrt_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
        for (data_size_t i = 0; i < num_data; ++i) {
          diag_sqrt_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
        }
#pragma omp critical
        {
          for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
            diag_sqrt_ZtWZ[i_re] += diag_sqrt_ZtWZ_private[i_re];
          }
        }//end omp critical
      }//end omp parallel
      diag_sqrt_ZtWZ.array() = diag_sqrt_ZtWZ.array().sqrt();
      Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt = Id + diag_sqrt_ZtWZ.asDiagonal() * (*Sigma) * diag_sqrt_ZtWZ.asDiagonal();
      CalcChol<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt);
      approx_marginal_ll -= ((T_mat)chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL()).diagonal().array().log().sum();
      mode_has_been_calculated_ = true;
      ////Only for debugging
      //Log::REInfo("FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale");
      //Log::REInfo("Number of iterations: %d", it);
      //Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll);
      //Log::REInfo("Mode");
      //for (int i = 0; i < 10; ++i) {
      //  Log::REInfo("mode_[%d]: %g", i, mode_[i]);
      //}
      //Log::REInfo("a");
      //for (int i = 0; i < 5; ++i) {
      //  Log::REInfo("a[%d]: %g", i, a_vec_[i]);
      //}
      //end = std::chrono::steady_clock::now();// only for debugging
      //el_time = (double)(std::chrono::duration_cast<std::chrono::microseconds>(end - beginall).count()) / 1000000.;// Only for debugging
      //Log::REInfo("FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale: TOTAL TIME for mode calculation: %g", el_time);// Only for debugging
    }//end FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale

    /*!
    * \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximate marginal log-likelihood.
    *   Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
    *   NOTE: IT IS ASSUMED THAT SIGMA IS A DIAGONAL MATRIX
    *   This version is used for the Laplace approximation when there are only grouped random effects.
    * \param y_data Response variable data if response variable is continuous
    * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
    * \param fixed_effects Fixed effects component of location parameter
    * \param num_data Number of data points
    * \param SigmaI Inverse covariance matrix of latent random effect. Currently, this needs to be a diagonal matrix
    * \param Zt Transpose Z^T of random effect design matrix that relates latent random effects to observations/likelihoods
    * \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode
    */
    void FindModePostRandEffCalcMLLGroupedRE(const double* y_data, const int* y_data_int,
      const double* fixed_effects, const data_size_t num_data,
      const sp_mat_t& SigmaI, const sp_mat_t& Zt, double& approx_marginal_ll) {
      // Initialize variables
      if (!mode_initialized_) {
        InitializeModeAvec();
      }
      else {
        mode_previous_value_ = mode_;
      }
      sp_mat_t Z = Zt.transpose();
      vec_t location_par = Z * mode_;//location parameter = mode of random effects + fixed effects
      if (fixed_effects != nullptr) {
#pragma omp parallel for schedule(static)
        for (data_size_t i = 0; i < num_data; ++i) {
          location_par[i] += fixed_effects[i];
        }
      }
      // Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion
      approx_marginal_ll = -0.5 * (mode_.dot(SigmaI * mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
      double approx_marginal_ll_new;
      sp_mat_t SigmaI_plus_ZtWZ;
      vec_t rhs;
      // Start finding mode
      int it;
      bool terminate_optim = false;
      for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) {
        // Calculate first and second derivative of log-likelihood
        CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);
        CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
        // Calculate Cholesky factor and update mode
        rhs = Zt * first_deriv_ll_ - SigmaI * mode_;//right hand side for updating mode
        SigmaI_plus_ZtWZ = SigmaI + Zt * second_deriv_neg_ll_.asDiagonal() * Z;
        SigmaI_plus_ZtWZ.makeCompressed();
        if (!chol_fact_pattern_analyzed_) {
          chol_fact_SigmaI_plus_ZtWZ_grouped_.analyzePattern(SigmaI_plus_ZtWZ);
          chol_fact_pattern_analyzed_ = true;
        }
        chol_fact_SigmaI_plus_ZtWZ_grouped_.factorize(SigmaI_plus_ZtWZ);
        mode_ += chol_fact_SigmaI_plus_ZtWZ_grouped_.solve(rhs);
        // Update location parameter of log-likelihood for calculation of approx. marginal log-likelihood (objective function)
        location_par = Z * mode_;
        if (fixed_effects != nullptr) {
#pragma omp parallel for schedule(static)
          for (data_size_t i = 0; i < num_data; ++i) {
            location_par[i] += fixed_effects[i];
          }
        }
        // Calculate new objective function
        approx_marginal_ll_new = -0.5 * (mode_.dot(SigmaI * mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
        if (std::isnan(approx_marginal_ll_new) || std::isinf(approx_marginal_ll_new)) {
          Log::REDebug(NA_OR_INF_WARNING_);
          break;
        }
        if (it == 0) {
          if (std::abs(approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) { // allow for decreases in first iteration
            terminate_optim = true;
          }
        }
        else {
          if ((approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) {
            terminate_optim = true;
          }
        }
        if (terminate_optim) {
          if (approx_marginal_ll_new < approx_marginal_ll) {
            Log::REDebug(NO_INCREASE_IN_MLL_WARNING_);
          }
          approx_marginal_ll = approx_marginal_ll_new;
          break;
        }
        else {
          approx_marginal_ll = approx_marginal_ll_new;
        }
      }//end mode finding algorithm
      if (it == MAXIT_MODE_NEWTON_) {
        Log::REDebug(NO_CONVERGENCE_WARNING_);
      }
      CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
      CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
      SigmaI_plus_ZtWZ = SigmaI + Zt * second_deriv_neg_ll_.asDiagonal() * Z;
      SigmaI_plus_ZtWZ.makeCompressed();
      chol_fact_SigmaI_plus_ZtWZ_grouped_.factorize(SigmaI_plus_ZtWZ);
      approx_marginal_ll += -((sp_mat_t)chol_fact_SigmaI_plus_ZtWZ_grouped_.matrixL()).diagonal().array().log().sum() + 0.5 * SigmaI.diagonal().array().log().sum();
      mode_has_been_calculated_ = true;
      ////Only for debugging
      //Log::REInfo("FindModePostRandEffCalcMLLGroupedRE");
      //Log::REInfo("Number of iterations: %d", it);
      //Log::REInfo("Mode");
      //for (int i = 0; i < 10; ++i) {
      //  Log::REInfo("mode_[%d]: %g", i, mode_[i]);
      //}
      //Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll);
      //double approx_marginal_ll_1 = -0.5 * (mode_.dot(SigmaI * mode_));
      //double approx_marginal_ll_2 = LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
      //double approx_marginal_ll_3 = 0.5 * diag_SigmaI_plus_ZtWZ_.array().log().sum() - 0.5 * SigmaI.diagonal().array().log().sum();
      //Log::REInfo("approx_marginal_ll_1: %g", approx_marginal_ll_1);
      //Log::REInfo("approx_marginal_ll_2: %g", approx_marginal_ll_2);
      //Log::REInfo("approx_marginal_ll_3: %g", approx_marginal_ll_3);
      //std::this_thread::sleep_for(std::chrono::milliseconds(200));
    }//end FindModePostRandEffCalcMLLGroupedRE

    /*!
    * \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximate marginal log-likelihood.
    *   Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
    *   This version is used for the Laplace approximation when there are only grouped random effects with only one grouping variable.
* \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param sigma2 Variance of random effects * \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related * \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode */ void FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const double sigma2, const data_size_t* const random_effects_indices_of_data, double& approx_marginal_ll) { // Initialize variables if (!mode_initialized_) { InitializeModeAvec(); } else { mode_previous_value_ = mode_; } vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects if (fixed_effects == nullptr) { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]]; } } else { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i]; } } // Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion approx_marginal_ll = -0.5 / sigma2 * (mode_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data); double approx_marginal_ll_new; vec_t rhs; diag_SigmaI_plus_ZtWZ_ = vec_t(num_re_); // Start finding mode int it; bool terminate_optim = false; for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) { // Calculate first and second derivative of log-likelihood CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data); CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data); // Calculate rhs for mode update rhs = - mode_ / sigma2;//right hand side for updating mode #pragma omp parallel { vec_t rhs_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { rhs_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { rhs[i_re] += rhs_private[i_re]; } }//end omp critical }//end omp parallel // Update mode diag_SigmaI_plus_ZtWZ_.setZero(); #pragma omp parallel { vec_t diag_SigmaI_plus_ZtWZ_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { diag_SigmaI_plus_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { diag_SigmaI_plus_ZtWZ_[i_re] += diag_SigmaI_plus_ZtWZ_private[i_re]; } }//end omp critical }//end omp parallel diag_SigmaI_plus_ZtWZ_.array() += 1. / sigma2; mode_ += (rhs.array() / diag_SigmaI_plus_ZtWZ_.array()).matrix(); // Update location parameter of log-likelihood for calculation of approx. 
marginal log-likelihood (objective function) if (fixed_effects == nullptr) { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]]; } } else { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i]; } } // Calculate new objective function approx_marginal_ll_new = -0.5 / sigma2 * (mode_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data); if (std::isnan(approx_marginal_ll_new) || std::isinf(approx_marginal_ll_new)) { Log::REDebug(NA_OR_INF_WARNING_); break; } if (it == 0) { if (std::abs(approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) { // allow for decreases in first iteration terminate_optim = true; } } else { if ((approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) { terminate_optim = true; } } if (terminate_optim) { if (approx_marginal_ll_new < approx_marginal_ll) { Log::REDebug(NO_INCREASE_IN_MLL_WARNING_); } approx_marginal_ll = approx_marginal_ll_new; break; } else { approx_marginal_ll = approx_marginal_ll_new; } }//end mode finding algorithm if (it == MAXIT_MODE_NEWTON_) { Log::REDebug(NO_CONVERGENCE_WARNING_); } CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data); diag_SigmaI_plus_ZtWZ_.setZero(); #pragma omp parallel { vec_t diag_SigmaI_plus_ZtWZ_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { diag_SigmaI_plus_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { diag_SigmaI_plus_ZtWZ_[i_re] += diag_SigmaI_plus_ZtWZ_private[i_re]; } }//end omp critical }//end omp parallel diag_SigmaI_plus_ZtWZ_.array() += 1. / sigma2; approx_marginal_ll -= 0.5 * diag_SigmaI_plus_ZtWZ_.array().log().sum() + 0.5 * num_re_ * std::log(sigma2); mode_has_been_calculated_ = true; ////Only for debugging //Log::REInfo("FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale"); //Log::REInfo("Number of iterations: %d", it); //Log::REInfo("Mode"); //for (int i = 0; i < 10; ++i) { // Log::REInfo("mode_[%d]: %g", i, mode_[i]); //} //Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll); //std::this_thread::sleep_for(std::chrono::milliseconds(200)); }//end FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale /*! * \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximative marginal log-likelihood. * Calculations are done by factorizing ("inverting) (Sigma^-1 + W) where it is assumed that an approximate Cholesky factor * of Sigma^-1 has previously been calculated using a Vecchia approximation. * This version is used for the Laplace approximation when there are only GP random effects and the Vecchia approximation is used. 
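*
* As a hedged sketch (hypothetical names, standard Eigen calls), the matrix that gets
* factorized in this routine is assembled from the Vecchia factors as follows:
* \code
* #include <Eigen/Dense>
* #include <Eigen/SparseCore>
* typedef Eigen::SparseMatrix<double> SpMat;
* // Sigma^-1 = B^T * D^-1 * B with B sparse triangular (unit diagonal) and D^-1 diagonal;
* // adding W on the diagonal gives the matrix whose Cholesky factor is computed below.
* SpMat precision_plus_W(const SpMat& B, const SpMat& D_inv, const Eigen::VectorXd& W) {
*     SpMat SigmaI_plus_W = B.transpose() * D_inv * B;
*     SigmaI_plus_W.diagonal().array() += W.array();
*     SigmaI_plus_W.makeCompressed();
*     return SigmaI_plus_W;
* }
* \endcode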
* Caveat: Sigma^-1 + W can be not very sparse * \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param B Matrix B in Vecchia approximation Sigma^-1 = B^T D^-1 B ("=" Cholesky factor) * \param D_inv Diagonal matrix D^-1 in Vecchia approximation Sigma^-1 = B^T D^-1 B * \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode */ void FindModePostRandEffCalcMLLVecchia(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const sp_mat_t& B, const sp_mat_t& D_inv, double& approx_marginal_ll) { // Initialize variables if (!mode_initialized_) { InitializeModeAvec(); } else { mode_previous_value_ = mode_; } bool no_fixed_effects = (fixed_effects == nullptr); sp_mat_t SigmaI = B.transpose() * D_inv * B; vec_t location_par;//location parameter = mode of random effects + fixed effects double approx_marginal_ll_new; sp_mat_t SigmaI_plus_W; vec_t rhs, B_mode; // Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion B_mode = B * mode_; if (no_fixed_effects) { approx_marginal_ll = -0.5 * (B_mode.dot(D_inv * B_mode)) + LogLikelihood(y_data, y_data_int, mode_.data(), num_data); } else { location_par = vec_t(num_data); #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[i] + fixed_effects[i]; } approx_marginal_ll = -0.5 * (B_mode.dot(D_inv * B_mode)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data); } // Start finding mode int it; bool terminate_optim = false; for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) { // Calculate first and second derivative of log-likelihood if (no_fixed_effects) { CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data); CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data); } else { CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data); CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data); } // Calculate Cholesky factor and update mode rhs.array() = second_deriv_neg_ll_.array() * mode_.array() + first_deriv_ll_.array();//right hand side for updating mode SigmaI_plus_W = SigmaI; SigmaI_plus_W.diagonal().array() += second_deriv_neg_ll_.array(); SigmaI_plus_W.makeCompressed(); //Calculation of the Cholesky factor is the bottleneck if (!chol_fact_pattern_analyzed_) { chol_fact_SigmaI_plus_ZtWZ_vecchia_.analyzePattern(SigmaI_plus_W); chol_fact_pattern_analyzed_ = true; } chol_fact_SigmaI_plus_ZtWZ_vecchia_.factorize(SigmaI_plus_W);//This is the bottleneck for large data //Log::REInfo("SigmaI_plus_W: number non zeros = %d", (int)SigmaI_plus_W.nonZeros());//only for debugging //Log::REInfo("chol_fact_SigmaI_plus_ZtWZ: Number non zeros = %d", (int)((sp_mat_t)chol_fact_SigmaI_plus_ZtWZ_vecchia_.matrixL()).nonZeros());//only for debugging mode_ = chol_fact_SigmaI_plus_ZtWZ_vecchia_.solve(rhs); // Calculate new objective function B_mode = B * mode_; if (no_fixed_effects) { approx_marginal_ll_new = -0.5 * (B_mode.dot(D_inv * B_mode)) + LogLikelihood(y_data, y_data_int, mode_.data(), num_data); } else { // Update location parameter of log-likelihood for calculation of approx. 
marginal log-likelihood (objective function) #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[i] + fixed_effects[i]; } approx_marginal_ll_new = -0.5 * (B_mode.dot(D_inv * B_mode)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data); } if (std::isnan(approx_marginal_ll_new) || std::isinf(approx_marginal_ll_new)) { Log::REDebug(NA_OR_INF_WARNING_); break; } if (it == 0) { if (std::abs(approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) { // allow for decreases in first iteration terminate_optim = true; } } else { if ((approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) { terminate_optim = true; } } if (terminate_optim) { if (approx_marginal_ll_new < approx_marginal_ll) { Log::REDebug(NO_INCREASE_IN_MLL_WARNING_); } approx_marginal_ll = approx_marginal_ll_new; break; } else { approx_marginal_ll = approx_marginal_ll_new; } } // end loop for mode finding if (it == MAXIT_MODE_NEWTON_) { Log::REDebug(NO_CONVERGENCE_WARNING_); } if (no_fixed_effects) { CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data); } else { CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data); } SigmaI_plus_W = SigmaI; SigmaI_plus_W.diagonal().array() += second_deriv_neg_ll_.array(); SigmaI_plus_W.makeCompressed(); chol_fact_SigmaI_plus_ZtWZ_vecchia_.factorize(SigmaI_plus_W); approx_marginal_ll += -((sp_mat_t)chol_fact_SigmaI_plus_ZtWZ_vecchia_.matrixL()).diagonal().array().log().sum() + 0.5 * D_inv.diagonal().array().log().sum(); mode_has_been_calculated_ = true; ////Only for debugging //Log::REInfo("FindModePostRandEffCalcMLLVecchia"); //Log::REInfo("Number of iterations: %d", it); //Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll); //Log::REInfo("Mode"); //for (int i = 0; i < 10; ++i) { // Log::REInfo("mode_[%d]: %g", i, mode_[i]); //} //std::this_thread::sleep_for(std::chrono::milliseconds(200)); }//end FindModePostRandEffCalcMLLVecchia /*! * \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients * Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + Wsqrt * Z*Sigma*Zt * Wsqrt). * In the notation of the paper: "Sigma = Z*Sigma*Z^T" and "Z = Id". * This version is used for the Laplace approximation when dense matrices are used (e.g. GP models). * \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param ZSigmaZt Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t) * \param re_comps_cluster_i Vector with different random effects components. 
We pass the component pointers to save memory in order to avoid passing a large collection of gardient covariance matrices in memory//TODO: better way than passing this? (relying on all gradients in a vector can lead to large memory consumption) * \param calc_cov_grad If true, the gradient wrt the covariance parameters are calculated * \param calc_F_grad If true, the gradient wrt the fixed effects mean function F are calculated * \param[out] cov_grad Gradient of approximate marginal log-likelihood wrt covariance parameters (needs to be preallocated of size num_cov_par) * \param[out] fixed_effect_grad Gradient of approximate marginal log-likelihood wrt fixed effects F (note: this is passed as a Eigen vector in order to avoid the need for copying) * \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false) */ void CalcGradNegMargLikelihoodLAApproxStable(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const std::shared_ptr<T_mat> ZSigmaZt, const std::vector<std::shared_ptr<RECompBase<T_mat>>>& re_comps_cluster_i, bool calc_cov_grad, bool calc_F_grad, double* cov_grad, vec_t& fixed_effect_grad, bool calc_mode = false) { if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + Wsqrt * ZSigmaZt * Wsqrt) at mode double mll;//approximate marginal likelihood. This is a by-product that is not used here. FindModePostRandEffCalcMLLStable(y_data, y_data_int, fixed_effects, num_data, ZSigmaZt, mll); } else { CHECK(mode_has_been_calculated_); } // Initialize variables bool no_fixed_effects = (fixed_effects == nullptr); vec_t location_par;//location parameter = mode of random effects + fixed effects T_mat L_inv_Wsqrt(num_data, num_data);//diagonal matrix with square root of negative second derivatives on the diagonal (sqrt of negative Hessian of log-likelihood) L_inv_Wsqrt.setIdentity(); L_inv_Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt(); vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood if (no_fixed_effects) { CalcThirdDerivLogLik(y_data, y_data_int, mode_.data(), num_data, third_deriv.data()); } else { location_par = vec_t(num_data); #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[i] + fixed_effects[i]; } CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data()); } ApplyPermutationCholeskyFactor<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, L_inv_Wsqrt); chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL().solveInPlace(L_inv_Wsqrt);//L_inv_Wsqrt = L\Wsqrt T_mat L_inv_Wsqrt_ZSigmaZt = L_inv_Wsqrt * (*ZSigmaZt); // calculate gradient wrt covariance parameters if (calc_cov_grad) { T_mat WI_plus_Sigma_inv = L_inv_Wsqrt.transpose() * L_inv_Wsqrt;//WI_plus_Sigma_inv = Wsqrt * L^T\(L\Wsqrt) = (W^-1 + Sigma)^-1 // calculate gradient of approx. 
// marginal log-likelihood wrt the mode
// note: use (i) (Sigma^-1 + W)^-1 = Sigma - Sigma*(W^-1 + Sigma)^-1*Sigma = ZSigmaZt - L_inv_Wsqrt_ZSigmaZt^T*L_inv_Wsqrt_ZSigmaZt and (ii) "Z=Id"
vec_t d_mll_d_mode = (-0.5 * ((*ZSigmaZt).diagonal() - ((T_mat)(L_inv_Wsqrt_ZSigmaZt.transpose() * L_inv_Wsqrt_ZSigmaZt)).diagonal()).array() * third_deriv.array()).matrix();
vec_t d_mode_d_par;//derivative of mode wrt a covariance parameter
vec_t v_aux;//auxiliary variable for calculating d_mode_d_par
int par_count = 0;
double explicit_derivative;
for (int j = 0; j < (int)re_comps_cluster_i.size(); ++j) {
    for (int ipar = 0; ipar < re_comps_cluster_i[j]->NumCovPar(); ++ipar) {
        std::shared_ptr<T_mat> SigmaDeriv = re_comps_cluster_i[j]->GetZSigmaZtGrad(ipar, true, 1.);
        // calculate explicit derivative of approx. marginal log-likelihood
        explicit_derivative = -0.5 * (double)(a_vec_.transpose() * (*SigmaDeriv) * a_vec_) +
            0.5 * (WI_plus_Sigma_inv.cwiseProduct(*SigmaDeriv)).sum();
        // calculate implicit derivative (through mode) of approx. marginal log-likelihood
        v_aux = (*SigmaDeriv) * first_deriv_ll_;
        d_mode_d_par = (v_aux.array() - ((*ZSigmaZt) * WI_plus_Sigma_inv * v_aux).array()).matrix();
        cov_grad[par_count] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);
        par_count++;
    }
}
////Only for debugging
//Log::REInfo("explicit_derivative: %g", explicit_derivative);
//for (int i = 0; i < 5; ++i) {
//  Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]);
//}
//for (int i = 0; i < 5; ++i) {
//  Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]);
//}
//Log::REInfo("cov_grad");
//for (int i = 0; i < par_count; ++i) {
//  Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]);
//}
}//end calc_cov_grad
// calculate gradient wrt fixed effects
if (calc_F_grad) {
    T_mat L_inv_Wsqrt_ZSigmaZt_sqr = L_inv_Wsqrt_ZSigmaZt.cwiseProduct(L_inv_Wsqrt_ZSigmaZt);
    vec_t ZSigmaZtI_plus_W_inv_diag = (*ZSigmaZt).diagonal() - L_inv_Wsqrt_ZSigmaZt_sqr.transpose() * vec_t::Ones(L_inv_Wsqrt_ZSigmaZt_sqr.rows());// diagonal of (ZSigmaZt^-1 + W)^-1
    vec_t d_mll_d_mode = (-0.5 * ZSigmaZtI_plus_W_inv_diag.array() * third_deriv.array()).matrix();// gradient of approx. marginal likelihood wrt the mode and thus also F here
    vec_t L_inv_Wsqrt_ZSigmaZt_d_mll_d_mode = L_inv_Wsqrt_ZSigmaZt * d_mll_d_mode;// for implicit derivative
    vec_t ZSigmaZtI_plus_W_inv_d_mll_d_mode = (*ZSigmaZt) * d_mll_d_mode - L_inv_Wsqrt_ZSigmaZt.transpose() * L_inv_Wsqrt_ZSigmaZt_d_mll_d_mode;
    vec_t d_mll_d_F_implicit = (ZSigmaZtI_plus_W_inv_d_mll_d_mode.array() * second_deriv_neg_ll_.array()).matrix();// implicit derivative
    fixed_effect_grad = -first_deriv_ll_ + d_mll_d_mode - d_mll_d_F_implicit;
}//end calc_F_grad
}//end CalcGradNegMargLikelihoodLAApproxStable
/*!
* \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients
* Calculations are done on the random effects (b) scale and not the "data scale" (Zb) using
* a numerically stable variant based on factorizing ("inverting") B = (Id + ZtWZsqrt * Sigma * ZtWZsqrt).
* This version is used for the Laplace approximation when there is only one Gaussian process and
* there are many repeated observations at the same location, i.e., the dimension of the random effects b is much smaller than Zb
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param Sigma Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param re_comps_cluster_i Vector with different random effects components. We pass the component pointers to save memory in order to avoid passing a large collection of gradient covariance matrices in memory//TODO: better way than passing this? (relying on all gradients in a vector can lead to large memory consumption)
* \param calc_cov_grad If true, the gradient wrt the covariance parameters is calculated
* \param calc_F_grad If true, the gradient wrt the fixed effects mean function F is calculated
* \param[out] cov_grad Gradient of approximate marginal log-likelihood wrt covariance parameters (needs to be preallocated of size num_cov_par)
* \param[out] fixed_effect_grad Gradient of approximate marginal log-likelihood wrt fixed effects F (note: this is passed as an Eigen vector in order to avoid the need for copying)
* \param calc_mode If true, the mode of the random effects posterior is calculated, otherwise the values in mode and a_vec_ are used (default=false)
*/
void CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale(const double* y_data, const int* y_data_int,
    const double* fixed_effects, const data_size_t num_data,
    const std::shared_ptr<T_mat> Sigma, const data_size_t* const random_effects_indices_of_data,
    const std::vector<std::shared_ptr<RECompBase<T_mat>>>& re_comps_cluster_i,
    bool calc_cov_grad, bool calc_F_grad, double* cov_grad, vec_t& fixed_effect_grad, bool calc_mode = false) {
    //std::chrono::steady_clock::time_point beginall = std::chrono::steady_clock::now();// only for debugging
    //std::chrono::steady_clock::time_point begin, end;// only for debugging
    //double el_time;
    CHECK(re_comps_cluster_i.size() == 1);
    if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + Wsqrt * ZSigmaZt * Wsqrt) at mode
        double mll;//approximate marginal likelihood. This is a by-product that is not used here.
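        // Illustrative note (hypothetical symbols; not part of the original computation): the
        // gradient wrt a covariance parameter theta assembled below combines an explicit and
        // an implicit term via the chain rule,
        //   d mll / d theta = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par),
        // with d_mode_d_par = (I + Sigma*W)^-1 * (dSigma/dtheta) * dll, which the code
        // evaluates without ever forming (I + Sigma*W)^-1 explicitly.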
FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale(y_data, y_data_int, fixed_effects, num_data, Sigma, random_effects_indices_of_data, mll); } else { CHECK(mode_has_been_calculated_); } // Initialize variables vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects if (fixed_effects == nullptr) { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]]; } } else { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i]; } } // Matrix ZtWZsqrt vec_t diag_ZtWZ = vec_t::Zero(num_re_); #pragma omp parallel { vec_t diag_sqrt_ZtWZ_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { diag_sqrt_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { diag_ZtWZ.array()[i_re] += diag_sqrt_ZtWZ_private[i_re]; } }//end omp critical }//end omp parallel T_mat L_inv_ZtWZsqrt(num_re_, num_re_);//diagonal matrix with square root of diagonal of ZtWZ L_inv_ZtWZsqrt.setIdentity(); L_inv_ZtWZsqrt.diagonal().array() = diag_ZtWZ.array().sqrt(); vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data()); vec_t diag_ZtThirdDerivZ(num_re_);//sqrt of diagonal matrix ZtWZ diag_ZtThirdDerivZ.setZero(); #pragma omp parallel { vec_t diag_ZtThirdDerivZ_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { diag_ZtThirdDerivZ_private[random_effects_indices_of_data[i]] += third_deriv[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { diag_ZtThirdDerivZ[i_re] += diag_ZtThirdDerivZ_private[i_re]; } }//end omp critical }//end omp parallel ApplyPermutationCholeskyFactor<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, L_inv_ZtWZsqrt); chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL().solveInPlace(L_inv_ZtWZsqrt);//L_inv_ZtWZsqrt = L\ZtWZsqrt //This is the bottleneck (in this first part) for large data when using sparse matrices T_mat L_inv_ZtWZsqrt_Sigma = L_inv_ZtWZsqrt * (*Sigma); ////Only for debugging //Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale: L_inv_ZtWZsqrt: number non zeros = %d", GetNumberNonZeros<T_mat>(L_inv_ZtWZsqrt));//Only for debugging //Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale: L_inv_ZtWZsqrt_Sigma: number non zeros = %d", GetNumberNonZeros<T_mat>(L_inv_ZtWZsqrt_Sigma));//Only for debugging // calculate gradient wrt covariance parameters if (calc_cov_grad) { vec_t ZtFirstDeriv(num_re_);//sqrt of diagonal matrix ZtWZ ZtFirstDeriv.setZero(); #pragma omp parallel { vec_t ZtFirstDeriv_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { ZtFirstDeriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { ZtFirstDeriv[i_re] += ZtFirstDeriv_private[i_re]; } }//end omp critical }//end omp parallel T_mat ZtWZI_Sigma_inv = L_inv_ZtWZsqrt.transpose() * L_inv_ZtWZsqrt;//ZtWZI_Sigma_inv = ZtWZsqrt * L^T\(L\ZtWZsqrt) = ((ZtWZ)^-1 + Sigma)^-1 // calculate gradient of approx. 
marginal log-likelihood wrt the mode // note: use (i) (Sigma^-1 + W)^-1 = Sigma - Sigma*(W^-1 + Sigma)^-1*Sigma = ZSigmaZt - L_inv_ZtWZsqrt_Sigma^T*L_inv_ZtWZsqrt_Sigma vec_t d_mll_d_mode = (-0.5 * ((*Sigma).diagonal() - ((T_mat)(L_inv_ZtWZsqrt_Sigma.transpose() * L_inv_ZtWZsqrt_Sigma)).diagonal()).array() * diag_ZtThirdDerivZ.array()).matrix(); vec_t d_mode_d_par;//derivative of mode wrt to a covariance parameter vec_t v_aux;//auxiliary variable for caclulating d_mode_d_par int par_count = 0; double explicit_derivative; for (int j = 0; j < (int)re_comps_cluster_i.size(); ++j) { for (int ipar = 0; ipar < re_comps_cluster_i[j]->NumCovPar(); ++ipar) { std::shared_ptr<T_mat> SigmaDeriv = re_comps_cluster_i[j]->GetZSigmaZtGrad(ipar, true, 1.); // calculate explicit derivative of approx. mariginal log-likelihood explicit_derivative = -0.5 * (double)(a_vec_.transpose() * (*SigmaDeriv) * a_vec_) + 0.5 * (ZtWZI_Sigma_inv.cwiseProduct(*SigmaDeriv)).sum(); // calculate implicit derivative (through mode) of approx. mariginal log-likelihood v_aux = (*SigmaDeriv) * ZtFirstDeriv; d_mode_d_par = (v_aux.array() - ((*Sigma) * ZtWZI_Sigma_inv * v_aux).array()).matrix(); cov_grad[par_count] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par); par_count++; } } ////Only for debugging //Log::REInfo("explicit_derivative: %g", explicit_derivative); //for (int i = 0; i < 5; ++i) { // Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]); //} //for (int i = 0; i < 5; ++i) { // Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]); //} //Log::REInfo("cov_grad"); //for (int i = 0; i < par_count; ++i) { // Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]); //} }//end calc_cov_grad // calculate gradient wrt fixed effects if (calc_F_grad) { T_mat L_inv_ZtWZsqrt_Sigma_sqr = L_inv_ZtWZsqrt_Sigma.cwiseProduct(L_inv_ZtWZsqrt_Sigma); vec_t SigmaI_plus_ZtWZ_inv_diag = (*Sigma).diagonal() - L_inv_ZtWZsqrt_Sigma_sqr.transpose() * vec_t::Ones(L_inv_ZtWZsqrt_Sigma_sqr.rows());// diagonal of (Sigma^-1 + ZtWZ) ^ -1 vec_t d_mll_d_mode = (-0.5 * SigmaI_plus_ZtWZ_inv_diag.array() * diag_ZtThirdDerivZ.array()).matrix();// gradient of approx. marginal likelihood wrt the mode vec_t L_inv_ZtWZsqrt_Sigma_d_mll_d_mode = L_inv_ZtWZsqrt_Sigma * d_mll_d_mode;// for implicit derivative vec_t SigmaI_plus_ZtWZ_inv_d_mll_d_mode = (*Sigma) * d_mll_d_mode - L_inv_ZtWZsqrt_Sigma.transpose() * L_inv_ZtWZsqrt_Sigma_d_mll_d_mode; fixed_effect_grad = -first_deriv_ll_; #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { fixed_effect_grad[i] += -0.5 * third_deriv[i] * SigmaI_plus_ZtWZ_inv_diag[random_effects_indices_of_data[i]] - second_deriv_neg_ll_[i] * SigmaI_plus_ZtWZ_inv_d_mll_d_mode[random_effects_indices_of_data[i]]; } }//end calc_F_grad //end = std::chrono::steady_clock::now();// only for debugging //el_time = (double)(std::chrono::duration_cast<std::chrono::microseconds>(end - beginall).count()) / 1000000.;// Only for debugging //Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale: TOTAL TIME: %g", el_time);// Only for debugging }//end CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale /*! * \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients * Calculations are done by directly factorizing ("inverting) (Sigma^-1 + Zt*W*Z). 
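*
* A hedged sketch of the factorization and log-determinant evaluation this version relies on
* (hypothetical names, standard Eigen calls):
* \code
* #include <Eigen/SparseCholesky>
* typedef Eigen::SparseMatrix<double> SpMat;
* // Factorize Sigma^-1 + Z^T W Z once; the same factor serves the Newton solves and the
* // 0.5*log|Sigma^-1 + Z^T W Z| term of the Laplace approximation.
* double log_det_via_cholesky(const SpMat& SigmaI_plus_ZtWZ) {
*     Eigen::SimplicialLLT<SpMat> chol(SigmaI_plus_ZtWZ);
*     SpMat L = chol.matrixL();
*     return 2. * L.diagonal().array().log().sum();
* }
* \endcode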
* NOTE: IT IS ASSUMED THAT SIGMA IS A DIAGONAL MATRIX * This version is used for the Laplace approximation when there are only grouped random effects. * \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param SigmaI Inverse covariance matrix of latent random effect. Currently, this needs to be a diagonal matrix * \param Zt Transpose Z^T of random effect design matrix that relates latent random effects to observations/likelihoods * \param calc_cov_grad If true, the gradient wrt the covariance parameters are calculated * \param calc_F_grad If true, the gradient wrt the fixed effects mean function F are calculated * \param[out] cov_grad Gradient wrt covariance parameters (needs to be preallocated of size num_cov_par) * \param[out] fixed_effect_grad Gradient wrt fixed effects F (note: this is passed as a Eigen vector in order to avoid the need for copying) * \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false) */ void CalcGradNegMargLikelihoodLAApproxGroupedRE(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const sp_mat_t& SigmaI, const sp_mat_t& Zt, std::vector<data_size_t> cum_num_rand_eff_cluster_i, bool calc_cov_grad, bool calc_F_grad, double* cov_grad, vec_t& fixed_effect_grad, bool calc_mode = false) { int num_REs = (int)SigmaI.cols();//number of random effect realizations int num_comps = (int)cum_num_rand_eff_cluster_i.size() - 1;//number of different random effect components if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + W at mode double mll;//approximate marginal likelihood. This is a by-product that is not used here. FindModePostRandEffCalcMLLGroupedRE(y_data, y_data_int, fixed_effects, num_data, SigmaI, Zt, mll); } else { CHECK(mode_has_been_calculated_); } // Initialize variables sp_mat_t Z = Zt.transpose(); vec_t location_par = Z * mode_;//location parameter = mode of random effects + fixed effects if (fixed_effects != nullptr) { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] += fixed_effects[i]; } } vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data()); // Calculate (Sigma^-1 + Zt*W*Z)^-1 sp_mat_t L_inv(num_REs, num_REs); L_inv.setIdentity(); if (chol_fact_SigmaI_plus_ZtWZ_grouped_.permutationP().size() > 0) {//Permutation is only used when having an ordering L_inv = chol_fact_SigmaI_plus_ZtWZ_grouped_.permutationP() * L_inv; } chol_fact_SigmaI_plus_ZtWZ_grouped_.matrixL().solveInPlace(L_inv); sp_mat_t SigmaI_plus_ZtWZ_inv = L_inv.transpose() * L_inv; // calculate gradient of approx. 
// marginal likelihood wrt the mode
//Note: the calculation of d_mll_d_mode is the bottleneck of this function (corresponding lines below are indicated with * and, in particular, **)
vec_t d_mll_d_mode(num_REs);
sp_mat_t Zt_third_deriv = Zt * third_deriv.asDiagonal();//every column of Z multiplied elementwise by third_deriv
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_REs; ++i) {
    vec_t diag_d_W_d_mode_i = Zt_third_deriv.row(i);//*can be slow
    //calculate Z^T * diag(diag_d_W_d_mode_i) * Z = Z^T * diag(Z.col(i) * third_deriv) * Z
    sp_mat_t Zt_d_W_d_mode_i_Z = (Zt * diag_d_W_d_mode_i.asDiagonal() * Z).pruned();//**can be very slow. Note that this is also slow when the middle diagonal matrix is a pruned sparse matrix
    ////Variant 2: slower
    //sp_mat_t Zt_third_deriv_diag = sp_mat_t(((vec_t)Zt_third_deriv.row(i)).asDiagonal());
    //sp_mat_t Zt_d_W_d_mode_i_Z = Zt * Zt_third_deriv_diag * Z;//= Z^T * diag(diag_d_W_d_mode_i) * Z = Z^T * diag(Z.col(i) * third_deriv) * Z
    ////Variant 3: slower
    //vec_t Z_i = Z.col(i);// column number i of Z
    //vec_t diag_d_W_d_mode_i = (Z_i.array() * third_deriv.array()).matrix();//diagonal of derivative of matrix W wrt random effect number i
    //sp_mat_t Zt_d_W_d_mode_i_Z = Zt * diag_d_W_d_mode_i.asDiagonal() * Z;//= Z^T * diag(diag_d_W_d_mode_i) * Z
    d_mll_d_mode[i] = -0.5 * (Zt_d_W_d_mode_i_Z.cwiseProduct(SigmaI_plus_ZtWZ_inv)).sum();
}
// calculate gradient wrt covariance parameters
if (calc_cov_grad) {
    sp_mat_t ZtWZ = Zt * second_deriv_neg_ll_.asDiagonal() * Z;
    vec_t d_mode_d_par;//derivative of mode wrt a covariance parameter
    vec_t v_aux;//auxiliary variable for calculating d_mode_d_par
    vec_t SigmaI_mode = SigmaI * mode_;
    double explicit_derivative;
    sp_mat_t I_j(num_REs, num_REs);//Diagonal matrix with 1 on the diagonal for all random effects of component j and 0's otherwise
    sp_mat_t I_j_ZtWZ;
    for (int j = 0; j < num_comps; ++j) {
        // calculate explicit derivative of approx. marginal log-likelihood
        std::vector<Triplet_t> triplets;//for constructing I_j
        triplets.reserve(cum_num_rand_eff_cluster_i[j + 1] - cum_num_rand_eff_cluster_i[j]);
        explicit_derivative = 0.;
        for (int i = cum_num_rand_eff_cluster_i[j]; i < cum_num_rand_eff_cluster_i[j + 1]; ++i) {
            triplets.emplace_back(i, i, 1.);
            explicit_derivative += SigmaI_mode[i] * mode_[i];
        }
        // Alternative version using parallelization (not faster)
        //#pragma omp parallel
        //  {
        //      std::vector<Triplet_t> triplets_private;
        //      //triplets_private.reserve(cum_num_rand_eff_cluster_i[num_comps]);
        //#pragma omp for nowait reduction(+:explicit_derivative)
        //      for (int i = cum_num_rand_eff_cluster_i[j]; i < cum_num_rand_eff_cluster_i[j + 1]; ++i) {
        //          triplets_private.emplace_back(i, i, 1.);
        //          explicit_derivative += SigmaI_mode[i] * mode_[i];
        //      }
        //#pragma omp critical
        //      triplets.insert(triplets.end(), triplets_private.begin(), triplets_private.end());
        //  }
        //#pragma omp parallel for schedule(static) reduction(+:explicit_derivative)
        //  for (int i = cum_num_rand_eff_cluster_i[j]; i < cum_num_rand_eff_cluster_i[j + 1]; ++i) {
        //      explicit_derivative += SigmaI_mode[i] * mode_[i];
        //  }
        explicit_derivative *= -0.5;
        I_j.setFromTriplets(triplets.begin(), triplets.end());
        I_j_ZtWZ = I_j * ZtWZ;
        explicit_derivative += 0.5 * (SigmaI_plus_ZtWZ_inv.cwiseProduct(I_j_ZtWZ)).sum();
        // calculate implicit derivative (through mode) of approx. marginal log-likelihood
        d_mode_d_par = SigmaI_plus_ZtWZ_inv * I_j * Zt * first_deriv_ll_;
        cov_grad[j] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);
    }
    ////Only for debugging
    //Log::REInfo("CalcGradNegMargLikelihoodLAApproxGroupedRE");
    //Log::REInfo("explicit_derivative: %g", explicit_derivative);
    //for (int i = 0; i < 5; ++i) {
    //  Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]);
    //}
    //for (int i = 0; i < 5; ++i) {
    //  Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]);
    //}
    //Log::REInfo("cov_grad");
    //for (int i = 0; i < num_comps; ++i) {
    //  Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]);
    //}
}//end calc_cov_grad
// calculate gradient wrt fixed effects
if (calc_F_grad) {
    vec_t d_detmll_d_F(num_data);
#pragma omp parallel for schedule(static)
    for (int i = 0; i < num_data; ++i) {
        sp_mat_t zi_zit = Zt.col(i) * Z.row(i);//=Z.row(i) * (Z.row(i)).transpose()
        d_detmll_d_F[i] = -0.5 * third_deriv[i] * (SigmaI_plus_ZtWZ_inv.cwiseProduct(zi_zit)).sum();
    }
    vec_t d_mll_d_modeT_SigmaI_plus_ZtWZ_inv_Zt_W = d_mll_d_mode.transpose() * SigmaI_plus_ZtWZ_inv * Zt * second_deriv_neg_ll_.asDiagonal();
    fixed_effect_grad = -first_deriv_ll_ + d_detmll_d_F - d_mll_d_modeT_SigmaI_plus_ZtWZ_inv_Zt_W;
}//end calc_F_grad
}//end CalcGradNegMargLikelihoodLAApproxGroupedRE
/*!
* \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients
* Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
* This version is used for the Laplace approximation when there are only grouped random effects with only one grouping variable.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param sigma2 Variance of random effects
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param calc_cov_grad If true, the gradient wrt the covariance parameters is calculated
* \param calc_F_grad If true, the gradient wrt the fixed effects mean function F is calculated
* \param[out] cov_grad Gradient wrt covariance parameters (needs to be preallocated of size num_cov_par)
* \param[out] fixed_effect_grad Gradient wrt fixed effects F (note: this is passed as an Eigen vector in order to avoid the need for copying)
* \param calc_mode If true, the mode of the random effects posterior is calculated, otherwise the values in mode and a_vec_ are used (default=false)
*/
void CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale(const double* y_data, const int* y_data_int,
    const double* fixed_effects, const data_size_t num_data,
    const double sigma2, const data_size_t* const random_effects_indices_of_data,
    bool calc_cov_grad, bool calc_F_grad, double* cov_grad, vec_t& fixed_effect_grad, bool calc_mode = false) {
    if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + W at mode
        double mll;//approximate marginal likelihood. This is a by-product that is not used here.
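        // Illustrative note (hypothetical names; not original code): with a single grouping
        // variable all matrices involved are diagonal, so the explicit part of the variance
        // gradient collapses to vector sums, schematically
        //   expl = -0.5 * mode.squaredNorm() / sigma2 + 0.5 * (diag_ZtWZ.array() / diag_SigmaI_plus_ZtWZ.array()).sum(),
        // and no matrix is ever formed or factorized.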
FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale(y_data, y_data_int, fixed_effects, num_data, sigma2, random_effects_indices_of_data, mll); } else { CHECK(mode_has_been_calculated_); } // Initialize variables vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects if (fixed_effects == nullptr) { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]]; } } else { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i]; } } vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data()); // calculate gradient of approx. marginal likeligood wrt the mode vec_t d_mll_d_mode = vec_t::Zero(num_re_); #pragma omp parallel { vec_t third_deriv_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { third_deriv_private[random_effects_indices_of_data[i]] += third_deriv[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { d_mll_d_mode[i_re] += third_deriv_private[i_re]; } }//end omp critical }//end omp parallel d_mll_d_mode.array() /= -2. * diag_SigmaI_plus_ZtWZ_.array(); // calculate gradient wrt covariance parameters if (calc_cov_grad) { vec_t diag_ZtWZ = vec_t::Zero(num_re_); #pragma omp parallel { vec_t diag_ZtWZ_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { diag_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { diag_ZtWZ[i_re] += diag_ZtWZ_private[i_re]; } }//end omp critical }//end omp parallel double explicit_derivative = -0.5 * (mode_.array() * mode_.array()).sum() / sigma2 + 0.5 * (diag_ZtWZ.array() / diag_SigmaI_plus_ZtWZ_.array()).sum(); // calculate implicit derivative (through mode) of approx. 
// marginal log-likelihood
vec_t d_mode_d_par = vec_t::Zero(num_re_);
#pragma omp parallel
{
    vec_t first_deriv_private = vec_t::Zero(num_re_);
#pragma omp for
    for (data_size_t i = 0; i < num_data; ++i) {
        first_deriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
    }
#pragma omp critical
    {
        for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
            d_mode_d_par[i_re] += first_deriv_private[i_re];
        }
    }//end omp critical
}//end omp parallel
d_mode_d_par.array() /= diag_SigmaI_plus_ZtWZ_.array();
cov_grad[0] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);
////Only for debugging
//Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale");
//Log::REInfo("explicit_derivative: %g", explicit_derivative);
//for (int i = 0; i < 5; ++i) {
//  Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]);
//}
//for (int i = 0; i < 5; ++i) {
//  Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]);
//}
//Log::REInfo("cov_grad[0]: %g", cov_grad[0]);
}//end calc_cov_grad
// calculate gradient wrt fixed effects
if (calc_F_grad) {
#pragma omp parallel for schedule(static)
    for (int i = 0; i < num_data; ++i) {
        fixed_effect_grad[i] = -first_deriv_ll_[i] -
            0.5 * third_deriv[i] / diag_SigmaI_plus_ZtWZ_[random_effects_indices_of_data[i]] - //=d_detmll_d_F
            d_mll_d_mode[random_effects_indices_of_data[i]] * second_deriv_neg_ll_[i] / diag_SigmaI_plus_ZtWZ_[random_effects_indices_of_data[i]];//=implicit derivative = d_mll_d_mode * d_mode_d_F
    }
    ////Only for debugging
    //Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale");
    //for (int i = 0; i < 5; ++i) {
    //  Log::REInfo("fixed_effect_grad[%d]: %g", i, fixed_effect_grad[i]);
    //}
}//end calc_F_grad
}//end CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale
/*!
* \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients
* Calculations are done by factorizing ("inverting") (Sigma^-1 + W) where it is assumed that an approximate Cholesky factor
* of Sigma^-1 has previously been calculated using a Vecchia approximation.
* This version is used for the Laplace approximation when there are only GP random effects and the Vecchia approximation is used.
* Caveat: Sigma^-1 + W may not be very sparse
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param B Matrix B in Vecchia approximation Sigma^-1 = B^T D^-1 B ("=" Cholesky factor)
* \param D_inv Diagonal matrix D^-1 in Vecchia approximation Sigma^-1 = B^T D^-1 B
* \param B_grad Derivatives of matrices B ( = derivative of matrix -A) for Vecchia approximation
* \param D_grad Derivatives of matrices D for Vecchia approximation
* \param calc_cov_grad If true, the gradient wrt the covariance parameters is calculated
* \param calc_F_grad If true, the gradient wrt the fixed effects mean function F is calculated
* \param[out] cov_grad Gradient of approximate marginal log-likelihood wrt covariance parameters (needs to be preallocated of size num_cov_par)
* \param[out] fixed_effect_grad Gradient of approximate marginal log-likelihood wrt fixed effects F (note: this is passed as an Eigen vector in order to avoid the need for copying)
* \param calc_mode If true, the mode of the random effects posterior is calculated, otherwise the values in mode and a_vec_ are used (default=false)
*/
void CalcGradNegMargLikelihoodLAApproxVecchia(const double* y_data, const int* y_data_int,
    const double* fixed_effects, const data_size_t num_data,
    const sp_mat_t& B, const sp_mat_t& D_inv,
    const std::vector<sp_mat_t>& B_grad, const std::vector<sp_mat_t>& D_grad,
    bool calc_cov_grad, bool calc_F_grad, double* cov_grad, vec_t& fixed_effect_grad, bool calc_mode = false) {
    if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + W at mode
        double mll;//approximate marginal likelihood. This is a by-product that is not used here.
        FindModePostRandEffCalcMLLVecchia(y_data, y_data_int, fixed_effects, num_data, B, D_inv, mll);
    }
    else {
        CHECK(mode_has_been_calculated_);
    }
    // Initialize variables
    bool no_fixed_effects = (fixed_effects == nullptr);
    vec_t location_par;//location parameter = mode of random effects + fixed effects
    vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood
    if (no_fixed_effects) {
        CalcThirdDerivLogLik(y_data, y_data_int, mode_.data(), num_data, third_deriv.data());
    }
    else {
        location_par = vec_t(num_data);
#pragma omp parallel for schedule(static)
        for (data_size_t i = 0; i < num_data; ++i) {
            location_par[i] = mode_[i] + fixed_effects[i];
        }
        CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data());
    }
    // Calculate (Sigma^-1 + W)^-1
    sp_mat_t L_inv(num_data, num_data);
    L_inv.setIdentity();
    if (chol_fact_SigmaI_plus_ZtWZ_vecchia_.permutationP().size() > 0) {//Permutation is only used when having an ordering
        L_inv = chol_fact_SigmaI_plus_ZtWZ_vecchia_.permutationP() * L_inv;
    }
    chol_fact_SigmaI_plus_ZtWZ_vecchia_.matrixL().solveInPlace(L_inv);
    // calculate gradient wrt covariance parameters
    if (calc_cov_grad) {
        sp_mat_t SigmaI_plus_W_inv = L_inv.transpose() * L_inv;//Note: this is the computational bottleneck for large data
        vec_t d_mll_d_mode = -0.5 * (SigmaI_plus_W_inv.diagonal().array() * third_deriv.array()).matrix();// gradient of approx. marginal likelihood wrt the mode
        vec_t d_mode_d_par;//derivative of mode wrt a covariance parameter
        double explicit_derivative;
        int num_par = (int)B_grad.size();
        sp_mat_t SigmaI_deriv;
        sp_mat_t BgradT_Dinv_B;
        sp_mat_t Bt_Dinv_Bgrad;
        for (int j = 0; j < num_par; ++j) {
            SigmaI_deriv = B_grad[j].transpose() * D_inv * B;
            Bt_Dinv_Bgrad = SigmaI_deriv.transpose();
            SigmaI_deriv += Bt_Dinv_Bgrad - B.transpose() * D_inv * D_grad[j] * D_inv * B;
            d_mode_d_par = -SigmaI_plus_W_inv * SigmaI_deriv * mode_;
            explicit_derivative = 0.5 * mode_.dot(SigmaI_deriv * mode_) +
                0.5 * ((D_inv.diagonal().array() * D_grad[j].diagonal().array()).sum() + (SigmaI_deriv.cwiseProduct(SigmaI_plus_W_inv)).sum());
            // Alternative version (not faster)
            //vec_t u = D_inv * B * mode_;
            //vec_t uk = B_grad[j] * mode_;
            //explicit_derivative = uk.dot(u) - 0.5 * u.dot(D_grad[j] * u) +
            //  0.5 * ((D_inv.diagonal().array() * D_grad[j].diagonal().array()).sum() + (SigmaI_deriv.cwiseProduct(SigmaI_plus_W_inv)).sum());
            cov_grad[j] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);
        }
        ////Only for debugging
        //Log::REInfo("explicit_derivative: %g", explicit_derivative);
        //for (int i = 0; i < 5; ++i) {
        //  Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]);
        //}
        //for (int i = 0; i < 5; ++i) {
        //  Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]);
        //}
        //Log::REInfo("cov_grad");
        //for (int i = 0; i < num_par; ++i) {
        //  Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]);
        //}
    }//end calc_cov_grad
    // calculate gradient wrt fixed effects
    if (calc_F_grad) {
        sp_mat_t L_inv_sqr = L_inv.cwiseProduct(L_inv);
        vec_t SigmaI_plus_W_inv_diag = L_inv_sqr.transpose() * vec_t::Ones(L_inv_sqr.rows());// diagonal of (Sigma^-1 + W)^-1
        vec_t d_mll_d_mode = (-0.5 * SigmaI_plus_W_inv_diag.array() * third_deriv.array()).matrix();// gradient of approx. marginal likelihood wrt the mode and thus also F here
        vec_t L_inv_d_mll_d_mode = L_inv * d_mll_d_mode;// for implicit derivative
        vec_t SigmaI_plus_W_inv_d_mll_d_mode = L_inv.transpose() * L_inv_d_mll_d_mode;
        vec_t d_mll_d_F_implicit = -(SigmaI_plus_W_inv_d_mll_d_mode.array() * second_deriv_neg_ll_.array()).matrix();// implicit derivative
        fixed_effect_grad = -first_deriv_ll_ + d_mll_d_mode + d_mll_d_F_implicit;
    }//end calc_F_grad
}//end CalcGradNegMargLikelihoodLAApproxVecchia
/*!
* \brief Make predictions for the (latent) random effects when using the Laplace approximation.
* Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + Wsqrt * Z*Sigma*Zt * Wsqrt).
* In the notation of the paper: "Sigma = Z*Sigma*Z^T" and "Z = Id".
* This version is used for the Laplace approximation when dense matrices are used (e.g. GP models).
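*
* A minimal sketch of the predictive identities used here (illustrative, hypothetical names):
* \code
* #include <Eigen/Dense>
* // pred_mean = Cross_Cov * dll and pred_cov -= M^T * M with M = L \ (W^(1/2) * Cross_Cov^T),
* // where L * L^T = Id + W^(1/2) * Sigma * W^(1/2) is the factor computed at the mode.
* void la_predict(const Eigen::MatrixXd& Cross_Cov, const Eigen::VectorXd& dll,
*     const Eigen::VectorXd& W, const Eigen::LLT<Eigen::MatrixXd>& chol,
*     Eigen::VectorXd& pred_mean, Eigen::MatrixXd& pred_cov) {
*     pred_mean = Cross_Cov * dll;
*     Eigen::MatrixXd M = W.array().sqrt().matrix().asDiagonal() * Cross_Cov.transpose();
*     chol.matrixL().solveInPlace(M);
*     pred_cov -= M.transpose() * M;
* }
* \endcode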
* \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param ZSigmaZt Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t) * \param Cross_Cov Cross covariance matrix between predicted and obsreved random effects ("=Cov(y_p,y)") * \param pred_mean[out] Predicted mean * \param pred_cov[out] Predicted covariance matrix * \param pred_var[out] Predicted variances * \param calc_pred_cov If true, predictive covariance matrix is also calculated * \param calc_pred_var If true, predictive variances are also calculated * \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false) */ void PredictLAApproxStable(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const std::shared_ptr<T_mat> ZSigmaZt, const T_mat& Cross_Cov, vec_t& pred_mean, T_mat& pred_cov, vec_t& pred_var, bool calc_pred_cov = false, bool calc_pred_var = false, bool calc_mode = false) { if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + Wsqrt * ZSigmaZt * Wsqrt) at mode double mll;//approximate marginal likelihood. This is a by-product that is not used here. FindModePostRandEffCalcMLLStable(y_data, y_data_int, fixed_effects, num_data, ZSigmaZt, mll); } else { CHECK(mode_has_been_calculated_); } pred_mean = Cross_Cov * first_deriv_ll_; if (calc_pred_cov || calc_pred_var) { sp_mat_t Wsqrt(num_data, num_data);//diagonal matrix with square root of negative second derivatives on the diagonal (sqrt of negative Hessian of log-likelihood) Wsqrt.setIdentity(); Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt(); T_mat Maux = Wsqrt * Cross_Cov.transpose(); ApplyPermutationCholeskyFactor<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Maux); chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL().solveInPlace(Maux); if (calc_pred_cov) { pred_cov -= Maux.transpose() * Maux; } if (calc_pred_var) { Maux = Maux.cwiseProduct(Maux); #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { pred_var[i] -= Maux.col(i).sum(); } } } ////Only for debugging //Log::REInfo("PredictLAApproxStable"); //for (int i = 0; i < 3; ++i) { // Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i)); //} //for (int i = 0; i < 3; ++i) { // Log::REInfo("first_deriv_ll_[%d]: %g", i, first_deriv_ll_[i]); //} //for (int i = 0; i < 3; ++i) { // Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]); //} //if (calc_pred_var) { // for (int i = 0; i < 3; ++i) { // Log::REInfo("pred_var[%d]: %g", i, pred_var[i]); // } //} }//end PredictLAApproxStable /*! * \brief Make predictions for the (latent) random effects when using the Laplace approximation. * Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + Wsqrt * Z*Sigma*Zt * Wsqrt). * In the notation of the paper: "Sigma = Z*Sigma*Z^T" and "Z = Id". * This version is used for the Laplace approximation when dense matrices are used (e.g. GP models). 
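*
* Sketch of the Z^T v "scatter-add" that replaces explicit matrix products on the
* random-effects scale (illustrative names only):
* \code
* #include <Eigen/Dense>
* #include <vector>
* // Z has exactly one 1 per row, so Z^T v just sums v over the data points of each effect.
* Eigen::VectorXd Zt_times_vec(const Eigen::VectorXd& v, const std::vector<int>& re_index, int num_re) {
*     Eigen::VectorXd out = Eigen::VectorXd::Zero(num_re);
*     for (int i = 0; i < (int)v.size(); ++i) out[re_index[i]] += v[i];
*     return out;
* }
* \endcode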
* \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param Sigma Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t) * \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related * \param Cross_Cov Cross covariance matrix between predicted and obsreved random effects ("=Cov(y_p,y)") * \param pred_mean[out] Predicted mean * \param pred_cov[out] Predicted covariance matrix * \param pred_var[out] Predicted variances * \param calc_pred_cov If true, predictive covariance matrix is also calculated * \param calc_pred_var If true, predictive variances are also calculated * \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false) */ void PredictLAApproxOnlyOneGPCalculationsOnREScale(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const std::shared_ptr<T_mat> Sigma, const data_size_t* const random_effects_indices_of_data, const T_mat& Cross_Cov, vec_t& pred_mean, T_mat& pred_cov, vec_t& pred_var, bool calc_pred_cov = false, bool calc_pred_var = false, bool calc_mode = false) { if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + Wsqrt * ZSigmaZt * Wsqrt) at mode double mll;//approximate marginal likelihood. This is a by-product that is not used here. FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale(y_data, y_data_int, fixed_effects, num_data, Sigma, random_effects_indices_of_data, mll); } else { CHECK(mode_has_been_calculated_); } vec_t ZtFirstDeriv = vec_t::Zero(num_re_);//sqrt of diagonal matrix ZtWZ #pragma omp parallel { vec_t ZtFirstDeriv_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { ZtFirstDeriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { ZtFirstDeriv[i_re] += ZtFirstDeriv_private[i_re]; } }//end omp critical }//end omp parallel pred_mean = Cross_Cov * ZtFirstDeriv; if (calc_pred_cov || calc_pred_var) { vec_t diag_ZtWZ = vec_t::Zero(num_re_); #pragma omp parallel { vec_t diag_sqrt_ZtWZ_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { diag_sqrt_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { diag_ZtWZ.array()[i_re] += diag_sqrt_ZtWZ_private[i_re]; } }//end omp critical }//end omp parallel sp_mat_t ZtWZsqrt(num_re_, num_re_);//diagonal matrix with square root of diagonal of ZtWZ ZtWZsqrt.setIdentity(); ZtWZsqrt.diagonal().array() = diag_ZtWZ.array().sqrt(); T_mat Maux = ZtWZsqrt * Cross_Cov.transpose(); ApplyPermutationCholeskyFactor<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Maux); chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL().solveInPlace(Maux);//Maux = L\(ZtWZsqrt * Cross_Cov^T) if (calc_pred_cov) { pred_cov -= Maux.transpose() * Maux; } if (calc_pred_var) { Maux = Maux.cwiseProduct(Maux); #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { pred_var[i] -= Maux.col(i).sum(); } } } ////Only for debugging //Log::REInfo("PredictLAApproxOnlyOneGPCalculationsOnREScale"); //for (int i = 0; i 
< 3; ++i) { // if (Cross_Cov.rows() > 1) { // Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i)); // } // else { // Log::REInfo("Cross_Cov[0,%d]: %g", i, Cross_Cov.coeff(0, i)); // } //} //for (int i = 0; i < 3; ++i) { // Log::REInfo("ZtFirstDeriv[%d]: %g", i, ZtFirstDeriv[i]); //} //for (int i = 0; i < std::min((int)pred_mean.size(),3); ++i) { // Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]); //} //if (calc_pred_var) { // for (int i = 0; i < 3; ++i) { // Log::REInfo("pred_var[%d]: %g", i, pred_var[i]); // } //} }//end PredictLAApproxOnlyOneGPCalculationsOnREScale /*! * \brief Make predictions for the (latent) random effects when using the Laplace approximation. * Calculations are done by directly factorizing ("inverting) (Sigma^-1 + Zt*W*Z). * NOTE: IT IS ASSUMED THAT SIGMA IS A DIAGONAL MATRIX * This version is used for the Laplace approximation when there are only grouped random effects. * \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param SigmaI Inverse covariance matrix of latent random effect. Currently, this needs to be a diagonal matrix * \param Zt Transpose Z^T of random effect design matrix that relates latent random effects to observations/likelihoods * \param Cross_Cov Cross covariance matrix between predicted and obsreved random effects ("=Cov(y_p,y)") * \param pred_mean[out] Predicted mean * \param pred_cov[out] Predicted covariance matrix * \param pred_var[out] Predicted variances * \param calc_pred_cov If true, predictive covariance matrix is also calculated * \param calc_pred_var If true, predictive variances are also calculated * \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false) */ void PredictLAApproxGroupedRE(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const sp_mat_t& SigmaI, const sp_mat_t& Zt, const T_mat& Cross_Cov, vec_t& pred_mean, T_mat& pred_cov, vec_t& pred_var, bool calc_pred_cov = false, bool calc_pred_var = false, bool calc_mode = false) { if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + Wsqrt * ZSigmaZt * Wsqrt) at mode double mll;//approximate marginal likelihood. This is a by-product that is not used here. 
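        // Illustrative note (schematic; not original code): the covariance correction applied
        // below follows a Woodbury-style identity. With M = L \ (Z^T * W * Cross_Cov^T) and
        // L * L^T = Sigma^-1 + Z^T W Z, the update is
        //   pred_cov += M^T * M - Cross_Cov * W * Cross_Cov^T,
        // so the posterior precision of the mode enters only through sparse triangular solves.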
FindModePostRandEffCalcMLLGroupedRE(y_data, y_data_int, fixed_effects, num_data, SigmaI, Zt, mll); } else { CHECK(mode_has_been_calculated_); } pred_mean = Cross_Cov * first_deriv_ll_; if (calc_pred_cov || calc_pred_var) { // calculate Maux = L\(Z^T * second_deriv_neg_ll_.asDiagonal() * Cross_Cov^T) T_mat Maux = Zt * second_deriv_neg_ll_.asDiagonal() * Cross_Cov.transpose(); if (chol_fact_SigmaI_plus_ZtWZ_grouped_.permutationP().size() > 0) {//Permutation is only used when having an ordering Maux = chol_fact_SigmaI_plus_ZtWZ_grouped_.permutationP() * Maux; } chol_fact_SigmaI_plus_ZtWZ_grouped_.matrixL().solveInPlace(Maux); if (calc_pred_cov) { pred_cov += Maux.transpose() * Maux - (T_mat)(Cross_Cov * second_deriv_neg_ll_.asDiagonal() * Cross_Cov.transpose()); } if (calc_pred_var) { T_mat Maux3 = Cross_Cov.cwiseProduct(Cross_Cov * second_deriv_neg_ll_.asDiagonal()); Maux = Maux.cwiseProduct(Maux); #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { pred_var[i] += Maux.col(i).sum() - Maux3.row(i).sum(); } } } ////Only for debugging //Log::REInfo("PredictLAApproxGroupedRE"); //for (int i = 0; i < 3; ++i) { // if (Cross_Cov.rows() > 1) { // Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i)); // } // else { // Log::REInfo("Cross_Cov[0,%d]: %g", i, Cross_Cov.coeff(0, i)); // } //} //for (int i = 0; i < 3; ++i) { // Log::REInfo("first_deriv_ll_[%d]: %g", i, first_deriv_ll_[i]); //} //for (int i = 0; i < std::min((int)pred_mean.size(), 3); ++i) { // Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]); //} //if (calc_pred_var) { // for (int i = 0; i < 3; ++i) { // Log::REInfo("pred_var[%d]: %g", i, pred_var[i]); // } //} }//end PredictLAApproxGroupedRE /*! * \brief Make predictions for the (latent) random effects when using the Laplace approximation. * Calculations are done by directly factorizing ("inverting) (Sigma^-1 + Zt*W*Z). * This version is used for the Laplace approximation when there are only grouped random effects with only one grouping variable. 
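*
* Hedged sketch of the diagonal variance correction used in this version (hypothetical names):
* \code
* #include <Eigen/Dense>
* // With one grouping variable everything is diagonal: with d_k = (Sigma^-1 + Z^T W Z)_kk,
* // correction_k = (1/(sigma2*d_k) - 1) / sigma2 and pred_var_i += sum_k Cross_Cov(i,k)^2 * correction_k.
* Eigen::VectorXd diag_correction(const Eigen::VectorXd& diag_SigmaI_plus_ZtWZ, double sigma2) {
*     return ((1. / (sigma2 * diag_SigmaI_plus_ZtWZ.array()) - 1.) / sigma2).matrix();
* }
* \endcode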
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param sigma2 Variance of random effects
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)")
* \param pred_mean[out] Predicted mean
* \param pred_cov[out] Predicted covariance matrix
* \param pred_var[out] Predicted variances
* \param calc_pred_cov If true, predictive covariance matrix is also calculated
* \param calc_pred_var If true, predictive variances are also calculated
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
void PredictLAApproxOnlyOneGroupedRECalculationsOnREScale(const double* y_data,
	const int* y_data_int,
	const double* fixed_effects,
	const data_size_t num_data,
	const double sigma2,
	const data_size_t* const random_effects_indices_of_data,
	const T_mat& Cross_Cov,
	vec_t& pred_mean,
	T_mat& pred_cov,
	vec_t& pred_var,
	bool calc_pred_cov = false,
	bool calc_pred_var = false,
	bool calc_mode = false) {
	if (calc_mode) {// Calculate mode and the diagonal of Sigma^-1 + Zt*W*Z at mode
		double mll;//approximate marginal likelihood. This is a by-product that is not used here.
		FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale(y_data, y_data_int, fixed_effects, num_data, sigma2, random_effects_indices_of_data, mll);
	}
	else {
		CHECK(mode_has_been_calculated_);
	}
	vec_t ZtFirstDeriv = vec_t::Zero(num_re_);//Z^T * first_deriv_ll_, i.e. first derivatives of the log-likelihood summed per random effect
#pragma omp parallel
	{
		vec_t ZtFirstDeriv_private = vec_t::Zero(num_re_);
#pragma omp for
		for (data_size_t i = 0; i < num_data; ++i) {
			ZtFirstDeriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
		}
#pragma omp critical
		{
			for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
				ZtFirstDeriv[i_re] += ZtFirstDeriv_private[i_re];
			}
		}//end omp critical
	}//end omp parallel
	pred_mean = Cross_Cov * ZtFirstDeriv;
	vec_t diag_Sigma_plus_ZtWZI = vec_t(num_re_);
	diag_Sigma_plus_ZtWZI.array() = 1.
		/ diag_SigmaI_plus_ZtWZ_.array();
	diag_Sigma_plus_ZtWZI.array() /= sigma2;
	diag_Sigma_plus_ZtWZI.array() -= 1.;
	diag_Sigma_plus_ZtWZI.array() /= sigma2;
	if (calc_pred_cov) {
		T_mat Maux = Cross_Cov * diag_Sigma_plus_ZtWZI.asDiagonal() * Cross_Cov.transpose();
		pred_cov += Maux;
	}
	if (calc_pred_var) {
		T_mat Maux = Cross_Cov * diag_Sigma_plus_ZtWZI.asDiagonal();
		T_mat Maux2 = Cross_Cov.cwiseProduct(Maux);
#pragma omp parallel for schedule(static)
		for (int i = 0; i < (int)pred_mean.size(); ++i) {
			pred_var[i] += Maux2.row(i).sum();
		}
	}
	////Only for debugging
	//Log::REInfo("PredictLAApproxOnlyOneGroupedRECalculationsOnREScale");
	//for (int i = 0; i < 3; ++i) {
	//	if (Cross_Cov.rows() > 1) {
	//		Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i));
	//	}
	//	else {
	//		Log::REInfo("Cross_Cov[0,%d]: %g", i, Cross_Cov.coeff(0, i));
	//	}
	//}
	//for (int i = 0; i < 3; ++i) {
	//	Log::REInfo("ZtFirstDeriv[%d]: %g", i, ZtFirstDeriv[i]);
	//}
	//for (int i = 0; i < std::min((int)pred_mean.size(),3); ++i) {
	//	Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]);
	//}
	//if (calc_pred_var) {
	//	for (int i = 0; i < 3; ++i) {
	//		Log::REInfo("pred_var[%d]: %g", i, pred_var[i]);
	//	}
	//}
}//end PredictLAApproxOnlyOneGroupedRECalculationsOnREScale

/*!
* \brief Make predictions for the (latent) random effects when using the Laplace approximation.
*		Calculations are done by factorizing ("inverting") (Sigma^-1 + W) where it is assumed that an approximate Cholesky factor
*		of Sigma^-1 has previously been calculated using a Vecchia approximation.
*		This version is used for the Laplace approximation when there are only GP random effects and the Vecchia approximation is used.
*		Caveat: Sigma^-1 + W may not be very sparse
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param B Matrix B in Vecchia approximation Sigma^-1 = B^T D^-1 B ("=" Cholesky factor)
* \param D_inv Diagonal matrix D^-1 in Vecchia approximation Sigma^-1 = B^T D^-1 B
* \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)")
* \param pred_mean[out] Predicted mean
* \param pred_cov[out] Predicted covariance matrix
* \param pred_var[out] Predicted variances
* \param calc_pred_cov If true, predictive covariance matrix is also calculated
* \param calc_pred_var If true, predictive variances are also calculated
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
void PredictLAApproxVecchia(const double* y_data,
	const int* y_data_int,
	const double* fixed_effects,
	const data_size_t num_data,
	const sp_mat_t& B,
	const sp_mat_t& D_inv,
	const T_mat& Cross_Cov,
	vec_t& pred_mean,
	T_mat& pred_cov,
	vec_t& pred_var,
	bool calc_pred_cov = false,
	bool calc_pred_var = false,
	bool calc_mode = false) {
	if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + W at mode
		double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLVecchia(y_data, y_data_int, fixed_effects, num_data, B, D_inv, mll); } else { CHECK(mode_has_been_calculated_); } pred_mean = Cross_Cov * first_deriv_ll_; if (calc_pred_cov || calc_pred_var) { T_mat SigmaI_CrossCovT = B.transpose() * D_inv * B * Cross_Cov.transpose(); T_mat Maux = SigmaI_CrossCovT; //Maux = L\(Sigma^-1 * Cross_Cov^T), L = Chol(Sigma^-1 + W) if (chol_fact_SigmaI_plus_ZtWZ_vecchia_.permutationP().size() > 0) {//Permutation is only used when having an ordering Maux = chol_fact_SigmaI_plus_ZtWZ_vecchia_.permutationP() * Maux; } chol_fact_SigmaI_plus_ZtWZ_vecchia_.matrixL().solveInPlace(Maux); if (calc_pred_cov) { pred_cov += -Cross_Cov * SigmaI_CrossCovT + Maux.transpose() * Maux; } if (calc_pred_var) { Maux = Maux.cwiseProduct(Maux); #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { pred_var[i] += Maux.col(i).sum() - (Cross_Cov.row(i)).dot(SigmaI_CrossCovT.col(i)); } } } }//end PredictLAApproxVecchia /*! * \brief Make predictions for the response variable (label) based on predictions for the mean and variance of the latent random effects * \param pred_mean[out] Predicted mean of latent random effects. The predicted mean for the response variables is written on this * \param pred_var[out] Predicted variances of latent random effects. The predicted variance for the response variables is written on this * \param predict_var If true, predictive variances are also calculated */ void PredictResponse(vec_t& pred_mean, vec_t& pred_var, bool predict_var = false) { if (likelihood_type_ == "bernoulli_probit") { #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { pred_mean[i] = normalCDF(pred_mean[i] / std::sqrt(1. + pred_var[i])); } if (predict_var) { #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { pred_var[i] = pred_mean[i] * (1. - pred_mean[i]); } } } else if (likelihood_type_ == "bernoulli_logit") { #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { pred_mean[i] = RespMeanAdaptiveGHQuadrature(pred_mean[i], pred_var[i]); } if (predict_var) { #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { pred_var[i] = pred_mean[i] * (1. - pred_mean[i]); } } } else if (likelihood_type_ == "poisson") { #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { double pm = RespMeanAdaptiveGHQuadrature(pred_mean[i], pred_var[i]); if (predict_var) { double psm = RespMeanAdaptiveGHQuadrature(2 * pred_mean[i], 4 * pred_var[i]); pred_var[i] = psm - pm * pm + pm; } pred_mean[i] = pm; } } else if (likelihood_type_ == "gamma") { #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { double pm = RespMeanAdaptiveGHQuadrature(pred_mean[i], pred_var[i]); if (predict_var) { double psm = RespMeanAdaptiveGHQuadrature(2 * pred_mean[i], 4 * pred_var[i]); pred_var[i] = psm - pm * pm + psm / aux_pars_[0]; } pred_mean[i] = pm; } } } /*! * \brief Adaptive GH quadrature to calculate predictive mean of response variable * \param latent_mean Predicted mean of latent random effects * \param latent_var Predicted variances of latent random effects */ double RespMeanAdaptiveGHQuadrature(const double latent_mean, const double latent_var) { // Find mode of integrand double mode_integrand, mode_integrand_last, update; mode_integrand = 0.; double sigma2_inv = 1. 
			/ latent_var;
		double sqrt_sigma2_inv = std::sqrt(sigma2_inv);
		for (int it = 0; it < 100; ++it) {
			mode_integrand_last = mode_integrand;
			update = (FirstDerivLogCondMeanLikelihood(mode_integrand) - sigma2_inv * (mode_integrand - latent_mean))
				/ (SecondDerivLogCondMeanLikelihood(mode_integrand) - sigma2_inv);
			mode_integrand -= update;
			if (std::abs(update) / std::abs(mode_integrand_last) < DELTA_REL_CONV_) {
				break;
			}
		}
		// Adaptive GH quadrature
		double sqrt2_sigma_hat = M_SQRT2 / std::sqrt(-SecondDerivLogCondMeanLikelihood(mode_integrand) + sigma2_inv);
		double x_val;
		double mean_resp = 0.;
		for (int j = 0; j < order_GH_; ++j) {
			x_val = sqrt2_sigma_hat * GH_nodes_[j] + mode_integrand;
			mean_resp += adaptive_GH_weights_[j] * CondMeanLikelihood(x_val) * normalPDF(sqrt_sigma2_inv * (x_val - latent_mean));
		}
		mean_resp *= sqrt2_sigma_hat * sqrt_sigma2_inv;
		return mean_resp;
	}

	template <typename T>//T can be double or float
	bool AreSame(const T a, const T b) const {
		return fabs(a - b) < fabs(a) * EPSILON_;
	}

private:
	/*! \brief Number of data points */
	data_size_t num_data_;
	/*! \brief Number (dimension) of random effects */
	data_size_t num_re_;
	/*! \brief Posterior mode used for Laplace approximation */
	vec_t mode_;
	/*! \brief Saving a previously found value allows for resetting the mode when the step size was too large. */
	vec_t mode_previous_value_;
	/*! \brief Auxiliary variable a=ZSigmaZt^-1 mode_b used for Laplace approximation */
	vec_t a_vec_;
	/*! \brief Saving a previously found value allows for resetting the mode when the step size was too large. */
	vec_t a_vec_previous_value_;
	/*! \brief Indicates whether the vector a_vec_ / a=ZSigmaZt^-1 is used or not */
	bool has_a_vec_;
	/*! \brief First derivatives of the log-likelihood */
	vec_t first_deriv_ll_;
	/*! \brief Second derivatives of the negative log-likelihood (diagonal of matrix "W") */
	vec_t second_deriv_neg_ll_;
	/*! \brief Diagonal of matrix Sigma^-1 + Zt * W * Z in Laplace approximation (used only in version 'GroupedRE' when there is only one random effect and ZtWZ is diagonal. Otherwise 'chol_fact_SigmaI_plus_ZtWZ_grouped_' is used for grouped REs) */
	vec_t diag_SigmaI_plus_ZtWZ_;
	/*! \brief Cholesky factors of matrix Sigma^-1 + Zt * W * Z in Laplace approximation (used only in version 'GroupedRE' if there is more than one random effect). */
	chol_sp_mat_AMDOrder_t chol_fact_SigmaI_plus_ZtWZ_grouped_;
	/*! \brief Cholesky factors of matrix Sigma^-1 + Zt * W * Z in Laplace approximation (used only in version 'Vecchia') */
	chol_sp_mat_AMDOrder_t chol_fact_SigmaI_plus_ZtWZ_vecchia_;
	//Note: chol_sp_mat_AMDOrder_t (AMD permutation) is faster than chol_sp_mat_t (no permutation) for the Vecchia approximation but for the grouped random effects the difference is small.
	//		chol_sp_mat_COLAMDOrder_t is slower than no ordering or chol_sp_mat_AMDOrder_t for both grouped random effects and the Vecchia approximation
	/*!
	* \brief Cholesky factors of matrix B = I + Wsqrt * Z * Sigma * Zt * Wsqrt in Laplace approximation (for version 'Stable')
	*		or of matrix B = Id + ZtWZsqrt * Sigma * ZtWZsqrt (for version 'OnlyOneGPCalculationsOnREScale')
	*/
	T_chol chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_;
	/*! \brief If true, the pattern for the Cholesky factor (chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, chol_fact_SigmaI_plus_ZtWZ_grouped_, or chol_fact_SigmaI_plus_ZtWZ_vecchia_) has been analyzed */
	bool chol_fact_pattern_analyzed_ = false;
	/*! \brief If true, the mode has been initialized to 0 */
	bool mode_initialized_ = false;
	/*!
	\brief If true, the mode has been determined */
	bool mode_has_been_calculated_ = false;
	/*! \brief If true, the normalizing constant of the likelihood has been calculated */
	bool normalizing_constant_has_been_calculated_ = false;
	/*! \brief Normalizing constant for likelihoods (not all likelihoods have one) */
	double log_normalizing_constant_;
	/*! \brief Type of likelihood */
	string_t likelihood_type_ = "gaussian";
	/*! \brief List of supported covariance likelihoods */
	const std::set<string_t> SUPPORTED_LIKELIHOODS_{ "gaussian", "bernoulli_probit", "bernoulli_logit", "poisson", "gamma" };
	/*! \brief Tolerance level when comparing two doubles for equality */
	double EPSILON_ = 1e-6;
	/*! \brief Maximal number of iterations done for finding posterior mode with Newton's method */
	int MAXIT_MODE_NEWTON_ = 1000;
	/*! \brief Used for checking convergence in mode finding algorithm (terminate if relative change in Laplace approx. is below this value) */
	double DELTA_REL_CONV_ = 1e-6;
	/*! \brief Additional parameters for likelihoods. For gamma, aux_pars_[0] = shape parameter */
	std::vector<double> aux_pars_;

	string_t ParseLikelihoodAlias(const string_t& likelihood) {
		if (likelihood == string_t("binary") || likelihood == string_t("bernoulli_probit") || likelihood == string_t("binary_probit")) {
			return "bernoulli_probit";
		}
		else if (likelihood == string_t("gaussian") || likelihood == string_t("regression")) {
			return "gaussian";
		}
		return likelihood;
	}

	/*! \brief Order of the Gauss-Hermite quadrature */
	int order_GH_ = 30;
	/*! \brief Nodes and weights for the Gauss-Hermite quadrature */
	// Source: https://keisan.casio.com/exec/system/1281195844
	const std::vector<double> GH_nodes_ = { -6.863345293529891581061, -6.138279220123934620395, -5.533147151567495725118,
		-4.988918968589943944486, -4.48305535709251834189, -4.003908603861228815228,
		-3.544443873155349886925, -3.099970529586441748689, -2.667132124535617200571,
		-2.243391467761504072473, -1.826741143603688038836, -1.415527800198188511941,
		-1.008338271046723461805, -0.6039210586255523077782, -0.2011285765488714855458,
		0.2011285765488714855458, 0.6039210586255523077782, 1.008338271046723461805,
		1.415527800198188511941, 1.826741143603688038836, 2.243391467761504072473,
		2.667132124535617200571, 3.099970529586441748689, 3.544443873155349886925,
		4.003908603861228815228, 4.48305535709251834189, 4.988918968589943944486,
		5.533147151567495725118, 6.138279220123934620395, 6.863345293529891581061 };
	const std::vector<double> GH_weights_ = { 2.908254700131226229411E-21, 2.8103336027509037088E-17, 2.87860708054870606219E-14,
		8.106186297463044204E-12, 9.1785804243785282085E-10, 5.10852245077594627739E-8,
		1.57909488732471028835E-6, 2.9387252289229876415E-5, 3.48310124318685523421E-4,
		0.00273792247306765846299, 0.0147038297048266835153, 0.0551441768702342511681,
		0.1467358475408900997517, 0.2801309308392126674135, 0.386394889541813862556,
		0.3863948895418138625556, 0.2801309308392126674135, 0.1467358475408900997517,
		0.0551441768702342511681, 0.01470382970482668351528, 0.002737922473067658462989,
		3.48310124318685523421E-4, 2.938725228922987641501E-5, 1.579094887324710288346E-6,
		5.1085224507759462774E-8, 9.1785804243785282085E-10, 8.10618629746304420399E-12,
		2.87860708054870606219E-14, 2.81033360275090370876E-17, 2.9082547001312262294E-21 };
	const std::vector<double> adaptive_GH_weights_ = { 0.83424747101276179534, 0.64909798155426670071, 0.56940269194964050397,
		0.52252568933135454964, 0.491057995832882696506, 0.46837481256472881677,
		0.45132103599118862129, 0.438177022652683703695,
		0.4279180629327437485828, 0.4198950037368240886418, 0.413679363611138937184,
		0.4089815750035316024972, 0.4056051233256844363121, 0.403419816924804022553,
		0.402346066701902927115, 0.4023460667019029271154, 0.4034198169248040225528,
		0.4056051233256844363121, 0.4089815750035316024972, 0.413679363611138937184,
		0.4198950037368240886418, 0.427918062932743748583, 0.4381770226526837037,
		0.45132103599118862129, 0.46837481256472881677, 0.4910579958328826965056,
		0.52252568933135454964, 0.56940269194964050397, 0.64909798155426670071,
		0.83424747101276179534 };

	const char* NA_OR_INF_WARNING_ = "Mode finding algorithm for Laplace approximation: NA or Inf occurred. This is not necessarily a problem as it might have been caused by a too large learning rate which has, consequently, been decreased by the algorithm";
	const char* NO_INCREASE_IN_MLL_WARNING_ = "Mode finding algorithm for Laplace approximation: The approximate marginal log-likelihood (=convergence criterion) has decreased and the algorithm has thus been terminated.";
	const char* NO_CONVERGENCE_WARNING_ = "Algorithm for finding mode for Laplace approximation has not converged after the maximal number of iterations";

	/*! \brief Get number of non-zero entries in matrix */
	template <class T_mat1, typename std::enable_if< std::is_same<sp_mat_t, T_mat1>::value>::type * = nullptr >
	int GetNumberNonZeros(T_mat1 M) {
		return((int)M.nonZeros());
	};
	template <class T_mat1, typename std::enable_if< std::is_same<den_mat_t, T_mat1>::value>::type * = nullptr >
	int GetNumberNonZeros(T_mat1 M) {
		return((int)M.cols() * M.rows());
	};

};//end class Likelihood

}  // namespace GPBoost

#endif   // GPB_LIKELIHOODS_
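To see the Gauss-Hermite machinery of RespMeanAdaptiveGHQuadrature() in isolation, here is a standalone sketch added for illustration (it is not part of the GPBoost sources). It uses a plain, non-adaptive 5-point Gauss-Hermite rule and the conditional mean f = exp (as for a Poisson likelihood) in place of the 30-point mode-shifted rule above; for f = exp the closed form exp(mu + sigma2/2) is available as a check.

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* 5-point Gauss-Hermite rule for the weight exp(-t^2):
	 * E[f(x)], x ~ N(mu, sigma2), is approximated by
	 * (1/sqrt(pi)) * sum_j w_j * f(mu + sqrt(2*sigma2)*t_j) */
	const double t[5] = { -2.020182870456086, -0.958572464613819, 0.,
		0.958572464613819, 2.020182870456086 };
	const double w[5] = { 0.019953242059046, 0.393619323152241, 0.945308720482942,
		0.393619323152241, 0.019953242059046 };
	const double pi = 3.14159265358979323846;
	const double mu = 0.3, sigma2 = 0.5;	/* latent mean and variance */
	double mean_resp = 0.;
	int j;
	for (j = 0; j < 5; ++j) {
		mean_resp += w[j] * exp(mu + sqrt(2. * sigma2) * t[j]);	/* f = exp */
	}
	mean_resp /= sqrt(pi);
	/* exact value for f = exp: exp(mu + sigma2/2) */
	printf("GH approx: %.6f, exact: %.6f\n", mean_resp, exp(mu + 0.5 * sigma2));
	return 0;
}

The adaptive variant above additionally re-centers the nodes at the mode of the integrand and rescales them by its curvature, which keeps the rule accurate when the integrand is far from Gaussian.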
levinson.c
#ifndef _OPENMP
#define STRING2(x) #x
#define STRING(x) STRING2(x)
#pragma message (__FILE__ "(" STRING(__LINE__) "): error: This module should be compiled with /openmp on the command line")
/* Generate a compiler error to stop the build */
mustLinkOpenMP
#endif

#if defined(_MSC_VER)
	// Microsoft
	#define EXPORT __declspec(dllexport)
	#define IMPORT __declspec(dllimport)
#elif defined(__GNUC__)
	// GCC
	#define EXPORT __attribute__((visibility("default")))
	#define IMPORT
#else
	#define EXPORT
	#define IMPORT
	// #pragma warning Unknown dynamic link import/export semantics.
#endif

EXPORT int nlevinson(const double* in, int nsamp, double* acoeff, double* tmp)
{
	int i, j;
	double err, norm;
	int ret = 0;

	acoeff[0]=1/in[0];
	for (j=1; (j<nsamp); j++) {
		err=0;
		for (i=0; (i<j); i++) {
			err=err+acoeff[i]*in[j-i];
		}
		norm=1/(1-err*err);
		for (i=0; (i<j+1); i++) {
			tmp[i]=norm*(acoeff[i]-err*acoeff[j-i]);
		}
		for (i=0; (i<j+1); i++) {
			acoeff[i]=tmp[i];
		}
	}
	return ret;
}

EXPORT int nlevinson_mp(const double* in, int nchan, int nsamp, double* acoeff, double* tmp)
{
	int smp, ch;
	int ret = 0;

	#pragma omp parallel for private(ch, smp)
	for (ch = 0; ch < nchan; ch++) {
		smp = ch*nsamp;
		nlevinson(&in[smp], nsamp, &acoeff[smp], &tmp[smp]);
	}
	return ret;
}

EXPORT int levinson(const double* in, int order, double* acoeff, double* err, double* kcoeff, double* tmp)
{
	int i, j;
	double acc;
	int ret = 0;

	/* order 0 */
	acoeff[0] = (double)1.0;
	*err = in[0];

	/* order >= 1 */
	for (i = 1; i <= order; ++i) {
		acc = in[i];
		for ( j = 1; j <= i-1; ++j) {
			acc += acoeff[j]*in[i-j];
		}
		kcoeff[i-1] = -acc/(*err);
		acoeff[i] = kcoeff[i-1];

		for (j = 0; j < order; ++j) {
			tmp[j] = acoeff[j];
		}
		for (j = 1; j < i; ++j) {
			acoeff[j] += kcoeff[i-1]*tmp[i-j];
		}
		*err *= (1-kcoeff[i-1]*kcoeff[i-1]);
	}
	return ret;
}

EXPORT int levinson_mp(const double* in, int nchan, int nsamp, int order, double* acoeff, double* err, double* kcoeff, double* tmp)
{
	int ch, dn, an, kn;
	int ret = 0;

	#pragma omp parallel for private(ch, dn, an, kn)
	for (ch = 0; ch < nchan; ch++) {
		dn = ch*nsamp;
		an = ch*(order+1);
		kn = ch*order;
		levinson(&in[dn], order, &acoeff[an], &err[ch], &kcoeff[kn], &tmp[kn]);
	}
	return ret;
}
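A minimal usage sketch for levinson() above (added for illustration, assuming it is linked against this module): for order p, the routine expects p+1 autocorrelation lags in `in`, p+1 slots in `acoeff`, and p slots each in `kcoeff` and `tmp`. The lags r[k] = 0.5^k below are those of an AR(1) process with coefficient 0.5, so the recursion should return acoeff = {1, -0.5, 0, 0} and a final prediction error of 0.75.

#include <stdio.h>

int levinson(const double* in, int order, double* acoeff, double* err,
             double* kcoeff, double* tmp);

int main(void)
{
	const double r[4] = { 1.0, 0.5, 0.25, 0.125 };	/* autocorrelation lags 0..3 */
	const int order = 3;
	double acoeff[4], kcoeff[3], tmp[3], err;
	int i;

	levinson(r, order, acoeff, &err, kcoeff, tmp);

	for (i = 0; i <= order; ++i) {
		printf("a[%d] = % .6f\n", i, acoeff[i]);	/* expected: 1, -0.5, 0, 0 */
	}
	printf("prediction error = %.6f\n", err);	/* expected: 0.75 */
	return 0;
}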
matrices_1d_openmp.c
#define MATRICES_1D_OPEN_MP #ifdef MATRICES_1D_OPEN_MP /* FINAL VERSION * Functions don't allocate arrays that they return. */ /* Matrices are represented as 1-D arrays in memory. * That means they are contiguous in memory, flat arrays. * Minimum dimension is 1, not 0, and internal dimensions must match. */ /* All functions use restricted pointers, so care should be taken * to make sure that arrays that they point to do not overlap, if * we want to modify them inside of the functions. * On the other hand, it's easy to change type of the pointers * from restricted to non-restricted versions, by using definitions * given at the beginning of the corresponding "matrices_1d.h" * header file, if necessary. */ /* Uses tiles to speed up computations, by using cache efficiently. * This makes sense when working with matrices; in particular, with * operations that traverse columns, like dot () and transpose(). */ #include "matrices_1d.h" #include "tests.h" /* Initializes vector or matrix with sequentially growing data_t values, starting from 0. */ void init_seq(data_ptr_res_t a, const unsigned n_rows_a, const unsigned n_cols_a) { int i = 0, j = 0; #pragma omp parallel for default(none) private(i, j) shared(a, n_rows_a, n_cols_a) schedule(static) for (i = 0; i < (int)n_rows_a; i++) { for (j = 0; j < (int)n_cols_a; j++) { a[i*n_cols_a + j] = i*n_cols_a + j; } } } /* Initializes vector or matrix with sequentially growing data_t values, starting from 0. */ void init_seq_tiled(data_ptr_res_t a, const unsigned n_rows_a, const unsigned n_cols_a) { int i = 0, j = 0, it = 0, jt = 0; #pragma omp parallel for default(none) private(i, j, it, jt) shared(a, n_rows_a, n_cols_a) schedule(static) for (i = 0; i < (int)n_rows_a; i += TILE_ORDER) { for (j = 0; j < (int)n_cols_a; j += TILE_ORDER) { for (it = i; it < MIN((int)n_rows_a, i + TILE_ORDER); it++) { for (jt = j; jt < MIN((int)n_cols_a, j + TILE_ORDER); jt++) { a[it*n_cols_a + jt] = it*n_cols_a + jt; } } } } } /* Initializes vector or matrix, with random data_t values in the range [0, 1]. Lot slower than init_seq(), which is expected, since it calls rand(). */ void init_rand(data_ptr_res_t a, const unsigned n_rows_a, const unsigned n_cols_a) { int i = 0, j = 0; /* Schedule should be either guided or dynamic; if it's static or runtime, the random numbers may repeat. But, if working with tiles, it might not work with guided or dynamic, but also should have less problems with repeating values. */ #pragma omp parallel for default(none) private(i, j) shared(a, n_rows_a, n_cols_a) schedule(static) for (i = 0; i < (int)n_rows_a; i++) { for (j = 0; j < (int)n_cols_a; j++) { a[i*n_cols_a + j] = rand() / (data_t)RAND_MAX; } } } /* Initializes vector or matrix, with random data_t values in the range [0, 1]. Lot slower than init_seq(), which is expected, since it calls rand(). */ void init_rand_tiled(data_ptr_res_t a, const unsigned n_rows_a, const unsigned n_cols_a) { int i = 0, j = 0, it = 0, jt = 0; /* Schedule should be either guided or dynamic; if it's static or runtime, the random numbers may repeat. But, if working with tiles, it might not work with guided or dynamic, but also should have less problems with repeating values. 
	*/
#pragma omp parallel for default(none) private(i, j, it, jt) shared(a, n_rows_a, n_cols_a) schedule(static)
	for (i = 0; i < (int)n_rows_a; i += TILE_ORDER) {
		for (j = 0; j < (int)n_cols_a; j += TILE_ORDER) {
			for (it = i; it < MIN((int)n_rows_a, i + TILE_ORDER); it++) {
				for (jt = j; jt < MIN((int)n_cols_a, j + TILE_ORDER); jt++) {
					a[it*n_cols_a + jt] = rand() / (data_t)RAND_MAX;
				}
			}
		}
	}
}

/* Sum of an array */
data_t sum_array(cdata_ptr_res_t arr, const unsigned length) {
	data_t sum = 0.;
	int i = 0;
#pragma omp parallel for default(none) private (i) shared(arr, length) reduction(+:sum) schedule(static)
	for (i = 0; i < (int)length; i++) {
		sum += arr[i];
	}
	return sum;
}

/* Sum of an array */
data_t sum_array_tiled(cdata_ptr_res_t arr, const unsigned length) {
	data_t sum = 0.;
	int i = 0, it = 0;
#pragma omp parallel for default(none) private (i, it) shared(arr, length) reduction(+:sum) schedule(static)
	for (i = 0; i < (int)length; i += TILE_ORDER) {
		for (it = i; it < MIN((int)length, i + TILE_ORDER); it++) {
			sum += arr[it];
		}
	}
	return sum;
}

/* Mean value of an array */
data_t mean(cdata_ptr_res_t arr, const unsigned length) {
	data_t sum = 0.;
	int i = 0;
#pragma omp parallel for default(none) private (i) shared(arr, length) reduction(+:sum) schedule(static)
	for (i = 0; i < (int)length; i++) {
		sum += arr[i];
	}
	return sum / length;
}

/* Takes and returns a new matrix, t, which is a transpose of the original one, m.
   It's also flat in memory, i.e., 1-D, but it should be looked at as a transpose of m,
   meaning, n_rows_t == n_cols_m, and n_cols_t == n_rows_m.
   The original matrix m stays intact. */
data_ptr_res_t transpose(cdata_ptr_res_t m, const unsigned n_rows_m, const unsigned n_cols_m, data_ptr_res_t t) {
	int i = 0, j = 0, it = 0, jt = 0;
#pragma omp parallel for default(none) private(i, j, it, jt) shared(m, n_rows_m, n_cols_m, t) schedule(static)
	for (i = 0; i < (int)n_rows_m; i += TILE_ORDER) {
		for (j = 0; j < (int)n_cols_m; j += TILE_ORDER) {
			for (it = i; it < MIN((int)n_rows_m, i + TILE_ORDER); it++) {
				for (jt = j; jt < MIN((int)n_cols_m, j + TILE_ORDER); jt++) {
					t[jt*n_rows_m + it] = m[it*n_cols_m + jt];
				}
			}
		}
	}
	return t;
}

/* Takes and returns a new matrix, t, which is a transpose of the original one, m.
   It's also flat in memory, i.e., 1-D, but it should be looked at as a transpose of m,
   meaning, n_rows_t == n_cols_m, and n_cols_t == n_rows_m.
   The original matrix m stays intact. */
data_ptr_res_t transpose_non_tiled(cdata_ptr_res_t m, const unsigned n_rows_m, const unsigned n_cols_m, data_ptr_res_t t) {
	int i = 0, j = 0;
#pragma omp parallel for default(none) private(i, j) shared(m, n_rows_m, n_cols_m, t) schedule(static)
	for (i = 0; i < (int)n_rows_m; i++) {
		for (j = 0; j < (int)n_cols_m; j++) {
			t[j*n_rows_m + i] = m[i*n_cols_m + j];
		}
	}

	/* Visual validation - Prints t like m, the original */
	const int validate = 0;
	if (validate) {
		for (size_t i = 0; i < n_rows_m; i++) {
			for (size_t j = 0; j < n_cols_m; j++) {
				printf("%8.3f ", t[j*n_rows_m + i]);
			}
			printf("\n");
		}
		printf("\n");
	}

	return t;
}

/* Dot product of two arrays, a and b, or matrix product
 * Returns an array that's passed in as the last argument, c.
 * This is by far the slowest version of the function, whether run sequentially or in parallel.
*/ data_ptr_res_t dot_simple(cdata_ptr_res_t a, const unsigned n_rows_a, const unsigned n_cols_a, \ cdata_ptr_res_t b, const unsigned n_rows_b, const unsigned n_cols_b, data_ptr_res_t c) { /* Check lengths of the input arrays */ if (n_cols_a != n_rows_b) { printf("#columns A must be equal to #rows B!\n"); system("pause"); exit(-2); } int i = 0, j = 0, k = 0; #pragma omp parallel for default(none) private(i, j, k) shared(a, n_rows_a, n_cols_a, b, n_rows_b, n_cols_b, c) schedule(static) for (i = 0; i < (int)n_rows_a; i++) { for (k = 0; k < (int)n_cols_b; k++) { data_t sum = 0.0; for (j = 0; j < (int)n_cols_a; j++) { sum += a[i*n_cols_a + j] * b[j*n_cols_b + k]; } c[i*n_cols_b + k] = sum; } } return c; } /* Dot product of two arrays, a and b, or matrix product * Returns an array that's passed in as the last argument, c. * This is a tiled version of the simple function, and it's much faster than it. */ data_ptr_res_t dot_simple_tiled(cdata_ptr_res_t a, const unsigned n_rows_a, const unsigned n_cols_a, \ cdata_ptr_res_t b, const unsigned n_rows_b, const unsigned n_cols_b, data_ptr_res_t c) { /* Check lengths of the input arrays */ if (n_cols_a != n_rows_b) { printf("#columns A must be equal to #rows B!\n"); system("pause"); exit(-2); } int i = 0, j = 0, k = 0, it = 0, jt = 0, kt = 0; memset(c, 0, n_rows_a * n_cols_b * sizeof(*c)); #pragma omp parallel for default(none) private(i, j, k, it, jt, kt) shared(a, n_rows_a, n_cols_a, b, n_rows_b, n_cols_b, c) schedule(static) for (i = 0; i < (int)n_rows_a; i += TILE_ORDER) { for (k = 0; k < (int)n_cols_b; k += TILE_ORDER) { for (j = 0; j < (int)n_cols_a; j += TILE_ORDER) { for (it = i; it < MIN((int)n_rows_a, i + TILE_ORDER); it++) { for (kt = k; kt < MIN((int)n_cols_b, k + TILE_ORDER); kt++) { data_t sum = 0.0; for (jt = j; jt < MIN((int)n_cols_a, j + TILE_ORDER); jt++) { sum += a[it*n_cols_a + jt] * b[jt*n_cols_b + kt]; } c[it*n_cols_b + kt] += sum; } } } } } return c; } /* Dot product of two arrays, a and b, or matrix product * Returns an array that's passed in as the last argument, c. * This is a much faster version of the function. * Uses more memory than the simple version, for transposing matrix b, which * can be a problem if the matrix is large - there might not be enough memory. * It's the fastest one, sequential or Open MP, if we optimize for speed. */ data_ptr_res_t dot_faster(cdata_ptr_res_t a, const unsigned n_rows_a, const unsigned n_cols_a, \ cdata_ptr_res_t b, const unsigned n_rows_b, const unsigned n_cols_b, data_ptr_res_t c) { /* Check lengths of the input arrays */ if (n_cols_a != n_rows_b) { printf("#columns A must be equal to #rows B!\n"); system("pause"); exit(-2); } int i = 0, j = 0, k = 0; data_ptr_res_t bt = malloc(n_rows_b * n_cols_b * sizeof(*b)); if (!bt) { printf("Couldn't allocate memory!\n"); system("pause"); exit(-1); } bt = transpose(b, n_rows_b, n_cols_b, bt); #pragma omp parallel for default(none) private(i, j, k) shared(a, n_rows_a, n_cols_a, b, n_rows_b, n_cols_b, c, bt) schedule(static) for (i = 0; i < (int)n_rows_a; i++) { for (k = 0; k < (int)n_cols_b; k++) { data_t sum = 0.0; for (j = 0; j < (int)n_cols_a; j++) { sum += a[i*n_cols_a + j] * bt[k*n_rows_b + j]; } c[i*n_cols_b + k] = sum; } } free(bt); return c; } /* Dot product of two arrays, a and b, or matrix product * Returns an array that's passed in as the last argument, c. * Uses more memory than the simple version, for transposing matrix b, which * can be a problem if the matrix is large - there might not be enough memory. 
 * This was meant to be the fastest version of the function, but when optimizing
 * for speed it performs about the same as dot_simple_tiled. When optimizing for
 * the smallest code, it is the fastest version here, matching (but not beating)
 * dot_faster optimized for speed.
 */
data_ptr_res_t dot_faster_tiled(cdata_ptr_res_t a, const unsigned n_rows_a, const unsigned n_cols_a, \
	cdata_ptr_res_t b, const unsigned n_rows_b, const unsigned n_cols_b, data_ptr_res_t c) {
	/* Check lengths of the input arrays */
	if (n_cols_a != n_rows_b) {
		printf("#columns A must be equal to #rows B!\n");
		system("pause");
		exit(-2);
	}

	int i = 0, j = 0, k = 0, it = 0, jt = 0, kt = 0;

	data_ptr_res_t bt = malloc(n_rows_b * n_cols_b * sizeof(*b));
	if (!bt) {
		printf("Couldn't allocate memory!\n");
		system("pause");
		exit(-1);
	}
	bt = transpose(b, n_rows_b, n_cols_b, bt);

	memset(c, 0, n_rows_a * n_cols_b * sizeof(*c));

#pragma omp parallel for default(none) private(i, j, k, it, jt, kt) shared(a, n_rows_a, n_cols_a, b, n_rows_b, n_cols_b, c, bt) schedule(static)
	for (i = 0; i < (int)n_rows_a; i += TILE_ORDER) {
		for (k = 0; k < (int)n_cols_b; k += TILE_ORDER) {
			for (j = 0; j < (int)n_cols_a; j += TILE_ORDER) {
				for (it = i; it < MIN((int)n_rows_a, i + TILE_ORDER); it++) {
					for (kt = k; kt < MIN((int)n_cols_b, k + TILE_ORDER); kt++) {
						data_t sum = 0.0;
						for (jt = j; jt < MIN((int)n_cols_a, j + TILE_ORDER); jt++) {
							sum += a[it*n_cols_a + jt] * bt[kt*n_rows_b + jt];
						}
						c[it*n_cols_b + kt] += sum;
					}
				}
			}
		}
	}

	free(bt);
	return c;
}

/* Adds two arrays, element-wise, and puts the result in an array
   that is passed in as the last argument, and also returns it.
   Arrays must be of the same length, or, one of them, or both, can be scalars.
   Use 0 as the length of a scalar, and pass its address in (a pointer to it). */
data_ptr_res_t add_arrays(cdata_ptr_res_t a, const unsigned n_a, cdata_ptr_res_t b, const unsigned n_b, data_ptr_res_t result) {
	/* Check lengths of the input arrays */
	if ((n_a != n_b) && (n_a != 0) && (n_b != 0)) {
		printf("Length of A must be equal to length of B!\n");
		system("pause");
		exit(-2);
	}

	int i = 0;

	/* Neither a nor b are scalars. */
	if ((n_a > 0) && (n_b > 0)) {
#pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b, result) schedule(static)
		for (i = 0; i < (int)n_a; i++) {
			result[i] = a[i] + b[i];
		}
	}
	/* Only b is scalar. */
	else if ((n_b == 0) && (n_a > 0)) {
#pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b, result) schedule(static)
		for (i = 0; i < (int)n_a; i++) {
			result[i] = a[i] + *b;
		}
	}
	/* Only a is scalar. */
	else if ((n_a == 0) && (n_b > 0)) {
#pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b, result) schedule(static)
		for (i = 0; i < (int)n_b; i++) {
			result[i] = *a + b[i];
		}
	}
	/* Both a and b are scalars. */
	else {
		result[0] = *a + *b;
	}

	return result;
}

/* Subtracts the second array from the first one, element-wise, and puts the result
   in an array that is passed in as the last argument, and also returns it.
   Arrays must be of the same length, or, one of them, or both, can be scalars.
   Use 0 as the length of a scalar, and pass its address in (a pointer to it). */
data_ptr_res_t subtract_arrays(cdata_ptr_res_t a, const unsigned n_a, cdata_ptr_res_t b, const unsigned n_b, data_ptr_res_t result) {
	/* Check lengths of the input arrays */
	if ((n_a != n_b) && (n_a != 0) && (n_b != 0)) {
		printf("Length of A must be equal to length of B!\n");
		system("pause");
		exit(-2);
	}

	int i = 0;

	/* Neither a nor b are scalars.
*/ if ((n_a > 0) && (n_b > 0)) { #pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b, result) schedule(static) for (i = 0; i < (int)n_a; i++) { result[i] = a[i] - b[i]; } } /* Only b is scalar. */ else if ((n_b == 0) && (n_a > 0)) { #pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b, result) schedule(static) for (i = 0; i < (int)n_a; i++) { result[i] = a[i] - *b; } } /* Only a is scalar. */ else if ((n_a == 0) && (n_b > 0)) { #pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b, result) schedule(static) for (i = 0; i < (int)n_b; i++) { result[i] = *a - b[i]; } } /* Both a and b are scalars. */ else { result[0] = *a - *b; } return result; } /* Multiplies two arrays, element-wise, and puts the result in an array that is passed in as the last argument, and also returns it. Arrays must be of the same length, or, one of them, or both, can be scalars. Use 0 as the length of a scalar, and pass its address in (a pointer to it). Tiled version is slightly slower in Open MP, and evidently slower sequentially. */ data_ptr_res_t multiply_arrays(cdata_ptr_res_t a, const unsigned n_a, cdata_ptr_res_t b, const unsigned n_b, data_ptr_res_t result) { /* Check lengths of the input arrays */ if ((n_a != n_b) && (n_a != 0) && (n_b != 0)) { printf("Length of A must be equal to length of B!\n"); system("pause"); exit(-2); } int i = 0; /* Neither a nor b are scalars. */ if ((n_a > 0) && (n_b > 0)) { #pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b, result) schedule(static) for (i = 0; i < (int)n_a; i++) { result[i] = a[i] * b[i]; } } /* Only b is scalar. */ else if ((n_b == 0) && (n_a > 0)) { #pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b, result) schedule(static) for (i = 0; i < (int)n_a; i++) { result[i] = a[i] * *b; } } /* Only a is scalar. */ else if ((n_a == 0) && (n_b > 0)) { #pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b, result) schedule(static) for (i = 0; i < (int)n_b; i++) { result[i] = *a * b[i]; } } /* Both a and b are scalars. */ else { result[0] = *a * *b; } return result; } /* Multiplies two arrays, element-wise, and puts the result in an array that is passed in as the last argument, and also returns it. Arrays must be of the same length, or, one of them, or both, can be scalars. Use 0 as the length of a scalar, and pass its address in (a pointer to it). Tiled version is slightly slower in Open MP, and evidently slower sequentially. */ data_ptr_res_t multiply_arrays_tiled(cdata_ptr_res_t a, const unsigned n_a, cdata_ptr_res_t b, const unsigned n_b, data_ptr_res_t result) { /* Check lengths of the input arrays */ if ((n_a != n_b) && (n_a != 0) && (n_b != 0)) { printf("Length of A must be equal to length of B!\n"); system("pause"); exit(-2); } int i = 0, it = 0; /* Neither a nor b are scalars. */ if ((n_a > 0) && (n_b > 0)) { #pragma omp parallel for default(none) private(i, it) shared(a, n_a, b, n_b, result) schedule(static) for (i = 0; i < (int)n_a; i += TILE_ORDER) { for (it = i; it < MIN((int)n_a, i + TILE_ORDER); it++) { result[it] = a[it] * b[it]; } } } /* Only b is scalar. */ else if ((n_b == 0) && (n_a > 0)) { #pragma omp parallel for default(none) private(i, it) shared(a, n_a, b, n_b, result) schedule(static) for (i = 0; i < (int)n_a; i += TILE_ORDER) { for (it = i; it < MIN((int)n_a, i + TILE_ORDER); it++) { result[it] = a[it] * *b; } } } /* Only a is scalar. 
*/ else if ((n_a == 0) && (n_b > 0)) { #pragma omp parallel for default(none) private(i, it) shared(a, n_a, b, n_b, result) schedule(static) for (i = 0; i < (int)n_b; i += TILE_ORDER) { for (it = i; it < MIN((int)n_b, i + TILE_ORDER); it++) { result[it] = *a * b[it]; } } } /* Both a and b are scalars. */ else { result[0] = *a * *b; } return result; } /* Divides two arrays, element-wise, and puts the result in an array that is passed in as the last argument, and also returns it. Arrays must be of the same length, or, one of them, or both, can be scalars. Use 0 as the length of a scalar, and pass its address in (a pointer to it). */ data_ptr_res_t divide_arrays(cdata_ptr_res_t a, const unsigned n_a, cdata_ptr_res_t b, const unsigned n_b, data_ptr_res_t result) { /* Check lengths of the input arrays */ if ((n_a != n_b) && (n_a != 0) && (n_b != 0)) { printf("Length of A must be equal to length of B!\n"); system("pause"); exit(-2); } int i = 0; /* Neither a nor b are scalars. */ if ((n_a > 0) && (n_b > 0)) { #pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b, result) schedule(static) for (i = 0; i < (int)n_a; i++) { result[i] = a[i] / b[i]; } } /* Only b is scalar. */ else if ((n_b == 0) && (n_a > 0)) { #pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b, result) schedule(static) for (i = 0; i < (int)n_a; i++) { result[i] = a[i] / *b; } } /* Only a is scalar. */ else if ((n_a == 0) && (n_b > 0)) { #pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b, result) schedule(static) for (i = 0; i < (int)n_b; i++) { result[i] = *a / b[i]; } } /* Both a and b are scalars. */ else { result[0] = *a / *b; } return result; } /* Updates an array, element-wise, by adding another array to it. Takes both arrays in, and returns the updated one (the first one). The return value (address of the first array) doesn't have to be used. Arrays must be of the same length, or, the second one can be a scalar. Use 0 as the length of a scalar, and pass its address in (a pointer to it). */ data_ptr_res_t add_update(data_ptr_res_t a, const unsigned n_a, cdata_ptr_res_t b, const unsigned n_b) { /* Check lengths of the input arrays */ if (n_a == 0) { printf("'A' cannot be a scalar!\n"); system("pause"); exit(-2); } if ((n_a != n_b) && (n_b != 0)) { printf("Length of A must be equal to length of B!\n"); system("pause"); exit(-2); } int i = 0; /* b is scalar */ if (n_b == 0) { #pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b) schedule(static) for (i = 0; i < (int)n_a; i++) { a[i] += *b; } } /* b is array */ else { #pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b) schedule(static) for (i = 0; i < (int)n_a; i++) { a[i] += b[i]; } } return a; } /* Compares two arrays element-wise, and puts the result in an array that is passed in as the last argument, and also returns it. If an element of array a is greater than a corresponding element of array b, the resulting array will have 1.0 in that position; it will have 0.0 otherwise. Arrays must be of the same length, or, one of them, or both, can be scalars. Use 0 as the length of a scalar, and pass its address in (a pointer to it). */ data_ptr_res_t greater_than(cdata_ptr_res_t a, const unsigned n_a, cdata_ptr_res_t b, const unsigned n_b, data_ptr_res_t result) { /* Check lengths of the input arrays */ if ((n_a != n_b) && (n_a != 0) && (n_b != 0)) { printf("Length of A must be equal to length of B!\n"); system("pause"); exit(-2); } int i = 0; /* Neither a nor b are scalars. 
*/ if ((n_a > 0) && (n_b > 0)) { #pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b, result) schedule(static) for (i = 0; i < (int)n_a; i++) { result[i] = a[i] > b[i]; } } /* Only b is scalar. */ else if ((n_b == 0) && (n_a > 0)) { #pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b, result) schedule(static) for (i = 0; i < (int)n_a; i++) { result[i] = a[i] > *b; } } /* Only a is scalar. */ else if ((n_a == 0) && (n_b > 0)) { #pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b, result) schedule(static) for (i = 0; i < (int)n_b; i++) { result[i] = *a > b[i]; } } /* Both a and b are scalars. */ else { result[0] = *a > *b; } return result; } /* Compares two arrays element-wise, and puts the result in an array that is passed in as the last argument, and also returns it. If an element of array a is equal to a corresponding element of array b, the resulting array will have 1.0 in that position; it will have 0.0 otherwise. Arrays must be of the same length, or, one of them, or both, can be scalars. Use 0 as the length of a scalar, and pass its address in (a pointer to it). */ data_ptr_res_t equal(cdata_ptr_res_t a, const unsigned n_a, cdata_ptr_res_t b, const unsigned n_b, data_ptr_res_t result) { /* Check lengths of the input arrays */ if ((n_a != n_b) && (n_a != 0) && (n_b != 0)) { printf("Length of A must be equal to length of B!\n"); system("pause"); exit(-2); } int i = 0; /* Neither a nor b are scalars. */ if ((n_a > 0) && (n_b > 0)) { #pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b, result) schedule(static) for (i = 0; i < (int)n_a; i++) { result[i] = a[i] == b[i]; } } /* Only b is scalar. */ else if ((n_b == 0) && (n_a > 0)) { #pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b, result) schedule(static) for (i = 0; i < (int)n_a; i++) { result[i] = a[i] == *b; } } /* Only a is scalar. */ else if ((n_a == 0) && (n_b > 0)) { #pragma omp parallel for default(none) private(i) shared(a, n_a, b, n_b, result) schedule(static) for (i = 0; i < (int)n_b; i++) { result[i] = *a == b[i]; } } /* Both a and b are scalars. */ else { result[0] = *a == *b; } return result; } /* Prints vector, or matrix. */ void print(cdata_ptr_res_t m, const unsigned n_rows_m, const unsigned n_cols_m) { for (size_t i = 0; i < n_rows_m; i++) { for (size_t j = 0; j < n_cols_m; j++) { printf("%8.3f ", m[i*n_cols_m + j]); } printf("\n"); } printf("\n"); } /* Sequential function for comparing two arrays by using memcmp Returns 0 if contents of the arrays are the same; -1 or 1 otherwise. */ int compare_memcmp(cdata_ptr_res_t a, const unsigned n_a, cdata_ptr_res_t b, const unsigned n_b) { /* Check lengths of the input arrays */ if (n_a != n_b) { printf("Length of A must be equal to length of B!\n"); system("pause"); exit(-2); } return memcmp(a, b, n_a); } /* Sequential function for comparing two arrays by using a loop Returns 0 if contents of the arrays are the same; 1 otherwise. */ int compare(cdata_ptr_res_t a, const unsigned n_a, cdata_ptr_res_t b, const unsigned n_b) { /* Check lengths of the input arrays */ if (n_a != n_b) { printf("Length of A must be equal to length of B!\n"); system("pause"); exit(-2); } for (size_t i = 0; i < n_a; i++) { if (fabs(a[i] - b[i]) > TOLERANCE) { return 1; } } return 0; } /* Compares two scalars within a given TOLERANCE Returns 0 if contents of the arrays are the same; 1 otherwise. 
*/
int compare_scalars(const data_t a, const data_t b) {
	if (fabs(a - b) > TOLERANCE) {
		return 1;
	}
	return 0;
}

int main(int argc, char *argv[]) {
	/* Initializes random number generator */
	time_t t;
	srand((unsigned)time(&t));
	srand(0);	/* note: overrides the time-based seed above with a fixed seed, so runs are reproducible */

	omp_set_num_threads(8);

	printf("\tOPEN MP IMPLEMENTATION\n\n");
	printf("omp_get_num_procs %i\n", omp_get_num_procs());
	printf("omp_get_max_threads %i\n", omp_get_max_threads());
#pragma omp parallel
#pragma omp single
	printf("Working with %d threads.\n\n", omp_get_num_threads());

	test();

	system("pause");

	return(0);
}

#endif // MATRICES_1D_OPEN_MP
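To make the tiling idea of dot_simple_tiled() concrete outside this module, here is a self-contained distillation (an illustrative sketch; plain double arrays and a fixed TILE of 32 stand in for the data_t and TILE_ORDER definitions from "matrices_1d.h"). Parallelizing only the outer tile loop keeps the updates of c race-free, because each thread owns a disjoint block of rows.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TILE 32
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* C = A * B for square n x n matrices, blocked into TILE x TILE tiles */
static void dot_tiled(const double *a, const double *b, double *c, int n)
{
	memset(c, 0, (size_t)n * n * sizeof(*c));
#pragma omp parallel for schedule(static)
	for (int i = 0; i < n; i += TILE)
		for (int k = 0; k < n; k += TILE)
			for (int j = 0; j < n; j += TILE)
				for (int it = i; it < MIN(n, i + TILE); it++)
					for (int kt = k; kt < MIN(n, k + TILE); kt++) {
						double sum = 0.0;
						for (int jt = j; jt < MIN(n, j + TILE); jt++)
							sum += a[it*n + jt] * b[jt*n + kt];
						c[it*n + kt] += sum;
					}
}

int main(void)
{
	const int n = 256;
	double *a = malloc((size_t)n * n * sizeof(*a));
	double *b = malloc((size_t)n * n * sizeof(*b));
	double *c = malloc((size_t)n * n * sizeof(*c));
	if (!a || !b || !c) return 1;
	for (int i = 0; i < n * n; i++) { a[i] = 1.0; b[i] = 2.0; }
	dot_tiled(a, b, c, n);
	printf("c[0] = %.1f (expected %.1f)\n", c[0], 2.0 * n);	/* every entry is 2n */
	free(a); free(b); free(c);
	return 0;
}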
runner.c
#define _CRT_SECURE_NO_WARNINGS #include <stdint.h> void ufbxt_assert_fail(const char *file, uint32_t line, const char *expr); #define ufbx_assert(cond) do { \ if (!(cond)) ufbxt_assert_fail(__FILE__, __LINE__, "Internal assert: " #cond); \ } while (0) #define ufbxt_arraycount(arr) (sizeof(arr) / sizeof(*(arr))) #undef ufbx_assert #include "../ufbx.h" #include <string.h> #include <stdlib.h> #include <stdio.h> #include <setjmp.h> #include <stdarg.h> #include <math.h> #if defined(_OPENMP) #include <omp.h> #else static int omp_get_thread_num() { return 0; } static int omp_get_num_threads() { return 1; } #endif // -- Thread local #ifdef _MSC_VER #define ufbxt_threadlocal __declspec(thread) #else #define ufbxt_threadlocal __thread #endif // -- Timing typedef struct { uint64_t os_tick; uint64_t cpu_tick; } cputime_sync_point; typedef struct { cputime_sync_point begin, end; uint64_t os_freq; uint64_t cpu_freq; double rcp_os_freq; double rcp_cpu_freq; } cputime_sync_span; extern const cputime_sync_span *cputime_default_sync; void cputime_begin_init(); void cputime_end_init(); void cputime_init(); void cputime_begin_sync(cputime_sync_span *span); void cputime_end_sync(cputime_sync_span *span); uint64_t cputime_cpu_tick(); uint64_t cputime_os_tick(); double cputime_cpu_delta_to_sec(const cputime_sync_span *span, uint64_t cpu_delta); double cputime_os_delta_to_sec(const cputime_sync_span *span, uint64_t os_delta); double cputime_cpu_tick_to_sec(const cputime_sync_span *span, uint64_t cpu_tick); double cputime_os_tick_to_sec(const cputime_sync_span *span, uint64_t os_tick); #if defined(_WIN32) #define WIN32_LEAN_AND_MEAN #define NOMINMAX #include <Windows.h> void cputime_sync_now(cputime_sync_point *sync, int accuracy) { uint64_t best_delta = UINT64_MAX; uint64_t os_tick = 0, cpu_tick = 0; int runs = accuracy ? accuracy : 100; for (int i = 0; i < runs; i++) { LARGE_INTEGER begin, end; QueryPerformanceCounter(&begin); uint64_t cycle = __rdtsc(); QueryPerformanceCounter(&end); uint64_t delta = end.QuadPart - begin.QuadPart; if (delta < best_delta) { os_tick = (begin.QuadPart + end.QuadPart) / 2; cpu_tick = cycle; } if (delta == 0) break; } sync->cpu_tick = cpu_tick; sync->os_tick = os_tick; } uint64_t cputime_cpu_tick() { return __rdtsc(); } uint64_t cputime_os_tick() { LARGE_INTEGER res; QueryPerformanceCounter(&res); return res.QuadPart; } static uint64_t cputime_os_freq() { LARGE_INTEGER res; QueryPerformanceFrequency(&res); return res.QuadPart; } static void cputime_os_wait() { Sleep(1); } #else #include <time.h> // TODO: Other architectures #include <x86intrin.h> void cputime_sync_now(cputime_sync_point *sync, int accuracy) { uint64_t best_delta = UINT64_MAX; uint64_t os_tick, cpu_tick; struct timespec begin, end; int runs = accuracy ? 
accuracy : 100;
	for (int i = 0; i < runs; i++) {
		clock_gettime(CLOCK_REALTIME, &begin);
		uint64_t cycle = (uint64_t)__rdtsc();
		clock_gettime(CLOCK_REALTIME, &end);
		uint64_t begin_ns = (uint64_t)begin.tv_sec*UINT64_C(1000000000) + (uint64_t)begin.tv_nsec;
		uint64_t end_ns = (uint64_t)end.tv_sec*UINT64_C(1000000000) + (uint64_t)end.tv_nsec;
		uint64_t delta = end_ns - begin_ns;
		if (delta < best_delta) {
			os_tick = (begin_ns + end_ns) / 2;
			cpu_tick = cycle;
		}
		if (delta == 0) break;
	}

	sync->cpu_tick = cpu_tick;
	sync->os_tick = os_tick;
}

uint64_t cputime_cpu_tick()
{
	return (uint64_t)__rdtsc();
}

uint64_t cputime_os_tick()
{
	struct timespec ts;
	clock_gettime(CLOCK_REALTIME, &ts);
	return ts.tv_sec*UINT64_C(1000000000) + (uint64_t)ts.tv_nsec;
}

static uint64_t cputime_os_freq()
{
	return UINT64_C(1000000000);
}

static void cputime_os_wait()
{
	struct timespec duration;
	duration.tv_sec = 0;
	duration.tv_nsec = 1000000l; /* 1 ms, like Sleep(1) above; tv_nsec must be less than one second */
	nanosleep(&duration, NULL);
}

#endif

static cputime_sync_span g_cputime_sync;
const cputime_sync_span *cputime_default_sync = &g_cputime_sync;

void cputime_begin_init()
{
	cputime_begin_sync(&g_cputime_sync);
}

void cputime_end_init()
{
	cputime_end_sync(&g_cputime_sync);
}

void cputime_init()
{
	cputime_begin_init();
	cputime_end_init();
}

void cputime_begin_sync(cputime_sync_span *span)
{
	cputime_sync_now(&span->begin, 0);
}

void cputime_end_sync(cputime_sync_span *span)
{
	uint64_t os_freq = cputime_os_freq();
	uint64_t min_span = os_freq / 1000;
	uint64_t os_tick = cputime_os_tick();
	while (os_tick - span->begin.os_tick <= min_span) {
		cputime_os_wait();
		os_tick = cputime_os_tick();
	}

	cputime_sync_now(&span->end, 0);

	uint64_t len_os = span->end.os_tick - span->begin.os_tick;
	uint64_t len_cpu = span->end.cpu_tick - span->begin.cpu_tick;
	double cpu_freq = (double)len_cpu / (double)len_os * (double)os_freq;

	span->os_freq = os_freq;
	span->cpu_freq = (uint64_t)cpu_freq;
	span->rcp_os_freq = 1.0 / (double)os_freq;
	span->rcp_cpu_freq = 1.0 / cpu_freq;
}

double cputime_cpu_delta_to_sec(const cputime_sync_span *span, uint64_t cpu_delta)
{
	if (!span) span = &g_cputime_sync;
	return (double)cpu_delta * span->rcp_cpu_freq;
}

double cputime_os_delta_to_sec(const cputime_sync_span *span, uint64_t os_delta)
{
	if (!span) span = &g_cputime_sync;
	return (double)os_delta * span->rcp_os_freq;
}

double cputime_cpu_tick_to_sec(const cputime_sync_span *span, uint64_t cpu_tick)
{
	if (!span) span = &g_cputime_sync;
	return (double)(cpu_tick - span->begin.cpu_tick) * span->rcp_cpu_freq;
}

double cputime_os_tick_to_sec(const cputime_sync_span *span, uint64_t os_tick)
{
	if (!span) span = &g_cputime_sync;
	return (double)(os_tick - span->begin.os_tick) * span->rcp_os_freq;
}

// -- Vector helpers

static ufbx_real ufbxt_dot2(ufbx_vec2 a, ufbx_vec2 b)
{
	return a.x*b.x + a.y*b.y;
}

static ufbx_real ufbxt_dot3(ufbx_vec3 a, ufbx_vec3 b)
{
	return a.x*b.x + a.y*b.y + a.z*b.z;
}

static ufbx_vec2 ufbxt_add2(ufbx_vec2 a, ufbx_vec2 b)
{
	ufbx_vec2 v;
	v.x = a.x + b.x;
	v.y = a.y + b.y;
	return v;
}

static ufbx_vec3 ufbxt_add3(ufbx_vec3 a, ufbx_vec3 b)
{
	ufbx_vec3 v;
	v.x = a.x + b.x;
	v.y = a.y + b.y;
	v.z = a.z + b.z;
	return v;
}

static ufbx_vec2 ufbxt_sub2(ufbx_vec2 a, ufbx_vec2 b)
{
	ufbx_vec2 v;
	v.x = a.x - b.x;
	v.y = a.y - b.y;
	return v;
}

static ufbx_vec3 ufbxt_sub3(ufbx_vec3 a, ufbx_vec3 b)
{
	ufbx_vec3 v;
	v.x = a.x - b.x;
	v.y = a.y - b.y;
	v.z = a.z - b.z;
	return v;
}

// -- Test framework

#define ufbxt_memory_context(data) \
	ufbxt_make_memory_context(data, (uint32_t)sizeof(data) - 1)
#define ufbxt_memory_context_values(data) \
	ufbxt_make_memory_context_values(data, (uint32_t)sizeof(data) - 1)

#define ufbxt_assert(cond) do { \
		if (!(cond)) ufbxt_assert_fail(__FILE__, __LINE__, #cond); \
	} while (0)

#define ufbxt_assert_eq(a, b, size) do { \
		ufbxt_assert_eq_test(a, b, size, __FILE__, __LINE__, \
			"ufbxt_assert_eq(" #a ", " #b ", " #size ")"); \
	} while (0)

typedef struct {
	int failed;
	const char *file;
	uint32_t line;
	const char *expr;
} ufbxt_fail;

typedef struct {
	const char *name;
	void (*func)(void);
	ufbxt_fail fail;
} ufbxt_test;

ufbxt_test *g_current_test;
uint64_t g_bechmark_begin_tick;

ufbx_error g_error;
jmp_buf g_test_jmp;
int g_verbose;

char g_log_buf[16*1024];
uint32_t g_log_pos;

char g_hint[8*1024];

bool g_skip_print_ok = false;

typedef struct {
	char *test_name;
	uint8_t patch_value;
	uint32_t patch_offset;
	uint32_t temp_limit;
	uint32_t result_limit;
	uint32_t truncate_length;
	const char *description;
} ufbxt_check_line;

static ufbxt_check_line g_checks[16384];

ufbxt_threadlocal jmp_buf *t_jmp_buf;

void ufbxt_assert_fail(const char *file, uint32_t line, const char *expr)
{
	if (t_jmp_buf) {
		longjmp(*t_jmp_buf, 1);
	}

	printf("FAIL\n");
	fflush(stdout);

	g_current_test->fail.failed = 1;
	g_current_test->fail.file = file;
	g_current_test->fail.line = line;
	g_current_test->fail.expr = expr;

	longjmp(g_test_jmp, 1);
}

void ufbxt_logf(const char *fmt, ...)
{
	if (!g_verbose) return;

	va_list args;
	va_start(args, fmt);
	if (g_log_pos < sizeof(g_log_buf)) {
		g_log_pos += vsnprintf(g_log_buf + g_log_pos, sizeof(g_log_buf) - g_log_pos, fmt, args);
		if (g_log_pos < sizeof(g_log_buf)) {
			g_log_buf[g_log_pos] = '\n';
			g_log_pos++;
		}
	}
	va_end(args);
}

void ufbxt_hintf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	vsnprintf(g_hint, sizeof(g_hint), fmt, args);
	va_end(args);
}

void ufbxt_assert_eq_test(const void *a, const void *b, size_t size, const char *file, uint32_t line, const char *expr)
{
	const char *ac = (const char *)a;
	const char *bc = (const char *)b;
	for (size_t i = 0; i < size; i++) {
		if (ac[i] == bc[i]) continue;

		ufbxt_logf("Byte offset %u: 0x%02x != 0x%02x\n", (uint32_t)i, (uint8_t)ac[i], (uint8_t)bc[i]);
		ufbxt_assert_fail(file, line, expr);
	}
}

void ufbxt_log_flush()
{
	int prev_newline = 1;
	for (uint32_t i = 0; i < g_log_pos; i++) {
		if (i >= sizeof(g_log_buf)) break;
		char ch = g_log_buf[i];
		if (ch == '\n') {
			putchar('\n');
			prev_newline = 1;
		} else {
			if (prev_newline) {
				putchar(' ');
				putchar(' ');
			}
			prev_newline = 0;
			putchar(ch);
		}
	}
	g_log_pos = 0;
}

void ufbxt_log_error(ufbx_error *err)
{
	if (!err) return;
	for (size_t i = 0; i < err->stack_size; i++) {
		ufbx_error_frame *f = &err->stack[i];
		ufbxt_logf("Line %u %s: %s", f->source_line, f->function, f->description);
	}
}

void ufbxt_bechmark_begin()
{
	g_bechmark_begin_tick = cputime_cpu_tick();
}

double ufbxt_bechmark_end()
{
	uint64_t end_tick = cputime_cpu_tick();
	uint64_t delta = end_tick - g_bechmark_begin_tick;
	double sec = cputime_cpu_delta_to_sec(NULL, delta);
	double ghz = (double)cputime_default_sync->cpu_freq / 1e9;
	ufbxt_logf("%.3fms / %ukcy at %.2fGHz", sec * 1e3, (uint32_t)(delta / 1000), ghz);
	return sec;
}

char data_root[256];

static void *ufbxt_read_file(const char *name, size_t *p_size)
{
	FILE *file = fopen(name, "rb");
	if (!file) return NULL;

	fseek(file, 0, SEEK_END);
	size_t size = ftell(file);
	fseek(file, 0, SEEK_SET);

	char *data = malloc(size + 1);
	ufbxt_assert(data != NULL);
	size_t num_read = fread(data, 1, size, file);
	fclose(file);

	data[size] = '\0';

	if (num_read != size) {
		ufbxt_assert_fail(__FILE__, __LINE__, "Failed to load file");
	}
	*p_size = size;
	return data;
}

typedef struct {
	char name[64];

	size_t num_faces;
	size_t num_indices;

	ufbx_face *faces;

	ufbx_vertex_vec3 vertex_position;
	ufbx_vertex_vec3 vertex_normal;
	ufbx_vertex_vec2 vertex_uv;
} ufbxt_obj_mesh;

typedef struct {
	ufbxt_obj_mesh *meshes;
	size_t num_meshes;
} ufbxt_obj_file;

static ufbxt_obj_file *ufbxt_load_obj(void *obj_data, size_t obj_size)
{
	size_t num_positions = 0;
	size_t num_normals = 0;
	size_t num_uvs = 0;
	size_t num_faces = 0;
	size_t num_meshes = 0;

	char *line = (char*)obj_data;
	for (;;) {
		char *end = strpbrk(line, "\r\n");
		char prev = '\0';
		if (end) {
			prev = *end;
			*end = '\0';
		}

		if (!strncmp(line, "v ", 2)) num_positions++;
		else if (!strncmp(line, "vt ", 3)) num_uvs++;
		else if (!strncmp(line, "vn ", 3)) num_normals++;
		else if (!strncmp(line, "f ", 2)) num_faces++;
		else if (!strncmp(line, "g default", 9)) { /* ignore default group */ }
		else if (!strncmp(line, "g ", 2)) num_meshes++;

		if (end) {
			*end = prev;
			line = end + 1;
		} else {
			break;
		}
	}

	size_t alloc_size = 0;
	alloc_size += sizeof(ufbxt_obj_file);
	alloc_size += num_positions * sizeof(ufbx_vec3);
	alloc_size += num_normals * sizeof(ufbx_vec3);
	alloc_size += num_uvs * sizeof(ufbx_vec2);
	alloc_size += num_faces * sizeof(ufbx_face);
	alloc_size += num_faces * 3 * 4 * sizeof(int32_t);
	alloc_size += num_meshes * sizeof(ufbxt_obj_mesh);

	void *data = malloc(alloc_size);
	ufbxt_assert(data);

	ufbxt_obj_file *obj = (ufbxt_obj_file*)data;
	ufbx_vec3 *positions = (ufbx_vec3*)(obj + 1);
	ufbx_vec3 *normals = (ufbx_vec3*)(positions + num_positions);
	ufbx_vec2 *uvs = (ufbx_vec2*)(normals + num_normals);
	ufbx_face *faces = (ufbx_face*)(uvs + num_uvs);
	int32_t *position_indices = (int32_t*)(faces + num_faces);
	int32_t *normal_indices = (int32_t*)(position_indices + num_faces * 4);
	int32_t *uv_indices = (int32_t*)(normal_indices + num_faces * 4);
	ufbxt_obj_mesh *meshes = (ufbxt_obj_mesh*)(uv_indices + num_faces * 4);
	void *data_end = meshes + num_meshes;
	ufbxt_assert((char*)data_end - (char*)data == alloc_size);

	ufbx_vec3 *dp = positions;
	ufbx_vec3 *dn = normals;
	ufbx_vec2 *du = uvs;
	ufbxt_obj_mesh *mesh = NULL;

	int32_t *dpi = position_indices;
	int32_t *dni = normal_indices;
	int32_t *dui = uv_indices;

	ufbx_face *df = faces;

	obj->meshes = meshes;
	obj->num_meshes = num_meshes;

	line = (char*)obj_data;
	for (;;) {
		char *line_end = strpbrk(line, "\r\n");
		char prev = '\0';
		if (line_end) {
			prev = *line_end;
			*line_end = '\0';
		}

		if (!strncmp(line, "v ", 2)) {
			ufbxt_assert(sscanf(line, "v %lf %lf %lf", &dp->x, &dp->y, &dp->z) == 3);
			dp++;
		} else if (!strncmp(line, "vt ", 3)) {
			ufbxt_assert(sscanf(line, "vt %lf %lf", &du->x, &du->y) == 2);
			du++;
		} else if (!strncmp(line, "vn ", 3)) {
			ufbxt_assert(sscanf(line, "vn %lf %lf %lf", &dn->x, &dn->y, &dn->z) == 3);
			dn++;
		} else if (!strncmp(line, "f ", 2)) {
			ufbxt_assert(mesh);

			df->index_begin = (uint32_t)mesh->num_indices;
			df->num_indices = 0;

			char *begin = line + 2;
			do {
				char *end = strchr(begin, ' ');
				if (end) *end++ = '\0';

				int pi = 0, ui = 0, ni = 0;
				if (sscanf(begin, "%d/%d/%d", &pi, &ui, &ni) == 3) {
				} else if (sscanf(begin, "%d//%d", &pi, &ni) == 2) {
				} else {
					ufbxt_assert(0 && "Failed to parse face indices");
				}

				*dpi++ = pi - 1;
				*dni++ = ni - 1;
				*dui++ = ui - 1;
				mesh->num_indices++;
				df->num_indices++;

				begin = end;
			} while (begin);

			mesh->num_faces++;
			df++;
		} else if (!strncmp(line, "g default", 9)) {
			/* ignore default group */
		} else if (!strncmp(line, "g ", 2)) {
			mesh = mesh ?
mesh + 1 : meshes; memset(mesh, 0, sizeof(ufbxt_obj_mesh)); // HACK: Truncate name at '_' to separate Blender // model and mesh names size_t len = strcspn(line + 2, "_"); ufbxt_assert(len < sizeof(mesh->name)); memcpy(mesh->name, line + 2, len); mesh->faces = df; mesh->vertex_position.data = positions; mesh->vertex_normal.data = normals; mesh->vertex_uv.data = uvs; mesh->vertex_position.indices = dpi; mesh->vertex_normal.indices = dni; mesh->vertex_uv.indices = dui; } if (line_end) { *line_end = prev; line = line_end + 1; } else { break; } } return obj; } typedef struct { size_t num; ufbx_real sum; ufbx_real max; } ufbxt_diff_error; static void ufbxt_assert_close_real(ufbxt_diff_error *p_err, ufbx_real a, ufbx_real b) { ufbx_real err = fabs(a - b); ufbxt_assert(err < 0.001); p_err->num++; p_err->sum += err; if (err > p_err->max) p_err->max = err; } static void ufbxt_assert_close_vec2(ufbxt_diff_error *p_err, ufbx_vec2 a, ufbx_vec2 b) { ufbxt_assert_close_real(p_err, a.x, b.x); ufbxt_assert_close_real(p_err, a.y, b.y); } static void ufbxt_assert_close_vec3(ufbxt_diff_error *p_err, ufbx_vec3 a, ufbx_vec3 b) { ufbxt_assert_close_real(p_err, a.x, b.x); ufbxt_assert_close_real(p_err, a.y, b.y); ufbxt_assert_close_real(p_err, a.z, b.z); } static void ufbxt_assert_close_vec4(ufbxt_diff_error *p_err, ufbx_vec4 a, ufbx_vec4 b) { ufbxt_assert_close_real(p_err, a.x, b.x); ufbxt_assert_close_real(p_err, a.y, b.y); ufbxt_assert_close_real(p_err, a.z, b.z); ufbxt_assert_close_real(p_err, a.w, b.w); } static void ufbxt_diff_to_obj(ufbx_scene *scene, ufbxt_obj_file *obj, ufbxt_diff_error *p_err, bool approx_normals) { for (size_t mesh_i = 0; mesh_i < obj->num_meshes; mesh_i++) { ufbxt_obj_mesh *obj_mesh = &obj->meshes[mesh_i]; if (obj_mesh->num_indices == 0) continue; ufbx_mesh *mesh = ufbx_find_mesh(scene, obj_mesh->name); ufbxt_assert(mesh); ufbxt_assert(obj_mesh->num_faces == mesh->num_faces); ufbxt_assert(obj_mesh->num_indices == mesh->num_indices); ufbx_matrix *mat = &mesh->node.to_root; ufbx_matrix norm_mat = ufbx_get_normal_matrix(mat); // Assume that the indices are in the same order! 
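		// Both index streams are mesh-relative: in the .obj loader above each
		// face's index_begin counts from the start of its own mesh, matching
		// how ufbx lays out mesh indices, so a face's vertices live in
		// [index_begin, index_begin + num_indices) of the respective buffers.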
		for (size_t face_ix = 0; face_ix < mesh->num_faces; face_ix++) {
			ufbx_face obj_face = obj_mesh->faces[face_ix];
			ufbx_face face = mesh->faces[face_ix];
			ufbxt_assert(obj_face.index_begin == face.index_begin);
			ufbxt_assert(obj_face.num_indices == face.num_indices);

			for (size_t ix = face.index_begin; ix < face.index_begin + face.num_indices; ix++) {
				ufbx_vec3 op = ufbx_get_vertex_vec3(&obj_mesh->vertex_position, ix);
				ufbx_vec3 fp = ufbx_get_vertex_vec3(&mesh->vertex_position, ix);
				ufbx_vec3 on = ufbx_get_vertex_vec3(&obj_mesh->vertex_normal, ix);
				ufbx_vec3 fn = ufbx_get_vertex_vec3(&mesh->vertex_normal, ix);

				fp = ufbx_transform_position(mat, fp);
				fn = ufbx_transform_direction(&norm_mat, fn);

				ufbx_real fn_len = sqrt(fn.x*fn.x + fn.y*fn.y + fn.z*fn.z);
				fn.x /= fn_len;
				fn.y /= fn_len;
				fn.z /= fn_len;

				ufbxt_assert_close_vec3(p_err, op, fp);

				if (approx_normals) {
					ufbx_real dot = ufbxt_dot3(on, fn);
					ufbxt_assert(dot >= 0.9);
				} else {
					ufbxt_assert_close_vec3(p_err, on, fn);
				}

				if (mesh->vertex_uv.data) {
					ufbxt_assert(obj_mesh->vertex_uv.data);
					ufbx_vec2 ou = ufbx_get_vertex_vec2(&obj_mesh->vertex_uv, ix);
					ufbx_vec2 fu = ufbx_get_vertex_vec2(&mesh->vertex_uv, ix);
					ufbxt_assert_close_vec2(p_err, ou, fu);
				}
			}
		}
	}
}

void ufbxt_check_string(ufbx_string str)
{
	// Data may never be NULL, empty strings should have data = ""
	ufbxt_assert(str.data != NULL);
	ufbxt_assert(strlen(str.data) == str.length);
}

void ufbxt_check_vertex_element(ufbx_scene *scene, ufbx_mesh *mesh, void *void_elem, size_t elem_size)
{
	ufbx_vertex_void *elem = (ufbx_vertex_void*)void_elem;
	if (elem->data == NULL) {
		ufbxt_assert(elem->indices == NULL);
		ufbxt_assert(elem->num_elements == 0);
		return;
	}

	ufbxt_assert(elem->num_elements >= 0);
	ufbxt_assert(elem->indices != NULL);

	// Check that the indices are in range
	for (size_t i = 0; i < mesh->num_indices; i++) {
		int32_t ix = elem->indices[i];
		ufbxt_assert(ix >= -1 && ix < elem->num_elements);
	}

	// Check that the data at invalid index is valid and zero
	char zero[32] = { 0 };
	ufbxt_assert(elem_size <= 32);
	ufbxt_assert(!memcmp((char*)elem->data - elem_size, zero, elem_size));
}

void ufbxt_check_props(ufbx_scene *scene, ufbx_props *props, bool top)
{
	ufbx_prop *prev = NULL;
	for (size_t i = 0; i < props->num_props; i++) {
		ufbx_prop *prop = &props->props[i];

		ufbxt_assert(prop->type < UFBX_NUM_PROP_TYPES);
		ufbxt_check_string(prop->name);
		ufbxt_check_string(prop->value_str);

		// Properties should be sorted by name and duplicates should be removed
		if (prev) {
			ufbxt_assert(prop->imp_key >= prev->imp_key);
			ufbxt_assert(strcmp(prop->name.data, prev->name.data) > 0);
		}

		if (top) {
			ufbx_prop *ref = ufbx_find_prop(props, prop->name.data);
			ufbxt_assert(prop == ref);
		}

		prev = prop;
	}

	if (props->defaults) {
		ufbxt_check_props(scene, props->defaults, false);
	}
}

void ufbxt_check_node(ufbx_scene *scene, ufbx_node *node)
{
	ufbxt_check_string(node->name);
	ufbxt_check_props(scene, &node->props, true);

	if (node->parent) {
		bool found = false;
		for (size_t i = 0; i < node->parent->children.size; i++) {
			if (node->parent->children.data[i] == node) {
				found = true;
				break;
			}
		}
		ufbxt_assert(found);
	}

	for (size_t i = 0; i < node->children.size; i++) {
		ufbxt_assert(node->children.data[i]->parent == node);
	}
}
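// Validate every invariant ufbx promises for a loaded mesh: face index ranges
// are contiguous (except after bad faces), triangulation stays within each
// face, edges reference valid indices, and the first UV/color set aliases the
// vertex_uv/vertex_color shorthands.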
void ufbxt_check_mesh(ufbx_scene *scene, ufbx_mesh *mesh)
{
	ufbx_mesh *found = ufbx_find_mesh(scene, mesh->node.name.data);
	ufbxt_assert(found && !strcmp(found->node.name.data, mesh->node.name.data));

	ufbxt_check_vertex_element(scene, mesh, &mesh->vertex_position, sizeof(ufbx_vec3));
	ufbxt_check_vertex_element(scene, mesh, &mesh->vertex_normal, sizeof(ufbx_vec3));
	ufbxt_check_vertex_element(scene, mesh, &mesh->vertex_binormal, sizeof(ufbx_vec3));
	ufbxt_check_vertex_element(scene, mesh, &mesh->vertex_tangent, sizeof(ufbx_vec3));
	ufbxt_check_vertex_element(scene, mesh, &mesh->vertex_uv, sizeof(ufbx_vec2));
	ufbxt_check_vertex_element(scene, mesh, &mesh->vertex_color, sizeof(ufbx_vec4));

	ufbxt_assert(mesh->num_vertices == mesh->vertex_position.num_elements);
	ufbxt_assert(mesh->num_triangles <= mesh->num_indices);

	uint32_t prev_end = 0;
	for (size_t i = 0; i < mesh->num_faces + mesh->num_bad_faces; i++) {
		ufbx_face face = mesh->faces[i];
		if (i == mesh->num_faces) prev_end = 0;

		if (mesh->num_bad_faces == 0) {
			ufbxt_assert(face.index_begin == prev_end);
		} else {
			ufbxt_assert(face.index_begin >= prev_end);
		}

		if (i < mesh->num_faces) {
			ufbxt_assert(face.num_indices >= 3);
		} else {
			ufbxt_assert(face.num_indices > 0 && face.num_indices < 3);
		}

		prev_end = face.index_begin + face.num_indices;
		ufbxt_assert(prev_end <= mesh->num_indices);

		for (size_t j = face.index_begin; j < face.index_begin + face.num_indices; j++) {
			ufbx_face *p_face = ufbx_find_face(mesh, j);
			ufbxt_assert(p_face - mesh->faces == i);
		}

		if (face.num_indices >= 3) {
			size_t num_tris = face.num_indices - 2;
			ufbxt_assert(face.num_indices <= 1024);
			// A triangulated n-gon needs 3*(n-2) indices, so size the
			// scratch buffer for up to three indices per polygon corner.
			uint32_t tris[3*1024];
			uint32_t ix_count[1024];
			memset(tris, 0xff, num_tris * 3 * sizeof(uint32_t));
			memset(ix_count, 0, face.num_indices * sizeof(uint32_t));
			ufbxt_assert(ufbx_triangulate(tris, ufbxt_arraycount(tris), mesh, face));

			for (size_t i = 0; i < num_tris; i++) {
				uint32_t a = tris[i*3 + 0];
				uint32_t b = tris[i*3 + 1];
				uint32_t c = tris[i*3 + 2];
				ufbxt_assert(a != b);
				ufbxt_assert(b != c);
				ufbxt_assert(a >= face.index_begin && a - face.index_begin < face.num_indices);
				ufbxt_assert(b >= face.index_begin && b - face.index_begin < face.num_indices);
				ufbxt_assert(c >= face.index_begin && c - face.index_begin < face.num_indices);
				ix_count[a - face.index_begin]++;
				ix_count[b - face.index_begin]++;
				ix_count[c - face.index_begin]++;
			}

			// Every corner of the face must be referenced by at least one triangle
			for (uint32_t i = 0; i < face.num_indices; i++) {
				ufbxt_assert(ix_count[i] >= 1);
			}
		}
	}

	for (size_t i = 0; i < mesh->uv_sets.size; i++) {
		ufbx_uv_set *set = &mesh->uv_sets.data[i];
		if (i == 0) {
			ufbxt_assert(mesh->vertex_uv.data == set->vertex_uv.data);
			ufbxt_assert(mesh->vertex_uv.indices == set->vertex_uv.indices);
			ufbxt_assert(mesh->vertex_uv.num_elements == set->vertex_uv.num_elements);
		}
		ufbxt_check_string(set->name);
		ufbxt_check_vertex_element(scene, mesh, &set->vertex_uv, sizeof(ufbx_vec2));
	}

	for (size_t i = 0; i < mesh->color_sets.size; i++) {
		ufbx_color_set *set = &mesh->color_sets.data[i];
		if (i == 0) {
			ufbxt_assert(mesh->vertex_color.data == set->vertex_color.data);
			ufbxt_assert(mesh->vertex_color.indices == set->vertex_color.indices);
			ufbxt_assert(mesh->vertex_color.num_elements == set->vertex_color.num_elements);
		}
		ufbxt_check_string(set->name);
		ufbxt_check_vertex_element(scene, mesh, &set->vertex_color, sizeof(ufbx_vec4));
	}

	for (size_t i = 0; i < mesh->num_edges; i++) {
		ufbx_edge edge = mesh->edges[i];
		ufbxt_assert(edge.indices[0] < mesh->num_indices);
		ufbxt_assert(edge.indices[1] < mesh->num_indices);
		ufbx_face *face = ufbx_find_face(mesh, edge.indices[0]);
		ufbxt_assert(face);
		ufbxt_assert(face == ufbx_find_face(mesh, edge.indices[1]));
	}
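	// face_material is optional: when present it must index into the mesh's
	// material list for every face.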
	if (mesh->face_material) {
		for (size_t i = 0; i < mesh->num_faces; i++) {
			int32_t material = mesh->face_material[i];
			ufbxt_assert(material >= 0 && material < mesh->materials.size);
		}
	}

	for (size_t i = 0; i < mesh->skins.size; i++) {
		ufbx_skin *skin = &mesh->skins.data[i];
		ufbxt_assert(skin->bone);
		ufbxt_check_node(scene, skin->bone);

		for (size_t j = 0; j < skin->num_weights; j++) {
			ufbxt_assert(skin->indices[j] >= -1 && skin->indices[j] < mesh->num_vertices);
		}
	}
}

void ufbxt_check_material(ufbx_scene *scene, ufbx_material *material)
{
	ufbxt_check_string(material->name);
	ufbxt_check_props(scene, &material->props, true);
}

void ufbxt_check_anim_stack(ufbx_scene *scene, ufbx_anim_stack *anim_stack)
{
	ufbxt_check_string(anim_stack->name);
	ufbxt_check_props(scene, &anim_stack->props, true);
	for (size_t i = 0; i < anim_stack->layers.size; i++) {
		ufbx_anim_layer *layer = anim_stack->layers.data[i];
		ptrdiff_t layer_i = layer - scene->anim_layers.data;
		ufbxt_assert(layer >= scene->anim_layers.data);
		ufbxt_assert(layer < scene->anim_layers.data + scene->anim_layers.size);
	}
}

void ufbxt_check_anim_layer(ufbx_scene *scene, ufbx_anim_layer *anim_layer)
{
	ufbxt_check_string(anim_layer->name);
	ufbxt_check_props(scene, &anim_layer->layer_props, true);
}

void ufbxt_check_anim_prop(ufbx_scene *scene, ufbx_anim_prop *anim_prop)
{
	ufbxt_check_string(anim_prop->name);
	switch (anim_prop->target) {
	case UFBX_ANIM_UNKNOWN: /* Nop */ break;
	case UFBX_ANIM_MODEL: ufbxt_assert(anim_prop->index < scene->models.size); break;
	case UFBX_ANIM_MESH: ufbxt_assert(anim_prop->index < scene->meshes.size); break;
	case UFBX_ANIM_LIGHT: ufbxt_assert(anim_prop->index < scene->lights.size); break;
	case UFBX_ANIM_MATERIAL: ufbxt_assert(anim_prop->index < scene->materials.size); break;
	case UFBX_ANIM_BONE: ufbxt_assert(anim_prop->index < scene->bones.size); break;
	case UFBX_ANIM_ANIM_LAYER: ufbxt_assert(anim_prop->index < scene->anim_layers.size); break;
	default: ufbxt_assert(0 && "Bad anim target"); break;
	}
}

void ufbxt_check_scene(ufbx_scene *scene)
{
	ufbxt_check_string(scene->metadata.creator);

	for (size_t i = 0; i < scene->nodes.size; i++) {
		ufbxt_check_node(scene, scene->nodes.data[i]);
	}
	for (size_t i = 0; i < scene->meshes.size; i++) {
		ufbxt_check_mesh(scene, &scene->meshes.data[i]);
	}
	for (size_t i = 0; i < scene->materials.size; i++) {
		ufbxt_check_material(scene, &scene->materials.data[i]);
	}
	for (size_t i = 0; i < scene->anim_stacks.size; i++) {
		ufbxt_check_anim_stack(scene, &scene->anim_stacks.data[i]);
	}
	for (size_t i = 0; i < scene->anim_layers.size; i++) {
		ufbxt_check_anim_layer(scene, &scene->anim_layers.data[i]);
	}
	for (size_t i = 0; i < scene->anim_props.size; i++) {
		ufbxt_check_anim_prop(scene, &scene->anim_props.data[i]);
	}
}

static uint32_t g_file_version = 0;
static const char *g_file_type = NULL;
static bool g_fuzz = false;
static bool g_all_byte_values = false;
static bool g_dedicated_allocs = false;
static bool g_no_patch = false;
static int g_patch_start = 0;
static size_t g_fuzz_step = 0;

const char *g_fuzz_test_name = NULL;

static bool ufbxt_begin_fuzz()
{
	if (g_fuzz) {
		if (!g_skip_print_ok) {
			printf("FUZZ\n");
			g_skip_print_ok = true;
		}
		return true;
	} else {
		return false;
	}
}

int ufbxt_test_fuzz(void *data, size_t size, size_t step, int offset, size_t temp_limit, size_t result_limit, size_t truncate_length)
{
	if (g_fuzz_step && step != g_fuzz_step) return 1;

	t_jmp_buf = (jmp_buf*)calloc(1, sizeof(jmp_buf));
	int ret = 1;
	if (!setjmp(*t_jmp_buf)) {

		ufbx_load_opts opts = { 0 };
		opts.max_temp_allocs = temp_limit;
		opts.max_result_allocs = result_limit;

		if (g_dedicated_allocs) {
opts.temp_huge_size = 1; opts.result_huge_size = 1; } if (truncate_length > 0) size = truncate_length; ufbx_error error; ufbx_scene *scene = ufbx_load_memory(data, size, &opts, &error); if (scene) { ufbxt_check_scene(scene); ufbx_free_scene(scene); ufbxt_assert(temp_limit == 0); ufbxt_assert(result_limit == 0); } else { // Collect hit checks for (size_t i = 0; i < error.stack_size; i++) { ufbx_error_frame frame = error.stack[i]; ufbxt_check_line *check = &g_checks[frame.source_line]; if (check->test_name && strcmp(g_fuzz_test_name, check->test_name)) continue; if ((uint32_t)offset > check->patch_offset - 1) continue; #pragma omp critical(check) { bool ok = (uint32_t)offset <= check->patch_offset - 1; if (check->test_name && strcmp(g_fuzz_test_name, check->test_name)) ok = false; if (ok) { if (!check->test_name) { size_t name_len = strlen(g_fuzz_test_name) + 1; check->test_name = (char*)malloc(name_len); if (check->test_name) { memcpy(check->test_name, g_fuzz_test_name, name_len); } } if (offset < 0) { check->patch_offset = UINT32_MAX; check->patch_value = 0; } else { check->patch_offset = offset + 1; check->patch_value = ((uint8_t*)data)[offset]; } check->temp_limit = (uint32_t)temp_limit; check->result_limit = (uint32_t)result_limit; check->truncate_length = (uint32_t)truncate_length; check->description = frame.description; } } } } } else { ret = 0; } free(t_jmp_buf); t_jmp_buf = NULL; return ret; } typedef struct { const char *name; int32_t patch_offset; uint8_t patch_value; uint32_t temp_limit; uint32_t result_limit; uint32_t truncate_length; const char *description; } ufbxt_fuzz_check; // Generated by running `runner --fuzz` static const ufbxt_fuzz_check g_fuzz_checks[] = { { "blender_279_ball_6100_ascii", 18422, 82, 0, 0, 0, "ufbxi_read_truncated_array(uc, &mesh->face_smoothing, n..." }, { "blender_279_ball_6100_ascii", 18755, 76, 0, 0, 0, "ufbxi_read_truncated_array(uc, &mesh->face_material, n,..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 0, 17, "header" }, { "blender_279_default_6100_ascii", -1, 0, 0, 0, 17, "uc->read_fn" }, { "blender_279_default_6100_ascii", -1, 0, 0, 1, 0, "defs" }, { "blender_279_default_6100_ascii", -1, 0, 0, 16, 0, "dst" }, { "blender_279_default_6100_ascii", -1, 0, 0, 16, 0, "name" }, { "blender_279_default_6100_ascii", -1, 0, 0, 160, 0, "v" }, { "blender_279_default_6100_ascii", -1, 0, 0, 17, 0, "name" }, { "blender_279_default_6100_ascii", -1, 0, 0, 256, 0, "v" }, { "blender_279_default_6100_ascii", -1, 0, 0, 262, 0, "mesh->faces" }, { "blender_279_default_6100_ascii", -1, 0, 0, 3, 0, "defs->props" }, { "blender_279_default_6100_ascii", -1, 0, 0, 301, 0, "dst->props" }, { "blender_279_default_6100_ascii", -1, 0, 0, 302, 0, "ufbxi_merge_properties(uc, &material->props, object->pr..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 324, 0, "ufbxi_push_string_place_str(uc, &layer->name)" }, { "blender_279_default_6100_ascii", -1, 0, 0, 336, 0, "curve->keyframes.data" }, { "blender_279_default_6100_ascii", -1, 0, 0, 352, 0, "arr->data" }, { "blender_279_default_6100_ascii", -1, 0, 0, 352, 0, "ufbxi_retain_array(uc, sizeof(ufbx_model), &uc->scene.m..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 353, 0, "ufbxi_retain_array(uc, sizeof(ufbx_mesh), &uc->scene.me..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 354, 0, "ufbxi_retain_array(uc, sizeof(ufbx_material), &uc->scen..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 355, 0, "ufbxi_retain_array(uc, sizeof(ufbx_light), &uc->scene.l..." 
}, { "blender_279_default_6100_ascii", -1, 0, 0, 356, 0, "ufbxi_retain_array(uc, sizeof(ufbx_anim_stack), &uc->sc..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 357, 0, "ufbxi_retain_array(uc, sizeof(ufbx_anim_layer), &uc->sc..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 358, 0, "ufbxi_retain_array(uc, sizeof(ufbx_anim_prop), &uc->sce..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 359, 0, "ufbxi_retain_array(uc, sizeof(ufbx_anim_curve), &uc->sc..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 360, 0, "zero_indices && consecutive_indices" }, { "blender_279_default_6100_ascii", -1, 0, 0, 362, 0, "ufbxi_merge_attribute_properties(uc, &parent.node->prop..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 362, 0, "ufbxi_merge_properties(uc, props, props, attr, &uc->res..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 363, 0, "stack->layers.data" }, { "blender_279_default_6100_ascii", -1, 0, 0, 364, 0, "layer->props.data" }, { "blender_279_default_6100_ascii", -1, 0, 0, 364, 0, "mesh->faces" }, { "blender_279_default_6100_ascii", -1, 0, 0, 365, 0, "mesh->materials.data" }, { "blender_279_default_6100_ascii", -1, 0, 0, 366, 0, "nodes" }, { "blender_279_default_6100_ascii", -1, 0, 0, 367, 0, "node->children.data" }, { "blender_279_default_6100_ascii", -1, 0, 0, 367, 0, "ufbxi_collect_nodes(uc, sizeof(ufbx_model), &nodes, uc-..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 40, 0, "ufbxi_push_string_place_str(uc, name)" }, { "blender_279_default_6100_ascii", -1, 0, 0, 434, 0, "dst->props" }, { "blender_279_default_6100_ascii", -1, 0, 0, 435, 0, "ufbxi_merge_properties(uc, &material->props, object->pr..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 475, 0, "ufbxi_push_string_place_str(uc, &layer->name)" }, { "blender_279_default_6100_ascii", -1, 0, 0, 480, 0, "curve->keyframes.data" }, { "blender_279_default_6100_ascii", -1, 0, 0, 504, 0, "ufbxi_retain_array(uc, sizeof(ufbx_model), &uc->scene.m..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 505, 0, "ufbxi_retain_array(uc, sizeof(ufbx_mesh), &uc->scene.me..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 506, 0, "ufbxi_retain_array(uc, sizeof(ufbx_material), &uc->scen..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 507, 0, "ufbxi_retain_array(uc, sizeof(ufbx_light), &uc->scene.l..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 508, 0, "ufbxi_retain_array(uc, sizeof(ufbx_anim_stack), &uc->sc..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 509, 0, "ufbxi_retain_array(uc, sizeof(ufbx_anim_layer), &uc->sc..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 510, 0, "ufbxi_retain_array(uc, sizeof(ufbx_anim_prop), &uc->sce..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 512, 0, "arr->data" }, { "blender_279_default_6100_ascii", -1, 0, 0, 512, 0, "ufbxi_retain_array(uc, sizeof(ufbx_anim_curve), &uc->sc..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 513, 0, "zero_indices && consecutive_indices" }, { "blender_279_default_6100_ascii", -1, 0, 0, 517, 0, "ufbxi_merge_attribute_properties(uc, &parent.node->prop..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 517, 0, "ufbxi_merge_properties(uc, props, props, attr, &uc->res..." 
}, { "blender_279_default_6100_ascii", -1, 0, 0, 519, 0, "stack->layers.data" }, { "blender_279_default_6100_ascii", -1, 0, 0, 520, 0, "layer->props.data" }, { "blender_279_default_6100_ascii", -1, 0, 0, 522, 0, "mesh->materials.data" }, { "blender_279_default_6100_ascii", -1, 0, 0, 524, 0, "nodes" }, { "blender_279_default_6100_ascii", -1, 0, 0, 526, 0, "node->children.data" }, { "blender_279_default_6100_ascii", -1, 0, 0, 526, 0, "ufbxi_collect_nodes(uc, sizeof(ufbx_model), &nodes, uc-..." }, { "blender_279_default_6100_ascii", -1, 0, 0, 528, 0, "imp" }, { "blender_279_default_6100_ascii", -1, 0, 0, 62, 0, "ufbxi_push_string_place_str(uc, name)" }, { "blender_279_default_6100_ascii", -1, 0, 1, 0, 0, "ufbxi_load_strings(uc)" }, { "blender_279_default_6100_ascii", -1, 0, 1, 0, 0, "ufbxi_map_grow(&uc->string_map, ufbx_string, 16)" }, { "blender_279_default_6100_ascii", -1, 0, 1, 0, 0, "ufbxi_push_string_imp(uc, str->data, str->length, false..." }, { "blender_279_default_6100_ascii", -1, 0, 1093, 0, 0, "light" }, { "blender_279_default_6100_ascii", -1, 0, 11, 0, 0, "prop->name.data" }, { "blender_279_default_6100_ascii", -1, 0, 11, 0, 0, "ufbxi_load_default_props(uc)" }, { "blender_279_default_6100_ascii", -1, 0, 1252, 0, 0, "arr" }, { "blender_279_default_6100_ascii", -1, 0, 13, 0, 0, "ufbxi_ascii_next_token(uc, &uc->ascii.token)" }, { "blender_279_default_6100_ascii", -1, 0, 13, 0, 0, "ufbxi_ascii_push_token_char(uc, token, c)" }, { "blender_279_default_6100_ascii", -1, 0, 13, 0, 0, "ufbxi_begin_parse(uc)" }, { "blender_279_default_6100_ascii", -1, 0, 1382, 0, 0, "mesh" }, { "blender_279_default_6100_ascii", -1, 0, 1384, 0, 0, "mesh" }, { "blender_279_default_6100_ascii", -1, 0, 1386, 0, 0, "conn" }, { "blender_279_default_6100_ascii", -1, 0, 1386, 0, 0, "ufbxi_add_connection(uc, object->id, geom_obj.id, NULL)" }, { "blender_279_default_6100_ascii", -1, 0, 14, 0, 0, "model" }, { "blender_279_default_6100_ascii", -1, 0, 1437, 0, 0, "light" }, { "blender_279_default_6100_ascii", -1, 0, 1596, 0, 0, "arr" }, { "blender_279_default_6100_ascii", -1, 0, 16, 0, 0, "node" }, { "blender_279_default_6100_ascii", -1, 0, 17, 0, 0, "ator->allocs_left > 1" }, { "blender_279_default_6100_ascii", -1, 0, 1726, 0, 0, "mesh" }, { "blender_279_default_6100_ascii", -1, 0, 1728, 0, 0, "mesh" }, { "blender_279_default_6100_ascii", -1, 0, 1730, 0, 0, "conn" }, { "blender_279_default_6100_ascii", -1, 0, 1730, 0, 0, "ufbxi_add_connection(uc, object->id, geom_obj.id, NULL)" }, { "blender_279_default_6100_ascii", -1, 0, 18, 0, 0, "ufbxi_grow_array(&uc->ator_tmp, &uc->top_nodes, &uc->to..." }, { "blender_279_default_6100_ascii", -1, 0, 19, 0, 0, "ufbxi_ascii_push_token_char(uc, token, c)" }, { "blender_279_default_6100_ascii", -1, 0, 19, 0, 0, "ufbxi_grow_array(&uc->ator_tmp, &token->str_data, &toke..." 
}, { "blender_279_default_6100_ascii", -1, 0, 272, 0, 0, "prop" }, { "blender_279_default_6100_ascii", -1, 0, 32, 0, 0, "ator->allocs_left > 1" }, { "blender_279_default_6100_ascii", -1, 0, 32, 0, 0, "node->vals" }, { "blender_279_default_6100_ascii", -1, 0, 329, 0, 0, "props->props" }, { "blender_279_default_6100_ascii", -1, 0, 331, 0, 0, "dst->props" }, { "blender_279_default_6100_ascii", -1, 0, 331, 0, 0, "ufbxi_merge_properties(uc, dst, &left, &right, buf)" }, { "blender_279_default_6100_ascii", -1, 0, 331, 0, 0, "ufbxi_sort_properties(uc, &left, &uc->tmp_sort)" }, { "blender_279_default_6100_ascii", -1, 0, 331, 0, 0, "ufbxi_sort_properties(uc, props, buf)" }, { "blender_279_default_6100_ascii", -1, 0, 333, 0, 0, "ufbxi_sort_properties(uc, &right, &uc->tmp_sort)" }, { "blender_279_default_6100_ascii", -1, 0, 334, 0, 0, "model" }, { "blender_279_default_6100_ascii", -1, 0, 416, 0, 0, "prop" }, { "blender_279_default_6100_ascii", -1, 0, 4613, 0, 0, "material" }, { "blender_279_default_6100_ascii", -1, 0, 4613, 0, 0, "ufbxi_read_material(uc, node, &object)" }, { "blender_279_default_6100_ascii", -1, 0, 4644, 0, 0, "str" }, { "blender_279_default_6100_ascii", -1, 0, 4644, 0, 0, "ufbxi_push_string_place_str(uc, &v->s)" }, { "blender_279_default_6100_ascii", -1, 0, 4678, 0, 0, "node->children" }, { "blender_279_default_6100_ascii", -1, 0, 4688, 0, 0, "ufbxi_add_connection(uc, parent_id, child_id, prop)" }, { "blender_279_default_6100_ascii", -1, 0, 4721, 0, 0, "v" }, { "blender_279_default_6100_ascii", -1, 0, 4722, 0, 0, "v" }, { "blender_279_default_6100_ascii", -1, 0, 4723, 0, 0, "v" }, { "blender_279_default_6100_ascii", -1, 0, 4724, 0, 0, "arr_data" }, { "blender_279_default_6100_ascii", -1, 0, 48, 0, 0, "node->children" }, { "blender_279_default_6100_ascii", -1, 0, 49, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_MODEL, root..." }, { "blender_279_default_6100_ascii", -1, 0, 49, 0, 0, "ufbxi_map_grow(&uc->connectable_map, ufbxi_connectable,..." 
}, { "blender_279_default_6100_ascii", -1, 0, 4965, 0, 0, "material" }, { "blender_279_default_6100_ascii", -1, 0, 4965, 0, 0, "ufbxi_read_material(uc, node, &object)" }, { "blender_279_default_6100_ascii", -1, 0, 4996, 0, 0, "str" }, { "blender_279_default_6100_ascii", -1, 0, 4996, 0, 0, "ufbxi_push_string_place_str(uc, &v->s)" }, { "blender_279_default_6100_ascii", -1, 0, 50, 0, 0, "data" }, { "blender_279_default_6100_ascii", -1, 0, 5042, 0, 0, "node->children" }, { "blender_279_default_6100_ascii", -1, 0, 5056, 0, 0, "ufbxi_add_connection(uc, parent_id, child_id, prop)" }, { "blender_279_default_6100_ascii", -1, 0, 5107, 0, 0, "v" }, { "blender_279_default_6100_ascii", -1, 0, 5108, 0, 0, "v" }, { "blender_279_default_6100_ascii", -1, 0, 5109, 0, 0, "v" }, { "blender_279_default_6100_ascii", -1, 0, 5110, 0, 0, "arr_data" }, { "blender_279_default_6100_ascii", -1, 0, 5232, 0, 0, "stack" }, { "blender_279_default_6100_ascii", -1, 0, 5234, 0, 0, "layer" }, { "blender_279_default_6100_ascii", -1, 0, 5236, 0, 0, "ufbxi_add_connection(uc, stack_id, layer_id, NULL)" }, { "blender_279_default_6100_ascii", -1, 0, 5237, 0, 0, "prop" }, { "blender_279_default_6100_ascii", -1, 0, 5239, 0, 0, "ufbxi_add_connection(uc, layer_id, id, name)" }, { "blender_279_default_6100_ascii", -1, 0, 5240, 0, 0, "ufbxi_add_connection(uc, node_id, id, name)" }, { "blender_279_default_6100_ascii", -1, 0, 5264, 0, 0, "curve" }, { "blender_279_default_6100_ascii", -1, 0, 5265, 0, 0, "ufbxi_add_connection(uc, parent_id, id, name)" }, { "blender_279_default_6100_ascii", -1, 0, 5320, 0, 0, "conns" }, { "blender_279_default_6100_ascii", -1, 0, 5320, 0, 0, "ufbxi_finalize_scene(uc)" }, { "blender_279_default_6100_ascii", -1, 0, 54, 0, 0, "ator->allocs_left > 1" }, { "blender_279_default_6100_ascii", -1, 0, 556, 0, 0, "props->props" }, { "blender_279_default_6100_ascii", -1, 0, 560, 0, 0, "dst->props" }, { "blender_279_default_6100_ascii", -1, 0, 560, 0, 0, "ufbxi_merge_properties(uc, dst, &left, &right, buf)" }, { "blender_279_default_6100_ascii", -1, 0, 560, 0, 0, "ufbxi_sort_properties(uc, &left, &uc->tmp_sort)" }, { "blender_279_default_6100_ascii", -1, 0, 560, 0, 0, "ufbxi_sort_properties(uc, props, buf)" }, { "blender_279_default_6100_ascii", -1, 0, 561, 0, 0, "ufbxi_sort_properties(uc, &right, &uc->tmp_sort)" }, { "blender_279_default_6100_ascii", -1, 0, 563, 0, 0, "model" }, { "blender_279_default_6100_ascii", -1, 0, 5667, 0, 0, "stack" }, { "blender_279_default_6100_ascii", -1, 0, 5669, 0, 0, "layer" }, { "blender_279_default_6100_ascii", -1, 0, 5671, 0, 0, "ufbxi_add_connection(uc, stack_id, layer_id, NULL)" }, { "blender_279_default_6100_ascii", -1, 0, 5680, 0, 0, "curve" }, { "blender_279_default_6100_ascii", -1, 0, 5681, 0, 0, "ufbxi_add_connection(uc, parent_id, id, name)" }, { "blender_279_default_6100_ascii", -1, 0, 5691, 0, 0, "prop" }, { "blender_279_default_6100_ascii", -1, 0, 5693, 0, 0, "ufbxi_add_connection(uc, layer_id, id, name)" }, { "blender_279_default_6100_ascii", -1, 0, 5696, 0, 0, "ufbxi_add_connection(uc, node_id, id, name)" }, { "blender_279_default_6100_ascii", -1, 0, 5835, 0, 0, "conns" }, { "blender_279_default_6100_ascii", -1, 0, 5835, 0, 0, "ufbxi_finalize_scene(uc)" }, { "blender_279_default_6100_ascii", -1, 0, 64, 0, 0, "node->children" }, { "blender_279_default_6100_ascii", -1, 0, 64, 0, 0, "node->vals" }, { "blender_279_default_6100_ascii", -1, 0, 65, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_MODEL, root..." 
}, { "blender_279_default_6100_ascii", -1, 0, 65, 0, 0, "ufbxi_map_grow(&uc->connectable_map, ufbxi_connectable,..." }, { "blender_279_default_6100_ascii", -1, 0, 66, 0, 0, "data" }, { "blender_279_default_6100_ascii", -1, 0, 70, 0, 0, "ator->allocs_left > 1" }, { "blender_279_default_6100_ascii", -1, 0, 9, 0, 0, "ufbxi_load_maps(uc)" }, { "blender_279_default_6100_ascii", -1, 0, 9, 0, 0, "ufbxi_map_grow(&uc->prop_type_map, ufbxi_prop_type_name..." }, { "blender_279_default_6100_ascii", 0, 255, 0, 0, 0, "ufbxi_ascii_accept(uc, UFBXI_ASCII_NAME)" }, { "blender_279_default_6100_ascii", 0, 255, 0, 0, 0, "ufbxi_ascii_parse_node(uc, 0, UFBXI_PARSE_ROOT, &end, &..." }, { "blender_279_default_6100_ascii", 0, 255, 0, 0, 0, "ufbxi_parse_toplevel(uc, ufbxi_FBXHeaderExtension)" }, { "blender_279_default_6100_ascii", 0, 255, 0, 0, 0, "ufbxi_read_root(uc)" }, { "blender_279_default_6100_ascii", 1011, 0, 0, 0, 0, "ufbxi_check_string(*type)" }, { "blender_279_default_6100_ascii", 1011, 0, 0, 0, 0, "ufbxi_split_type_and_name(uc, type_and_name, &type_str,..." }, { "blender_279_default_6100_ascii", 1106, 0, 0, 0, 0, "ufbxi_get_val2(node, \"SC\", &prop->name, (char**)&type..." }, { "blender_279_default_6100_ascii", 1106, 0, 0, 0, 0, "ufbxi_read_properties(uc, node, &object.props, &uc->res..." }, { "blender_279_default_6100_ascii", 1106, 0, 0, 0, 0, "ufbxi_read_property(uc, prop_node, prop, version)" }, { "blender_279_default_6100_ascii", 1266, 43, 0, 0, 0, "end == token->str_data + token->str_len - 1" }, { "blender_279_default_6100_ascii", 180, 255, 0, 0, 0, "ufbxi_ascii_parse_node(uc, 0, state, p_end, buf, true)" }, { "blender_279_default_6100_ascii", 180, 255, 0, 0, 0, "ufbxi_parse_toplevel_child_imp(uc, state, &uc->tmp_pars..." }, { "blender_279_default_6100_ascii", 180, 255, 0, 0, 0, "ufbxi_parse_toplevel_child(uc, &child)" }, { "blender_279_default_6100_ascii", 180, 255, 0, 0, 0, "ufbxi_read_header_extension(uc)" }, { "blender_279_default_6100_ascii", 20623, 79, 0, 0, 0, "ufbxi_read_geometry(uc, node, &geom_obj)" }, { "blender_279_default_6100_ascii", 20623, 79, 0, 0, 0, "ufbxi_read_model(uc, node, &object)" }, { "blender_279_default_6100_ascii", 20623, 79, 0, 0, 0, "vertices && indices" }, { "blender_279_default_6100_ascii", 20654, 56, 0, 0, 0, "ix < mesh->num_vertices" }, { "blender_279_default_6100_ascii", 20694, 46, 0, 0, 0, "index_data[mesh->num_indices - 1] < 0" }, { "blender_279_default_6100_ascii", 20782, 76, 0, 0, 0, "ufbxi_find_val1(node, ufbxi_MappingInformationType, \"C..." }, { "blender_279_default_6100_ascii", 20782, 76, 0, 0, 0, "ufbxi_read_vertex_element(uc, mesh, n, &mesh->vertex_no..." }, { "blender_279_default_6100_ascii", 20807, 255, 0, 0, 0, "Invalid mapping" }, { "blender_279_default_6100_ascii", 20867, 77, 0, 0, 0, "data" }, { "blender_279_default_6100_ascii", 21690, 76, 0, 0, 0, "ufbxi_find_val1(n, ufbxi_MappingInformationType, \"C\",..." }, { "blender_279_default_6100_ascii", 21774, 76, 0, 0, 0, "arr && arr->size >= 1" }, { "blender_279_default_6100_ascii", 251, 0, 0, 0, 0, "depth == 0" }, { "blender_279_default_6100_ascii", 251, 255, 0, 0, 0, "ufbxi_ascii_parse_node(uc, depth + 1, parse_state, &end..." 
}, { "blender_279_default_6100_ascii", 382, 0, 0, 0, 0, "c != '\\0'" }, { "blender_279_default_6100_ascii", 382, 0, 0, 0, 0, "ufbxi_ascii_next_token(uc, &ua->token)" }, { "blender_279_default_6100_ascii", 415, 255, 0, 0, 0, "end == token->str_data + token->str_len - 1" }, { "blender_279_default_6100_ascii", 454, 255, 0, 0, 0, "ufbxi_parse_toplevel(uc, ufbxi_Definitions)" }, { "blender_279_default_6100_ascii", 645, 255, 0, 0, 0, "ufbxi_parse_toplevel_child(uc, &object)" }, { "blender_279_default_6100_ascii", 645, 255, 0, 0, 0, "ufbxi_read_definitions(uc)" }, { "blender_279_default_6100_ascii", 73300, 255, 0, 0, 0, "ufbxi_parse_toplevel(uc, ufbxi_Connections)" }, { "blender_279_default_6100_ascii", 73408, 255, 0, 0, 0, "ufbxi_parse_toplevel_child_imp(uc, state, &uc->tmp, &en..." }, { "blender_279_default_6100_ascii", 74083, 255, 0, 0, 0, "ufbxi_parse_toplevel_child(uc, &node)" }, { "blender_279_default_6100_ascii", 74083, 255, 0, 0, 0, "ufbxi_read_connections(uc)" }, { "blender_279_default_6100_ascii", 74100, 80, 0, 0, 0, "ufbxi_get_val_at(node, 3, 'C', (char**)&prop)" }, { "blender_279_default_6100_ascii", 74285, 255, 0, 0, 0, "ufbxi_parse_toplevel(uc, ufbxi_Takes)" }, { "blender_279_default_6100_ascii", 74380, 255, 0, 0, 0, "ufbxi_parse_toplevel_child(uc, &node)" }, { "blender_279_default_6100_ascii", 74380, 255, 0, 0, 0, "ufbxi_read_takes(uc)" }, { "blender_279_default_6100_ascii", 74417, 255, 0, 0, 0, "ufbxi_get_val1(node, \"S\", &stack->name)" }, { "blender_279_default_6100_ascii", 74417, 255, 0, 0, 0, "ufbxi_read_take(uc, node)" }, { "blender_279_default_6100_ascii", 74431, 255, 0, 0, 0, "ufbxi_find_val2(node, ufbxi_ReferenceTime, \"LL\", &beg..." }, { "blender_279_default_6100_ascii", 74694, 0, 0, 0, 0, "ufbxi_get_val1(child, \"C\", (char**)&old_name)" }, { "blender_279_default_6100_ascii", 74694, 0, 0, 0, 0, "ufbxi_read_take_object(uc, child, layer_id)" }, { "blender_279_default_6100_ascii", 74694, 0, 0, 0, 0, "ufbxi_read_take_prop_channel(uc, child, node_id, layer_..." }, { "blender_279_default_6100_ascii", 74781, 74, 0, 0, 0, "ufbxi_find_val1(node, ufbxi_KeyCount, \"Z\", &num_keys)" }, { "blender_279_default_6100_ascii", 74781, 74, 0, 0, 0, "ufbxi_read_take_anim_channel(uc, channel_nodes[i], id, ..." }, { "blender_279_default_6100_ascii", 74781, 74, 0, 0, 0, "ufbxi_read_take_prop_channel(uc, child, node_id, layer_..." 
}, { "blender_279_default_6100_ascii", 74791, 48, 0, 0, 0, "data == data_end" }, { "blender_279_default_6100_ascii", 74791, 50, 0, 0, 0, "data_end - data >= 2" }, { "blender_279_default_6100_ascii", 74843, 75, 0, 0, 0, "Unknown key mode" }, { "blender_279_default_6100_ascii", 893, 255, 0, 0, 0, "ufbxi_parse_toplevel(uc, ufbxi_Objects)" }, { "blender_279_default_6100_ascii", 997, 255, 0, 0, 0, "ufbxi_parse_toplevel_child(uc, &node)" }, { "blender_279_default_6100_ascii", 997, 255, 0, 0, 0, "ufbxi_read_objects(uc)" }, { "blender_279_default_7400_binary", -1, 0, 0, 0, 21089, "data" }, { "blender_279_default_7400_binary", -1, 0, 0, 0, 21265, "uc->read_fn" }, { "blender_279_default_7400_binary", -1, 0, 0, 0, 21265, "ufbxi_read_to(uc, decoded_data, encoded_size)" }, { "blender_279_default_7400_binary", -1, 0, 0, 0, 21297, "uc->read_fn" }, { "blender_279_default_7400_binary", -1, 0, 0, 0, 21297, "ufbxi_read_to(uc, decoded_data, encoded_size)" }, { "blender_279_default_7400_binary", -1, 0, 0, 0, 33, "header" }, { "blender_279_default_7400_binary", -1, 0, 0, 0, 49, "name" }, { "blender_279_default_7400_binary", -1, 0, 0, 0, 81, "name" }, { "blender_279_default_7400_binary", -1, 0, 0, 16, 0, "name" }, { "blender_279_default_7400_binary", -1, 0, 0, 214, 0, "edges" }, { "blender_279_default_7400_binary", -1, 0, 0, 239, 0, "ufbxi_merge_attribute_properties(uc, &parent.node->prop..." }, { "blender_279_default_7400_binary", -1, 0, 0, 243, 0, "ufbxi_merge_properties(uc, &node->props, node->props.de..." }, { "blender_279_default_7400_binary", -1, 0, 0, 32, 0, "name" }, { "blender_279_default_7400_binary", -1, 0, 0, 387, 0, "edges" }, { "blender_279_default_7400_binary", -1, 0, 0, 439, 0, "ufbxi_merge_properties(uc, &node->props, node->props.de..." }, { "blender_279_default_7400_binary", -1, 0, 1004, 0, 0, "ufbxi_grow_array(&uc->ator_tmp, &uc->read_buffer, &uc->..." }, { "blender_279_default_7400_binary", -1, 0, 1008, 0, 0, "arr" }, { "blender_279_default_7400_binary", -1, 0, 1009, 0, 0, "arr_data" }, { "blender_279_default_7400_binary", -1, 0, 1009, 0, 0, "data" }, { "blender_279_default_7400_binary", -1, 0, 1221, 0, 0, "uc->attributes" }, { "blender_279_default_7400_binary", -1, 0, 1244, 0, 0, "uc->geometries" }, { "blender_279_default_7400_binary", -1, 0, 1245, 0, 0, "ufbxi_merge_properties(uc, props, props->defaults, prop..." }, { "blender_279_default_7400_binary", -1, 0, 1345, 0, 0, "uc->templates" }, { "blender_279_default_7400_binary", -1, 0, 1385, 0, 0, "attr" }, { "blender_279_default_7400_binary", -1, 0, 1385, 0, 0, "ufbxi_read_node_attribute(uc, node, &object)" }, { "blender_279_default_7400_binary", -1, 0, 1492, 0, 0, "ufbxi_grow_array(&uc->ator_tmp, &uc->read_buffer, &uc->..." }, { "blender_279_default_7400_binary", -1, 0, 1497, 0, 0, "arr_data" }, { "blender_279_default_7400_binary", -1, 0, 1497, 0, 0, "data" }, { "blender_279_default_7400_binary", -1, 0, 1522, 0, 0, "arr" }, { "blender_279_default_7400_binary", -1, 0, 158, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_MODEL, root..." }, { "blender_279_default_7400_binary", -1, 0, 16, 0, 0, "node" }, { "blender_279_default_7400_binary", -1, 0, 1715, 0, 0, "uc->attributes" }, { "blender_279_default_7400_binary", -1, 0, 1745, 0, 0, "uc->geometries" }, { "blender_279_default_7400_binary", -1, 0, 1746, 0, 0, "ufbxi_merge_properties(uc, props, props->defaults, prop..." }, { "blender_279_default_7400_binary", -1, 0, 1748, 0, 0, "ufbxi_merge_attribute_properties(uc, &parent.node->prop..." 
}, { "blender_279_default_7400_binary", -1, 0, 220, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_MODEL, root..." }, { "blender_279_default_7400_binary", -1, 0, 32, 0, 0, "node" }, { "blender_279_default_7400_binary", -1, 0, 32, 0, 0, "vals" }, { "blender_279_default_7400_binary", -1, 0, 397, 0, 0, "tmpl" }, { "blender_279_default_7400_binary", -1, 0, 48, 0, 0, "vals" }, { "blender_279_default_7400_binary", -1, 0, 646, 0, 0, "tmpl" }, { "blender_279_default_7400_binary", -1, 0, 66, 0, 0, "node->children" }, { "blender_279_default_7400_binary", -1, 0, 84, 0, 0, "node->children" }, { "blender_279_default_7400_binary", -1, 0, 859, 0, 0, "uc->templates" }, { "blender_279_default_7400_binary", -1, 0, 898, 0, 0, "attr" }, { "blender_279_default_7400_binary", -1, 0, 898, 0, 0, "ufbxi_read_node_attribute(uc, node, &object)" }, { "blender_279_default_7400_binary", 18187, 0, 0, 0, 0, "ufbxi_check_string(*name)" }, { "blender_279_default_7400_binary", 18278, 0, 0, 0, 0, "ufbxi_read_properties(uc, node, &object.props, &uc->tmp..." }, { "blender_279_default_7400_binary", 21028, 255, 0, 0, 0, "ufbxi_read_geometry(uc, node, &object)" }, { "blender_279_default_7400_binary", 21081, 255, 0, 0, 0, "Bad multivalue array type" }, { "blender_279_default_7400_binary", 21081, 255, 0, 0, 0, "ufbxi_binary_parse_multivalue_array(uc, dst_type, arr_d..." }, { "blender_279_default_7400_binary", 21081, 99, 0, 0, 0, "res == (ptrdiff_t)decoded_data_size" }, { "blender_279_default_7400_binary", 21085, 255, 0, 0, 0, "size <= uc->opts.max_array_size" }, { "blender_279_default_7400_binary", 21086, 0, 0, 0, 0, "encoded_size == decoded_data_size" }, { "blender_279_default_7400_binary", 21086, 255, 0, 0, 0, "Bad array encoding" }, { "blender_279_default_7400_binary", 21209, 255, 0, 0, 0, "Bad multivalue array type" }, { "blender_279_default_7400_binary", 21349, 255, 0, 0, 0, "index_ix >= 0 && (size_t)index_ix < mesh->num_indices" }, { "blender_279_default_7400_binary", 24, 255, 0, 0, 0, "num_values64 <= (uint64_t)uc->opts.max_node_values" }, { "blender_279_default_7400_binary", 24, 255, 0, 0, 0, "ufbxi_binary_parse_node(uc, 0, UFBXI_PARSE_ROOT, &end, ..." }, { "blender_279_default_7400_binary", 25664, 255, 0, 0, 0, "data" }, { "blender_279_default_7400_binary", 31, 255, 0, 0, 0, "Bad values type" }, { "blender_279_default_7400_binary", 3210, 1, 0, 0, 0, "ufbxi_parse_toplevel_child(uc, &child)" }, { "blender_279_default_7400_binary", 3210, 1, 0, 0, 0, "ufbxi_read_document(uc)" }, { "blender_279_default_7400_binary", 331, 0, 0, 0, 0, "str || length == 0" }, { "blender_279_default_7400_binary", 331, 0, 0, 0, 0, "ufbxi_push_string_place_str(uc, &vals[i].s)" }, { "blender_279_default_7400_binary", 35, 255, 0, 0, 0, "ufbxi_binary_parse_node(uc, 0, state, p_end, buf, true)" }, { "blender_279_default_7400_binary", 355, 255, 0, 0, 0, "ufbxi_skip_bytes(uc, encoded_size)" }, { "blender_279_default_7400_binary", 36, 255, 0, 0, 0, "ufbxi_read_bytes(uc, (size_t)to_skip)" }, { "blender_279_default_7400_binary", 36, 255, 0, 0, 0, "ufbxi_skip_bytes(uc, values_end_offset - offset)" }, { "blender_279_default_7400_binary", 3698, 0, 0, 0, 0, "ufbxi_get_val1(object, \"C\", (char**)&tmpl->type)" }, { "blender_279_default_7400_binary", 3762, 0, 0, 0, 0, "ufbxi_get_val1(props, \"S\", &tmpl->sub_type)" }, { "blender_279_default_7400_binary", 3830, 0, 0, 0, 0, "ufbxi_read_properties(uc, props, &tmpl->props, &uc->tmp..." }, { "blender_279_default_7400_binary", 3868, 0, 0, 0, 0, "ufbxi_get_val_at(node, val_ix++, 'C', (char**)&subtype_..." 
}, { "blender_279_default_7400_binary", 58, 0, 0, 0, 0, "ufbxi_parse_toplevel(uc, ufbxi_Documents)" }, { "blender_279_default_7400_binary", 58, 255, 0, 0, 0, "current_offset == end_offset" }, { "blender_279_default_7400_binary", 66, 0, 0, 0, 0, "offset <= values_end_offset" }, { "blender_279_default_7400_binary", 70, 0, 0, 0, 0, "ufbxi_binary_parse_node(uc, depth + 1, parse_state, &en..." }, { "blender_279_sausage_6100_ascii", -1, 0, 0, 7147, 0, "ufbxi_retain_array(uc, sizeof(ufbx_bone), &uc->scene.bo..." }, { "blender_279_sausage_6100_ascii", -1, 0, 0, 7168, 0, "ufbxi_collect_nodes(uc, sizeof(ufbx_bone), &nodes, uc->..." }, { "blender_279_sausage_6100_ascii", -1, 0, 0, 7169, 0, "skins" }, { "blender_279_sausage_6100_ascii", -1, 0, 0, 9620, 0, "ufbxi_retain_array(uc, sizeof(ufbx_bone), &uc->scene.bo..." }, { "blender_279_sausage_6100_ascii", -1, 0, 0, 9656, 0, "ufbxi_collect_nodes(uc, sizeof(ufbx_bone), &nodes, uc->..." }, { "blender_279_sausage_6100_ascii", -1, 0, 0, 9661, 0, "skins" }, { "blender_279_sausage_6100_ascii", -1, 0, 10006, 0, 0, "parent.skin_deformer->skin_index" }, { "blender_279_sausage_6100_ascii", -1, 0, 4824, 0, 0, "deformer" }, { "blender_279_sausage_6100_ascii", -1, 0, 4824, 0, 0, "ufbxi_read_deformer(uc, node, &object)" }, { "blender_279_sausage_6100_ascii", -1, 0, 4888, 0, 0, "skin" }, { "blender_279_sausage_6100_ascii", -1, 0, 5117, 0, 0, "uc->skin_deformers" }, { "blender_279_sausage_6100_ascii", -1, 0, 5191, 0, 0, "deformer" }, { "blender_279_sausage_6100_ascii", -1, 0, 5191, 0, 0, "ufbxi_read_deformer(uc, node, &object)" }, { "blender_279_sausage_6100_ascii", -1, 0, 5257, 0, 0, "skin" }, { "blender_279_sausage_6100_ascii", -1, 0, 5511, 0, 0, "uc->skin_deformers" }, { "blender_279_sausage_6100_ascii", -1, 0, 574, 0, 0, "bone" }, { "blender_279_sausage_6100_ascii", -1, 0, 6971, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_ANIM_CURVE,..." }, { "blender_279_sausage_6100_ascii", -1, 0, 7653, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_ANIM_CURVE,..." }, { "blender_279_sausage_6100_ascii", -1, 0, 816, 0, 0, "bone" }, { "blender_279_sausage_6100_ascii", -1, 0, 9006, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_ANIM_PROP, ..." }, { "blender_279_sausage_6100_ascii", -1, 0, 9043, 0, 0, "uc->skin_clusters" }, { "blender_279_sausage_6100_ascii", -1, 0, 9927, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_ANIM_PROP, ..." }, { "blender_279_sausage_6100_ascii", -1, 0, 9999, 0, 0, "uc->skin_clusters" }, { "blender_279_sausage_6100_ascii", 11936, 85, 0, 0, 0, "mesh->num_vertices > 0" }, { "blender_279_sausage_7400_binary", -1, 0, 2775, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_ANIM_CURVE,..." }, { "blender_279_sausage_7400_binary", -1, 0, 3232, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_ANIM_CURVE,..." }, { "blender_282_suzanne_7400_binary", -1, 0, 0, 160, 0, "mesh->uv_sets.data" }, { "blender_282_suzanne_7400_binary", -1, 0, 0, 97, 0, "mesh->uv_sets.data" }, { "blender_282_suzanne_7400_binary", 22907, 255, 0, 0, 0, "ufbxi_read_vertex_element(uc, mesh, n, &set->vertex_uv...." }, { "blender_282_suzanne_7400_binary", 22994, 0, 0, 0, 0, "Invalid mapping" }, { "maya_anim_light_7500_binary", -1, 0, 1238, 0, 0, "ufbxi_merge_properties(uc, props, attr->defaults, props..." }, { "maya_anim_light_7500_binary", -1, 0, 907, 0, 0, "ufbxi_merge_properties(uc, props, attr->defaults, props..." 
}, { "maya_auto_clamp_7100_ascii", -1, 0, 1136, 0, 0, "v" }, { "maya_auto_clamp_7100_ascii", -1, 0, 1137, 0, 0, "v" }, { "maya_auto_clamp_7100_ascii", -1, 0, 816, 0, 0, "v" }, { "maya_auto_clamp_7100_ascii", -1, 0, 817, 0, 0, "v" }, { "maya_color_sets_6100_binary", -1, 0, 0, 126, 0, "mesh->color_sets.data" }, { "maya_color_sets_6100_binary", -1, 0, 0, 78, 0, "mesh->color_sets.data" }, { "maya_color_sets_6100_binary", 9909, 255, 0, 0, 0, "ufbxi_read_vertex_element(uc, mesh, n, &set->vertex_col..." }, { "maya_cone_6100_binary", 15524, 255, 0, 0, 0, "ufbxi_find_val1(n, ufbxi_MappingInformationType, \"C\",..." }, { "maya_cone_6100_binary", 15571, 255, 0, 0, 0, "ufbxi_read_truncated_array(uc, &mesh->edge_crease, n, u..." }, { "maya_cone_6100_binary", 16031, 255, 0, 0, 0, "ufbxi_read_vertex_element(uc, mesh, n, &mesh->vertex_cr..." }, { "maya_cube_6100_ascii", -1, 0, 0, 372, 0, "v" }, { "maya_cube_6100_ascii", -1, 0, 0, 560, 0, "v" }, { "maya_cube_6100_binary", -1, 0, 0, 0, 10701, "val" }, { "maya_cube_6100_binary", -1, 0, 0, 0, 10705, "val" }, { "maya_cube_6100_binary", -1, 0, 273, 0, 0, "arr_data" }, { "maya_cube_6100_binary", -1, 0, 442, 0, 0, "arr_data" }, { "maya_cube_6100_binary", -1, 0, 488, 0, 0, "binormals" }, { "maya_cube_6100_binary", -1, 0, 489, 0, 0, "tangents" }, { "maya_cube_6100_binary", -1, 0, 802, 0, 0, "binormals" }, { "maya_cube_6100_binary", -1, 0, 803, 0, 0, "tangents" }, { "maya_cube_6100_binary", 10525, 255, 0, 0, 0, "ufbxi_find_val1(n, ufbxi_MappingInformationType, \"C\",..." }, { "maya_cube_6100_binary", 10572, 255, 0, 0, 0, "arr" }, { "maya_cube_6100_binary", 10572, 255, 0, 0, 0, "ufbxi_read_truncated_array(uc, &mesh->edge_smoothing, n..." }, { "maya_cube_6100_binary", 10670, 255, 0, 0, 0, "Bad multivalue array type" }, { "maya_cube_6100_binary", 6763, 23, 0, 0, 0, "vertices->size % 3 == 0" }, { "maya_cube_6100_binary", 7448, 71, 0, 0, 0, "data->size % num_components == 0" }, { "maya_cube_6100_binary", 8164, 255, 0, 0, 0, "ufbxi_read_vertex_element(uc, mesh, n, &layer->elem.dat..." }, { "maya_cube_6100_binary", 9038, 255, 0, 0, 0, "ufbxi_read_vertex_element(uc, mesh, n, &layer->elem.dat..." }, { "maya_cube_7100_ascii", 8925, 255, 0, 0, 0, "ufbxi_ascii_accept(uc, UFBXI_ASCII_INT)" }, { "maya_cube_7100_ascii", 8929, 255, 0, 0, 0, "ufbxi_ascii_accept(uc, UFBXI_ASCII_NAME)" }, { "maya_cube_7100_ascii", 8935, 255, 0, 0, 0, "ufbxi_ascii_accept(uc, '}')" }, { "maya_cube_7100_binary", -1, 0, 0, 132, 0, "ufbxi_push_string_place_str(uc, type)" }, { "maya_cube_7100_binary", -1, 0, 0, 133, 0, "ufbxi_merge_properties(uc, &stack->props, object->props..." }, { "maya_cube_7100_binary", -1, 0, 0, 137, 0, "ufbxi_merge_properties(uc, &layer->layer_props, object-..." }, { "maya_cube_7100_binary", -1, 0, 0, 225, 0, "ufbxi_push_string_place_str(uc, type)" }, { "maya_cube_7100_binary", -1, 0, 0, 226, 0, "ufbxi_merge_properties(uc, &stack->props, object->props..." }, { "maya_cube_7100_binary", -1, 0, 0, 233, 0, "ufbxi_merge_properties(uc, &layer->layer_props, object-..." }, { "maya_cube_7100_binary", -1, 0, 1117, 0, 0, "stack" }, { "maya_cube_7100_binary", -1, 0, 1117, 0, 0, "ufbxi_read_animation_stack(uc, node, &object)" }, { "maya_cube_7100_binary", -1, 0, 1121, 0, 0, "layer" }, { "maya_cube_7100_binary", -1, 0, 1121, 0, 0, "ufbxi_read_animation_layer(uc, node, &object)" }, { "maya_cube_7100_binary", -1, 0, 658, 0, 0, "ufbxi_grow_array(&uc->ator_tmp, &uc->convert_buffer, &u..." 
}, { "maya_cube_7100_binary", -1, 0, 807, 0, 0, "stack" }, { "maya_cube_7100_binary", -1, 0, 807, 0, 0, "ufbxi_read_animation_stack(uc, node, &object)" }, { "maya_cube_7100_binary", -1, 0, 811, 0, 0, "layer" }, { "maya_cube_7100_binary", -1, 0, 811, 0, 0, "ufbxi_read_animation_layer(uc, node, &object)" }, { "maya_cube_7100_binary", -1, 0, 965, 0, 0, "ufbxi_grow_array(&uc->ator_tmp, &uc->convert_buffer, &u..." }, { "maya_game_sausage_6100_binary_deform", 44932, 98, 0, 0, 0, "data_end - data >= 2" }, { "maya_game_sausage_6100_binary_wiggle", 53586, 79, 0, 0, 0, "data_end - data >= 1" }, { "maya_game_sausage_6100_binary", 45318, 0, 0, 0, 0, "indices->size == weights->size" }, { "maya_game_sausage_6100_binary", 45470, 0, 0, 0, 0, "transform->size >= 16" }, { "maya_game_sausage_6100_binary", 45636, 0, 0, 0, 0, "transform_link->size >= 16" }, { "maya_interpolation_modes_6100_binary", 16706, 0, 0, 0, 0, "ufbxi_get_val1(node, \"c\", (char**)&type_and_name)" }, { "maya_interpolation_modes_6100_binary", 16930, 255, 0, 0, 0, "size <= UFBXI_MAX_ALLOCATION_SIZE" }, { "maya_interpolation_modes_6100_binary", 16936, 0, 0, 0, 0, "data_end - data >= 2" }, { "maya_interpolation_modes_6100_binary", 16936, 73, 0, 0, 0, "data_end - data >= 1" }, { "maya_interpolation_modes_6100_binary", 16969, 255, 0, 0, 0, "Unknown slope mode" }, { "maya_interpolation_modes_6100_binary", 16989, 255, 0, 0, 0, "Unknown weight mode" }, { "maya_interpolation_modes_7500_ascii", -1, 0, 1168, 0, 0, "v" }, { "maya_interpolation_modes_7500_ascii", -1, 0, 851, 0, 0, "v" }, { "maya_interpolation_modes_7500_ascii", 13357, 43, 0, 0, 0, "times->size == values->size" }, { "maya_interpolation_modes_7500_ascii", 14126, 43, 0, 0, 0, "attr_flags->size == refs->size" }, { "maya_interpolation_modes_7500_ascii", 15551, 43, 0, 0, 0, "attrs->size == refs->size * 4u" }, { "maya_interpolation_modes_7500_binary", -1, 0, 0, 143, 0, "keys" }, { "maya_interpolation_modes_7500_binary", -1, 0, 0, 243, 0, "keys" }, { "maya_interpolation_modes_7500_binary", -1, 0, 1177, 0, 0, "curve" }, { "maya_interpolation_modes_7500_binary", -1, 0, 1234, 0, 0, "prop" }, { "maya_interpolation_modes_7500_binary", -1, 0, 1234, 0, 0, "ufbxi_read_animation_curve_node(uc, node, &object)" }, { "maya_interpolation_modes_7500_binary", -1, 0, 861, 0, 0, "curve" }, { "maya_interpolation_modes_7500_binary", -1, 0, 897, 0, 0, "prop" }, { "maya_interpolation_modes_7500_binary", -1, 0, 897, 0, 0, "ufbxi_read_animation_curve_node(uc, node, &object)" }, { "maya_interpolation_modes_7500_binary", 24310, 255, 0, 0, 0, "times = ufbxi_find_array(node, ufbxi_KeyTime, 'l')" }, { "maya_interpolation_modes_7500_binary", 24310, 255, 0, 0, 0, "ufbxi_read_animation_curve(uc, node, &object)" }, { "maya_interpolation_modes_7500_binary", 24387, 255, 0, 0, 0, "values = ufbxi_find_array(node, ufbxi_KeyValueFloat, 'r..." }, { "maya_interpolation_modes_7500_binary", 24418, 255, 0, 0, 0, "Bad multivalue array type" }, { "maya_interpolation_modes_7500_binary", 24528, 255, 0, 0, 0, "attr_flags = ufbxi_find_array(node, ufbxi_KeyAttrFlags,..." }, { "maya_interpolation_modes_7500_binary", 24627, 255, 0, 0, 0, "attrs = ufbxi_find_array(node, ufbxi_KeyAttrDataFloat, ..." }, { "maya_interpolation_modes_7500_binary", 24724, 255, 0, 0, 0, "refs = ufbxi_find_array(node, ufbxi_KeyAttrRefCount, 'i..." 
},
	{ "maya_interpolation_modes_7500_binary", 24765, 255, 0, 0, 0, "Bad multivalue array type" },
	{ "maya_interpolation_modes_7500_binary", 25023, 0, 0, 0, 0, "refs_left >= 0" },
};

void ufbxt_do_file_test(const char *name, void (*test_fn)(ufbx_scene *s, ufbxt_diff_error *err), const char *suffix, ufbx_load_opts user_opts)
{
	const uint32_t file_versions[] = { 6100, 7100, 7400, 7500, 7700 };

	char buf[512];
	snprintf(buf, sizeof(buf), "%s%s.obj", data_root, name);
	size_t obj_size = 0;
	void *obj_data = ufbxt_read_file(buf, &obj_size);
	ufbxt_obj_file *obj_file = obj_data ? ufbxt_load_obj(obj_data, obj_size) : NULL;
	free(obj_data);

	char base_name[512];

	ufbxt_begin_fuzz();

	uint32_t num_opened = 0;

	for (uint32_t vi = 0; vi < ufbxt_arraycount(file_versions); vi++) {
		for (uint32_t fi = 0; fi < 2; fi++) {
			uint32_t version = file_versions[vi];
			const char *format = fi == 1 ? "ascii" : "binary";
			if (suffix) {
				snprintf(buf, sizeof(buf), "%s%s_%u_%s_%s.fbx", data_root, name, version, format, suffix);
				snprintf(base_name, sizeof(base_name), "%s_%u_%s_%s", name, version, format, suffix);
			} else {
				snprintf(buf, sizeof(buf), "%s%s_%u_%s.fbx", data_root, name, version, format);
				snprintf(base_name, sizeof(base_name), "%s_%u_%s", name, version, format);
			}

			if (g_file_version && version != g_file_version) continue;
			if (g_file_type && strcmp(format, g_file_type)) continue;

			size_t size = 0;
			void *data = ufbxt_read_file(buf, &size);
			if (!data) continue;

			num_opened++;
			ufbxt_logf("%s", buf);

			ufbx_error error;

			ufbx_load_opts load_opts = user_opts;
			if (g_dedicated_allocs) {
				load_opts.temp_huge_size = 1;
				load_opts.result_huge_size = 1;
			}

			uint64_t load_begin = cputime_cpu_tick();
			ufbx_scene *scene = ufbx_load_memory(data, size, &load_opts, &error);
			uint64_t load_end = cputime_cpu_tick();

			if (!scene) {
				ufbxt_log_error(&error);
				ufbxt_assert_fail(__FILE__, __LINE__, "Failed to parse file");
			}

			ufbx_load_opts stream_opts = load_opts;
			stream_opts.read_buffer_size = 1;
			ufbx_scene *streamed_scene = ufbx_load_file(buf, &stream_opts, &error);
			if (streamed_scene) {
				// Validate the scene that was actually streamed, not the
				// in-memory one checked further below.
				ufbxt_check_scene(streamed_scene);
			} else {
				ufbxt_log_error(&error);
				ufbxt_assert_fail(__FILE__, __LINE__, "Failed to parse streamed file");
			}
			ufbx_free_scene(streamed_scene);

			// Ignore geometry, animations, and both
			{
				ufbx_load_opts opts = load_opts;
				opts.ignore_geometry = true;
				ufbx_scene *ignore_scene = ufbx_load_memory(data, size, &opts, NULL);
				ufbxt_check_scene(ignore_scene);
				ufbx_free_scene(ignore_scene);
			}

			{
				ufbx_load_opts opts = load_opts;
				opts.ignore_animation = true;
				ufbx_scene *ignore_scene = ufbx_load_memory(data, size, &opts, NULL);
				ufbxt_check_scene(ignore_scene);
				ufbx_free_scene(ignore_scene);
			}

			{
				ufbx_load_opts opts = load_opts;
				opts.ignore_geometry = true;
				opts.ignore_animation = true;
				ufbx_scene *ignore_scene = ufbx_load_memory(data, size, &opts, NULL);
				ufbxt_check_scene(ignore_scene);
				ufbx_free_scene(ignore_scene);
			}

			ufbxt_logf(".. Loaded in %.2fms: File %.1fkB, temp %.1fkB (%zu allocs), result %.1fkB (%zu allocs)",
				cputime_cpu_delta_to_sec(NULL, load_end - load_begin) * 1e3,
				(double)size * 1e-3,
				(double)scene->metadata.temp_memory_used * 1e-3,
				scene->metadata.temp_allocs,
				(double)scene->metadata.result_memory_used * 1e-3,
				scene->metadata.result_allocs
			);

			ufbxt_assert(scene->metadata.ascii == ((fi == 1) ? 1 : 0));
			ufbxt_assert(scene->metadata.version == version);

			ufbxt_check_scene(scene);

			ufbxt_diff_error err = { 0 };

			if (obj_file) {
				ufbxt_diff_to_obj(scene, obj_file, &err, false);
			}

			test_fn(scene, &err);

			if (err.num > 0) {
				ufbx_real avg = err.sum / (ufbx_real)err.num;
				ufbxt_logf(".. Absolute diff: avg %.3g, max %.3g (%zu tests)", avg, err.max, err.num);
			}
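			// Each fuzz attempt below gets a unique "step" number so a
			// failure can be replayed in isolation with --fuzz-step:
			// temp allocation limits use 10000000+i, result limits
			// 20000000+i, truncation 30000000+i, and byte patches use
			// offset*1000 plus a small code for the value tried.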
			size_t temp_allocs = scene->metadata.temp_allocs;
			size_t result_allocs = scene->metadata.result_allocs;

			ufbx_free_scene(scene);

			if (g_fuzz) {
				size_t fail_step = 0;
				int i;

				g_fuzz_test_name = base_name;

				#pragma omp parallel for schedule(static, 16)
				for (i = 0; i < (int)temp_allocs; i++) {
					if (omp_get_thread_num() == 0) {
						if (i % 16 == 0) {
							fprintf(stderr, "\rFuzzing temp limit %s: %d/%d", base_name, i, (int)temp_allocs);
							fflush(stderr);
						}
					}

					size_t step = 10000000 + (size_t)i;

					if (!ufbxt_test_fuzz(data, size, step, -1, (size_t)i, 0, 0)) fail_step = step;
				}

				fprintf(stderr, "\rFuzzing temp limit %s: %d/%d\n", base_name, (int)temp_allocs, (int)temp_allocs);

				#pragma omp parallel for schedule(static, 16)
				for (i = 0; i < (int)result_allocs; i++) {
					if (omp_get_thread_num() == 0) {
						if (i % 16 == 0) {
							fprintf(stderr, "\rFuzzing result limit %s: %d/%d", base_name, i, (int)result_allocs);
							fflush(stderr);
						}
					}

					size_t step = 20000000 + (size_t)i;

					if (!ufbxt_test_fuzz(data, size, step, -1, 0, (size_t)i, 0)) fail_step = step;
				}

				fprintf(stderr, "\rFuzzing result limit %s: %d/%d\n", base_name, (int)result_allocs, (int)result_allocs);

				#pragma omp parallel for schedule(static, 16)
				for (i = 1; i < (int)size; i++) {
					if (omp_get_thread_num() == 0) {
						if (i % 16 == 0) {
							fprintf(stderr, "\rFuzzing truncate %s: %d/%d", base_name, i, (int)size);
							fflush(stderr);
						}
					}

					size_t step = 30000000 + (size_t)i;

					if (!ufbxt_test_fuzz(data, size, step, -1, 0, 0, (size_t)i)) fail_step = step;
				}

				fprintf(stderr, "\rFuzzing truncate %s: %d/%d\n", base_name, (int)size, (int)size);

				if (!g_no_patch) {

					uint8_t *data_copy[256] = { 0 };

					int patch_start = g_patch_start - omp_get_num_threads() * 16;
					if (patch_start < 0) {
						patch_start = 0;
					}

					#pragma omp parallel for schedule(static, 16)
					for (i = patch_start; i < (int)size; i++) {
						if (omp_get_thread_num() == 0) {
							if (i % 16 == 0) {
								fprintf(stderr, "\rFuzzing patch %s: %d/%d", base_name, i, (int)size);
								fflush(stderr);
							}
						}

						uint8_t **p_data_copy = &data_copy[omp_get_thread_num()];
						if (*p_data_copy == NULL) {
							*p_data_copy = malloc(size);
							memcpy(*p_data_copy, data, size);
						}
						uint8_t *data_u8 = *p_data_copy;

						size_t step = (size_t)i * 1000;

						uint8_t original = data_u8[i];

						if (g_all_byte_values) {
							for (uint32_t v = 0; v < 256; v++) {
								data_u8[i] = (uint8_t)v;
								if (!ufbxt_test_fuzz(data_u8, size, step + v, i, 0, 0, 0)) fail_step = step + v;
							}
						} else {
							data_u8[i] = original + 1;
							if (!ufbxt_test_fuzz(data_u8, size, step + 1, i, 0, 0, 0)) fail_step = step + 1;

							data_u8[i] = original - 1;
							if (!ufbxt_test_fuzz(data_u8, size, step + 2, i, 0, 0, 0)) fail_step = step + 2;

							if (original != 0) {
								data_u8[i] = 0;
								if (!ufbxt_test_fuzz(data_u8, size, step + 3, i, 0, 0, 0)) fail_step = step + 3;
							}

							if (original != 0xff) {
								data_u8[i] = 0xff;
								if (!ufbxt_test_fuzz(data_u8, size, step + 4, i, 0, 0, 0)) fail_step = step + 4;
							}
						}

						data_u8[i] = original;
					}

					fprintf(stderr, "\rFuzzing patch %s: %d/%d\n", base_name, (int)size, (int)size);

					for (size_t i = 0; i < ufbxt_arraycount(data_copy); i++) {
						free(data_copy[i]);
					}
				}

				ufbxt_hintf("Fuzz failed on step: %zu", fail_step);
				ufbxt_assert(fail_step == 0);
			} else {
				uint8_t *data_u8 = (uint8_t*)data;

				// Run a couple of known fuzz checks
				for (size_t i = 0; i < ufbxt_arraycount(g_fuzz_checks); i++) {
					const ufbxt_fuzz_check *check = &g_fuzz_checks[i];
					if (strcmp(check->name, base_name)) continue;

					uint8_t original;

					if (check->patch_offset >= 0) {
						ufbxt_assert(check->patch_offset < size);
						original = data_u8[check->patch_offset];
						ufbxt_logf(".. Patch byte %u from 0x%02x to 0x%02x: %s", check->patch_offset, original, check->patch_value, check->description);
Patch byte %u from 0x%02x to 0x%02x: %s", check->patch_offset, original, check->patch_value, check->description); ufbxt_assert(check->patch_offset < size); data_u8[check->patch_offset] = check->patch_value; } ufbx_load_opts opts = { 0 }; if (check->temp_limit > 0) { ufbxt_logf(".. Temp limit %u: %s", check->temp_limit, check->description); opts.max_temp_allocs = check->temp_limit; } if (check->result_limit > 0) { ufbxt_logf(".. Result limit %u: %s", check->result_limit, check->description); opts.max_result_allocs = check->result_limit; } size_t truncated_size = size; if (check->truncate_length > 0) { ufbxt_logf(".. Truncated length %u: %s", check->truncate_length, check->description); truncated_size = check->truncate_length; } ufbx_error error; ufbx_scene *scene = ufbx_load_memory(data, truncated_size, &opts, &error); if (scene) { ufbxt_check_scene(scene); ufbx_free_scene(scene); } if (check->patch_offset >= 0) { data_u8[check->patch_offset] = original; } } } free(data); } } if (num_opened == 0) { ufbxt_assert_fail(__FILE__, __LINE__, "File not found"); } free(obj_file); } #define UFBXT_IMPL 1 #define UFBXT_TEST(name) void ufbxt_test_fn_##name(void) #define UFBXT_FILE_TEST(name) void ufbxt_test_fn_imp_file_##name(ufbx_scene *scene, ufbxt_diff_error *err); \ void ufbxt_test_fn_file_##name(void) { \ ufbx_load_opts user_opts = { 0 }; \ ufbxt_do_file_test(#name, &ufbxt_test_fn_imp_file_##name, NULL, user_opts); } \ void ufbxt_test_fn_imp_file_##name(ufbx_scene *scene, ufbxt_diff_error *err) #define UFBXT_FILE_TEST_OPTS(name, get_opts) void ufbxt_test_fn_imp_file_##name(ufbx_scene *scene, ufbxt_diff_error *err); \ void ufbxt_test_fn_file_##name(void) { \ ufbxt_do_file_test(#name, &ufbxt_test_fn_imp_file_##name, NULL, get_opts); } \ void ufbxt_test_fn_imp_file_##name(ufbx_scene *scene, ufbxt_diff_error *err) #define UFBXT_FILE_TEST_SUFFIX(name, suffix) void ufbxt_test_fn_imp_file_##name##_##suffix(ufbx_scene *scene, ufbxt_diff_error *err); \ void ufbxt_test_fn_file_##name##_##suffix(void) { \ ufbx_load_opts user_opts = { 0 }; \ ufbxt_do_file_test(#name, &ufbxt_test_fn_imp_file_##name##_##suffix, #suffix, user_opts); } \ void ufbxt_test_fn_imp_file_##name##_##suffix(ufbx_scene *scene, ufbxt_diff_error *err) #define UFBXT_FILE_TEST_SUFFIX_OPTS(name, suffix, get_opts) void ufbxt_test_fn_imp_file_##name##_##suffix(ufbx_scene *scene, ufbxt_diff_error *err); \ void ufbxt_test_fn_file_##name##_##suffix(void) { \ ufbxt_do_file_test(#name, &ufbxt_test_fn_imp_file_##name##_##suffix, #suffix, get_opts); } \ void ufbxt_test_fn_imp_file_##name##_##suffix(ufbx_scene *scene, ufbxt_diff_error *err) #include "all_tests.h" #undef UFBXT_IMPL #undef UFBXT_TEST #undef UFBXT_FILE_TEST #undef UFBXT_FILE_TEST_OPTS #undef UFBXT_FILE_TEST_SUFFIX #undef UFBXT_FILE_TEST_SUFFIX_OPTS #define UFBXT_IMPL 0 #define UFBXT_TEST(name) { #name, &ufbxt_test_fn_##name }, #define UFBXT_FILE_TEST(name) { #name, &ufbxt_test_fn_file_##name }, #define UFBXT_FILE_TEST_OPTS(name, get_opts) { #name, &ufbxt_test_fn_file_##name }, #define UFBXT_FILE_TEST_SUFFIX(name, suffix) { #name "_" #suffix, &ufbxt_test_fn_file_##name##_##suffix }, #define UFBXT_FILE_TEST_SUFFIX_OPTS(name, suffix, get_opts) { #name "_" #suffix, &ufbxt_test_fn_file_##name##_##suffix }, ufbxt_test g_tests[] = { #include "all_tests.h" }; int ufbxt_run_test(ufbxt_test *test) { printf("%s: ", test->name); fflush(stdout); g_error.stack_size = 0; g_hint[0] = '\0'; g_current_test = test; if (!setjmp(g_test_jmp)) { g_skip_print_ok = false; test->func(); if (!g_skip_print_ok) { 
printf("OK\n"); fflush(stdout); } return 1; } else { if (g_hint[0]) { ufbxt_logf("Hint: %s", g_hint); } if (g_error.stack_size) { ufbxt_log_error(&g_error); } return 0; } } int main(int argc, char **argv) { uint32_t num_tests = ufbxt_arraycount(g_tests); uint32_t num_ok = 0; const char *test_filter = NULL; cputime_init(); for (int i = 1; i < argc; i++) { if (!strcmp(argv[i], "-v")) { g_verbose = 1; } if (!strcmp(argv[i], "-t")) { if (++i < argc) { test_filter = argv[i]; } } if (!strcmp(argv[i], "-d")) { if (++i < argc) { size_t len = strlen(argv[i]); if (len + 2 > sizeof(data_root)) { fprintf(stderr, "-d: Data root too long"); return 1; } memcpy(data_root, argv[i], len); char end = argv[i][len - 1]; if (end != '/' && end != '\\') { data_root[len] = '/'; data_root[len + 1] = '\0'; } } } if (!strcmp(argv[i], "-f")) { if (++i < argc) g_file_version = (uint32_t)atoi(argv[i]); if (++i < argc) g_file_type = argv[i]; } if (!strcmp(argv[i], "--fuzz")) { g_fuzz = true; } if (!strcmp(argv[i], "--patch-all-byte-values")) { g_all_byte_values = true; } if (!strcmp(argv[i], "--patch-start")) { if (++i < argc) g_patch_start = atoi(argv[i]); } if (!strcmp(argv[i], "--dedicated-allocs")) { g_dedicated_allocs = true; } if (!strcmp(argv[i], "--no-patch")) { g_no_patch = true; } if (!strcmp(argv[i], "--threads")) { #if _OPENMP if (++i < argc) omp_set_num_threads(atoi(argv[i])); #endif } if (!strcmp(argv[i], "--fuzz-step")) { if (++i < argc) g_fuzz_step = (size_t)atoi(argv[i]); } } #ifdef _OPENMP if (omp_get_num_threads() > 256) { omp_set_num_threads(256); } #else if (g_fuzz) { fprintf(stderr, "Fuzzing without threads, compile with OpenMP for better performance!\n"); } #endif uint32_t num_ran = 0; for (uint32_t i = 0; i < num_tests; i++) { ufbxt_test *test = &g_tests[i]; if (test_filter && strcmp(test->name, test_filter)) { continue; } num_ran++; if (ufbxt_run_test(test)) { num_ok++; } ufbxt_log_flush(); } if (num_ok < num_tests) { printf("\n"); for (uint32_t i = 0; i < num_tests; i++) { ufbxt_test *test = &g_tests[i]; if (test->fail.failed) { ufbxt_fail *fail = &test->fail; const char *file = fail->file, *find; find = strrchr(file, '/'); file = find ? find + 1 : file; find = strrchr(file, '\\'); file = find ? find + 1 : file; printf("(%s) %s:%u: %s\n", test->name, file, fail->line, fail->expr); } } } printf("\nTests passed: %u/%u\n", num_ok, num_ran); if (g_fuzz) { printf("Fuzz checks:\n\nstatic const ufbxt_fuzz_check g_fuzz_checks[] = {\n"); for (size_t i = 0; i < ufbxt_arraycount(g_checks); i++) { ufbxt_check_line *check = &g_checks[i]; if (check->patch_offset == 0) continue; char safe_desc[60]; size_t safe_desc_len = 0; for (const char *c = check->description; *c; c++) { if (sizeof(safe_desc) - safe_desc_len < 6) { safe_desc[safe_desc_len++] = '.'; safe_desc[safe_desc_len++] = '.'; safe_desc[safe_desc_len++] = '.'; break; } if (*c == '"' || *c == '\\') { safe_desc[safe_desc_len++] = '\\'; } safe_desc[safe_desc_len++] = *c; } safe_desc[safe_desc_len] = '\0'; int32_t patch_offset = check->patch_offset != UINT32_MAX ? (int32_t)(check->patch_offset - 1) : -1; printf("\t{ \"%s\", %d, %u, %u, %u, %u, \"%s\" },\n", check->test_name, patch_offset, (uint32_t)check->patch_value, (uint32_t)check->temp_limit, (uint32_t)check->result_limit, (uint32_t)check->truncate_length, safe_desc); free(check->test_name); } printf("};\n"); } return num_ok == num_ran ? 0 : 1; }
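/* Triage sketch (not part of the test suite): ufbxt_do_file_test() above encodes
 * the fuzz mode into the "step" value passed to ufbxt_test_fuzz() -- 10000000+i
 * for temp allocation limits, 20000000+i for result allocation limits,
 * 30000000+i for truncation, and offset*1000+variant for byte patches. A
 * hypothetical helper like this one can name the band a failing step fell
 * into; note that patch steps from byte offsets >= 10000 overlap the other
 * bands, so the decode is only reliable for smaller files. */
static const char *ufbxt_fuzz_step_band(size_t step)
{
	if (step >= 30000000) return "truncate (30000000 + truncated length)";
	if (step >= 20000000) return "result limit (20000000 + alloc index)";
	if (step >= 10000000) return "temp limit (10000000 + alloc index)";
	return "byte patch (offset*1000 + variant)";
}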
vote.h
/*******************************************************************************
 * vote.h - voting implementations
 *******************************************************************************
 * Add license here...
 *******************************/

#ifndef VOTE_H
#define VOTE_H

#include <filter.h>
#include <gb.h>
#include <nnf.h>
#include <patch.h>
#include <rng.h>
#include <voting/histogram.h>
#include <voting/meanshift.h>

namespace pm {

#define NOT_NULL(type) reinterpret_cast<type *>(1)

    /**
     * \brief The voting method
     */
    enum VoteMethod {
        /**
         * \brief Bidirectional Similarity voting
         * \see http://www.wisdom.weizmann.ac.il/~vision/VisualSummary.html
         */
        BiDirSimVoting,
        /**
         * \brief Bidirectional Similarity with Histogram voting
         */
        BiDirSimWithHistogramVoting,
        /**
         * \brief Default voting per pixel (over the corresponding overlapping patches)
         */
        DefaultVoting,
        /**
         * \brief Default voting per pixel, with patch gain+bias adjustment
         */
        DefaultGBAVoting,
        /**
         * \brief Direct pixel voting
         */
        DirectVoting,
        /**
         * \brief Feature-specific voting with histograms for the rest
         */
        FeatureWithHistogramVoting,
        /**
         * \brief Voting per pixel, with histogram reweighting
         */
        HistogramVoting,
        /**
         * \brief Mean-shift voting per pixel (over the corresponding overlapping patches)
         */
        MeanShiftVoting,
        /**
         * \brief Median voting per pixel (over the corresponding overlapping patches)
         */
        MedianVoting,
        /**
         * \brief Tiled voting per patch (over the corresponding overlapped pixels)
         */
        TiledVoting,
        /**
         * \brief Default voting using a mask as global weight mask
         */
        WeightedVoting
    };

    /**
     * \brief Type of output weight
     */
    enum WeightType {
        NoWeight, PixelWeight, PixelVariance
    };

    typedef const float& (*BinFloatOp)(const float&, const float&);

    /**
     * Parameters for voting
     */
    struct VoteParams {
        // General:
        int patchSize;
        VoteMethod method;
        Filter filter;
        // Output data:
        WeightType weightType;
        float *weights;
        // BiDir Similarity:
        float bidirSimWeight;
        // Mean-shift:
        voting::MeanShiftParams meanShift;
        // Histogram:
        std::vector<int> histChannels;
        std::vector<int> histBins;
        std::vector<Range> histRanges;
        std::vector<float> histWeights;
        std::vector<float> histBoosts;
        bool histNormalize;
        // Weighted:
        Mask weightMask;
        float weightBase;
        // Feature-specific:
        BinFloatOp featureOp;
        float featureDefault;
        std::vector<int> binaryChannels;

        VoteParams() : patchSize(7), method(DefaultVoting), filter(1),
        weightType(NoWeight), weights(NULL), bidirSimWeight(0.5f),
        histChannels(), histBins(), histRanges(), histWeights(), histBoosts(),
        histNormalize(true), weightBase(2.0f),
        featureOp(NULL), featureDefault(0.0f), binaryChannels() {
        }
    };

    template <typename Patch, typename Scalar>
    Image vote(const NearestNeighborField<Patch, Scalar> *nnf, VoteParams &params);

    template <int channels, typename Patch, typename Scalar>
    inline float pixelvar(const NearestNeighborField<Patch, Scalar> *nnf,
            int y, int startY, int endY,
            int x, int startX, int endX,
            Vec<Scalar, channels> mean) {
        typedef Vec<Scalar, channels> PixVal;
        const Texture *target = nnf->target;
        // variance sum
        float w = 0;
        int N = 0;
        // for each pixel from the corresponding patches
        for (int py = startY; py < endY; ++py) {
            for (int px = startX; px < endX; ++px) {
                const Patch &patch = nnf->get(py, px);
                int by = y - py; //< pixelInPatch = pos - patchPos
                int bx = x - px;
                PixVal value = target->at<PixVal>(patch.transform(by, bx));
                PixVal diff = mean - value;
                w += diff.dot(diff);
                ++N;
            }
        }
        return w / N;
    }

    /**
     * \brief Vote by using a simple filter over overlapping patches
     *
     * \param nnf
     *      the nearest neighbor field to vote with
     *
\param params * the voting parameters * \return the voted picture */ template <int channels, typename Patch, typename Scalar> Image vote_filter_n(const NearestNeighborField<Patch, Scalar> *nnf, VoteParams &params) { typedef Vec<Scalar, channels> PixVal; const Texture *source = nnf->source; const Texture *target = nnf->target; Image vote = Image::zeros(source->rows, source->cols, IM_32FC(channels)); const Filter &filter = params.filter; // for each pixel of the source #if _OPENMP #pragma omp parallel for collapse(2) #endif for (int y = 0; y < source->rows; ++y) { for (int x = 0; x < source->cols; ++x) { PixVal &votedPixel = vote.at<PixVal>(y, x); float w = 0.0f; // for each pixel from the corresponding patches int startX = std::max(0, x - Patch::width() + 1); int endX = std::min(x + 1, nnf->width); int startY = std::max(0, y - Patch::width() + 1); int endY = std::min(y + 1, nnf->height); for (int py = startY; py < endY; ++py) { for (int px = startX; px < endX; ++px) { const Patch &patch = nnf->get(py, px); // /!\ Convolution: the filter is reversed in time // => at pos (x,y), the filter is at (0, 0) // => at pos (x-dx, y-dy), the filter is at (dx, dy) // ++ we assume that only patches on the left / top have a contribution int by = y - py; //< pixelInPatch = pos - patchPos int bx = x - px; PixVal value = target->at<PixVal>(patch.transform(by, bx)); votedPixel += value * filter[by][bx]; w += filter[by][bx]; } } if (w > 1e-8) { votedPixel *= 1.0 / w; } else { #if _OPENMP #pragma omp critical #endif { std::cerr << "w0 @" << y << "/" << x << ", from: " << startY << "/" << startX; std::cerr << " to " << endY << "/" << endX << "\n"; } } if (votedPixel[0] > 1e10) { #if _OPENMP #pragma omp critical #endif { std::cerr << "Corrupted vote p@" << y << "/" << x << ": "; for (int i = 0; i < channels; ++i) std::cerr << votedPixel[i] << ", "; std::cerr << "\n"; } } // binary channels for(int i = 0; i < params.binaryChannels.size(); ++i) { Scalar &v = votedPixel[params.binaryChannels[i]]; if(v >= 50.0){ v = 100.0; } else { v = 0.0; } } // store in output weight map if there is one switch(params.weightType) { case PixelWeight: params.weights[source->cols * y + x] = w; break; case PixelVariance: params.weights[source->cols * y + x] = pixelvar( nnf, y, startY, endY, x, startX, endX, votedPixel ); break; } } } return vote; } /** * \brief Vote by using a simple filter over overlapping patches * and gain+bias adjustment of patches * * \param nnf * the nearest neighbor field to vote with * \param params * the voting parameters * \return the voted picture */ template <int channels, typename Patch, typename Scalar> Image vote_filter_gba_n(const NearestNeighborField<Patch, Scalar> *nnf, VoteParams &params) { typedef Vec<Scalar, channels> PixVal; const Texture *source = nnf->source; const Texture *target = nnf->target; Image vote = Image::zeros(source->rows, source->cols, IM_32FC(channels)); int total = source->rows * source->cols; const Filter &filter = params.filter; // 1 = compute gain+bias of each target patch typedef GainBias<Scalar> GB; typedef typename GB::Vec3 Vec3; Vec3 *gains = new Vec3[nnf->size], *biases = new Vec3[nnf->size]; #if _OPENMP #pragma omp parallel for collapse(2) #endif for (int y = 0; y < nnf->height; ++y) { for (int x = 0; x < nnf->width; ++x) { const typename Patch::OriginalPatchType srcPatch(y, x); const Patch &patch = nnf->get(y, x); GB::template compute<channels>(source, target, srcPatch, patch, gains[nnf->width * y + x], biases[nnf->width * y + x]); } } // for each pixel of the source #if 
_OPENMP #pragma omp parallel for collapse(2) #endif for (int y = 0; y < source->rows; ++y) { for (int x = 0; x < source->cols; ++x) { float w = 0.0f; // for each pixel from the corresponding patches int startX = std::max(0, x - Patch::width() + 1); int endX = std::min(x + 1, nnf->width); int startY = std::max(0, y - Patch::width() + 1); int endY = std::min(y + 1, nnf->height); for (int py = startY; py < endY; ++py) { for (int px = startX; px < endX; ++px) { const Patch &patch = nnf->get(py, px); // /!\ Convolution: the filter is reversed in time // => at pos (x,y), the filter is at (0, 0) // => at pos (x-dx, y-dy), the filter is at (dx, dy) // ++ we assume that only patches on the left / top have a contribution int by = y - py; //< pixelInPatch = pos - patchPos int bx = x - px; PixVal value = target->at<PixVal>(patch.transform(by, bx)); // apply gain+bias adjustment GB::template applyOn<channels>(value, gains[nnf->width * py + px], biases[nnf->width * py + px]); // register the pixel vote vote.at<PixVal>(y, x) += value * filter[by][bx]; w += filter[by][bx]; } } if (w > 1e-8) { vote.at<PixVal>(y, x) *= 1.0 / w; } // store in output weight map if there is one if (params.weightType == PixelWeight) params.weights[source->cols * y + x] = w; } } return vote; } /** * \brief Direct vote * * \param nnf * the nearest neighbor field to vote with * \param params * the voting parameters * \return the voted picture */ template <int channels, typename Patch, typename Scalar> Image vote_direct_n(const NearestNeighborField<Patch, Scalar> *nnf, VoteParams &params) { typedef Vec<Scalar, channels> PixVal; const Texture *source = nnf->source; const Texture *target = nnf->target; Image vote = Image::zeros(source->rows, source->cols, IM_32FC(channels)); int total = source->rows * source->cols; if (params.weightType != NoWeight) { // store in output weight map if there is one switch(params.weightType) { case PixelWeight: std::fill(params.weights, params.weights + total, 1.0f); break; case PixelVariance: std::fill(params.weights, params.weights + total, 0.0f); break; } } // for each pixel of the source #if _OPENMP #pragma omp parallel for collapse(2) #endif for (int y = 0; y < source->rows; ++y) { for (int x = 0; x < source->cols; ++x) { // patch position int py = std::min(y, nnf->height - 1); int px = std::min(x, nnf->width - 1); const Patch &patch = nnf->get(py, px); // pixel within patch int by = y - py; int bx = x - px; vote.at<PixVal>(y, x) = target->at<PixVal>(patch.transform(by, bx)); } } return vote; } /** * \brief Vote by using a simple filter over overlapping patches * and a tiled strategy with openmp * * \param nnf * the nearest neighbor field to vote with * \param params * the voting parameters * \return the voted picture */ template <int channels, typename Patch, typename Scalar> Image vote_filter_tiled_n(const NearestNeighborField<Patch, Scalar> *nnf, VoteParams &params) { typedef Vec<Scalar, channels> PixVal; const Texture *source = nnf->source; const Texture *target = nnf->target; const int h = nnf->height, w = nnf->width; //< nnf bounds const int sh = source->rows, sw = source->cols; //< voted image bounds Image vote = Image::zeros(sh, sw, IM_32FC(channels)); int total = source->rows * source->cols; float *weights = new float[total](); // /!\ Use default constructor! 
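// (the trailing () on new float[total]() above value-initializes the buffer,
//  so every accumulated weight starts at exactly 0.0f before the tile pass)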
if (params.weights != NULL) { switch(params.weightType) { case PixelWeight: params.weights = weights; break; case PixelVariance: std::fill(params.weights, params.weights + total, 0.0f); //< no valid break; } } const Filter &filter = params.filter; #if _OPENMP #pragma omp parallel #endif { int tid = omp_get_thread_num(), nt = omp_get_num_threads(); int patchY = std::ceil(float(h) / Patch::width()); int patchX = std::ceil(float(w) / Patch::width()); int tiles = patchY * patchX; int minTile = tiles * tid / nt; int maxTile = tiles * (tid + 1) / nt; // for each tile for (int tile = minTile; tile < maxTile; ++tile) { // for each patch (pixel) of the corresponding tile int x = (tile % patchX) * Patch::width(); int nX = std::min(x + Patch::width(), w); for (; x < nX; ++x) { int y = (tile / patchX) * Patch::width(); int nY = std::min(y + Patch::width(), h); for (; y < nY; ++y) { // /!\ This ^ was done over the nnf field that is smaller than the source! const Patch &patch = nnf->get(y, x); // for each pixel in the corresponding patch for (int px = 0; px < Patch::width(); ++px) { for (int py = 0; py < Patch::width(); ++py) { // /!\ <-- this is done over the source (nff + patch size) PixVal value = target->at<PixVal>(patch.transform(py, px)); vote.at<PixVal>(y + py, x + px) += value * filter[py][px]; weights[sw * (y + py) + x + px] += filter[py][px]; } } } } } } std::cout << ". normalizing\n"; // normalization of the pixel values #if _OPENMP #pragma omp for collapse(2) #endif for (int y = 0; y < sh; ++y) { for (int x = 0; x < sw; ++x) { float &w = weights[sw * y + x]; if (w > 1e-8) { vote.at<PixVal>(y, x) *= 1.0 / w; } else { w = 0.0f; vote.at<PixVal>(y, x) *= 0.0; } } } // free and return if (params.weights == NULL) { // nobody will still use it => free it! delete[] weights; } return vote; } template <int channels, typename Patch, typename Scalar> Image vote_meanshift_n(const NearestNeighborField<Patch, Scalar> *nnf, VoteParams &params) { typedef Vec<Scalar, channels> PixVal; typedef voting::Cluster<Scalar, channels> Cluster; const Texture *source = nnf->source; const Texture *target = nnf->target; Image vote = Image::zeros(source->rows, source->cols, IM_32FC(channels)); // for each pixel of the source #if _OPENMP #pragma omp parallel for collapse(2) #endif for (int y = 0; y < source->rows; ++y) { for (int x = 0; x < source->cols; ++x) { // for each pixel from the corresponding patches int startX = std::max(0, x - Patch::width() + 1); int endX = std::min(x + 1, nnf->width); int startY = std::max(0, y - Patch::width() + 1); int endY = std::min(y + 1, nnf->height); // mean-shift voting on the feature space // generated by the pixels on (startY:endY-1, startX:endX-1) // - use params.meanShiftWindows to discard typedef std::vector<PixVal> PixCluster; // workspace int pw = endX - startX; int ph = endY - startY; int N = pw * ph; std::vector<PixVal> points(N, PixVal()); //< data points int pidx = 0; for (int py = startY; py < endY; ++py) { for (int px = startX; px < endX; ++px, ++pidx) { const Patch &patch = nnf->get(py, px); points[pidx] = target->at<PixVal>(patch.transform(y - py, x - px)); // * filter[by][bx]; } } // mean-shift algorithm std::vector<Cluster> clusters; clusters.reserve(5); voting::meanshift(points, clusters, params.meanShift); // pixel vote using the best cluster int best = 0; for (int i = 1, n = clusters.size(); i < n; ++i) { if (clusters[i].votes > clusters[best].votes) { best = i; } } vote.at<PixVal>(y, x) = clusters[best].mean; switch(params.weightType) { case PixelWeight: 
params.weights[vote.cols * y + x] = float(clusters[best].votes); break; case PixelVariance: params.weights[vote.cols * y + x] = pixelvar( nnf, y, startY, endY, x, startX, endX, vote.at<PixVal>(y, x) ); break; } } } return vote; } template <int K, typename Scalar> class MedianList{ public: typedef float Weight; typedef std::pair<Scalar, Weight> Item; struct ItemOrder { bool operator()(const Item &left, const Item &right) { return left.first < right.first; } }; typedef Vec<Scalar, K> Value; typedef Vec<float, K> WeightVec; MedianList(int N) : count(0), total(0.0f) { for(int k = 0; k < K; ++k) lists[k].resize(N); // allocate space } inline void push(const Value &v, Weight w) { for(int k = 0; k < K; ++k){ lists[k][count] = std::make_pair(v[k], w); } ++count; total += w; } inline void sort() { for(int k = 0; k < K; ++k) std::sort(lists[k].begin(), lists[k].end(), ItemOrder()); } inline Value get() const { const Weight middle = total * 0.5f; // varies with the position (since boundary patches have smaller weights) Value val; // value computation WeightVec weight = WeightVec::zeros(); // weight storage for(int i = 0; i < count; ++i) { // separately for each channel bool more = false; for(int k = 0; k < K; ++k) { const Item &it = lists[k][i]; if(weight[k] < middle){ weight[k] += it.second; // if we went past the center, that's the median if(weight[k] >= middle) val[k] = it.first; else more = true; // else we need to go farther } // else nothing more } if(!more) break; } return val; } private: int count; Weight total; std::vector<Item> lists[K]; }; template <int channels, typename Patch, typename Scalar> Image vote_median_n(const NearestNeighborField<Patch, Scalar> *nnf, VoteParams &params) { typedef Vec<Scalar, channels> PixVal; const Texture *source = nnf->source; const Texture *target = nnf->target; Image vote = Image::zeros(source->rows, source->cols, IM_32FC(channels)); // weights recording if (params.weights != NULL) { std::fill(params.weights, params.weights + (source->cols * source->rows), 0.0f); } // for each pixel of the source #if _OPENMP #pragma omp parallel for collapse(2) #endif for (int y = 0; y < source->rows; ++y) { for (int x = 0; x < source->cols; ++x) { // for each pixel from the corresponding patches int startX = std::max(0, x - Patch::width() + 1); int endX = std::min(x + 1, nnf->width); int startY = std::max(0, y - Patch::width() + 1); int endY = std::min(y + 1, nnf->height); // sort pixel values int pw = endX - startX; int ph = endY - startY; int N = pw * ph; MedianList<channels, Scalar> points(N); //< data points int pidx = 0; for (int py = startY; py < endY; ++py) { for (int px = startX; px < endX; ++px, ++pidx) { const Patch &patch = nnf->get(py, px); int by = y - py; // location in the patch int bx = x - px; points.push(target->at<PixVal>(patch.transform(by, bx)), params.filter[by][bx]); } } // sort data, channel by channel points.sort(); // get the sort-of median vote.at<PixVal>(y, x) = points.get(); switch(params.weightType) { case PixelWeight: params.weights[vote.cols * y + x] = 1.0f; break; case PixelVariance: params.weights[vote.cols * y + x] = pixelvar( nnf, y, startY, endY, x, startX, endX, vote.at<PixVal>(y, x) ); break; } } } return vote; } /** * \brief Vote by using a weighted filter over overlapping patches * that is reweighted as the histogram distribution changes. 
* * \param nnf * the nearest neighbor field to vote with * \param params * the voting parameters * \return the voted picture */ template <int channels, typename Patch, typename Scalar> Image vote_histogram_n(const NearestNeighborField<Patch, Scalar> *nnf, VoteParams &params) { typedef Vec<Scalar, channels> PixVal; const Texture *T = nnf->source; const Texture *E = nnf->target; Image vote = Image::zeros(T->rows, T->cols, IM_32FC(channels)); int numPixels = T->rows * T->cols; float areaT = numPixels, areaE = E->rows * E->cols; const Filter &filter = params.filter; // 1 = compute the histograms typedef voting::Histogram<Patch, Scalar> Hist; const int K = params.histChannels.size(); std::vector<Hist> histT(K); // to be changed std::vector<Hist> histE(K); // DO NOT change for (int i = 0; i < K; ++i) { histT[i] = Hist(T, params.histChannels[i], params.histBins[i], params.histRanges[i]); histE[i] = Hist(E, params.histChannels[i], params.histBins[i], params.histRanges[i]); } // 2 = random traversal std::vector<Point<int> > index; index.resize(numPixels); for (int y = 0, i = 0; y < T->rows; ++y) { for (int x = 0; x < T->cols; ++x, ++i) { index[i] = Point2i(x, y); } } knuth_shuffle(unif01, &index[0], numPixels); // 3 = histogram-weighted voting // for each pixel of the source float ratioTE = areaT / areaE; for (int idx = 0; idx < numPixels; ++idx) { // the index int y = index[idx].y; int x = index[idx].x; // std::cout << "#" << idx << " @(" << y << " / " << x << ")\n"; float weightSum = 0.0f; PixVal &votedPixel = vote.at<PixVal>(y, x); // for each pixel from the corresponding patches int startX = std::max(0, x - Patch::width() + 1); int endX = std::min(x + 1, nnf->width); int startY = std::max(0, y - Patch::width() + 1); int endY = std::min(y + 1, nnf->height); for (int py = startY; py < endY; ++py) { for (int px = startX; px < endX; ++px) { const Patch &patch = nnf->get(py, px); int by = y - py; //< pixelInPatch = pos - patchPos int bx = x - px; PixVal value = E->at<PixVal>(patch.transform(by, bx)); // reweighting using the histogram // w(p) = w * (1 + \sum_k a_k [hb_trg - hb_src]+) / (1 + \sum_k b_k [hb_src - hb_trg]+) float dA = 1.0f, dB = 1.0f; for (int k = 0; k < K; ++k) { int ch = params.histChannels[k]; int hbT = histT[k].count(value[ch]); int hbE = histE[k].count(value[ch]); float da; if (params.histNormalize) { da = float(hbE) * ratioTE - float(hbT); } else { da = hbE - hbT; } float db = -da; if (params.histBoosts.size() > 0 && da > 0) dA += params.histBoosts[k] * da; if (db > 0) dB += params.histWeights[k] * db; } float w = filter[by][bx] * dA / dB; votedPixel += value * w; weightSum += w; } } if (weightSum > 1e-8) { votedPixel *= 1.0 / weightSum; } else { std::cerr << "Dangerously low weight @" << y << "/" << x << " = " << weightSum << "\n"; } // binary channels for(int i = 0; i < params.binaryChannels.size(); ++i) { Scalar &v = votedPixel[params.binaryChannels[i]]; if(v >= 50.0){ v = 100.0; } else { v = 0.0; } } // update the source histograms const Scalar *prevPixel = T->ptr<Scalar>(y, x); for (int k = 0; k < K; ++k) { Hist &h = histT[k]; int ch = params.histChannels[k]; // previous value float v = prevPixel[ch]; h.count(v) -= 1; // new value v = votedPixel[ch]; h.count(v) += 1; } // store in output weight map if there is one switch(params.weightType) { case PixelWeight: params.weights[vote.cols * y + x] = weightSum; break; case PixelVariance: params.weights[vote.cols * y + x] = pixelvar( nnf, y, startY, endY, x, startX, endX, votedPixel ); break; } } return vote; } /** * \brief Vote 
using a mask as weight for the pixels * * \param nnf * the nearest neighbor field to vote with * \param params * the voting parameters * \return the voted picture */ template <int channels, typename Patch, typename Scalar> Image vote_weighted_n(const NearestNeighborField<Patch, Scalar> *nnf, VoteParams &params) { typedef Vec<Scalar, channels> PixVal; const Texture *source = nnf->source; const Texture *target = nnf->target; Image vote = Image::zeros(source->rows, source->cols, IM_32FC(channels)); const Mask &mask = params.weightMask; const float base = params.weightBase; const int midP = Patch::width() / 2; // for each pixel of the source #if _OPENMP #pragma omp parallel for collapse(2) #endif for (int y = 0; y < source->rows; ++y) { for (int x = 0; x < source->cols; ++x) { PixVal &votedPixel = vote.at<PixVal>(y, x); float wSum = 0.0f; // pixel distance float pxDist = mask.at<float>( std::min(std::max(y - midP, 0), mask.rows - 1), std::min(std::max(x - midP, 0), mask.cols - 1) ); // for each pixel from the corresponding patches int startX = std::max(0, x - Patch::width() + 1); int endX = std::min(x + 1, nnf->width); int startY = std::max(0, y - Patch::width() + 1); int endY = std::min(y + 1, nnf->height); for (int py = startY; py < endY; ++py) { for (int px = startX; px < endX; ++px) { const Patch &patch = nnf->get(py, px); // /!\ Convolution: the filter is reversed in time // => at pos (x,y), the filter is at (0, 0) // => at pos (x-dx, y-dy), the filter is at (dx, dy) // ++ we assume that only patches on the left / top have a contribution int by = y - py; //< pixelInPatch = pos - patchPos int bx = x - px; PixVal value = target->at<PixVal>(patch.transform(by, bx)); float w = std::pow(base, -(mask.at<float>(py, px) - pxDist)); votedPixel += value * w; wSum += w; } } if (wSum > 1e-8) { votedPixel *= 1.0 / wSum; } else { #if _OPENMP #pragma omp critical #endif { std::cerr << "w0 (" << wSum << ") @" << y << "/" << x << ", from: " << startY << "/" << startX; std::cerr << " to " << endY << "/" << endX << "\n"; for (int py = startY; py < endY; ++py) { for (int px = startX; px < endX; ++px) { float w = std::pow(2.0f, -(mask.at<float>(py, px) - pxDist)); std::cerr << "(" << py << ", " << px << ") -> " << mask.at<float>(py, px) << " -> " << w << ", "; } } std::cerr << "\n\n"; } } // binary channels for(int i = 0; i < params.binaryChannels.size(); ++i) { Scalar &v = votedPixel[params.binaryChannels[i]]; if(v >= 50.0){ v = 100.0; } else { v = 0.0; } } // store in output weight map if there is one switch(params.weightType) { case PixelWeight: params.weights[vote.cols * y + x] = wSum; break; case PixelVariance: params.weights[vote.cols * y + x] = pixelvar( nnf, y, startY, endY, x, startX, endX, votedPixel ); break; } } } return vote; } /** * \brief Vote using a bidirectional similarity measure * * \param nnfs * array containing [0]:the T to S nnf, [1]: the S to T nnf * \param params * the voting parameters including the S to T nnf * \return the voted picture */ template <int channels, typename Patch, typename Scalar> Image vote_bidir_sim_n(const NearestNeighborField<Patch, Scalar> *nnfs, VoteParams &params) { typedef Vec<Scalar, channels> PixVal; typedef NearestNeighborField<Patch, Scalar> NNF; typedef typename NNF::SourcePatch SourcePatch; typedef typename NNF::TargetPatch TargetPatch; const NNF &dirNNF = nnfs[0]; // direct T to S nnf const NNF &revNNF = nnfs[1]; // reverse S to T nnf const Texture *T = dirNNF.source; // T in bidir sim const Texture *S = dirNNF.target; // S in bidir sim Image vote = 
Image::zeros(T->rows, T->cols, IM_32FC(channels)); const Filter &filter = params.filter; float *weights; // set weights to 0 to start if (params.weights != NULL) { weights = params.weights; std::fill(weights, weights + (T->height * T->width), 0.0f); } else { weights = new float[T->height * T->width](); // init to 0 } // group weights float dirWeight, revWeight; { double regularizer = std::max(dirNNF.size, revNNF.size); double dw = (1.0 - params.bidirSimWeight) * regularizer / dirNNF.size; double rw = params.bidirSimWeight * regularizer / revNNF.size; dirWeight = float(dw); revWeight = float(rw); std::cout << "dw: " << dirWeight << ", rw: " << revWeight << "\n"; } // process the reverse nnf first for (int y = 0; y < revNNF.height; ++y) { for(int x = 0; x < revNNF.width; ++x) { SourcePatch p(y, x); const TargetPatch &q = revNNF.get(y, x); // for each pixel from the patch p in S for (typename SourcePatch::IndexIterator it = p.begin(); it; ++it) { typename SourcePatch::Index i = *it; const Point2i lp(p.transform(i)); PixVal pix = S->at<PixVal>(lp); // transformation in S space // target in T const Point2i lq(q * i); // q.transform(i) const Point2i delta = lp - Point2i(x, y); // (x,y) is at the top-left of the patch float w = filter[delta.y][delta.x] * revWeight; vote.at<PixVal>(lq.y, lq.x) += pix * w; // store weight (count for Ns when filter = 1) weights[vote.cols * lq.y + lq.x] += w; } } } // process the direct nnf finally #if _OPENMP #pragma omp parallel for collapse(2) #endif for (int y = 0; y < T->rows; ++y) { for (int x = 0; x < T->cols; ++x) { PixVal &votedPixel = vote.at<PixVal>(y, x); float wT = 0.0f; // for each pixel from the corresponding patches int startX = std::max(0, x - Patch::width() + 1); int endX = std::min(x + 1, dirNNF.width); int startY = std::max(0, y - Patch::width() + 1); int endY = std::min(y + 1, dirNNF.height); for (int py = startY; py < endY; ++py) { for (int px = startX; px < endX; ++px) { const TargetPatch &patch = dirNNF.get(py, px); // /!\ Convolution: the filter is reversed in time // => at pos (x,y), the filter is at (0, 0) // => at pos (x-dx, y-dy), the filter is at (dx, dy) // ++ we assume that only patches on the left / top have a contribution int by = y - py; //< pixelInPatch = pos - patchPos int bx = x - px; PixVal value = S->at<PixVal>(patch.transform(by, bx)); float w = filter[by][bx] * dirWeight; votedPixel += value * w; wT += w; } } float wS = weights[T->cols * y + x]; float wSum = wT + wS; weights[T->cols * y + x] = wSum; if (wSum > 1e-8) { votedPixel *= 1.0 / wSum; } else { #if _OPENMP #pragma omp critical #endif { std::cerr << "w0 @" << y << "/" << x << ", from: " << startY << "/" << startX; std::cerr << " to " << endY << "/" << endX << "\n"; } } // binary channels for(int i = 0; i < params.binaryChannels.size(); ++i) { Scalar &v = votedPixel[params.binaryChannels[i]]; if(v >= 50.0){ v = 100.0; } else { v = 0.0; } } } } // free weights if only used locally if(params.weights == NULL) { delete[] weights; } return vote; } /** * \brief Vote using a bidirectional similarity measure and histograms * * \param nnfs * array containing [0]:the T to S nnf, [1]: the S to T nnf * \param params * the voting parameters including the S to T nnf * \return the voted picture */ template <int channels, typename Patch, typename Scalar> Image vote_bidir_sim_histogram_n(const NearestNeighborField<Patch, Scalar> *nnfs, VoteParams &params) { typedef Vec<Scalar, channels> PixVal; typedef NearestNeighborField<Patch, Scalar> NNF; typedef typename NNF::SourcePatch 
SourcePatch; typedef typename NNF::TargetPatch TargetPatch; const NNF &dirNNF = nnfs[0]; // direct T to S nnf const NNF &revNNF = nnfs[1]; // reverse S to T nnf const Texture *T = dirNNF.source; // T in bidir sim const Texture *E = dirNNF.target; // S in bidir sim Image vote = Image::zeros(T->rows, T->cols, IM_32FC(channels)); // const Filter &filter = params.filter; float *weights; // set weights to 0 to start if (params.weights != NULL) { weights = params.weights; std::fill(weights, weights + (T->height * T->width), 0.0f); } else { weights = new float[T->height * T->width](); // init to 0 } // 1 = constructor accumulator from revNNF std::vector<Point2i> *accum = new std::vector<Point2i>[T->height * T->width](); // init vectors if(accum == NULL){ mexErrMsgIdAndTxt("MATLAB:vote:new", "Failed allocation of accum in BidirSimVoting with Histograms"); } // using the reverse nnf first for (int y = 0; y < revNNF.height; ++y) { for(int x = 0; x < revNNF.width; ++x) { SourcePatch p(y, x); const TargetPatch &q = revNNF.get(y, x); // for each pixel from the patch p in S for (typename SourcePatch::IndexIterator it = p.begin(); it; ++it) { typename SourcePatch::Index i = *it; // pixel location in E const Point2i lp(p.transform(i)); // pixel location in T const Point2i lq(q.transform(i)); accum[vote.cols * lq.y + lq.x].push_back(lp); } } } // group weights float dirWeight, revWeight; { double regularizer = std::max(dirNNF.size, revNNF.size); double dw = (1.0 - params.bidirSimWeight) * regularizer / dirNNF.size; double rw = params.bidirSimWeight * regularizer / revNNF.size; dirWeight = float(dw); revWeight = float(rw); std::cout << "dw: " << dirWeight << ", rw: " << revWeight << "\n"; } // histogram voting now, using both the direct NNF and the accumulator int numPixels = T->rows * T->cols; float areaT = numPixels, areaE = E->rows * E->cols; float ratioTE = areaT / areaE; // 2 = compute the histograms typedef voting::Histogram<Patch, Scalar> Hist; const int K = params.histChannels.size(); std::vector<Hist> histT(K); // to be changed std::vector<Hist> histE(K); // DO NOT change for (int i = 0; i < K; ++i) { histT[i] = Hist(T, params.histChannels[i], params.histBins[i], params.histRanges[i]); histE[i] = Hist(E, params.histChannels[i], params.histBins[i], params.histRanges[i]); } // 2 = random traversal std::vector<Point2i> index; index.resize(numPixels); for (int y = 0, i = 0; y < T->rows; ++y) { for (int x = 0; x < T->cols; ++x, ++i) { index[i] = Point2i(x, y); } } knuth_shuffle(unif01, &index[0], numPixels); // 3 = histogram-weighted voting // for each pixel of the source // + the accumulated pixels with revNNF for (int idx = 0; idx < numPixels; ++idx) { // the index int y = index[idx].y; int x = index[idx].x; float weightSum = 0.0f; PixVal &votedPixel = vote.at<PixVal>(y, x); // for each pixel from the corresponding patches int startX = std::max(0, x - Patch::width() + 1); int endX = std::min(x + 1, dirNNF.width); int startY = std::max(0, y - Patch::width() + 1); int endY = std::min(y + 1, dirNNF.height); for (int py = startY; py < endY; ++py) { for (int px = startX; px < endX; ++px) { const Patch &patch = dirNNF.get(py, px); int by = y - py; //< pixelInPatch = pos - patchPos int bx = x - px; PixVal value = E->at<PixVal>(patch.transform(by, bx)); // reweighting using the histogram // w(p) = w * (1 + \sum_k a_k [hb_trg - hb_src]+) / (1 + \sum_k b_k [hb_src - hb_trg]+) float dH = 1.0f; for (int k = 0; k < K; ++k) { int ch = params.histChannels[k]; int hbT = histT[k].count(value[ch]); int hbE = 
histE[k].count(value[ch]); float dh; if (params.histNormalize) { dh = float(hbT) - float(hbE) * ratioTE; // relatively to T } else { dh = hbT - hbE; } if (dh > 0) dH += params.histWeights[k] * dh; } // filter[by][bx] / dH float w = dirWeight / dH; // Note: filter is not used in E! votedPixel += value * w; weightSum += w; } } // for each pixel from the accumulated map const std::vector<Point2i> &pixelAccum = accum[vote.cols * y + x]; for (int i = 0, n = pixelAccum.size(); i < n; ++i) { const Point2i &p = pixelAccum[i]; PixVal value = E->at<PixVal>(p); // reweighting using the histogram // w(p) = w * (1 + \sum_k a_k [hb_trg - hb_src]+) / (1 + \sum_k b_k [hb_src - hb_trg]+) float dH = 1.0f; for (int k = 0; k < K; ++k) { int ch = params.histChannels[k]; int hbT = histT[k].count(value[ch]); int hbE = histE[k].count(value[ch]); float dh; if (params.histNormalize) { dh = float(hbT) - float(hbE) * ratioTE; // relatively to T } else { dh = hbE - hbT; } if (dh > 0) dH += params.histWeights[k] * dh; } float w = revWeight / dH; votedPixel += value * w; weightSum += w; } if (weightSum > 1e-8) { votedPixel *= 1.0 / weightSum; } else { std::cerr << "Dangerously low weight @" << y << "/" << x << " = " << weightSum << "\n"; } // binary channels for(int i = 0; i < params.binaryChannels.size(); ++i) { Scalar &v = votedPixel[params.binaryChannels[i]]; if(v >= 50.0){ v = 100.0; } else { v = 0.0; } } // update the source histograms const Scalar *prevPixel = T->ptr<Scalar>(y, x); for (int k = 0; k < K; ++k) { float v; Hist &h = histT[k]; int ch = params.histChannels[k]; // previous value v = prevPixel[ch]; h.count(v) -= 1; // new value v = votedPixel[ch]; h.count(v) += 1; } // store in output weight map if there is one switch(params.weightType) { case PixelWeight: params.weights[vote.cols * y + x] = weightSum; break; case PixelVariance: params.weights[vote.cols * y + x] = pixelvar( &dirNNF, y, startY, endY, x, startX, endX, votedPixel ); break; } } // free memory delete[] accum; if(params.weights == NULL) { delete[] weights; } // done! 
return vote; } /** * \brief Feature-specific voting and histogram voting for the rest * * \param nnf * the nearest neighbor field to vote with * \param params * the voting parameters * \return the voted picture */ template <int channels, typename Patch, typename Scalar> Image vote_feature_histogram_n(const NearestNeighborField<Patch, Scalar> *nnf, VoteParams &params) { typedef Vec<Scalar, channels> PixVal; const Texture *T = nnf->source; const Texture *E = nnf->target; Image vote = Image::zeros(T->rows, T->cols, IM_32FC(channels)); int numPixels = T->rows * T->cols; float areaT = numPixels, areaE = E->rows * E->cols; const Filter &filter = params.filter; // 1 = compute the histograms typedef voting::Histogram<Patch, Scalar> Hist; const int K = params.histChannels.size(); std::vector<Hist> histT(K); // to be changed std::vector<Hist> histE(K); // DO NOT change for (int i = 0; i < K; ++i) { histT[i] = Hist(T, params.histChannels[i], params.histBins[i], params.histRanges[i]); histE[i] = Hist(E, params.histChannels[i], params.histBins[i], params.histRanges[i]); } // the feature channels std::vector<unsigned int> features; for(unsigned int c = 0; c < channels; ++c) { bool isFeature = true; for(unsigned int k = 0; k < K; ++k) { if(c == params.histChannels[k]){ std::cout << "H" << c << ", "; isFeature = false; break; } } if(isFeature) { std::cout << "F" << c << ", "; features.push_back(c); } } std::cout << "\n"; // 2 = random traversal std::vector<Point<int> > index; index.resize(numPixels); for (int y = 0, i = 0; y < T->rows; ++y) { for (int x = 0; x < T->cols; ++x, ++i) { index[i] = Point2i(x, y); } } knuth_shuffle(unif01, &index[0], numPixels); // 3 = histogram-weighted voting // for each pixel of the source float ratioTE = areaT / areaE; for (int idx = 0; idx < numPixels; ++idx) { // the index int y = index[idx].y; int x = index[idx].x; // std::cout << "#" << idx << " @(" << y << " / " << x << ")\n"; float weightSum = 0.0f; PixVal &votedPixel = vote.at<PixVal>(y, x); for(int f = 0; f < features.size(); ++f) { votedPixel[features[f]] = FLT_MAX; } // for each pixel from the corresponding patches int startX = std::max(0, x - Patch::width() + 1); int endX = std::min(x + 1, nnf->width); int startY = std::max(0, y - Patch::width() + 1); int endY = std::min(y + 1, nnf->height); for (int py = startY; py < endY; ++py) { for (int px = startX; px < endX; ++px) { const Patch &patch = nnf->get(py, px); int by = y - py; //< pixelInPatch = pos - patchPos int bx = x - px; PixVal value = E->at<PixVal>(patch.transform(by, bx)); // extract feature data PixVal F; for(int f = 0; f < features.size(); ++f){ int c = features[f]; F[c] = value[c]; value[c] = Scalar(0.0); } // reweighting using the histogram // w(p) = w * (1 + \sum_k a_k [hb_trg - hb_src]+) / (1 + \sum_k b_k [hb_src - hb_trg]+) float dA = 1.0f, dB = 1.0f; for (int k = 0; k < K; ++k) { int ch = params.histChannels[k]; int hbT = histT[k].count(value[ch]); int hbE = histE[k].count(value[ch]); float da; if (params.histNormalize) { da = float(hbE) * ratioTE - float(hbT); } else { da = hbE - hbT; } float db = -da; if (params.histBoosts.size() > 0 && da > 0) dA += params.histBoosts[k] * da; if (db > 0) dB += params.histWeights[k] * db; } float w = filter[by][bx] * dA / dB; votedPixel += value * w; weightSum += w; // feature specific values for(int f = 0; f < features.size(); ++f) { int c = features[f]; votedPixel[c] = params.featureOp(votedPixel[c], F[c]); } } } float invWeight = 1.0f / weightSum; for(int i = 0; i < K; ++i) votedPixel[params.histChannels[i]] 
*= invWeight; // c cannot intersect with k! if (weightSum <= 1e-8) { std::cerr << "Dangerously low weight @" << y << "/" << x << " = " << weightSum << "\n"; } // binary channels for(int i = 0; i < params.binaryChannels.size(); ++i) { Scalar &v = votedPixel[params.binaryChannels[i]]; if(v >= 50.0){ v = 100.0; } else { v = 0.0; } } // update the source histograms const Scalar *prevPixel = T->ptr<Scalar>(y, x); for (int k = 0; k < K; ++k) { Hist &h = histT[k]; int ch = params.histChannels[k]; // previous value float v = prevPixel[ch]; h.count(v) -= 1; // new value v = votedPixel[ch]; h.count(v) += 1; } // store in output weight map if there is one switch(params.weightType) { case PixelWeight: params.weights[vote.cols * y + x] = weightSum; break; case PixelVariance: params.weights[vote.cols * y + x] = pixelvar( nnf, y, startY, endY, x, startX, endX, votedPixel ); break; } } return vote; } // ######################################################################### // ##### Multi-channels voting ############################################# // ######################################################################### template <typename Patch, typename Scalar, int channels> struct MultiChannelVote { inline Image operator()(const NearestNeighborField<Patch, Scalar> *nnf, VoteParams &params) const { if (nnf->source->channels() == channels) { switch (params.method) { case BiDirSimVoting: return vote_bidir_sim_n<channels>(nnf, params); case BiDirSimWithHistogramVoting: return vote_bidir_sim_histogram_n<channels>(nnf, params); case DefaultVoting: return vote_filter_n<channels>(nnf, params); case DefaultGBAVoting: return vote_filter_gba_n<channels>(nnf, params); case DirectVoting: return vote_direct_n<channels>(nnf, params); case FeatureWithHistogramVoting: return vote_feature_histogram_n<channels>(nnf, params); case HistogramVoting: return vote_histogram_n<channels>(nnf, params); case MeanShiftVoting: return vote_meanshift_n<channels>(nnf, params); case MedianVoting: return vote_median_n<channels>(nnf, params); case TiledVoting: return vote_filter_tiled_n<channels>(nnf, params); case WeightedVoting: return vote_weighted_n<channels>(nnf, params); default: std::cerr << "Invalid voting method: " << params.method << " !"; return Image(); } } else { MultiChannelVote<Patch, Scalar, channels + 1 > vop; return vop(nnf, params); } } }; #ifndef MAX_SUPPORTED_CHANNELS #define MAX_SUPPORTED_CHANNELS 12 #endif template <typename Patch, typename Scalar> struct MultiChannelVote<Patch, Scalar, MAX_SUPPORTED_CHANNELS + 1 > { inline Image operator()(const NearestNeighborField<Patch, Scalar> *nnf, VoteParams &params) const { std::cerr << "Warning: voting only supported up to " << MAX_SUPPORTED_CHANNELS << " channels!\n"; return Image(); } }; template <typename Patch, typename Scalar> Image vote(const NearestNeighborField<Patch, Scalar> *nnf, VoteParams &params) { MultiChannelVote<Patch, Scalar, 1> vop; return vop(nnf, params); } } #endif /* VOTE_H */
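// Usage sketch (illustration only, not part of the library): a hypothetical
// caller-side helper showing how a voting method is selected on VoteParams and
// how the channel-dispatching vote() declared above is invoked. The concrete
// Patch/Scalar types and NNF construction come from patch.h / nnf.h, so this
// is written generically against the declarations in this header.
namespace pm {

    template <typename Patch, typename Scalar>
    Image median_vote_example(const NearestNeighborField<Patch, Scalar> *nnf) {
        VoteParams params;            // library defaults, no weight output
        params.method = MedianVoting; // per-pixel, per-channel weighted median
        // vote() walks MultiChannelVote to match the source channel count,
        // then switches on params.method to pick the kernel.
        return vote(nnf, params);
    }
}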
GB_binop__eq_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_int32) // A.*B function (eWiseMult): GB (_AemultB_08__eq_int32) // A.*B function (eWiseMult): GB (_AemultB_02__eq_int32) // A.*B function (eWiseMult): GB (_AemultB_04__eq_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_int32) // A*D function (colscale): GB (_AxD__eq_int32) // D*A function (rowscale): GB (_DxB__eq_int32) // C+=B function (dense accum): GB (_Cdense_accumB__eq_int32) // C+=b function (dense accum): GB (_Cdense_accumb__eq_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_int32) // C=scalar+B GB (_bind1st__eq_int32) // C=scalar+B' GB (_bind1st_tran__eq_int32) // C=A+scalar GB (_bind2nd__eq_int32) // C=A'+scalar GB (_bind2nd_tran__eq_int32) // C type: bool // A type: int32_t // A pattern? 0 // B type: int32_t // B pattern? 0 // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_INT32 || GxB_NO_EQ_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
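// EQ is not one of those operators, so this dense ewise3-accum kernel is
// compiled out for this file; the table at the top lists it as GB ((none)).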
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__eq_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_int32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int32_t alpha_scalar ; int32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int32_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__eq_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
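// (EQ is commutative: x == y and y == x agree, so the unflipped template is used.)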
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__eq_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
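//------------------------------------------------------------------------------
// scalar semantics sketch (illustrative, not part of the generated kernels)
//------------------------------------------------------------------------------

// Every kernel above specializes the same scalar operator: z = (x == y) with
// int32_t inputs and a bool result. The sketch below is a hand-written view of
// it; demo_eq_int32 and demo_bind2nd_dense are hypothetical names, and the
// loop mirrors GB (_bind2nd__eq_int32) for a fully dense A (Ab == NULL), minus
// the OpenMP parallelization and the GBB/GBX bitmap handling.

#include <stdint.h>
#include <stdbool.h>

static inline bool demo_eq_int32 (int32_t x, int32_t y)
{
    return (x == y) ;
}

static void demo_bind2nd_dense (bool *Cx, const int32_t *Ax, int32_t y,
    int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // cij = op (aij, y)
        Cx [p] = demo_eq_int32 (Ax [p], y) ;
    }
}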
convolution_sgemm_pack8to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void im2col_sgemm_pack8to4_int8_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt) { // Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; // permute Mat tmp; #if __aarch64__ #if __ARM_FEATURE_DOTPROD if (size >= 16) tmp.create(16 * maxk, inch, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator); else tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator); #else // __ARM_FEATURE_DOTPROD if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator); else tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator); #endif // __ARM_FEATURE_DOTPROD #else // __aarch64__ if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator); else tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator); #endif // __aarch64__ { #if __aarch64__ #if __ARM_FEATURE_DOTPROD int nn_size = size >> 4; int remain_size_start = 0; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 16; signed char* tmpptr = tmp.channel(i / 16); for (int q = 0; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { // split pack8 to pack4 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld2 {v0.4s, v1.4s}, [%0], #32 \n" "ld2 {v2.4s, v3.4s}, [%0], #32 \n" "ld2 {v4.4s, v5.4s}, [%0], #32 \n" "ld2 {v6.4s, v7.4s}, [%0] \n" "sub %0, %0, #96 \n" "st1 {v0.16b}, [%1], #16 \n" "st1 {v2.16b}, [%1], #16 \n" "st1 {v4.16b}, [%1], #16 \n" "st1 {v6.16b}, [%1], #16 \n" "st1 {v1.16b}, [%1], #16 \n" "st1 {v3.16b}, [%1], #16 \n" "st1 {v5.16b}, [%1], #16 \n" "st1 {v7.16b}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); img0 += size * 8; } } } remain_size_start += nn_size << 4; nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); for (int q = 0; q < inch; q++) { const 
signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld2 {v0.4s, v1.4s}, [%0], #32 \n" "ld2 {v2.4s, v3.4s}, [%0] \n" "sub %0, %0, #32 \n" "st1 {v0.16b}, [%1], #16 \n" "st1 {v2.16b}, [%1], #16 \n" "st1 {v1.16b}, [%1], #16 \n" "st1 {v3.16b}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); img0 += size * 8; } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #else // __ARM_FEATURE_DOTPROD int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 2; #endif // __ARM_FEATURE_DOTPROD #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else signed char* tmpptr = tmp.channel(i / 4); #endif for (int q = 0; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { #if __ARM_FEATURE_DOTPROD asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld2 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1"); #else asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.16b, v1.16b}, [%0] \n" "st1 {v0.16b, v1.16b}, [%1], #32 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1"); #endif // __ARM_FEATURE_DOTPROD img0 += size * 8; } } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #else int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 1; #endif #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; #if __aarch64__ #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else signed char* tmpptr = tmp.channel(i / 2); #endif for (int q = 0; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld2 {v0.2s, v1.2s}, [%0] \n" "st1 {v0.2s, v1.2s}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1"); #else asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.16b}, [%0] \n" "st1 {v0.16b}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); #endif // __ARM_FEATURE_DOTPROD #else asm volatile( "pld [%0, #128] \n" "vld1.s8 {d0-d1}, [%0 :64] \n" "vst1.s8 {d0-d1}, [%1 :64]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0"); #endif img0 += size * 8; } } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif for (int q = 0; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.8b}, [%0] \n" "st1 {v0.8b}, [%1], #8 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); #else asm volatile( "pld [%0, #64] \n" "vld1.s8 {d0}, [%0 :64] \n" "vst1.s8 {d0}, [%1 :64]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "d0"); #endif img0 += size * 8; } } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { int* outptr0 = top_blob.channel(p); int i = 0; #if __aarch64__ #if __ARM_FEATURE_DOTPROD for (; i + 15 < size; i += 16) { const signed char* tmpptr = tmp.channel(i / 16); const signed char* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v24.16b}, [%3], #16 \n" // _w0123_l "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "ld1 {v16.16b}, [%2], #16 \n" // _val0123_l "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "0: \n" "ld1 {v17.16b}, [%2], #16 \n" // _val4567_l "sdot v0.4s, v24.16b, v16.4b[0] \n" "sdot v1.4s, v24.16b, v16.4b[1] \n" "sdot v2.4s, v24.16b, v16.4b[2] \n" "sdot v3.4s, v24.16b, v16.4b[3] \n" "ld1 {v18.16b}, [%2], #16 \n" // _val891011_l "sdot v4.4s, v24.16b, v17.4b[0] \n" "sdot v5.4s, v24.16b, v17.4b[1] \n" "sdot v6.4s, v24.16b, v17.4b[2] \n" "sdot v7.4s, v24.16b, v17.4b[3] \n" "ld1 {v19.16b}, [%2], #16 \n" // _val12131415_l "sdot v8.4s, v24.16b, v18.4b[0] \n" "sdot v9.4s, v24.16b, v18.4b[1] \n" "ld1 {v25.16b}, [%3], #16 \n" // _w0123_h "sdot v10.4s, v24.16b, v18.4b[2] \n" "sdot v11.4s, v24.16b, v18.4b[3] \n" "ld1 {v20.16b}, [%2], #16 \n" // _val0123_h "sdot v12.4s, v24.16b, v19.4b[0] \n" "sdot v13.4s, v24.16b, v19.4b[1] \n" "sdot v14.4s, v24.16b, v19.4b[2] \n" "sdot v15.4s, v24.16b, v19.4b[3] \n" "ld1 {v21.16b}, [%2], #16 \n" // _val4567_h "sdot v0.4s, v25.16b, v20.4b[0] \n" "sdot v1.4s, v25.16b, v20.4b[1] \n" "sdot v2.4s, v25.16b, v20.4b[2] \n" "sdot v3.4s, v25.16b, v20.4b[3] \n" "ld1 {v22.16b}, [%2], #16 \n" // _val891011_h "sdot v4.4s, v25.16b, v21.4b[0] \n" "sdot v5.4s, v25.16b, v21.4b[1] \n" "sdot v6.4s, v25.16b, v21.4b[2] \n" "sdot v7.4s, v25.16b, v21.4b[3] \n" "ld1 {v23.16b}, [%2], #16 \n" // _val12131415_h "sdot v8.4s, v25.16b, v22.4b[0] \n" "sdot v9.4s, v25.16b, v22.4b[1] \n" "ld1 {v24.16b}, [%3], #16 \n" // _w0123_l "sdot v10.4s, v25.16b, v22.4b[2] \n" "sdot v11.4s, v25.16b, v22.4b[3] \n" "ld1 {v16.16b}, [%2], #16 \n" // _val0123_l "sdot v12.4s, v25.16b, 
v23.4b[0] \n" "sdot v13.4s, v25.16b, v23.4b[1] \n" "subs %w1, %w1, #1 \n" "sdot v14.4s, v25.16b, v23.4b[2] \n" "sdot v15.4s, v25.16b, v23.4b[3] \n" "bne 0b \n" "sub %2, %2, #16 \n" "sub %3, %3, #16 \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%0], #64 \n" : "=r"(outptr0), "=r"(nn), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(nn), "2"(tmpptr), "3"(kptr0) : "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < size; i += 8) { const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); const signed char* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val4567_l = vld1q_s8(tmpptr + 16); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123_l, _val4567_l, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123_l, _val4567_l, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123_l, _val4567_l, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123_l, _val4567_l, 3); int8x16_t _val0123_h = vld1q_s8(tmpptr + 32); int8x16_t _val4567_h = vld1q_s8(tmpptr + 48); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123_h, _val4567_h, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123_h, _val4567_h, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123_h, _val4567_h, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123_h, _val4567_h, 3); tmpptr += 64; kptr0 += 32; } vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); vst1q_s32(outptr0 + 8, _sum2); vst1q_s32(outptr0 + 12, _sum3); vst1q_s32(outptr0 + 16, _sum4); vst1q_s32(outptr0 + 20, _sum5); vst1q_s32(outptr0 + 24, _sum6); vst1q_s32(outptr0 + 28, _sum7); outptr0 += 32; } #endif for (; i + 3 < size; i += 4) { #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else const signed char* tmpptr = tmp.channel(i / 4); #endif const signed char* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3); int8x16_t _val0123_h = vld1q_s8(tmpptr + 16); int8x16_t _w0123_h 
= vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3); tmpptr += 32; kptr0 += 32; } vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); vst1q_s32(outptr0 + 8, _sum2); vst1q_s32(outptr0 + 12, _sum3); outptr0 += 16; #else // __ARM_FEATURE_DOTPROD asm volatile( "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "prfm pldl1keep, [%2, #128] \n" "prfm pldl1keep, [%3, #256] \n" "lsr w4, %w1, #1 \n" // w4 = nn >> 1 "cmp w4, #0 \n" "beq 1f \n" "prfm pldl1keep, [%3, #512] \n" "add x5, %2, #16 \n" "prfm pldl1keep, [x5, #128] \n" "ld1 {v16.16b}, [%2] \n" // val L H "ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%3], #64 \n" "add %2, %2, #32 \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "ld1 {v18.16b}, [%2] \n" "add %2, %2, #32 \n" "0: \n" "smull v24.8h, v16.8b, v20.8b \n" "prfm pldl1keep, [%3, #256] \n" "smull2 v25.8h, v17.16b, v20.16b \n" "prfm pldl1keep, [%3, #512] \n" "smull v26.8h, v16.8b, v21.8b \n" "subs w4, w4, #1 \n" "smull2 v27.8h, v17.16b, v21.16b \n" "ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L "smlal v24.8h, v18.8b, v22.8b \n" "smlal2 v25.8h, v19.16b, v22.16b \n" "smlal v26.8h, v18.8b, v23.8b \n" "smlal2 v27.8h, v19.16b, v23.16b \n" "smull2 v29.8h, v16.16b, v20.16b \n" "sadalp v0.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v1.4s, v25.8h \n" "smull2 v31.8h, v16.16b, v21.16b \n" "ld1 {v16.16b}, [x5] \n" // val L H "smull v30.8h, v17.8b, v21.8b \n" "add x5, x5, #32 \n" "smlal2 v29.8h, v18.16b, v22.16b \n" "sadalp v2.4s, v26.8h \n" "smlal v28.8h, v19.8b, v22.8b \n" "sadalp v3.4s, v27.8h \n" "smlal2 v31.8h, v18.16b, v23.16b \n" "ld1 {v18.16b}, [x5] \n" "smlal v30.8h, v19.8b, v23.8b \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "smull v24.8h, v16.8b, v20.8b \n" "add x5, x5, #32 \n" "smull2 v25.8h, v17.16b, v20.16b \n" "prfm pldl1keep, [x5, #128] \n" "smull v26.8h, v16.8b, v21.8b \n" "prfm pldl1keep, [x5, #384] \n" "smull2 v27.8h, v17.16b, v21.16b \n" "ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L "smlal v24.8h, v18.8b, v22.8b \n" "sadalp v5.4s, v29.8h \n" "smlal2 v25.8h, v19.16b, v22.16b \n" "sadalp v4.4s, v28.8h \n" "smlal v26.8h, v18.8b, v23.8b \n" "sadalp v7.4s, v31.8h \n" "smlal2 v27.8h, v19.16b, v23.16b \n" "sadalp v6.4s, v30.8h \n" "smull2 v29.8h, v16.16b, v20.16b \n" "sadalp v8.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v9.4s, v25.8h \n" "smull2 v31.8h, v16.16b, v21.16b \n" "ld1 {v16.16b}, [%2] \n" // val L H "smull v30.8h, v17.8b, v21.8b \n" "add %2, %2, #32 \n" "smlal2 v29.8h, v18.16b, v22.16b \n" "sadalp v10.4s, v26.8h \n" "smlal v28.8h, v19.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smlal2 v31.8h, v18.16b, v23.16b \n" "ld1 {v18.16b}, [%2] \n" "smlal v30.8h, v19.8b, v23.8b \n" "add %2, %2, #32 \n" "ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%3], #64 \n" "sadalp v13.4s, v29.8h \n" "prfm pldl1keep, [%2, #128] \n" "sadalp v12.4s, v28.8h \n" "prfm pldl1keep, [%2, #384] \n" 
"sadalp v15.4s, v31.8h \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "sadalp v14.4s, v30.8h \n" "bne 0b \n" "sub %2, %2, #64 \n" "sub %3, %3, #64 \n" "1: \n" "and w4, %w1, #1 \n" // w4 = remain = nn & 1 "cmp w4, #0 \n" // w4 > 0 "beq 2f \n" "ld1 {v16.8b, v17.8b}, [%2], #16 \n" "ld1 {v20.8b, v21.8b, v22.8b, v23.8b}, [%3], #32 \n" "smull v24.8h, v16.8b, v20.8b \n" "smull v25.8h, v16.8b, v21.8b \n" "smull v26.8h, v16.8b, v22.8b \n" "ld1 {v18.8b, v19.8b}, [%2], #16 \n" "smull v27.8h, v16.8b, v23.8b \n" "sadalp v0.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v1.4s, v25.8h \n" "smull v29.8h, v17.8b, v21.8b \n" "sadalp v2.4s, v26.8h \n" "smull v30.8h, v17.8b, v22.8b \n" "sadalp v3.4s, v27.8h \n" "smull v31.8h, v17.8b, v23.8b \n" "sadalp v4.4s, v28.8h \n" "smull v24.8h, v18.8b, v20.8b \n" "sadalp v5.4s, v29.8h \n" "smull v25.8h, v18.8b, v21.8b \n" "sadalp v6.4s, v30.8h \n" "smull v26.8h, v18.8b, v22.8b \n" "sadalp v7.4s, v31.8h \n" "smull v27.8h, v18.8b, v23.8b \n" "sadalp v8.4s, v24.8h \n" "smull v28.8h, v19.8b, v20.8b \n" "sadalp v9.4s, v25.8h \n" "smull v29.8h, v19.8b, v21.8b \n" "sadalp v10.4s, v26.8h \n" "smull v30.8h, v19.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smull v31.8h, v19.8b, v23.8b \n" "sadalp v12.4s, v28.8h \n" "sadalp v13.4s, v29.8h \n" "sadalp v14.4s, v30.8h \n" "sadalp v15.4s, v31.8h \n" "2: \n" "addp v0.4s, v0.4s, v1.4s \n" "addp v2.4s, v2.4s, v3.4s \n" "addp v4.4s, v4.4s, v5.4s \n" "addp v6.4s, v6.4s, v7.4s \n" "addp v8.4s, v8.4s, v9.4s \n" "addp v10.4s, v10.4s, v11.4s \n" "addp v12.4s, v12.4s, v13.4s \n" "addp v14.4s, v14.4s, v15.4s \n" "addp v0.4s, v0.4s, v2.4s \n" "addp v1.4s, v4.4s, v6.4s \n" "addp v2.4s, v8.4s, v10.4s \n" "addp v3.4s, v12.4s, v14.4s \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" : "=r"(outptr0), "=r"(nn), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(nn), "2"(tmpptr), "3"(kptr0) : "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #endif // __ARM_FEATURE_DOTPROD } #endif // __aarch64__ for (; i + 1 < size; i += 2) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else const signed char* tmpptr = tmp.channel(i / 2); #endif const signed char* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 #if __aarch64__ #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val01_l_h = vld1q_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val01_l_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val01_l_h, 1); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val01_l_h, 2); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val01_l_h, 3); tmpptr += 16; kptr0 += 32; } vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); outptr0 += 8; #else // __ARM_FEATURE_DOTPROD int32x4_t _sum00 = vdupq_n_s32(0); int32x4_t _sum01 = vdupq_n_s32(0); int32x4_t _sum02 = vdupq_n_s32(0); int32x4_t _sum03 = vdupq_n_s32(0); int32x4_t _sum10 = vdupq_n_s32(0); int32x4_t _sum11 = vdupq_n_s32(0); int32x4_t _sum12 = vdupq_n_s32(0); int32x4_t _sum13 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = 
vld1q_s8(tmpptr + 16); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w01)); int16x8_t _wv02 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w23)); int16x8_t _wv03 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w23)); int16x8_t _wv10 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w01)); int16x8_t _wv12 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w23)); int16x8_t _wv13 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w23)); int8x16_t _w45 = vld1q_s8(kptr0 + 32); int8x16_t _w67 = vld1q_s8(kptr0 + 48); _wv00 = vmlal_s8(_wv00, vget_low_s8(_val1), vget_low_s8(_w45)); _wv01 = vmlal_s8(_wv01, vget_low_s8(_val1), vget_high_s8(_w45)); _wv02 = vmlal_s8(_wv02, vget_low_s8(_val1), vget_low_s8(_w67)); _wv03 = vmlal_s8(_wv03, vget_low_s8(_val1), vget_high_s8(_w67)); _wv10 = vmlal_s8(_wv10, vget_high_s8(_val1), vget_low_s8(_w45)); _wv11 = vmlal_s8(_wv11, vget_high_s8(_val1), vget_high_s8(_w45)); _wv12 = vmlal_s8(_wv12, vget_high_s8(_val1), vget_low_s8(_w67)); _wv13 = vmlal_s8(_wv13, vget_high_s8(_val1), vget_high_s8(_w67)); _sum00 = vpadalq_s16(_sum00, _wv00); _sum01 = vpadalq_s16(_sum01, _wv01); _sum02 = vpadalq_s16(_sum02, _wv02); _sum03 = vpadalq_s16(_sum03, _wv03); _sum10 = vpadalq_s16(_sum10, _wv10); _sum11 = vpadalq_s16(_sum11, _wv11); _sum12 = vpadalq_s16(_sum12, _wv12); _sum13 = vpadalq_s16(_sum13, _wv13); tmpptr += 32; kptr0 += 64; } for (; j < nn; j++) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01)); int16x8_t _wv02 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23)); int16x8_t _wv03 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23)); int16x8_t _wv10 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w01)); int16x8_t _wv12 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w23)); int16x8_t _wv13 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w23)); _sum00 = vpadalq_s16(_sum00, _wv00); _sum01 = vpadalq_s16(_sum01, _wv01); _sum02 = vpadalq_s16(_sum02, _wv02); _sum03 = vpadalq_s16(_sum03, _wv03); _sum10 = vpadalq_s16(_sum10, _wv10); _sum11 = vpadalq_s16(_sum11, _wv11); _sum12 = vpadalq_s16(_sum12, _wv12); _sum13 = vpadalq_s16(_sum13, _wv13); tmpptr += 16; kptr0 += 32; } int32x4_t _s001 = vpaddq_s32(_sum00, _sum01); int32x4_t _s023 = vpaddq_s32(_sum02, _sum03); int32x4_t _s101 = vpaddq_s32(_sum10, _sum11); int32x4_t _s123 = vpaddq_s32(_sum12, _sum13); int32x4_t _s00123 = vpaddq_s32(_s001, _s023); int32x4_t _s10123 = vpaddq_s32(_s101, _s123); vst1q_s32(outptr0, _s00123); vst1q_s32(outptr0 + 4, _s10123); outptr0 += 8; #endif // __ARM_FEATURE_DOTPROD #else // __aarch64__ asm volatile( "veor q0, q0 \n" "veor q1, q1 \n" "veor q2, q2 \n" "veor q3, q3 \n" "veor q4, q4 \n" "veor q5, q5 \n" "veor q6, q6 \n" "veor q7, q7 \n" "pld [%2, #256] \n" "lsr r4, %1, #1 \n" // r4 = nn = size >> 1 "cmp r4, #0 \n" "beq 1f \n" "add r5, %3, #16 \n" "pld [%3, #128] \n" "mov r6, #32 \n" "pld [%3, #384] \n" "vld1.s8 {d20-d21}, [%3 :128], r6 \n" // _w01 "vld1.s8 {d16-d19}, [%2 :128]! 
\n" // _val0 _val1 "vld1.s8 {d22-d23}, [%3 :128], r6 \n" // _w45 "0: \n" "vmull.s8 q12, d16, d20 \n" "pld [%2, #256] \n" "vmull.s8 q13, d16, d21 \n" "pld [%3, #384] \n" "vmull.s8 q14, d17, d20 \n" "vmull.s8 q15, d17, d21 \n" "vld1.s8 {d20-d21}, [r5 :128], r6 \n" // _w23 "vmlal.s8 q12, d18, d22 \n" "vmlal.s8 q13, d18, d23 \n" "subs r4, r4, #1 \n" "vmlal.s8 q14, d19, d22 \n" "vmlal.s8 q15, d19, d23 \n" "vld1.s8 {d22-d23}, [r5 :128], r6 \n" // _w67 "vpadal.s16 q0, q12 \n" "vmull.s8 q12, d16, d20 \n" "vpadal.s16 q1, q13 \n" "vmull.s8 q13, d16, d21 \n" "vpadal.s16 q4, q14 \n" "vmull.s8 q14, d17, d20 \n" "vpadal.s16 q5, q15 \n" "vmull.s8 q15, d17, d21 \n" "vld1.s8 {d16-d17}, [%2 :128]! \n" // _val0 "vmlal.s8 q12, d18, d22 \n" "vld1.s8 {d20-d21}, [%3 :128], r6 \n" // _w01 "vmlal.s8 q13, d18, d23 \n" "pld [r5, #128] \n" "vmlal.s8 q14, d19, d22 \n" "pld [r5, #384] \n" "vmlal.s8 q15, d19, d23 \n" "vld1.s8 {d18-d19}, [%2 :128]! \n" // _val1 "vpadal.s16 q2, q12 \n" "vld1.s8 {d22-d23}, [%3 :128], r6 \n" // _w45 "vpadal.s16 q3, q13 \n" "pld [%2, #128] \n" "vpadal.s16 q6, q14 \n" "pld [%3, #128] \n" "vpadal.s16 q7, q15 \n" "bne 0b \n" "sub %2, %2, #32 \n" "sub %3, %3, #64 \n" "1: \n" "and r4, %1, #1 \n" // r4 = remain = size & 1 "cmp r4, #0 \n" // r4 > 0 "beq 2f \n" "vld1.s8 {d16-d17}, [%2 :128]! \n" // _val "vld1.s8 {d20-d21}, [%3 :128]! \n" // _w01 "vmull.s8 q12, d16, d20 \n" "vld1.s8 {d22-d23}, [%3 :128]! \n" // _w23 "vmull.s8 q13, d16, d21 \n" "vmull.s8 q14, d17, d20 \n" "vmull.s8 q15, d17, d21 \n" "vpadal.s16 q0, q12 \n" "vmull.s8 q12, d16, d22 \n" "vpadal.s16 q1, q13 \n" "vmull.s8 q13, d16, d23 \n" "vpadal.s16 q4, q14 \n" "vmull.s8 q14, d17, d22 \n" "vpadal.s16 q5, q15 \n" "vmull.s8 q15, d17, d23 \n" "vpadal.s16 q2, q12 \n" "vpadal.s16 q3, q13 \n" "vpadal.s16 q6, q14 \n" "vpadal.s16 q7, q15 \n" "2: \n" "vpadd.s32 d16, d0, d1 \n" "vpadd.s32 d17, d2, d3 \n" "vpadd.s32 d18, d4, d5 \n" "vpadd.s32 d19, d6, d7 \n" "vpadd.s32 d20, d8, d9 \n" "vpadd.s32 d21, d10, d11 \n" "vpadd.s32 d22, d12, d13 \n" "vpadd.s32 d23, d14, d15 \n" "vpadd.s32 d0, d16, d17 \n" "vpadd.s32 d1, d18, d19 \n" "vpadd.s32 d2, d20, d21 \n" "vpadd.s32 d3, d22, d23 \n" "vst1.s32 {d0-d3}, [%0 :128]! 
\n" : "=r"(outptr0), "=r"(nn), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(nn), "2"(tmpptr), "3"(kptr0) : "memory", "r4", "r5", "r6", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else const signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif const signed char* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x8_t _val0_l_h = vld1_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _w0123_l, _val0_l_h, 0); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_lane_s32(_sum0, _w0123_h, _val0_l_h, 1); tmpptr += 8; kptr0 += 32; } vst1q_s32(outptr0, _sum0); outptr0 += 4; #else // __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01)); int16x8_t _wv1 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01)); int16x8_t _wv2 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23)); int16x8_t _wv3 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23)); int8x16_t _w45 = vld1q_s8(kptr0 + 32); int8x16_t _w67 = vld1q_s8(kptr0 + 48); _wv0 = vmlal_s8(_wv0, vget_high_s8(_val), vget_low_s8(_w45)); _wv1 = vmlal_s8(_wv1, vget_high_s8(_val), vget_high_s8(_w45)); _wv2 = vmlal_s8(_wv2, vget_high_s8(_val), vget_low_s8(_w67)); _wv3 = vmlal_s8(_wv3, vget_high_s8(_val), vget_high_s8(_w67)); _sum0 = vpadalq_s16(_sum0, _wv0); _sum1 = vpadalq_s16(_sum1, _wv1); _sum2 = vpadalq_s16(_sum2, _wv2); _sum3 = vpadalq_s16(_sum3, _wv3); tmpptr += 16; kptr0 += 64; } for (; j < nn; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(_val, vget_low_s8(_w01)); int16x8_t _wv1 = vmull_s8(_val, vget_high_s8(_w01)); int16x8_t _wv2 = vmull_s8(_val, vget_low_s8(_w23)); int16x8_t _wv3 = vmull_s8(_val, vget_high_s8(_w23)); _sum0 = vpadalq_s16(_sum0, _wv0); _sum1 = vpadalq_s16(_sum1, _wv1); _sum2 = vpadalq_s16(_sum2, _wv2); _sum3 = vpadalq_s16(_sum3, _wv3); tmpptr += 8; kptr0 += 32; } #if __aarch64__ int32x4_t _s01 = vpaddq_s32(_sum0, _sum1); int32x4_t _s23 = vpaddq_s32(_sum2, _sum3); int32x4_t _s0123 = vpaddq_s32(_s01, _s23); #else int32x2_t _s01_low = vpadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0)); int32x2_t _s01_high = vpadd_s32(vget_low_s32(_sum1), vget_high_s32(_sum1)); int32x2_t _s23_low = vpadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2)); int32x2_t _s23_high = vpadd_s32(vget_low_s32(_sum3), vget_high_s32(_sum3)); int32x4_t _s0123 = vcombine_s32(vpadd_s32(_s01_low, _s01_high), vpadd_s32(_s23_low, _s23_high)); #endif vst1q_s32(outptr0, _s0123); outptr0 += 4; #endif // __ARM_FEATURE_DOTPROD } } } static void convolution_im2col_sgemm_transform_kernel_pack8to4_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h) { const int maxk = kernel_w * kernel_h; // interleave // src = maxk-inch-outch // dst = 8a-4b-maxk-inch/8a-outch/4b // dst = 
4a-4b-2-maxk-inch/8a-outch/4b (arm82) Mat kernel = _kernel.reshape(maxk, inch, outch); kernel_tm.create(32 * maxk, inch / 8, outch / 4, (size_t)1u); for (int q = 0; q + 3 < outch; q += 4) { signed char* g00 = kernel_tm.channel(q / 4); for (int p = 0; p + 7 < inch; p += 8) { for (int k = 0; k < maxk; k++) { #if __ARM_FEATURE_DOTPROD for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j); g00[0] = k00[k]; g00++; } } for (int i = 0; i < 4; i++) { for (int j = 4; j < 8; j++) { const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j); g00[0] = k00[k]; g00++; } } #else for (int i = 0; i < 4; i++) { for (int j = 0; j < 8; j++) { const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j); g00[0] = k00[k]; g00++; } } #endif } } } } static void convolution_im2col_sgemm_pack8to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = kernel_w * kernel_h; // im2col Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator); { const int gap = (w * stride_h - outw * stride_w) * 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); signed char* ptr = bottom_im2col.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { const signed char* sptr = img.row<const signed char>(dilation_h * u) + dilation_w * v * 8; for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { int8x8_t _val0 = vld1_s8(sptr); int8x8_t _val1 = vld1_s8(sptr + stride_w * 8); int8x8_t _val2 = vld1_s8(sptr + stride_w * 16); int8x8_t _val3 = vld1_s8(sptr + stride_w * 24); vst1_s8(ptr, _val0); vst1_s8(ptr + 8, _val1); vst1_s8(ptr + 16, _val2); vst1_s8(ptr + 24, _val3); sptr += stride_w * 32; ptr += 32; } for (; j + 1 < outw; j += 2) { int8x8_t _val0 = vld1_s8(sptr); int8x8_t _val1 = vld1_s8(sptr + stride_w * 8); vst1_s8(ptr, _val0); vst1_s8(ptr + 8, _val1); sptr += stride_w * 16; ptr += 16; } for (; j < outw; j++) { int8x8_t _val = vld1_s8(sptr); vst1_s8(ptr, _val); sptr += stride_w * 8; ptr += 8; } sptr += gap; } } } } } im2col_sgemm_pack8to4_int8_neon(bottom_im2col, top_blob, kernel, opt); }
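// A plain scalar reference for im2col_sgemm_pack8to4_int8_neon, sketched here
// as an aid for validating the assembly/intrinsic paths above.
// naive_sgemm_pack8to4_int8 is a hypothetical name, not an ncnn API, and it
// assumes the non-dotprod (#else) weight interleave written by the
// transform_kernel routine: one 32-byte block per (inch-block, maxk) step
// holding 4 output channels x 8 input channels.
static void naive_sgemm_pack8to4_int8(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel_tm)
{
    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c; // pack8: 8 input channels per element
    const int outch = top_blob.c;     // pack4: 4 output channels per element

    for (int p = 0; p < outch; p++)
    {
        int* outptr = top_blob.channel(p);

        for (int i = 0; i < size; i++)
        {
            int sum[4] = {0, 0, 0, 0};

            const signed char* kptr = kernel_tm.channel(p);

            for (int q = 0; q < inch; q++)
            {
                for (int k = 0; k < maxk; k++)
                {
                    const signed char* val = (const signed char*)bottom_im2col.channel(q) + (k * size + i) * 8;

                    // one 4-out x 8-in weight block per (q, k) step
                    for (int a = 0; a < 4; a++)
                    {
                        for (int b = 0; b < 8; b++)
                        {
                            sum[a] += kptr[a * 8 + b] * val[b];
                        }
                    }

                    kptr += 32;
                }
            }

            for (int a = 0; a < 4; a++)
            {
                outptr[i * 4 + a] = sum[a];
            }
        }
    }
}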
search_lsh.h
#include <boost/functional/hash.hpp>
#include <boost/geometry/index/rtree.hpp>
#include <unordered_map>
#include <vector>
#include <set>
#include <cmath>   // std::abs, std::ceil
#include <memory>  // std::shared_ptr, std::default_delete
#include <numeric> // std::inner_product

#include "utils.h"
#include "rss.h"

using namespace std;

extern int_t nb;
extern int_t nq;
extern int_t dimension;
extern float *xb;
extern float *xq;
extern float *rg;
extern vector<vector<int > > gt;

using std::shared_ptr;
using std::vector;
using std::unordered_map;

// hashes a vector<T> so that it can serve as an unordered_map key
template<typename T>
class Hasher {
public:
    size_t operator()(const std::vector<T>& vec) const {
        size_t seed = 0;
        for (T v : vec) {
            boost::hash_combine(seed, v);
        }
        return seed;
    }
};

// E2LSH: Euclidean LSH, k quantized random projections per table
class E2LSH {
private:
    int k_;
    int d_;
    float r_;
    int size_;
    // the arrays come from new[], so the shared_ptrs carry an array deleter
    shared_ptr<float > bias_;
    shared_ptr<float > projectors_;
    unordered_map<vector<int>, vector<int>, Hasher<int > > hash_map_;

public:
    E2LSH(int k, int d, float r)
        : k_(k), d_(d), r_(r), size_(0),
          bias_(new float[k], std::default_delete<float[]>()),
          projectors_(new float[k * d], std::default_delete<float[]>()) {
        uniform_random_fill(bias_.get(), k, 0.f, r);
        normal_random_fill(projectors_.get(), k * d, 0.f, 1.f);
    }

    size_t size() const {
        return hash_map_.size();
    }

    void add(const float* x, int n) {
        for (int i = 0; i < n; ++i) {
            auto key = hash(x + i * d_);
            hash_map_[key].push_back(size_++);
        }
    }

    const vector<int >& search(const float* x) {
        auto key = hash(x);
        return hash_map_[key];
    }

    set<int > range_search(const float* x, const float* r) {
        vector<int > hv_upper(k_);
        vector<int > hv_lower(k_);
        vector<int > hv(k_);
        double distinct_cnt = 1;
        for (int i = 0; i < k_; ++i) {
            float projected_x = 0.;
            float projected_r = 0.;
            for (int di = 0; di < d_; ++di) {
                projected_x += x[di] * projectors_.get()[i * d_ + di];
                projected_r += r[di] * std::abs(projectors_.get()[i * d_ + di]);
            }
            hv_lower[i] = std::ceil((projected_x - projected_r + bias_.get()[i]) / r_);
            hv_upper[i] = std::ceil((projected_x + projected_r + bias_.get()[i]) / r_);
            hv[i] = std::ceil((projected_x + bias_.get()[i]) / r_);
            distinct_cnt *= (hv_upper[i] - hv_lower[i] + 1);
        }
        // std::cout << "distinct_cnt : " << distinct_cnt << "\n";
        const vector<int >& res = hash_map_[hv];
        std::set<int > s(res.begin(), res.end());
        for (int i = 0; i < k_; i++) {
            // probe the neighboring bucket whenever the query range crosses
            // the bucket boundary in this dimension
            if (hv_lower[i] < hv[i]) {
                hv[i]--;
                const vector<int >& r = hash_map_[hv];
                s.insert(r.begin(), r.end());
                hv[i]++;
            }
            if (hv_upper[i] > hv[i]) {
                hv[i]++;
                const vector<int >& r = hash_map_[hv];
                s.insert(r.begin(), r.end());
                hv[i]--;
            }
        }
        return s;
    }

    vector<int > hash(const float* x) {
        vector<int > hv(k_);
        for (int i = 0; i < k_; ++i) {
            float projected = std::inner_product(x, x + d_, projectors_.get() + i * d_, 0.);
            hv[i] = std::ceil((projected + bias_.get()[i]) / r_);
        }
        return hv;
    }
};

// SRP: sign random projections; one bit per projector, packed into an integer
class SRP {
    typedef unsigned long long HV;

private:
    int k_;
    int d_;
    int size_;
    shared_ptr<float > projectors_;
    unordered_map<HV, vector<int> > hash_map_;

public:
    SRP(int k, int d, float r)
        : k_(k), d_(d), size_(0),
          projectors_(new float[k * d], std::default_delete<float[]>()) {
        normal_random_fill(projectors_.get(), k * d, 0.f, 1.f);
    }

    size_t size() const {
        return hash_map_.size();
    }

    void add(const float* x, int n) {
        for (int i = 0; i < n; ++i) {
            auto key = hash(x + i * d_);
            hash_map_[key].push_back(size_++);
        }
    }

    const vector<int >& search(const float* x) {
        auto key = hash(x);
        return hash_map_[key];
    }

    set<int > range_search(const float* x, const float* r) {
        const vector<int >& vct = search(x);
        return set<int >(vct.begin(), vct.end());
    }

    HV hash(const float* x) {
        HV hv = 0;
        for (int i = 0; i < k_; ++i) {
            hv *= 2;
            float projected = std::inner_product(x, x + d_, projectors_.get() + i * d_, 0.);
            hv |= projected > 0;
        }
        return hv;
    }
};

// union over l independent tables of any single-table scheme above
template<class LSHTable >
class LSH {
private:
    vector<LSHTable > lsh_;

public:
    LSH(int l, int k, int d, float r) {
        for (int i = 0; i < l; i++) {
            lsh_.emplace_back(k, d, r);
        }
    }

    float add(const float* x, int n) {
        #pragma omp parallel for
        for (int i = 0; i < (int)lsh_.size(); i++) {
            lsh_[i].add(x, n);
        }
        float sum_size = 0;
        for (const auto& lsh : lsh_) {
            sum_size += lsh.size();
        }
        return sum_size / lsh_.size(); // average bucket count per table
    }

    std::set<int > search(const float* x, int l) {
        std::set<int > s;
        for (int i = 0; i < l; i++) {
            auto r = lsh_[i].search(x);
            s.insert(r.begin(), r.end());
        }
        return s;
    }

    std::set<int > range_search(const float* x, const float* r, int l) {
        std::set<int > s;
        for (int i = 0; i < l; i++) {
            auto res = lsh_[i].range_search(x, r);
            s.insert(res.begin(), res.end());
        }
        return s;
    }
};
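// Usage sketch: how the pieces above compose. The parameter choices (8 tables,
// 10 projections, bucket width 4.0) are illustrative assumptions, not tuned
// defaults, and example_lsh_range_query is a hypothetical helper; xb, nb, and
// dimension are the extern globals declared at the top of this header, assumed
// to convert to int.
inline std::set<int > example_lsh_range_query(const float* query, const float* radius) {
    static LSH<E2LSH> index(/*l=*/8, /*k=*/10, /*d=*/(int)dimension, /*r=*/4.0f);
    static bool built = false;
    if (!built) {
        index.add(xb, (int)nb); // returns the average bucket count per table
        built = true;
    }
    return index.range_search(query, radius, /*l=*/8);
}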
GB_unaryop__identity_int16_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int16_uint16
// op(A') function:  GB_tran__identity_int16_uint16

// C type:   int16_t
// A type:   uint16_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    /* aij = Ax [pA] */                         \
    GB_GETA (aij, Ax, pA) ;                     \
    /* Cx [pC] = op (cast (aij)) */             \
    GB_CASTING (z, aij) ;                       \
    GB_OP (GB_CX (pC), z) ;                     \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_int16_uint16
(
    int16_t *Cx,        // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_int16_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
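//------------------------------------------------------------------------------
// macro expansion sketch (illustrative, not part of the generated file)
//------------------------------------------------------------------------------

// For reference, one iteration of the apply loop in
// GB_unop__identity_int16_uint16 expands, after GB_GETA / GB_CASTING / GB_OP,
// to the body below; demo_identity_int16_uint16 is a hypothetical name used
// only for illustration.

static inline void demo_identity_int16_uint16 (int16_t *Cx, const uint16_t *Ax,
    int64_t p)
{
    uint16_t aij = Ax [p] ;         // GB_GETA:    aij = Ax [pA]
    int16_t z = (int16_t) aij ;     // GB_CASTING: typecast uint16_t to int16_t
    Cx [p] = z ;                    // GB_OP:      identity, cij = z
}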
Parallelizer.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PARALLELIZER_H #define EIGEN_PARALLELIZER_H #if EIGEN_HAS_CXX11_ATOMIC #include <atomic> #endif namespace Eigen { namespace internal { /** \internal */ inline void manage_multi_threading(Action action, int* v) { static int m_maxThreads = -1; EIGEN_UNUSED_VARIABLE(m_maxThreads) if(action==SetAction) { eigen_internal_assert(v!=0); m_maxThreads = *v; } else if(action==GetAction) { eigen_internal_assert(v!=0); #ifdef EIGEN_HAS_OPENMP if(m_maxThreads>0) *v = m_maxThreads; else *v = omp_get_max_threads(); #else *v = 1; #endif } else { eigen_internal_assert(false); } } } /** Must be call first when calling Eigen from multiple threads */ inline void initParallel() { int nbt; internal::manage_multi_threading(GetAction, &nbt); std::ptrdiff_t l1, l2, l3; internal::manage_caching_sizes(GetAction, &l1, &l2, &l3); } /** \returns the max number of threads reserved for Eigen * \sa setNbThreads */ inline int nbThreads() { int ret; internal::manage_multi_threading(GetAction, &ret); return ret; } /** Sets the max number of threads reserved for Eigen * \sa nbThreads */ inline void setNbThreads(int v) { internal::manage_multi_threading(SetAction, &v); } namespace internal { template<typename Index> struct GemmParallelInfo { GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {} // volatile is not enough on all architectures (see bug 1572) // to guarantee that when thread A says to thread B that it is // done with packing a block, then all writes have been really // carried out... C++11 memory model+atomic guarantees this. #if EIGEN_HAS_CXX11_ATOMIC std::atomic<Index> sync; std::atomic<int> users; #else Index volatile sync; int volatile users; #endif Index lhs_start; Index lhs_length; }; template<bool Condition, typename Functor, typename Index> void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose) { // TODO when EIGEN_USE_BLAS is defined, // we should still enable OMP for other scalar types // Without C++11, we have to disable GEMM's parallelization on // non x86 architectures because there volatile is not enough for our purpose. // See bug 1572. #if (! defined(EIGEN_HAS_OPENMP)) || defined(EIGEN_USE_BLAS) || ((!EIGEN_HAS_CXX11_ATOMIC) && !(EIGEN_ARCH_i386_OR_x86_64)) // FIXME the transpose variable is only needed to properly split // the matrix product when multithreading is enabled. This is a temporary // fix to support row-major destination matrices. This whole // parallelizer mechanism has to be redesigned anyway. EIGEN_UNUSED_VARIABLE(depth); EIGEN_UNUSED_VARIABLE(transpose); func(0,rows, 0,cols); #else // Dynamically check whether we should enable or disable OpenMP. // The conditions are: // - the max number of threads we can create is greater than 1 // - we are not already in a parallel code // - the sizes are large enough // compute the maximal number of threads from the size of the product: // This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once. Index size = transpose ? 
rows : cols; Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr); // compute the maximal number of threads from the total amount of work: double work = static_cast<double>(rows) * static_cast<double>(cols) * static_cast<double>(depth); double kMinTaskSize = 50000; // FIXME improve this heuristic. pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, static_cast<Index>( work / kMinTaskSize ) )); // compute the number of threads we are going to use Index threads = std::min<Index>(nbThreads(), pb_max_threads); // if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session, // then abort multi-threading // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp? if((!Condition) || (threads==1) || (omp_get_num_threads()>1)) return func(0,rows, 0,cols); Eigen::initParallel(); func.initParallelSession(threads); if(transpose) std::swap(rows,cols); ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0); #pragma omp parallel num_threads(threads) { Index i = omp_get_thread_num(); // Note that the actual number of threads might be lower than the number of request ones. Index actual_threads = omp_get_num_threads(); Index blockCols = (cols / actual_threads) & ~Index(0x3); Index blockRows = (rows / actual_threads); blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr; Index r0 = i*blockRows; Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows; Index c0 = i*blockCols; Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols; info[i].lhs_start = r0; info[i].lhs_length = actualBlockRows; if(transpose) func(c0, actualBlockCols, 0, rows, info); else func(0, rows, c0, actualBlockCols, info); } #endif } } // end namespace internal } // end namespace Eigen #endif // EIGEN_PARALLELIZER_H
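// Worked example of the heuristic above (illustrative; assumes Traits::nr == 4,
// a typical register-blocking width, and nbThreads() == 8): for a
// 1024 x 1024 x 1024 column-major product, size = cols = 1024, so
// pb_max_threads = max(1, 1024/4) = 256. The work bound 1024^3 / 50000 is
// about 21000 and does not lower that. The product therefore runs on
// min(8, 256) = 8 threads, each packing blockCols = (1024/8) & ~3 = 128
// columns and a blockRows rounded down to a multiple of Traits::mr.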
main.c
/*BHEADER**************************************************************** * (c) 2007 The Regents of the University of California * * * * See the file COPYRIGHT_and_DISCLAIMER for a complete copyright * * notice and disclaimer. * * * *EHEADER****************************************************************/ //-------------- // A micro kernel //-------------- #include <stdio.h> #include <stdlib.h> #include "omprace.h" #include <omp.h> #include "headers.h" #define OMPP_INIT omprace_init(); #define OMPP_FINI omprace_fini(); // const int testIter = /*50000;*/1000; double totalWallTime = 0.0; // void test_Matvec(); void test_Relax(); void test_Axpy(); // int main(int argc, char *argv[]) { OMPP_INIT double t0 = 0.0, t1 = 0.0, del_wtime = 0.0; int max_num_threads; printf("\n"); printf("//------------ \n"); printf("// \n"); printf("// CORAL AMGmk Benchmark Version 1.0 \n"); printf("// \n"); printf("//------------ \n"); #pragma omp parallel #pragma omp master max_num_threads = omp_get_num_threads(); printf("\nmax_num_threads = %d \n\n",max_num_threads ); printf("\n testIter = %d \n\n", testIter ); t0 = omp_get_wtime(); // Matvec totalWallTime = 0.0; test_Matvec(); printf("\n"); printf("//------------ \n"); printf("// \n"); printf("// MATVEC\n"); printf("// \n"); printf("//------------ \n"); printf("\nWall time = %f seconds. \n", totalWallTime); // Relax totalWallTime = 0.0; test_Relax(); //__WHATIF__BEGIN__ printf("\n"); printf("//------------ \n"); printf("// \n"); printf("// Relax\n"); printf("// \n"); printf("//------------ \n"); printf("\nWall time = %f seconds. \n", totalWallTime); // Axpy totalWallTime = 0.0; test_Axpy(); printf("\n"); printf("//------------ \n"); printf("// \n"); printf("// Axpy\n"); printf("// \n"); printf("//------------ \n"); printf("\nWall time = %f seconds. \n", totalWallTime); t1 = omp_get_wtime();; del_wtime = t1 - t0; printf("\nTotal Wall time = %f seconds. 
\n", del_wtime); //__WHATIF__END__ OMPP_FINI return 0; } void test_Matvec() { double t0 = 0.0, t1 = 0.0; hypre_CSRMatrix *A; hypre_Vector *x, *y, *sol; int nx, ny, nz, i; double *values; double *y_data, *sol_data; double error, diff; nx = 50; /* size per proc nx*ny*nz */ ny = 50; nz = 50; values = hypre_CTAlloc(double, 4); values[0] = 6; values[1] = -1; values[2] = -1; values[3] = -1; A = GenerateSeqLaplacian(nx, ny, nz, values, &y, &x, &sol); hypre_SeqVectorSetConstantValues(x,1); hypre_SeqVectorSetConstantValues(y,0); t0 = omp_get_wtime(); for (i=0; i<testIter; ++i) hypre_CSRMatrixMatvec(1,A,x,0,y); t1 = omp_get_wtime() ; totalWallTime += t1 - t0; y_data = hypre_VectorData(y); sol_data = hypre_VectorData(sol); error = 0; for (i=0; i < nx*ny*nz; i++) { diff = fabs(y_data[i]-sol_data[i]); if (diff > error) error = diff; } if (error > 0) printf(" \n Matvec: error: %e\n", error); hypre_TFree(values); hypre_CSRMatrixDestroy(A); hypre_SeqVectorDestroy(x); hypre_SeqVectorDestroy(y); hypre_SeqVectorDestroy(sol); } void test_Relax() { double t0 = 0.0, t1 = 0.0; hypre_CSRMatrix *A; hypre_Vector *x, *y, *sol; int nx, ny, nz, i; double *values; double *x_data; double diff, error; nx = 50; /* size per proc nx*ny*nz */ ny = 50; nz = 50; values = hypre_CTAlloc(double, 4); values[0] = 6; values[1] = -1; values[2] = -1; values[3] = -1; A = GenerateSeqLaplacian(nx, ny, nz, values, &y, &x, &sol); hypre_SeqVectorSetConstantValues(x,1); t0 = omp_get_wtime(); for (i=0; i<testIter; ++i) hypre_BoomerAMGSeqRelax(A, sol, x); t1 = omp_get_wtime(); totalWallTime += t1 - t0; x_data = hypre_VectorData(x); error = 0; for (i=0; i < nx*ny*nz; i++) { diff = fabs(x_data[i]-1); if (diff > error) error = diff; } if (error > 0) printf(" \n Relax: error: %e\n", error); hypre_TFree(values); hypre_CSRMatrixDestroy(A); hypre_SeqVectorDestroy(x); hypre_SeqVectorDestroy(y); hypre_SeqVectorDestroy(sol); } void test_Axpy() { double t0 = 0.0, t1 = 0.0; hypre_Vector *x, *y; int nx, i; double alpha=0.5; double diff, error; double *y_data; nx = 125000; /* size per proc */ x = hypre_SeqVectorCreate(nx); y = hypre_SeqVectorCreate(nx); hypre_SeqVectorInitialize(x); hypre_SeqVectorInitialize(y); hypre_SeqVectorSetConstantValues(x,1); hypre_SeqVectorSetConstantValues(y,1); t0 = omp_get_wtime(); for (i=0; i<testIter; ++i) hypre_SeqVectorAxpy(alpha,x,y); t1 = omp_get_wtime(); y_data = hypre_VectorData(y); error = 0; for (i=0; i < nx; i++) { diff = fabs(y_data[i]-1-0.5*(double)testIter); if (diff > error) error = diff; } if (error > 0) printf(" \n Axpy: error: %e\n", error); totalWallTime += t1 - t0; hypre_SeqVectorDestroy(x); hypre_SeqVectorDestroy(y); }
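// A worked check of the Axpy verification above: each of the testIter (= 1000)
// calls to hypre_SeqVectorAxpy adds alpha*x[i] = 0.5*1 to every entry of y, so
// on exit y[i] = 1 + 0.5*testIter = 501, which is exactly the value that the
// fabs(y_data[i]-1-0.5*(double)testIter) test compares against.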
mea_mea_traco.c
#include <stdio.h> #include <stdlib.h> #include <limits.h> #include <omp.h> #include <math.h> #define min(a,b) (((a)<(b))?(a):(b)) #define MIN(a,b) (((a)<(b))?(a):(b)) #define max(a,b) (((a)>(b))?(a):(b)) #define MAX(a,b) (((a)>(b))?(a):(b)) #define floord(n,d) floor(((double)(n))/((double)(d))) #define ceild(n,d) ceil(((double)(n))/((double)(d))) double ** Q; double ** Qbp; double ** Pbp; double * Puu; double ** M; int Ebp = 0; // Energy weight of base pair -2, -1, 0, 1, 2 int RT = 1; // 'Normalized' temperature 1,2,3,4,5 float ERT; int l = 0; //minimum loop length 0-5 int delta = 1; // Base pair weighting 1-5 char * RNA; //only ACGU int N; int DIM; #include "../mem.h" int paired(int i, int j) { char nt1 = RNA[i]; char nt2 = RNA[j]; if ((nt1 == 'A' && nt2 == 'U') || (nt1 == 'U' && nt2 == 'A') || (nt1 == 'G' && nt2 == 'C') || (nt1 == 'C' && nt2 == 'G') || (nt1 == 'G' && nt2 == 'U') || (nt1 == 'U' && nt2 == 'G')){ return 1;} else return 0; } int main(int argc, char *argv[]){ int num_proc=1; int i,j,k,ll,p,q; int c0, c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c15; int t1, t2, t3, t4, t5, t6,t7; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; ERT = exp((float)-Ebp/(float)RT); srand(time(NULL)); if(argc > 1) num_proc = atoi(argv[1]); int kind=1; N = 8; DIM = 12; if(argc > 2) N = atoi(argv[2]); DIM = N+10; if(argc > 3) kind = atoi(argv[3]); omp_set_num_threads(num_proc); //printf(" -exp(Ebp/RT) = %5.3f\n", ERT); RNA = (char*) malloc(DIM * sizeof(char*)); //read from FASTA file Puu = (double *) malloc(DIM * sizeof(double*)); rand_seq(RNA, N); //printf("Sequence: "); //for(i=0; i<N; i++) // printf("%c", RNA[i]); //printf("\n\n"); Q = memd(); Qbp = memd(); Pbp = memd(); M = memd(); rna_array_init(Q, 1, 1); rna_array_init(Qbp, 0, 0); rna_array_init(Pbp, 0, 0); // rna_array_init(Puu, 0, 0); rna_array_init(M, 0, 0); int a = 0; double start = omp_get_wtime(); // compute the partition functions Q and Qbp if(kind==1){ #pragma scop for(i=N-1; i>=0; i--){ for(j=i+1; j<N; j++){ for(k=0; k<j-i-l; k++){ // a++; M[i][j] = MAX(M[i][j], M[i][k+i-1] + M[k+i+1][j-1] + delta*Pbp[k+i][j])*paired(k+i,j-1); } M[i][j] = MAX(M[i][j], M[i][j-1] + Puu[j-1]); } } #pragma endscop } if(kind==2) // pluto { printf("pluto\n"); /* if (N >= 2) { for (t1=1;t1<=N-1;t1++) { lbp=0; ubp=t1-1; #pragma omp parallel for private(lbv,ubv,t3,t4,t5) for (t2=lbp;t2<=ubp;t2++) { for (t3=0;t3<=floord(t1-t2-1,16);t3++) { for (t5=16*t3;t5<=min(16*t3+15,t1-t2-1);t5++) { M[t2][t1] = MAX(M[t2][t1], M[t2][t5+t2-1] + M[t5+t2+1][t1-1] + delta*Pbp[t5+t2][t1])*paired(t5+t2,t1-1);; } } t3 = floord(t1,16); M[t2][t1] = MAX(M[t2][t1], M[t2][t1-1] + Puu[t1-1]);; } } } */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if (N >= 2) { for (t1=1;t1<=N-1;t1++) { lbp=0; ubp=floord(t1-1,16); #pragma omp parallel for private(lbv,ubv,t3,t4,t5) for (t2=lbp;t2<=ubp;t2++) { for (t3=0;t3<=floord(t1,16);t3++) { if ((t1 <= 16*t3+15) && (t2 == 0)) { for (t4=0;t4<=t1-16*t3-1;t4++) { for (t5=16*t3;t5<=t1-t4-1;t5++) { M[t4][t1] = MAX(M[t4][t1], M[t4][t5+t4-1] + M[t5+t4+1][t1-1] + delta*Pbp[t5+t4][t1])*paired(t5+t4,t1-1);; } M[t4][t1] = MAX(M[t4][t1], M[t4][t1-1] + Puu[t1-1]);; } } if (t1 >= 16*t3+16) { for (t4=16*t2;t4<=min(16*t2+15,t1-16*t3-1);t4++) { for (t5=16*t3;t5<=min(16*t3+15,t1-t4-1);t5++) { M[t4][t1] = MAX(M[t4][t1], M[t4][t5+t4-1] + M[t5+t4+1][t1-1] + delta*Pbp[t5+t4][t1])*paired(t5+t4,t1-1);; } } } if (t1 <= 16*t3+15) { for (t4=max(16*t2,t1-16*t3);t4<=min(t1-1,16*t2+15);t4++) { M[t4][t1] = MAX(M[t4][t1], M[t4][t1-1] + Puu[t1-1]);; } } } } } } /* End of CLooG code */ } if(kind==3) // traco { printf("traco\n"); /* for( c1 = 2; c1 < 2 * N + floord(N - 2, 16) - 1; c1 += 1) #pragma omp parallel for for( c3 = max(-2 * N + c1 + 2, -((c1 - 1) % 2) + 1); c3 <= (c1 - 2) / 33; c3 += 2) { for( c5 = 0; c5 <= c3; c5 += 1) for( c9 = ((-c1 + 33 * c3) / 2) + N; c9 <= min(N - 1, ((-c1 + 33 * c3) / 2) + N + 15); c9 += 1) for( c11 = 16 * c5; c11 <= min(min(16 * c3 + 1, 16 * c5 + 15), ((c1 - c3) / 2) - N + c9); c11 += 1) M[(((-c1+c3)/2)+N-1)][c9] = MAX(M[(((-c1+c3)/2)+N-1)][c9], M[(((-c1+c3)/2)+N-1)][c11+(((-c1+c3)/2)+N-1)-1] + M[c11+(((-c1+c3)/2)+N-1)+1][c9-1] + delta*Pbp[c11+(((-c1+c3)/2)+N-1)][c9])*paired(c11+(((-c1+c3)/2)+N-1),c9-1); for( c9 = ((-c1 + 33 * c3) / 2) + N; c9 <= min(N - 1, ((-c1 + 33 * c3) / 2) + N + 15); c9 += 1) for( c10 = max(0, 8 * c3 - c9 + (2 * N - c1 + c3 + 2 * c9 - 1) / 4 + 2); c10 <= 1; c10 += 1) { if (c10 == 1) { M[(((-c1+c3)/2)+N-1)][c9] = MAX(M[(((-c1+c3)/2)+N-1)][c9], M[(((-c1+c3)/2)+N-1)][c9-1] + Puu[c9-1]); } else { for( c11 = 16 * c3 + 2; c11 <= ((c1 - c3) / 2) - N + c9; c11 += 1) M[(((-c1+c3)/2)+N-1)][c9] = MAX(M[(((-c1+c3)/2)+N-1)][c9], M[(((-c1+c3)/2)+N-1)][c11+(((-c1+c3)/2)+N-1)-1] + M[c11+(((-c1+c3)/2)+N-1)+1][c9-1] + delta*Pbp[c11+(((-c1+c3)/2)+N-1)][c9])*paired(c11+(((-c1+c3)/2)+N-1),c9-1); } } } */ for( c0 = 1; c0 < N + floord(N - 2, 16); c0 += 1) #pragma omp parallel for for( c1 = c0 - (c0 + 16) / 17 + 1; c1 <= min(N - 1, c0); c1 += 1) for( c3 = 16 * c0 - 16 * c1 + 1; c3 <= min(c1, 16 * c0 - 16 * c1 + 16); c3 += 1) { for( c4 = 0; c4 <= c0 - c1; c4 += 1) for( c10 = 16 * c4; c10 <= min(c3 - 1, 16 * c4 + 15); c10 += 1){ // a++; M[(N-c1-1)][(N-c1+c3-1)] = MAX(M[(N-c1-1)][(N-c1+c3-1)], M[(N-c1-1)][c10+(N-c1-1)-1] + M[c10+(N-c1-1)+1][(N-c1+c3-1)-1] + delta*Pbp[c10+(N-c1-1)][(N-c1+c3-1)])*paired(c10+(N-c1-1),(N-c1+c3-1)-1); } M[(N-c1-1)][(N-c1+c3-1)] = MAX(M[(N-c1-1)][(N-c1+c3-1)], M[(N-c1-1)][(N-c1+c3-1)-1] + Puu[(N-c1+c3-1)-1]); } } if(kind==4) // traco tstile { printf("traco corr\n"); for( c1 = 1; c1 < N + floord(N - 2, 128); c1 += 1) #pragma omp parallel for schedule(dynamic, 1) for( c3 = max(0, -N + c1 + 1); c3 <= (c1 - 1) / 129; c3 += 1) for( c4 = 0; c4 <= 1; c4 += 1) { if (c4 == 1) { for( c9 = N - c1 + 129 * c3; c9 <= min(N - 1, N - c1 + 129 * c3 + 127); c9 += 1) for( c10 = max(0, -c1 + 64 * c3 - c9 + (N + c1 + c3 + c9 + 1) / 2 + 1); c10 <= 1; c10 += 1) { if (c10 == 1) { M[(N-c1+c3-1)][c9] = MAX(M[(N-c1+c3-1)][c9], M[(N-c1+c3-1)][c9-1] + Puu[c9-1]); } else { for( c11 = 128 * c3 + 2; c11 <= -N + c1 - c3 + c9; c11 += 1) M[(N-c1+c3-1)][c9] = MAX(M[(N-c1+c3-1)][c9], 
M[(N-c1+c3-1)][c11+(N-c1+c3-1)-1] + M[c11+(N-c1+c3-1)+1][c9-1] + delta*Pbp[c11+(N-c1+c3-1)][c9])*paired(c11+(N-c1+c3-1),c9-1); } } } else { for( c5 = 0; c5 <= 8 * c3; c5 += 1) for( c9 = N - c1 + 129 * c3; c9 <= min(N - 1, N - c1 + 129 * c3 + 127); c9 += 1) for( c11 = 16 * c5; c11 <= min(min(128 * c3 + 1, 16 * c5 + 15), -N + c1 - c3 + c9); c11 += 1) M[(N-c1+c3-1)][c9] = MAX(M[(N-c1+c3-1)][c9], M[(N-c1+c3-1)][c11+(N-c1+c3-1)-1] + M[c11+(N-c1+c3-1)+1][c9-1] + delta*Pbp[c11+(N-c1+c3-1)][c9])*paired(c11+(N-c1+c3-1),c9-1); } } } double stop = omp_get_wtime(); printf("%.4f %i\n",stop - start, a); //printf("Q\n"); //rna_array_print(Q); //printf("Qbp\n"); //rna_array_print(Qbp); exit(0); #pragma scop for(i=N-1; i>=0; i--){ for(j=i+1; j<N; j++){ for(k=0; k<j-i-l; k++){ M[i][j] = MAX(M[i][j], M[i][k+i-1] + M[k+i+1][j-1] + delta*Pbp[k+i][j])*paired(k+i,j-1); } M[i][j] = MAX(M[i][j], M[i][j-1] + Puu[j-1]); } } #pragma endscop printf("M\n"); rna_array_print(M); return 0; }
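// The kind==2..4 schedules above are loop-transformed (Pluto and TRACO tiled)
// versions of the kind==1 reference recurrence, so they must produce an
// identical M table. A minimal validation helper is sketched below;
// max_abs_diff is a hypothetical name, not part of this program: run kind==1
// into one table, a tiled kind into another, and compare the results.
static double max_abs_diff(double **A, double **B, int n)
{
    double e = 0.0;
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            e = MAX(e, fabs(A[i][j] - B[i][j]));
    return e;
}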
fci_contract.c
/*
 * Full CI
 */

#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <assert.h>
//#include <omp.h>
#include "config.h"
#include "vhf/fblas.h"
#include "fci.h"

#define MIN(X,Y) ((X)<(Y)?(X):(Y))
#define MAX(X,Y) ((X)>(Y)?(X):(Y))
// for (16e,16o) ~ 11 MB buffer = 120 * 12870 * 8
#define STRB_BLKSIZE 112

/*
 * CPU timing of a single thread can be estimated as:
 *        na*nb*nnorb*8(bytes)*5 / (mem_freq*64 (*2 if dual-channel mem))
 *      + na*nb*nnorb**2 (*2 for spin1, *1 for spin0)
 *        / (CPU_freq (*4 for SSE3 blas, or *6-8 for AVX blas))
 * where the 5 memory accesses are: 3 in prog_a_t1, prog0_b_t1,
 * spread_b_t1 and 2 in spread_a_t1
 *
 * with multiple threads:
 *        na*nb*nnorb*8(bytes)*2 / (mem_freq*64 (*2 if dual-channel mem)) due to single thread
 *      + na*nb*nnorb*8(bytes)*3 / max_mem_bandwidth due to N-thread
 *      + na*nb*nnorb**2 (*2 for spin1, *1 for spin0)
 *        / (CPU_freq (*4 for SSE3 blas, or *6-8 for AVX blas)) / num_threads
 */

/*
 * ***********************************************************
 *
 * Need the permutation symmetry
 * h2e[i,j,k,l] = h2e[j,i,k,l] = h2e[i,j,l,k] = h2e[j,i,l,k]
 *
 * ***********************************************************
 */

/*
 * optimize for OpenMP, to reduce memory/CPU data transfer;
 * add software prefetch -- it is especially important for OpenMP
 */

/*
 * For a given stra_id, spread the alpha-strings (which can propagate to
 * stra_id) into t1[:nstrb,nnorb]
 *    str1-of-alpha -> create/annihilate -> str0-of-alpha
 * ci0[:nstra,:nstrb] is contiguous in beta-strings
 * bcount controls the number of beta strings to be calculated.
 * for spin=0 system, only lower triangle of the intermediate ci vector
 * needs to be calculated
 */
void FCIprog_a_t1(double *ci0, double *t1,
                  int bcount, int stra_id, int strb_id,
                  int norb, int nstrb, int nlinka, _LinkTrilT *clink_indexa)
{
        ci0 += strb_id;
        int j, k, ia, sign;
        size_t str1;
        const _LinkTrilT *tab = clink_indexa + stra_id * nlinka;
        double *pt1, *pci;

        for (j = 0; j < nlinka; j++) {
                ia   = EXTRACT_IA  (tab[j]);
                str1 = EXTRACT_ADDR(tab[j]);
                sign = EXTRACT_SIGN(tab[j]);
                pt1 = t1 + ia*bcount;
                pci = ci0 + str1*nstrb;
                if (sign == 0) {
                        break;
                } else if (sign > 0) {
                        for (k = 0; k < bcount; k++) {
                                pt1[k] += pci[k];
                        }
                } else if (sign < 0) {
                        for (k = 0; k < bcount; k++) {
                                pt1[k] -= pci[k];
                        }
                }
        }
}

/*
 * For a given stra_id, spread all beta-strings into t1[:nstrb,nnorb]
 *    all str0-of-beta -> create/annihilate -> str1-of-beta
 * ci0[:nstra,:nstrb] is contiguous in beta-strings
 * bcount controls the number of beta strings to be calculated.
* for spin=0 system, only lower triangle of the intermediate ci vector * needs to be calculated */ void FCIprog_b_t1(double *ci0, double *t1, int bcount, int stra_id, int strb_id, int norb, int nstrb, int nlinkb, _LinkTrilT *clink_indexb) { int j, ia, str0, str1, sign; const _LinkTrilT *tab = clink_indexb + strb_id * nlinkb; double *pci = ci0 + stra_id*(size_t)nstrb; for (str0 = 0; str0 < bcount; str0++) { for (j = 0; j < nlinkb; j++) { ia = EXTRACT_IA (tab[j]); str1 = EXTRACT_ADDR(tab[j]); sign = EXTRACT_SIGN(tab[j]); if (sign == 0) { break; } else if (sign > 0) { t1[ia*bcount+str0] += pci[str1]; } else { t1[ia*bcount+str0] -= pci[str1]; } } tab += nlinkb; } } /* * spread t1 into ci1 */ void FCIspread_a_t1(double *ci1, double *t1, int bcount, int stra_id, int strb_id, int norb, int nstrb, int nlinka, _LinkTrilT *clink_indexa) { ci1 += strb_id; int j, k, ia, sign; size_t str1; const _LinkTrilT *tab = clink_indexa + stra_id * nlinka; double *cp0, *cp1; for (j = 0; j < nlinka; j++) { ia = EXTRACT_IA (tab[j]); str1 = EXTRACT_ADDR(tab[j]); sign = EXTRACT_SIGN(tab[j]); cp0 = t1 + ia*bcount; cp1 = ci1 + str1*nstrb; if (sign == 0) { break; } else if (sign > 0) { for (k = 0; k < bcount; k++) { cp1[k] += cp0[k]; } } else { for (k = 0; k < bcount; k++) { cp1[k] -= cp0[k]; } } } } void FCIspread_b_t1(double *ci1, double *t1, int bcount, int stra_id, int strb_id, int norb, int nstrb, int nlinkb, _LinkTrilT *clink_indexb) { int j, ia, str0, str1, sign; const _LinkTrilT *tab = clink_indexb + strb_id * nlinkb; double *pci = ci1 + stra_id * (size_t)nstrb; for (str0 = 0; str0 < bcount; str0++) { for (j = 0; j < nlinkb; j++) { ia = EXTRACT_IA (tab[j]); str1 = EXTRACT_ADDR(tab[j]); sign = EXTRACT_SIGN(tab[j]); if (sign == 0) { break; } else if (sign > 0) { pci[str1] += t1[ia*bcount+str0]; } else if (sign < 0) { pci[str1] -= t1[ia*bcount+str0]; } } tab += nlinkb; } } /* * f1e_tril is the 1e hamiltonian for spin alpha */ void FCIcontract_a_1e(double *f1e_tril, double *ci0, double *ci1, int norb, int nstra, int nstrb, int nlinka, int nlinkb, int *link_indexa, int *link_indexb) { int j, k, ia, sign; size_t str0, str1; double *pci0, *pci1; double tmp; _LinkTrilT *tab; _LinkTrilT *clink = malloc(sizeof(_LinkTrilT) * nlinka * nstra); FCIcompress_link_tril(clink, link_indexa, nstra, nlinka); for (str0 = 0; str0 < nstra; str0++) { tab = clink + str0 * nlinka; for (j = 0; j < nlinka; j++) { ia = EXTRACT_IA (tab[j]); str1 = EXTRACT_ADDR(tab[j]); sign = EXTRACT_SIGN(tab[j]); pci0 = ci0 + str0 * nstrb; pci1 = ci1 + str1 * nstrb; tmp = sign * f1e_tril[ia]; for (k = 0; k < nstrb; k++) { pci1[k] += tmp * pci0[k]; } } } free(clink); } /* * f1e_tril is the 1e hamiltonian for spin beta */ void FCIcontract_b_1e(double *f1e_tril, double *ci0, double *ci1, int norb, int nstra, int nstrb, int nlinka, int nlinkb, int *link_indexa, int *link_indexb) { int j, k, ia, sign; size_t str0, str1; double *pci1; double tmp; _LinkTrilT *tab; _LinkTrilT *clink = malloc(sizeof(_LinkTrilT) * nlinkb * nstrb); FCIcompress_link_tril(clink, link_indexb, nstrb, nlinkb); for (str0 = 0; str0 < nstra; str0++) { pci1 = ci1 + str0 * nstrb; for (k = 0; k < nstrb; k++) { tab = clink + k * nlinkb; tmp = ci0[str0*nstrb+k]; for (j = 0; j < nlinkb; j++) { ia = EXTRACT_IA (tab[j]); str1 = EXTRACT_ADDR(tab[j]); sign = EXTRACT_SIGN(tab[j]); pci1[str1] += sign * tmp * f1e_tril[ia]; } } } free(clink); } void FCIcontract_1e_spin0(double *f1e_tril, double *ci0, double *ci1, int norb, int na, int nlink, int *link_index) { memset(ci1, 0, sizeof(double)*na*na); 
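        /* Editor's note (hedged): for a spin=0 system the alpha and beta
         * string lists coincide, so the full 1e contraction is recovered
         * from the alpha half alone with nstra == nstrb == na; the caller
         * symmetrizes the result, as the FCIcontract_2e_spin0 comment
         * below notes for the 2e case. */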
        FCIcontract_a_1e(f1e_tril, ci0, ci1, norb, na, na, nlink, nlink,
                         link_index, link_index);
}

/*
 * spread t1 into ci1buf
 */
static void spread_bufa_t1(double *ci1, double *t1, int nrow_t1,
                           int bcount, int stra_id, int strb_id,
                           int norb, int nstrb, int nlinka,
                           _LinkTrilT *clink_indexa)
{
        int j, k, ia, sign;
        size_t str1;
        const _LinkTrilT *tab = clink_indexa + stra_id * nlinka;
        double *cp0, *cp1;

        for (j = 0; j < nlinka; j++) {
                ia   = EXTRACT_IA  (tab[j]);
                str1 = EXTRACT_ADDR(tab[j]);
                sign = EXTRACT_SIGN(tab[j]);
                cp0 = t1 + ia*nrow_t1;
                cp1 = ci1 + str1*nstrb;
                if (sign == 0) {
                        break;
                } else if (sign > 0) {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] += cp0[k];
                        }
                } else {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] -= cp0[k];
                        }
                }
        }
}

/*
 * bcount_for_spread_a is different for spin1 and spin0
 */
static void ctr_rhf2e_kern(double *eri, double *ci0, double *ci1,
                           double *ci1buf, double *t1buf,
                           int bcount_for_spread_a, int ncol_ci1buf,
                           int bcount, int stra_id, int strb_id,
                           int norb, int na, int nb, int nlinka, int nlinkb,
                           _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb)
{
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * (norb+1)/2;
        double *t1  = t1buf;
        double *vt1 = t1buf + nnorb*bcount;

        memset(t1, 0, sizeof(double)*nnorb*bcount);
        FCIprog_a_t1(ci0, t1, bcount, stra_id, strb_id,
                     norb, nb, nlinka, clink_indexa);
        FCIprog_b_t1(ci0, t1, bcount, stra_id, strb_id,
                     norb, nb, nlinkb, clink_indexb);
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1, &bcount, eri, &nnorb, &D0, vt1, &bcount);
        FCIspread_b_t1(ci1, vt1, bcount, stra_id, strb_id,
                       norb, nb, nlinkb, clink_indexb);
        //FCIspread_a_t1(ci1buf, vt1, bcount_for_spread_a, stra_id, 0,
        //               norb, ncol_ci1buf, nlinka, clink_indexa);
        spread_bufa_t1(ci1buf, vt1, bcount, bcount_for_spread_a, stra_id, 0,
                       norb, ncol_ci1buf, nlinka, clink_indexa);
}

void FCIaxpy2d(double *out, double *in, size_t count, size_t no, size_t ni)
{
        int i, j;
        for (i = 0; i < count; i++) {
                for (j = 0; j < ni; j++) {
                        out[i*no+j] += in[i*ni+j];
                }
        }
}

void FCIomp_reduce_inplace(double **vec, size_t count)
{
        unsigned int nthreads  = omp_get_num_threads();
        unsigned int thread_id = omp_get_thread_num();
        unsigned int bit, thread_src;
        unsigned int mask = 0;
        double *dst = vec[thread_id];
        double *src;
        size_t i;
        for (bit = 0; (1<<bit) < nthreads; bit++) {
                mask |= 1 << bit;
                if (!(thread_id & mask)) {
                        thread_src = thread_id | (1<<bit);
                        if (thread_src < nthreads) {
                                src = vec[thread_src];
                                for (i = 0; i < count; i++) {
                                        dst[i] += src[i];
                                }
                        }
                }
#pragma omp barrier
        }
}

/*
 * nlink = nocc*nvir, the number of all possible strings that a string
 * can link to
 * link_index[str0] == linking map between str0 and other strings
 * link_index[str0][ith-linking-string] ==
 *     [tril(creation_op,annihilation_op),0,linking-string-id,sign]
 * FCIcontract_2e_spin0 only computes half of the contraction, due to the
 * symmetry between alpha and beta spin.
The right contracted ci vector * is (ci1+ci1.T) */ void FCIcontract_2e_spin0(double *eri, double *ci0, double *ci1, int norb, int na, int nlink, int *link_index) { _LinkTrilT *clink = malloc(sizeof(_LinkTrilT) * nlink * na); FCIcompress_link_tril(clink, link_index, na, nlink); memset(ci1, 0, sizeof(double)*na*na); double *ci1bufs[MAX_THREADS]; #pragma omp parallel default(none) \ shared(eri, ci0, ci1, norb, na, nlink, clink, ci1bufs) { int strk, ib; size_t blen; double *t1buf = malloc(sizeof(double) * STRB_BLKSIZE*norb*(norb+1)); double *ci1buf = malloc(sizeof(double) * na*STRB_BLKSIZE); ci1bufs[omp_get_thread_num()] = ci1buf; for (ib = 0; ib < na; ib += STRB_BLKSIZE) { blen = MIN(STRB_BLKSIZE, na-ib); memset(ci1buf, 0, sizeof(double) * na*blen); #pragma omp for schedule(static, 112) /* strk starts from MAX(strk0, ib), because [0:ib,0:ib] have been evaluated */ for (strk = ib; strk < na; strk++) { ctr_rhf2e_kern(eri, ci0, ci1, ci1buf, t1buf, MIN(STRB_BLKSIZE, strk-ib), blen, MIN(STRB_BLKSIZE, strk+1-ib), strk, ib, norb, na, na, nlink, nlink, clink, clink); } FCIomp_reduce_inplace(ci1bufs, blen*na); #pragma omp master FCIaxpy2d(ci1+ib, ci1buf, na, na, blen); } free(ci1buf); free(t1buf); } free(clink); } void FCIcontract_2e_spin1(double *eri, double *ci0, double *ci1, int norb, int na, int nb, int nlinka, int nlinkb, int *link_indexa, int *link_indexb) { _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * na); _LinkTrilT *clinkb = malloc(sizeof(_LinkTrilT) * nlinkb * nb); FCIcompress_link_tril(clinka, link_indexa, na, nlinka); FCIcompress_link_tril(clinkb, link_indexb, nb, nlinkb); memset(ci1, 0, sizeof(double)*na*nb); double *ci1bufs[MAX_THREADS]; #pragma omp parallel default(none) \ shared(eri, ci0, ci1, norb, na, nb, nlinka, nlinkb, \ clinka, clinkb, ci1bufs) { int strk, ib; size_t blen; double *t1buf = malloc(sizeof(double) * STRB_BLKSIZE*norb*(norb+1)); double *ci1buf = malloc(sizeof(double) * na*STRB_BLKSIZE); ci1bufs[omp_get_thread_num()] = ci1buf; for (ib = 0; ib < nb; ib += STRB_BLKSIZE) { blen = MIN(STRB_BLKSIZE, nb-ib); memset(ci1buf, 0, sizeof(double) * na*blen); #pragma omp for schedule(static) for (strk = 0; strk < na; strk++) { ctr_rhf2e_kern(eri, ci0, ci1, ci1buf, t1buf, blen, blen, blen, strk, ib, norb, na, nb, nlinka, nlinkb, clinka, clinkb); } FCIomp_reduce_inplace(ci1bufs, blen*na); #pragma omp master FCIaxpy2d(ci1+ib, ci1buf, na, nb, blen); } free(ci1buf); free(t1buf); } free(clinka); free(clinkb); } /* * eri_ab is mixed integrals (alpha,alpha|beta,beta), |beta,beta) in small strides */ static void ctr_uhf2e_kern(double *eri_aa, double *eri_ab, double *eri_bb, double *ci0, double *ci1, double *ci1buf, double *t1buf, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb) { const char TRANS_T = 'T'; const char TRANS_N = 'N'; const double D0 = 0; const double D1 = 1; const int nnorb = norb * (norb+1)/2; double *t1a = t1buf; double *t1b = t1a + nnorb*bcount; double *vt1 = t1b + nnorb*bcount; memset(t1a, 0, sizeof(double)*nnorb*bcount); memset(t1b, 0, sizeof(double)*nnorb*bcount); FCIprog_a_t1(ci0, t1a, bcount, stra_id, strb_id, norb, nb, nlinka, clink_indexa); FCIprog_b_t1(ci0, t1b, bcount, stra_id, strb_id, norb, nb, nlinkb, clink_indexb); dgemm_(&TRANS_N, &TRANS_T, &bcount, &nnorb, &nnorb, &D1, t1a, &bcount, eri_ab, &nnorb, &D0, vt1, &bcount); dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb, &D1, t1b, &bcount, eri_bb, &nnorb, &D1, vt1, &bcount); FCIspread_b_t1(ci1, vt1, bcount, 
stra_id, strb_id, norb, nb, nlinkb, clink_indexb); dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb, &D1, t1a, &bcount, eri_aa, &nnorb, &D0, vt1, &bcount); dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb, &D1, t1b, &bcount, eri_ab, &nnorb, &D1, vt1, &bcount); FCIspread_a_t1(ci1buf, vt1, bcount, stra_id, 0, norb, bcount, nlinka, clink_indexa); } void FCIcontract_uhf2e(double *eri_aa, double *eri_ab, double *eri_bb, double *ci0, double *ci1, int norb, int na, int nb, int nlinka, int nlinkb, int *link_indexa, int *link_indexb) { _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * na); _LinkTrilT *clinkb = malloc(sizeof(_LinkTrilT) * nlinkb * nb); FCIcompress_link_tril(clinka, link_indexa, na, nlinka); FCIcompress_link_tril(clinkb, link_indexb, nb, nlinkb); memset(ci1, 0, sizeof(double)*na*nb); double *ci1bufs[MAX_THREADS]; #pragma omp parallel default(none) \ shared(eri_aa, eri_ab, eri_bb, ci0, ci1, norb, na, nb, nlinka, nlinkb,\ clinka, clinkb, ci1bufs) { int strk, ib; size_t blen; double *t1buf = malloc(sizeof(double) * STRB_BLKSIZE*norb*(norb+1)*2); double *ci1buf = malloc(sizeof(double) * na*STRB_BLKSIZE); ci1bufs[omp_get_thread_num()] = ci1buf; for (ib = 0; ib < nb; ib += STRB_BLKSIZE) { blen = MIN(STRB_BLKSIZE, nb-ib); memset(ci1buf, 0, sizeof(double) * na*blen); #pragma omp for schedule(static) for (strk = 0; strk < na; strk++) { ctr_uhf2e_kern(eri_aa, eri_ab, eri_bb, ci0, ci1, ci1buf, t1buf, blen, strk, ib, norb, na, nb, nlinka, nlinkb, clinka, clinkb); } FCIomp_reduce_inplace(ci1bufs, blen*na); #pragma omp master FCIaxpy2d(ci1+ib, ci1buf, na, nb, blen); } free(t1buf); free(ci1buf); } free(clinka); free(clinkb); } /************************************************* * hdiag *************************************************/ static void gen_occslist(int *occslist, uint64_t *strs, int nstr, int norb, int nelec) { int i, j; int *pocc; for (i = 0; i < nstr; i++) { pocc = occslist + i * nelec; for (j = 0; j < norb; j++) { if (strs[i] & (1ULL<<j)) { *pocc = j; pocc++; } } } } void FCImake_hdiag_uhf(double *hdiag, double *h1e_a, double *h1e_b, double *jdiag_aa, double *jdiag_ab, double *jdiag_bb, double *kdiag_aa, double *kdiag_bb, int norb, int nstra, int nstrb, int nocca, int noccb, uint64_t *stra, uint64_t *strb) { int *occslista = malloc(sizeof(int) * nocca*nstra); int *occslistb = malloc(sizeof(int) * noccb*nstrb); gen_occslist(occslista, stra, nstra, norb, nocca); gen_occslist(occslistb, strb, nstrb, norb, noccb); #pragma omp parallel default(none) \ shared(hdiag, h1e_a, h1e_b, \ jdiag_aa, jdiag_ab, jdiag_bb, kdiag_aa, kdiag_bb, \ norb, nstra, nstrb, nocca, noccb, occslista, occslistb) { int j, j0, k0, jk, jk0; size_t ia, ib; double e1, e2; int *paocc, *pbocc; #pragma omp for schedule(static) for (ia = 0; ia < nstra; ia++) { paocc = occslista + ia * nocca; for (ib = 0; ib < nstrb; ib++) { e1 = 0; e2 = 0; pbocc = occslistb + ib * noccb; for (j0 = 0; j0 < nocca; j0++) { j = paocc[j0]; jk0 = j * norb; e1 += h1e_a[j*norb+j]; for (k0 = 0; k0 < nocca; k0++) { // (alpha|alpha) jk = jk0 + paocc[k0]; e2 += jdiag_aa[jk] - kdiag_aa[jk]; } for (k0 = 0; k0 < noccb; k0++) { // (alpha|beta) jk = jk0 + pbocc[k0]; e2 += jdiag_ab[jk] * 2; } } for (j0 = 0; j0 < noccb; j0++) { j = pbocc[j0]; jk0 = j * norb; e1 += h1e_b[j*norb+j]; for (k0 = 0; k0 < noccb; k0++) { // (beta|beta) jk = jk0 + pbocc[k0]; e2 += jdiag_bb[jk] - kdiag_bb[jk]; } } hdiag[ia*nstrb+ib] = e1 + e2 * .5; } } } free(occslista); free(occslistb); } void FCImake_hdiag(double *hdiag, double *h1e, double *jdiag, double *kdiag, int 
norb, int na, int nocc, uint64_t *strs) { FCImake_hdiag_uhf(hdiag, h1e, h1e, jdiag, jdiag, jdiag, kdiag, kdiag, norb, na, na, nocc, nocc, strs, strs); } static int first1(uint64_t r) { #ifdef HAVE_FFS return ffsll(r) - 1; #else int n = 0; if (r >> (n + 32)) n += 32; if (r >> (n + 16)) n += 16; if (r >> (n + 8)) n += 8; if (r >> (n + 4)) n += 4; if (r >> (n + 2)) n += 2; if (r >> (n + 1)) n += 1; return n; #endif } /************************************************* * pspace Hamiltonian, ref CPL, 169, 463 *************************************************/ /* * sub-space Hamiltonian (tril part) of the determinants (stra,strb) */ void FCIpspace_h0tril_uhf(double *h0, double *h1e_a, double *h1e_b, double *g2e_aa, double *g2e_ab, double *g2e_bb, uint64_t *stra, uint64_t *strb, int norb, int np) { const int d2 = norb * norb; const int d3 = norb * norb * norb; #pragma omp parallel default(none) \ shared(h0, h1e_a, h1e_b, g2e_aa, g2e_ab, g2e_bb, \ stra, strb, norb, np) { int i, j, k, pi, pj, pk, pl; int n1da, n1db; uint64_t da, db, str1; double tmp; #pragma omp for schedule(dynamic) for (i = 0; i < np; i++) { for (j = 0; j < i; j++) { da = stra[i] ^ stra[j]; db = strb[i] ^ strb[j]; n1da = FCIpopcount_1(da); n1db = FCIpopcount_1(db); switch (n1da) { case 0: switch (n1db) { case 2: pi = first1(db & strb[i]); pj = first1(db & strb[j]); tmp = h1e_b[pi*norb+pj]; for (k = 0; k < norb; k++) { if (stra[i] & (1ULL<<k)) { tmp += g2e_ab[pi*norb+pj+k*d3+k*d2]; } if (strb[i] & (1ULL<<k)) { tmp += g2e_bb[pi*d3+pj*d2+k*norb+k] - g2e_bb[pi*d3+k*d2+k*norb+pj]; } } if (FCIcre_des_sign(pi, pj, strb[j]) > 0) { h0[i*np+j] = tmp; } else { h0[i*np+j] = -tmp; } break; case 4: pi = first1(db & strb[i]); pj = first1(db & strb[j]); pk = first1((db & strb[i]) ^ (1ULL<<pi)); pl = first1((db & strb[j]) ^ (1ULL<<pj)); str1 = strb[j] ^ (1ULL<<pi) ^ (1ULL<<pj); if (FCIcre_des_sign(pi, pj, strb[j]) *FCIcre_des_sign(pk, pl, str1) > 0) { h0[i*np+j] = g2e_bb[pi*d3+pj*d2+pk*norb+pl] - g2e_bb[pi*d3+pl*d2+pk*norb+pj]; } else { h0[i*np+j] =-g2e_bb[pi*d3+pj*d2+pk*norb+pl] + g2e_bb[pi*d3+pl*d2+pk*norb+pj]; } } break; case 2: switch (n1db) { case 0: pi = first1(da & stra[i]); pj = first1(da & stra[j]); tmp = h1e_a[pi*norb+pj]; for (k = 0; k < norb; k++) { if (strb[i] & (1ULL<<k)) { tmp += g2e_ab[pi*d3+pj*d2+k*norb+k]; } if (stra[i] & (1ULL<<k)) { tmp += g2e_aa[pi*d3+pj*d2+k*norb+k] - g2e_aa[pi*d3+k*d2+k*norb+pj]; } } if (FCIcre_des_sign(pi, pj, stra[j]) > 0) { h0[i*np+j] = tmp; } else { h0[i*np+j] = -tmp; } break; case 2: pi = first1(da & stra[i]); pj = first1(da & stra[j]); pk = first1(db & strb[i]); pl = first1(db & strb[j]); if (FCIcre_des_sign(pi, pj, stra[j]) *FCIcre_des_sign(pk, pl, strb[j]) > 0) { h0[i*np+j] = g2e_ab[pi*d3+pj*d2+pk*norb+pl]; } else { h0[i*np+j] =-g2e_ab[pi*d3+pj*d2+pk*norb+pl]; } } break; case 4: switch (n1db) { case 0: pi = first1(da & stra[i]); pj = first1(da & stra[j]); pk = first1((da & stra[i]) ^ (1ULL<<pi)); pl = first1((da & stra[j]) ^ (1ULL<<pj)); str1 = stra[j] ^ (1ULL<<pi) ^ (1ULL<<pj); if (FCIcre_des_sign(pi, pj, stra[j]) *FCIcre_des_sign(pk, pl, str1) > 0) { h0[i*np+j] = g2e_aa[pi*d3+pj*d2+pk*norb+pl] - g2e_aa[pi*d3+pl*d2+pk*norb+pj]; } else { h0[i*np+j] =-g2e_aa[pi*d3+pj*d2+pk*norb+pl] + g2e_aa[pi*d3+pl*d2+pk*norb+pj]; } } break; } } } } } void FCIpspace_h0tril(double *h0, double *h1e, double *g2e, uint64_t *stra, uint64_t *strb, int norb, int np) { FCIpspace_h0tril_uhf(h0, h1e, h1e, g2e, g2e, g2e, stra, strb, norb, np); } /*********************************************************************** * * With 
symmetry
 *
 * Note the ordering in eri and the index in link_index
 * eri is a tril matrix; it should be reordered wrt the irrep of the
 * direct product E_i^j. The 2D array eri(ij,kl) is a diagonal block
 * matrix. Each block is associated with an irrep.
 * link_index[str_id,pair_id,0], which is the index of pair_id, should be
 * reordered wrt the irreps accordingly
 *
 * dimirrep stores the number of occurrences of each irrep
 *
 ***********************************************************************/
static void pick_link_by_irrep(_LinkTrilT *clink, int *link_index,
                               int nstr, int nlink, int eri_irrep)
{
        int i, j, k;
        for (i = 0; i < nstr; i++) {
                for (k = 0, j = 0; k < nlink; k++) {
                        if (link_index[k*4+1] == eri_irrep) {
                                clink[j].ia   = link_index[k*4+0];
                                clink[j].addr = link_index[k*4+2];
                                clink[j].sign = link_index[k*4+3];
                                j++;
                        }
                }
                if (j < nlink) {
                        clink[j].sign = 0;
                }
                clink += nlink;
                link_index += nlink * 4;
        }
}

static void ctr_rhf2esym_kern1(double *eri, double *ci0, double *ci1ab,
                               double *ci1buf, double *t1buf,
                               int ncol_ci1buf, int bcount,
                               int stra_id, int strb_id,
                               int nnorb, int nb_intermediate,
                               int na, int nb, int nlinka, int nlinkb,
                               _LinkTrilT *clink_indexa,
                               _LinkTrilT *clink_indexb)
{
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        double *t1  = t1buf;
        double *vt1 = t1buf + nnorb*bcount;
        memset(t1, 0, sizeof(double)*nnorb*bcount);
        FCIprog_a_t1(ci0, t1, bcount, stra_id, strb_id,
                     0, nb, nlinka, clink_indexa);

        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1, &bcount, eri, &nnorb, &D0, vt1, &bcount);
        FCIspread_b_t1(ci1ab, vt1, bcount, stra_id, strb_id,
                       0, nb_intermediate, nlinkb, clink_indexb);
        spread_bufa_t1(ci1buf, vt1, bcount, bcount, stra_id, 0,
                       0, ncol_ci1buf, nlinka, clink_indexa);
}

static void loop_c2e_symm1(double *eri, double *ci0, double *ci1aa,
                           double *ci1ab, int nnorb,
                           int na_intermediate, int nb_intermediate,
                           int na, int nb, int nlinka, int nlinkb,
                           _LinkTrilT *clinka, _LinkTrilT *clinkb)
{
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel default(none) \
        shared(eri, ci0, ci1aa, ci1ab, nnorb, na, nb, nlinka, nlinkb, \
               na_intermediate, nb_intermediate, clinka, clinkb, ci1bufs)
{
        int strk, ib;
        size_t blen;
        double *t1buf  = malloc(sizeof(double) * STRB_BLKSIZE*nnorb*2);
        double *ci1buf = malloc(sizeof(double) * na*STRB_BLKSIZE);
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
                memset(ci1buf, 0, sizeof(double) * na*blen);
#pragma omp for schedule(static)
                for (strk = 0; strk < na_intermediate; strk++) {
                        ctr_rhf2esym_kern1(eri, ci0, ci1ab, ci1buf, t1buf,
                                           blen, blen, strk, ib,
                                           nnorb, nb_intermediate,
                                           na, nb, nlinka, nlinkb,
                                           clinka, clinkb);
                }
                FCIomp_reduce_inplace(ci1bufs, blen*na);
#pragma omp master
                FCIaxpy2d(ci1aa+ib, ci1buf, na, nb, blen);
        }
        free(ci1buf);
        free(t1buf);
}
}

#define TOTIRREPS 8
void FCIcontract_2e_symm1(double **eris, double **ci0, double **ci1,
                          int norb, int *nas, int *nbs,
                          int nlinka, int nlinkb,
                          int **linka, int **linkb,
                          int *dimirrep, int wfnsym)
{
        int i;
        int na = 0;
        int nb = 0;
        for (i = 0; i < TOTIRREPS; i++) {
                na = MAX(nas[i], na);
                nb = MAX(nbs[i], nb);
        }
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * na);
        _LinkTrilT *clinkb = malloc(sizeof(_LinkTrilT) * nlinkb * nb);
        int ai_ir, stra_ir, strb_ir, intera_ir, interb_ir, ma, mb;
        for (stra_ir = 0; stra_ir < TOTIRREPS; stra_ir++) {
        for (ai_ir = 0; ai_ir < TOTIRREPS; ai_ir++) {
                strb_ir = wfnsym^stra_ir;
                ma = nas[stra_ir];
                mb = nbs[strb_ir];
                if (ma > 0 && mb > 0 && dimirrep[ai_ir] > 0) {
                        intera_ir = ai_ir^stra_ir;
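                        /* Editor's note (hedged): irreps combine by
                         * direct product (XOR for abelian groups), so an
                         * excitation of irrep ai_ir maps a string of irrep
                         * stra_ir into irrep ai_ir^stra_ir, and likewise
                         * for the beta strings below. */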
                        interb_ir = ai_ir^strb_ir;
                        // clinka for intera_ir*ai_ir -> stra_ir
                        pick_link_by_irrep(clinka, linka[intera_ir],
                                           nas[intera_ir], nlinka, ai_ir);
                        // clinkb for strb_ir*ai_ir -> interb_ir
                        pick_link_by_irrep(clinkb, linkb[strb_ir],
                                           nbs[strb_ir], nlinkb, ai_ir);
                        loop_c2e_symm1(eris[ai_ir], ci0[stra_ir],
                                       ci1[stra_ir], ci1[intera_ir],
                                       dimirrep[ai_ir], nas[intera_ir],
                                       nbs[interb_ir], ma, mb,
                                       nlinka, nlinkb, clinka, clinkb);
                }
        } }
        free(clinka);
        free(clinkb);
}
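/*
 * Editor's note -- standalone usage sketch (hedged, not part of the
 * original file) of FCIomp_reduce_inplace above: in round "bit", thread t
 * accumulates the buffer of thread t|(1<<bit), so after log2(nthreads)
 * barrier-separated rounds the total lands in vec[0].  The main() below
 * is hypothetical and sums one 4-element buffer per thread, assuming 8
 * threads are granted.
 */
#ifdef FCI_REDUCE_DEMO
#include <omp.h>
#include <stdio.h>
int main(void)
{
        double *bufs[64];                        /* >= num_threads slots */
#pragma omp parallel num_threads(8)
        {
                double local[4] = {1, 2, 3, 4};  /* this thread's partial */
                bufs[omp_get_thread_num()] = local;
#pragma omp barrier                              /* all slots published   */
                FCIomp_reduce_inplace(bufs, 4);  /* tree-reduce to thread 0 */
#pragma omp master           /* element 0 summed over 8 threads -> 8.0 */
                printf("reduced: %g\n", bufs[0][0]);
        }
        return 0;
}
#endif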
GB_unop__floor_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__floor_fp64_fp64 // op(A') function: GB_unop_tran__floor_fp64_fp64 // C type: double // A type: double // cast: double cij = aij // unaryop: cij = floor (aij) #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = floor (x) ; // casting #define GB_CAST(z, aij) \ double z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = aij ; \ Cx [pC] = floor (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FLOOR || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__floor_fp64_fp64 ( double *Cx, // Cx and Ax may be aliased const double *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = aij ; Cx [p] = floor (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__floor_fp64_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
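/*
 * Editor's note -- hedged usage sketch (not part of the generated file):
 * viewed in isolation, GB_unop_apply__floor_fp64_fp64 is just a parallel
 * elementwise floor; Cx and Ax may alias, as the prototype documents.
 * Note it returns GrB_NO_VALUE without touching Cx when GB_DISABLE holds.
 */
#ifdef GB_FLOOR_DEMO
#include <stdio.h>
int main (void)
{
    double x [4] = {1.9, -0.1, 2.0, -2.5} ;
    GB_unop_apply__floor_fp64_fp64 (x, x, 4, 2) ;      // in place, 2 threads
    printf ("%g %g %g %g\n", x [0], x [1], x [2], x [3]) ;  // 1 -1 2 -3
    return (0) ;
}
#endif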
3mm.c
/** * This version is stamped on May 10, 2016 * * Contact: * Louis-Noel Pouchet <pouchet.ohio-state.edu> * Tomofumi Yuki <tomofumi.yuki.fr> * * Web address: http://polybench.sourceforge.net */ /* 3mm.c: this file is part of PolyBench/C */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A, NI, NK, ni, nk), DATA_TYPE POLYBENCH_2D(B, NK, NJ, nk, nj), DATA_TYPE POLYBENCH_2D(C, NJ, NM, nj, nm), DATA_TYPE POLYBENCH_2D(D, NM, NL, nm, nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = (DATA_TYPE) ((i * j + 1) % ni) / (5 * ni); for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = (DATA_TYPE) ((i * (j + 1) + 2) % nj) / (5 * nj); for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = (DATA_TYPE) (i * (j + 3) % nl) / (5 * nl); for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = (DATA_TYPE) ((i * (j + 2) + 2) % nk) / (5 * nk); } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G, NI, NL, ni, nl)) { int i, j; POLYBENCH_DUMP_START; POLYBENCH_DUMP_BEGIN("G"); for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { if ((i * ni + j) % 20 == 0) fprintf (POLYBENCH_DUMP_TARGET, "\n"); fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, G[i][j]); } POLYBENCH_DUMP_END("G"); POLYBENCH_DUMP_FINISH; } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E, NI, NJ, ni, nj), DATA_TYPE POLYBENCH_2D(A, NI, NK, ni, nk), DATA_TYPE POLYBENCH_2D(B, NK, NJ, nk, nj), DATA_TYPE POLYBENCH_2D(F, NJ, NL, nj, nl), DATA_TYPE POLYBENCH_2D(C, NJ, NM, nj, nm), DATA_TYPE POLYBENCH_2D(D, NM, NL, nm, nl), DATA_TYPE POLYBENCH_2D(G, NI, NL, ni, nl)) { int i, j, k; #pragma omp parallel for default(shared) private(i, j, k) firstprivate(ni, nj, nk, A, B) for (i = 0; i < _PB_NI; i++) { for (j = 0; j < _PB_NJ; j++) { E[i][j] = SCALAR_VAL(0.0); for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } } #pragma omp parallel for default(shared) private(i, j, k) firstprivate(nj, nl, nm, C, D) for (i = 0; i < _PB_NJ; i++) { for (j = 0; j < _PB_NL; j++) { F[i][j] = SCALAR_VAL(0.0); for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } } #pragma omp parallel for default(shared) private(i, j, k) firstprivate(ni, nl, nj, E, F) for (i = 0; i < _PB_NI; i++) { for (j = 0; j < _PB_NL; j++) { G[i][j] = SCALAR_VAL(0.0); for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. 
*/ polybench_start_instruments; /* Run kernel. */ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
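/*
 * Editor's note -- hedged variant, not part of PolyBench: each (i,j)
 * iteration of a product is independent, so the two outer loops of any of
 * the three nests in kernel_3mm can also be collapsed, exposing ni*nj
 * parallel iterations instead of ni; a scalar accumulator keeps the inner
 * k-loop out of the shared array.  Shown for the first nest only;
 * kernel_mm_collapsed is a hypothetical name.
 */
static void kernel_mm_collapsed(int ni, int nj, int nk,
                                DATA_TYPE POLYBENCH_2D(E, NI, NJ, ni, nj),
                                DATA_TYPE POLYBENCH_2D(A, NI, NK, ni, nk),
                                DATA_TYPE POLYBENCH_2D(B, NK, NJ, nk, nj))
{
  int i, j, k;
#pragma omp parallel for collapse(2) private(i, j, k)
  for (i = 0; i < _PB_NI; i++)
    for (j = 0; j < _PB_NJ; j++) {
      DATA_TYPE acc = SCALAR_VAL(0.0);   /* register accumulator */
      for (k = 0; k < _PB_NK; ++k)
        acc += A[i][k] * B[k][j];
      E[i][j] = acc;
    }
}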
elemwise_binary_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file elemwise_binary_op.h * \brief Function definition of elementwise binary operators */ #ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #include <mxnet/operator_util.h> #include <mxnet/op_attr_types.h> #include <vector> #include <string> #include <utility> #include <typeinfo> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../../engine/openmp.h" #include "elemwise_unary_op.h" #include "../../common/utils.h" #include "./init_op.h" #include "../operator_common.h" namespace mxnet { namespace op { /*! Gather binary operator functions into ElemwiseBinaryOp class */ class ElemwiseBinaryOp : public OpBase { public: /*! \brief For sparse, assume missing rvalue is 0 */ template<typename OP, int Req> struct MissingRValueOp { typedef OP Operation; template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0))); } }; /*! \brief For sparse, assume missing lvalue is 0 */ template<typename OP, int Req> struct MissingLValueOp { typedef OP Operation; template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i])); } }; private: /*! * \brief CSR operation requires temp space */ enum ResourceRequestType { kTempSpace }; /*! * \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input * CPU-Only version */ template<typename DType, typename OP, typename xpu> static inline size_t FillDense(mshadow::Stream<xpu> *s, const size_t idx_l, const size_t idx_r, const OpReqType req, mshadow::Tensor<xpu, 2, DType> *out, const size_t iter_out) { const int index_out_min = static_cast<int>(std::min(idx_l, idx_r)); if (static_cast<size_t>(index_out_min) > iter_out) { const DType zero_input_val = OP::Map(DType(0), DType(0)); #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) { Fill<false>(s, (*out)[i], req, zero_input_val); } } return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int' } static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) { return a1.var() == a2.var(); } public: /*! \brief Minimum of three */ static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) { return a < b ? (a < c ? a : c) : (b < c ? 
b : c); } private: template<typename LOP, typename ROP> static void BackwardUseNone_(const nnvm::NodeAttrs &attrs, mshadow::Stream<cpu>* s, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { using namespace mxnet_op; const size_t size = static_cast<size_t>((outputs[0].Size() + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes); const DType *ograd_dptr = inputs[0].dptr<DType>(); if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>()); } else if (req[0] != kNullOp) { DType *lgrad_dptr = outputs[0].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { Kernel<mxnet_op::op_with_req<LOP, Req>, cpu>::Launch(s, size, lgrad_dptr, ograd_dptr); }); } if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>()); } else if (req[1] != kNullOp) { DType *rgrad_dptr = outputs[1].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { Kernel<mxnet_op::op_with_req<ROP, Req>, cpu>::Launch(s, size, rgrad_dptr, ograd_dptr); }); } }); } template<typename LOP, typename ROP> static void BackwardUseIn_(const nnvm::NodeAttrs &attrs, mshadow::Stream<cpu>* s, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { DCHECK_EQ(outputs.size(), 2U); DCHECK_EQ(inputs.size(), 3U); const DType *ograd_dptr = inputs[0].dptr<DType>(); const DType *lhs_dptr = inputs[1].dptr<DType>(); const DType *rhs_dptr = inputs[2].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { const size_t size = static_cast<size_t>( (outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1) / mxnet_op::DataType<DType>::kLanes); DType * lgrad_dptr = outputs[0].dptr<DType>(); mxnet_op::Kernel< mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>, cpu>::Launch( s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr); }); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { const size_t size = static_cast<size_t>( (outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1) / mxnet_op::DataType<DType>::kLanes); DType * rgrad_dptr = outputs[1].dptr<DType>(); mxnet_op::Kernel< mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>, cpu>::Launch( s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr); }); }); } template< typename xpu, typename LOP, typename ROP, bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false, typename BackupCompute> static inline void RspRspOpBackward(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs, BackupCompute backup_compute) { mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); // lhs grad if (req[0] != kNullOp) { // RspRspOp can handle dense outputs so long as OP(0, 0) == 0 RspRspOp<LOP>( s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0], false, false, false, false); // lhs in-place RspRspOp<op::mshadow_op::mul>( s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0], false, false, true, false); } // rhs grad if (req[1] != kNullOp) { RspRspOp<ROP>( s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1], false, false, false, false); // rhs in-place RspRspOp<op::mshadow_op::mul>( s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1], false, false, true, false); } } public: /*! 
\brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
  template<typename OP>
  static void RspRspOp(mshadow::Stream<cpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output,
                       bool lhs_may_be_dense,
                       bool rhs_may_be_dense,
                       bool allow_inplace,
                       bool scatter);

  /*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
  template<typename OP>
  static void RspRspOp(mshadow::Stream<gpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output,
                       bool lhs_may_be_dense,
                       bool rhs_may_be_dense,
                       bool allow_inplace,
                       bool scatter);

  /*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
  template<typename OP>
  static void CsrCsrOp(mshadow::Stream<cpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output);

  /*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
  template<typename OP>
  static void CsrCsrOp(mshadow::Stream<gpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output);

  /*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
  template<typename OP>
  static void DnsCsrDnsOp(mshadow::Stream<cpu> *s,
                          const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);

  /*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
  template<typename OP>
  static void DnsCsrDnsOp(mshadow::Stream<gpu> *s,
                          const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);

  /*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
  template<typename xpu, typename OP>
  static void DnsCsrCsrOp(const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);

  /*! \brief DNS -op- RSP binary operator for non-canonical NDArray */
  template<typename xpu, typename OP>
  static void DnsRspDnsOp(mshadow::Stream<xpu> *s,
                          const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);

 public:
  /*!
   * \brief Rsp-op-Rsp operation which produces a dense result
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
                                          int dev_mask,
                                          DispatchMode* dispatch_mode,
                                          std::vector<int> *in_attrs,
                                          std::vector<int> *out_attrs);

  /*!
   * \brief Allow one of the binary inputs to be dense and still produce a sparse output.
   *        Typically used for sparse * dense = sparse.
* Note: for csr, it dispatches to fallback other than csr, csr -> csr * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool PreferSparseStorageType(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { using namespace common; CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name; CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name; const auto& lhs_stype = in_attrs->at(0); const auto& rhs_stype = in_attrs->at(1); auto& out_stype = out_attrs->at(0); bool dispatched = false; const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask; const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback : DispatchMode::kFComputeEx; if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) { // dns, dns -> dns dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute); } if (!dispatched && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) { // rsp, rsp -> rsp dispatched = storage_type_assign(&out_stype, kRowSparseStorage, dispatch_mode, dispatch_ex); } if (!dispatched && ContainsOnlyStorage(*in_attrs, kCSRStorage)) { // csr, csr -> csr dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, dispatch_ex); } if (!dispatched && ((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage))) { // rsp, dns -> rsp // dns, rsp -> rsp dispatched = storage_type_assign(&out_stype, kRowSparseStorage, dispatch_mode, dispatch_ex); } if (!dispatched && ((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage))) { // csr, dns -> csr // dns, csr -> csr dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, DispatchMode::kFComputeEx); } if (!dispatched) { dispatched = dispatch_fallback(out_attrs, dispatch_mode); } return dispatched; } /*! * \brief Allow one of the inputs to be dense and produce a dense output, * for rsp inputs only support when both inputs are rsp type. * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ template<bool cpu_only, bool rsp, bool csr> static bool PreferDenseStorageType(const nnvm::NodeAttrs& attrs, const int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { using namespace common; CHECK_EQ(in_attrs->size(), 2); CHECK_EQ(out_attrs->size(), 1); const auto lhs_stype = (*in_attrs)[0]; const auto rhs_stype = (*in_attrs)[1]; bool dispatched = false; const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask; const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback : DispatchMode::kFComputeEx; if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) { // dns, dns ... -> dns dispatched = storage_type_assign(out_attrs, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute); } if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) { // rsp, rsp, ... 
-> rsp dispatched = storage_type_assign(out_attrs, kRowSparseStorage, dispatch_mode, DispatchMode::kFComputeEx); } if (!dispatched && csr && ContainsOnlyStorage(*in_attrs, kCSRStorage)) { // csr, csr, ... -> csr dispatched = storage_type_assign(out_attrs, kCSRStorage, dispatch_mode, dispatch_ex); } if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage) || (lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage))) { // dense, csr -> dense / csr, dense -> dense dispatched = storage_type_assign(out_attrs, kDefaultStorage, dispatch_mode, DispatchMode::kFComputeEx); } if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage) || (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage))) { // dense, rsp -> dense / rsp, dense -> dense dispatched = storage_type_assign(out_attrs, kDefaultStorage, dispatch_mode, DispatchMode::kFComputeEx); } if (!dispatched) { dispatch_fallback(out_attrs, dispatch_mode); } return true; } /*! * \brief Backward pass computing input gradient using forward inputs * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs); template<typename xpu, typename OP> static void ComputeInt(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] == kNullOp) return; Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; if (size != 0) { Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); } }); }); } template<typename xpu, typename OP> static void Compute(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] == kNullOp) return; mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); if (outputs[0].type_flag_ == mshadow::kBool) { LOG(FATAL) << "Operator " << attrs.op->name << " does not support boolean type"; } MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; if (size != 0) { Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); } }); }); } template<typename xpu, typename OP> static void MixedUnaryBackwardUseInCompute(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] == kNullOp) return; Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); if 
(mxnet::common::is_int(outputs[0].type_flag_) || outputs[0].type_flag_ == mshadow::kBool) { LOG(FATAL) << "gradient computation of operator " << attrs.op->name << " for " << mshadow::dtype_string(outputs[0].type_flag_) << " type is not supported"; } MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; if (size != 0) { Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); } }); }); } template<typename xpu, typename OP> static void MixedUnaryBackwardUseInOutCompute(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] == kNullOp) return; Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 3U); CHECK_EQ(outputs.size(), 1U); if (mxnet::common::is_int(outputs[0].type_flag_) || outputs[0].type_flag_ == mshadow::kBool) { LOG(FATAL) << "gradient computation of operator " << attrs.op->name << " for " << mshadow::dtype_string(outputs[0].type_flag_) << " type is not supported"; } MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[2].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; if (size != 0) { Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[2].dptr<DType>()); } }); }); } template<typename xpu, typename OP> static void ComputeWithBool(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] == kNullOp) return; Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; if (size != 0) { Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); } }); }); } template<typename xpu, typename OP> static void ComputeLogic(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] == kNullOp) return; Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, { MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[1].type_flag_, EType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; if (size != 0) { Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<bool>(), inputs[0].dptr<DType>(), inputs[1].dptr<EType>()); } }); }); }); } template<typename xpu, typename OP> static void ComputeEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const 
std::vector<NDArray> &outputs) { using namespace common; CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if (req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto rhs_stype = inputs[1].storage_type(); const auto out_stype = outputs[0].storage_type(); mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); if ((ContainsOnlyStorage(inputs, kRowSparseStorage)) && (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) { // rsp, rsp -> rsp // rsp, rsp -> dns RspRspOp<OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false); } else if (ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) { // csr, csr -> csr CsrCsrOp<OP>(s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]); } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) && out_stype == kDefaultStorage) { const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1]; const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1]; const bool reverse = (lhs_stype == kCSRStorage); DnsCsrDnsOp<OP>(s, attrs, ctx, dns, csr, req[0], outputs[0], reverse); } else if (((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) && out_stype == kDefaultStorage) { const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1]; const bool reverse = (lhs_stype == kRowSparseStorage); const NDArray& rsp = (reverse)? inputs[0] : inputs[1]; DnsRspDnsOp<xpu, OP>(s, attrs, ctx, dns, rsp, req[0], outputs[0], reverse); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } /*! \brief ComputeEx allowing dense lvalue and/or rvalue */ template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense> static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if (req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto rhs_stype = inputs[1].storage_type(); const auto out_stype = outputs[0].storage_type(); if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) && ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) || (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) && lhs_may_be_dense && rhs_may_be_dense) { // rsp, rsp -> rsp // rsp, rsp -> dns // rsp, dns -> rsp // dns, rsp -> rsp // More than once dense not allowed (this will be checked in RspRspOp): // rsp, dns -> dns <-- NOT ALLOWED // dns, rsp -> dns <-- NOT ALLOWED mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); RspRspOp<OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false); } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) { ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs); } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) && out_stype == kCSRStorage) { const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1]; const NDArray& csr = (lhs_stype == kCSRStorage)? 
inputs[0] : inputs[1]; const bool reverse = (lhs_stype == kCSRStorage); DnsCsrCsrOp<xpu, OP>(attrs, ctx, dns, csr, req[0], outputs[0], reverse); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); BackwardUseNone_<LOP, ROP>(attrs, s, inputs, req, outputs); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { CHECK_EQ(inputs.size(), 1U); // output grad CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad const auto in_stype = inputs[0].storage_type(); const auto lhs_stype = outputs[0].storage_type(); const auto rhs_stype = outputs[1].storage_type(); // lhs grad if (req[0] != kNullOp) { if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> rsp, _. op requires 0-input returns 0-output DCHECK_LT(std::fabs(static_cast<float>(LOP::Map(0))), 1e-5f); UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]}); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } // rhs grad if (req[1] != kNullOp) { if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> _, rsp. op requires 0-input returns 0-output DCHECK_LT(std::fabs(static_cast<float>(ROP::Map(0))), 1e-5f); UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]}); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); BackwardUseIn_<LOP, ROP>(attrs, s, inputs, req, outputs); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace common; CHECK_EQ(inputs.size(), 3U); CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad const auto lhs_grad_stype = outputs[0].storage_type(); const auto rhs_grad_stype = outputs[1].storage_type(); if (ContainsOnlyStorage(inputs, kRowSparseStorage) && (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) && (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) { // rsp, rsp, rsp -> [dns, rsp], [dns, rsp] RspRspOpBackward<xpu, LOP, ROP, false, false, false>( attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>); } else { LOG(FATAL) << "Not Implemented"; } } }; // class ElemwiseBinaryOp /*! 
\brief Binary launch */ #define MXNET_OPERATOR_REGISTER_BINARY(name) \ NNVM_REGISTER_OP(name) \ .set_num_inputs(2) \ .set_num_outputs(1) \ .set_attr<nnvm::FListInputNames>("FListInputNames", \ [](const NodeAttrs& attrs) { \ return std::vector<std::string>{"lhs", "rhs"}; \ }) \ .set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \ .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \ .set_attr<nnvm::FInplaceOption>("FInplaceOption", \ [](const NodeAttrs& attrs){ \ return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \ }) \ .add_argument("lhs", "NDArray-or-Symbol", "first input") \ .add_argument("rhs", "NDArray-or-Symbol", "second input") /*! \brief Binary launch, with FComputeEx for csr and rsp available */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseStorageType<2, 1, true, true, true>) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \ .set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};}) /*! \brief Binary launch, with FComputeEx for csr and rsp available. when inputs contain both sparse and dense, sparse output is preferred. */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PS(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseBinaryOp::PreferSparseStorageType) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \ .set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};}) /*! \brief Binary launch, dense result * FInferStorageType attr is not set using this macro. * By default DefaultStorageType is used. */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseBinaryOp::SparseSparseWithDenseResult) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) /*! 
\brief Binary launch, with FComputeEx for prefer dense */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PD(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \ .set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};}) #if MXNET_USE_CUDA struct ElemwiseBinaryRTCCompute { std::string OP; void operator()(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs); }; struct ElemwiseBinaryRTCBwdUseNone { std::string LOP; std::string ROP; void operator()(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs); }; struct ElemwiseBinaryRTCBwdUseIn { std::string LOP; std::string ROP; void operator()(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs); }; #endif } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
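/* Illustrative aside (not MXNet code): the ComputeEx dispatch above is a
   small decision table over the lhs/rhs/output storage types.  A minimal,
   self-contained C sketch of that table; the function name dispatch() and
   the enum are hypothetical stand-ins for the templated branches: */
#include <stdio.h>
enum stype { DNS, CSR, RSP };
static const char *dispatch(enum stype lhs, enum stype rhs, enum stype out) {
  if (lhs == RSP && rhs == RSP && (out == RSP || out == DNS)) return "RspRspOp";
  if (lhs == CSR && rhs == CSR && out == CSR) return "CsrCsrOp";
  if (((lhs == CSR && rhs == DNS) || (lhs == DNS && rhs == CSR)) && out == DNS)
    return "DnsCsrDnsOp";
  if (((lhs == RSP && rhs == DNS) || (lhs == DNS && rhs == RSP)) && out == DNS)
    return "DnsRspDnsOp";
  return "LogUnimplementedOp"; /* anything else falls through to the generic path */
}
int main(void) {
  printf("%s\n", dispatch(RSP, DNS, DNS)); /* -> DnsRspDnsOp */
  return 0;
}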
GB_unaryop__minv_int32_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int32_fp64 // op(A') function: GB_tran__minv_int32_fp64 // C type: int32_t // A type: double // cast: int32_t cij ; GB_CAST_SIGNED(cij,aij,32) // unaryop: cij = GB_IMINV_SIGNED (aij, 32) #define GB_ATYPE \ double #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 32) ; // casting #define GB_CASTING(z, x) \ int32_t z ; GB_CAST_SIGNED(z,x,32) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT32 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int32_fp64 ( int32_t *restrict Cx, const double *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int32_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
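/* Illustrative aside (not GraphBLAS code): every generated unary-op file of
   this kind instantiates the same skeleton -- cast each entry to the C type,
   apply the operator, store -- with the loop parallelized over the entries.
   A stand-alone sketch of that skeleton; the trivial cast-and-negate op is a
   hypothetical stand-in for GB_IMINV_SIGNED and its saturating cast: */
#include <stdio.h>
#define N 8
int main(void) {
  double Ax[N] = {0.5, 1.0, 2.0, -4.0, 8.0, -16.0, 3.0, 0.25};
  int Cx[N];
  #pragma omp parallel for schedule(static)
  for (int p = 0; p < N; p++) {
    int z = (int) Ax[p]; /* plays the role of GB_CASTING: typecast aij */
    Cx[p] = -z;          /* plays the role of GB_OP: the unary operator */
  }
  for (int p = 0; p < N; p++) printf("%d ", Cx[p]);
  printf("\n");
  return 0;
}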
omp_sections_reduction.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <math.h> #include "omp_testsuite.h" int test_omp_sections_reduction() { int sum; int known_sum; double dpt,dsum; double dknown_sum; double dt=0.5; /* base of geometric row for + and - test*/ double rounding_error= 1.E-9; int diff; double ddiff; int product; int known_product; int logic_and; int bit_and; int logic_or; int bit_or; int exclusiv_bit_or; int logics[1000]; int i; int result; /* int my_islarger; */ /*int is_larger=1;*/ sum =7; dpt =1; dsum=0; product =1; logic_and=1; bit_and=1; logic_or=0; bit_or=0; exclusiv_bit_or=0; result = 0; dt = 1./3.; known_sum = (999*1000)/2+7; #pragma omp parallel { #pragma omp sections private(i) reduction(+:sum) { #pragma omp section { for (i=1;i<300;i++) { sum=sum+i; } } #pragma omp section { for (i=300;i<700;i++) { sum=sum+i; } } #pragma omp section { for (i=700;i<1000;i++) { sum=sum+i; } } } } if(known_sum!=sum) { ++result; fprintf(stderr,"Error in sum with integers: Result was %d" " instead of %d\n", sum,known_sum); } diff = (999*1000)/2; #pragma omp parallel { #pragma omp sections private(i) reduction(-:diff) { #pragma omp section { for (i=1;i<300;i++) { diff=diff-i; } } #pragma omp section { for (i=300;i<700;i++) { diff=diff-i; } } #pragma omp section { for (i=700;i<1000;i++) { diff=diff-i; } } } } if(diff != 0) { result++; fprintf(stderr,"Error in Difference with integers: Result was %d" " instead of 0.\n",diff); } for (i=0;i<20;++i) { dpt*=dt; } dknown_sum = (1-dpt)/(1-dt); #pragma omp parallel { #pragma omp sections private(i) reduction(+:dsum) { #pragma omp section { for (i=0;i<6;++i) { dsum += pow(dt,i); } } #pragma omp section { for (i=6;i<12;++i) { dsum += pow(dt,i); } } #pragma omp section { for (i=12;i<20;++i) { dsum += pow(dt,i); } } } } if( fabs(dsum-dknown_sum) > rounding_error ) { result++; fprintf(stderr,"Error in sum with doubles: Result was %f" " instead of %f (Difference: %E)\n", dsum, dknown_sum, dsum-dknown_sum); } dpt=1; for (i=0;i<20;++i) { dpt*=dt; } fprintf(stderr,"\n"); ddiff = (1-dpt)/(1-dt); #pragma omp parallel { #pragma omp sections private(i) reduction(-:ddiff) { #pragma omp section { for (i=0;i<6;++i) { ddiff -= pow(dt,i); } } #pragma omp section { for (i=6;i<12;++i) { ddiff -= pow(dt,i); } } #pragma omp section { for (i=12;i<20;++i) { ddiff -= pow(dt,i); } } } } if(fabs(ddiff) > rounding_error) { result++; fprintf(stderr,"Error in Difference with doubles: Result was %E" " instead of 0.0\n",ddiff); } known_product = 3628800; #pragma omp parallel { #pragma omp sections private(i) reduction(*:product) { #pragma omp section { for(i=1;i<3;i++) { product *= i; } } #pragma omp section { for(i=3;i<7;i++) { product *= i; } } #pragma omp section { for(i=7;i<11;i++) { product *= i; } } } } if(known_product != product) { result++; fprintf(stderr,"Error in Product with integers: Result was %d" " instead of %d\n",product,known_product); } for(i=0;i<1000;i++) { logics[i]=1; } #pragma omp parallel { #pragma omp sections private(i) reduction(&&:logic_and) { #pragma omp section { for (i=1;i<300;i++) { logic_and = (logic_and && logics[i]); } } #pragma omp section { for (i=300;i<700;i++) { logic_and = (logic_and && logics[i]); } } #pragma omp section { for (i=700;i<1000;i++) { logic_and = (logic_and && logics[i]); } } } } if(!logic_and) { result++; fprintf(stderr,"Error in logic AND part 1\n"); } logic_and = 1; logics[501] = 0; #pragma omp parallel { #pragma omp sections private(i) reduction(&&:logic_and) { #pragma omp section { for (i=1;i<300;i++) { logic_and = (logic_and && 
logics[i]); } } #pragma omp section { for (i=300;i<700;i++) { logic_and = (logic_and && logics[i]); } } #pragma omp section { for (i=700;i<1000;i++) { logic_and = (logic_and && logics[i]); } } } } if(logic_and) { result++; fprintf(stderr,"Error in logic AND part 2\n"); } for(i=0;i<1000;i++) { logics[i]=0; } #pragma omp parallel { #pragma omp sections private(i) reduction(||:logic_or) { #pragma omp section { for (i=1;i<300;i++) { logic_or = (logic_or || logics[i]); } } #pragma omp section { for (i=300;i<700;i++) { logic_or = (logic_or || logics[i]); } } #pragma omp section { for (i=700;i<1000;i++) { logic_or = (logic_or || logics[i]); } } } } if(logic_or) { result++; fprintf(stderr,"\nError in logic OR part 1\n"); } logic_or = 0; logics[501]=1; #pragma omp parallel { #pragma omp sections private(i) reduction(||:logic_or) { #pragma omp section { for (i=1;i<300;i++) { logic_or = (logic_or || logics[i]); } } #pragma omp section { for (i=300;i<700;i++) { logic_or = (logic_or || logics[i]); } } #pragma omp section { for (i=700;i<1000;i++) { logic_or = (logic_or || logics[i]); } } } } if(!logic_or) { result++; fprintf(stderr,"Error in logic OR part 2\n"); } for(i=0;i<1000;++i) { logics[i]=1; } #pragma omp parallel { #pragma omp sections private(i) reduction(&:bit_and) { #pragma omp section { for(i=0;i<300;++i) { bit_and = (bit_and & logics[i]); } } #pragma omp section { for(i=300;i<700;++i) { bit_and = (bit_and & logics[i]); } } #pragma omp section { for(i=700;i<1000;++i) { bit_and = (bit_and & logics[i]); } } } } if(!bit_and) { result++; fprintf(stderr,"Error in BIT AND part 1\n"); } bit_and = 1; logics[501]=0; #pragma omp parallel { #pragma omp sections private(i) reduction(&:bit_and) { #pragma omp section { for(i=0;i<300;++i) { bit_and = bit_and & logics[i]; } } #pragma omp section { for(i=300;i<700;++i) { bit_and = bit_and & logics[i]; } } #pragma omp section { for(i=700;i<1000;++i) { bit_and = bit_and & logics[i]; } } } } if(bit_and) { result++; fprintf(stderr,"Error in BIT AND part 2\n"); } for(i=0;i<1000;i++) { logics[i]=0; } #pragma omp parallel { #pragma omp sections private(i) reduction(|:bit_or) { #pragma omp section { for(i=0;i<300;++i) { bit_or = bit_or | logics[i]; } } #pragma omp section { for(i=300;i<700;++i) { bit_or = bit_or | logics[i]; } } #pragma omp section { for(i=700;i<1000;++i) { bit_or = bit_or | logics[i]; } } } } if(bit_or) { result++; fprintf(stderr,"Error in BIT OR part 1\n"); } bit_or = 0; logics[501]=1; #pragma omp parallel { #pragma omp sections private(i) reduction(|:bit_or) { #pragma omp section { for(i=0;i<300;++i) { bit_or = bit_or | logics[i]; } } #pragma omp section { for(i=300;i<700;++i) { bit_or = bit_or | logics[i]; } } #pragma omp section { for(i=700;i<1000;++i) { bit_or = bit_or | logics[i]; } } } } if(!bit_or) { result++; fprintf(stderr,"Error in BIT OR part 2\n"); } for(i=0;i<1000;i++) { logics[i]=0; } #pragma omp parallel { #pragma omp sections private(i) reduction(^:exclusiv_bit_or) { #pragma omp section { for(i=0;i<300;++i) { exclusiv_bit_or = exclusiv_bit_or ^ logics[i]; } } #pragma omp section { for(i=300;i<700;++i) { exclusiv_bit_or = exclusiv_bit_or ^ logics[i]; } } #pragma omp section { for(i=700;i<1000;++i) { exclusiv_bit_or = exclusiv_bit_or ^ logics[i]; } } } } if(exclusiv_bit_or) { result++; fprintf(stderr,"Error in EXCLUSIV BIT OR part 1\n"); } exclusiv_bit_or = 0; logics[501]=1; #pragma omp parallel { #pragma omp sections private(i) reduction(^:exclusiv_bit_or) { #pragma omp section { for(i=0;i<300;++i) { exclusiv_bit_or = 
exclusiv_bit_or ^ logics[i]; } } #pragma omp section { for(i=300;i<700;++i) { exclusiv_bit_or = exclusiv_bit_or ^ logics[i]; } } #pragma omp section { for(i=700;i<1000;++i) { exclusiv_bit_or = exclusiv_bit_or ^ logics[i]; } } } } if(!exclusiv_bit_or) { result++; fprintf(stderr,"Error in EXCLUSIV BIT OR part 2\n"); } /*printf("\nResult:%d\n",result);*/ return (result==0); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_sections_reduction()) { num_failed++; } } return num_failed; }
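/* Illustrative aside: the shape this test exercises, reduced to its
   essentials -- three sections, each summing a disjoint subrange, combined
   by reduction(+:sum) -- must equal the closed form n(n-1)/2 plus the
   initial value.  A minimal stand-alone version (a C99 loop-local i makes
   the private(i) clause of the test unnecessary): */
#include <stdio.h>
int main(void) {
  int sum = 7; /* initial value folded in when the reduction combines */
  #pragma omp parallel
  {
    #pragma omp sections reduction(+:sum)
    {
      #pragma omp section
      { for (int i = 1; i < 300; i++) sum += i; }
      #pragma omp section
      { for (int i = 300; i < 700; i++) sum += i; }
      #pragma omp section
      { for (int i = 700; i < 1000; i++) sum += i; }
    }
  }
  printf("%s\n", sum == (999 * 1000) / 2 + 7 ? "OK" : "FAIL");
  return 0;
}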
ordered-2.c
/* { dg-do compile } */ void f1(void) { #pragma omp ordered asdf /* { dg-error "expected" } */ #pragma omp ordered } /* { dg-error "expected expression" } */
ejercicio8.c
/* To compile use (-lrt: real time library):
     gcc -O2 SumaVectores.c -o SumaVectores -lrt
   To run use: SumaVectoresC length */
#include <stdlib.h> // library with atoi(), malloc() and free()
#include <stdio.h>  // library with printf()
#include <omp.h>    // library with the parallel functions
//#define PRINTF_ALL // comment out to remove the printf that prints all components
// Only one of the three VECTOR_ constants may be defined (only one of the
// three defines below may be uncommented):
//#define VECTOR_LOCAL   // uncomment to make the vectors local variables (if the
                         // stack size is exceeded a "Segmentation fault" occurs)
#define VECTOR_GLOBAL    // uncomment to make the vectors global variables (their
                         // length is not limited by the program's stack size)
//#define VECTOR_DYNAMIC // uncomment to make the vectors dynamic variables
                         // (memory reusable during execution)

#ifdef VECTOR_GLOBAL
#define MAX 33554432 //=2^25
double v1[MAX], v2[MAX], v3[MAX];
#endif

int main(int argc, char** argv){
  int i;
  double cgt1, cgt2;
  double ncgt; // for the execution time

  // Read input argument (number of vector components)
  if (argc < 2){
    printf("Missing number of vector components\n");
    exit(-1);
  }
  unsigned int N = atoi(argv[1]); // Maximum N = 2^32-1 = 4294967295 (sizeof(unsigned int) = 4 B)
#ifdef VECTOR_LOCAL
  double v1[N], v2[N], v3[N]; // Variable-length local arrays, available in C since C99
#endif
#ifdef VECTOR_GLOBAL
  if (N > MAX) N = MAX;
#endif
#ifdef VECTOR_DYNAMIC
  double *v1, *v2, *v3;
  v1 = (double*) malloc(N*sizeof(double)); // malloc takes the size in bytes
  v2 = (double*) malloc(N*sizeof(double)); // malloc returns NULL if there is not enough space
  v3 = (double*) malloc(N*sizeof(double));
  if ((v1==NULL) || (v2==NULL) || (v3==NULL)){
    printf("Error allocating space for the vectors\n");
    exit(-2);
  }
#endif

  // Initialize vectors (i must be private: sharing it across the two
  // sections would be a data race)
  #pragma omp parallel private(i)
  {
    #pragma omp sections
    {
      #pragma omp section
      for(i=0; i<N/2; i++){
        v1[i] = N*0.1+i*0.1; v2[i] = N*0.1-i*0.1; // the values depend on N
      }
      #pragma omp section
      for(i=(N/2); i<N; i++){
        v1[i] = N*0.1+i*0.1; v2[i] = N*0.1-i*0.1; // the values depend on N
      }
    }
  }

  cgt1 = omp_get_wtime();
  // Compute the vector sum
  #pragma omp parallel private(i)
  {
    #pragma omp sections
    {
      #pragma omp section
      for(i=0; i<N/2; i++)
        v3[i] = v1[i] + v2[i];
      #pragma omp section
      for(i=(N/2); i<N; i++)
        v3[i] = v1[i] + v2[i];
    }
  }
  cgt2 = omp_get_wtime();
  ncgt = (cgt2-cgt1)/*/(1.e+9)*/;

  // Print the result of the sum and the execution time
#ifdef PRINTF_ALL
  printf("Time(sec.):%11.9f\t / Vector size:%u\n", ncgt, N);
  for(i=0; i<N; i++)
    printf("/ V1[%d]+V2[%d]=V3[%d](%8.6f+%8.6f=%8.6f) /\n", i,i,i, v1[i],v2[i],v3[i]);
#else
  printf("Time(sec.):%11.9f\t / Vector size:%u\t/ V1[0]+V2[0]=V3[0](%8.6f+%8.6f=%8.6f) // V1[%d]+V2[%d]=V3[%d](%8.6f+%8.6f=%8.6f) /\n",
         ncgt, N, v1[0], v2[0], v3[0], N-1, N-1, N-1, v1[N-1], v2[N-1], v3[N-1]);
#endif

#ifdef VECTOR_DYNAMIC
  free(v1); // free the space reserved for v1
  free(v2); // free the space reserved for v2
  free(v3); // free the space reserved for v3
#endif
  return 0;
}
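/* Illustrative aside: the two halves handled by sections above are what a
   single worksharing loop gives for free.  The same initialization and sum
   with a plain parallel for, which also scales past two threads: */
#include <stdio.h>
#include <stdlib.h>
int main(void) {
  unsigned int N = 1000000;
  double *v1 = malloc(N * sizeof(double));
  double *v2 = malloc(N * sizeof(double));
  double *v3 = malloc(N * sizeof(double));
  if (!v1 || !v2 || !v3) return -2;
  #pragma omp parallel for
  for (unsigned int i = 0; i < N; i++) {
    v1[i] = N * 0.1 + i * 0.1;
    v2[i] = N * 0.1 - i * 0.1;
  }
  #pragma omp parallel for
  for (unsigned int i = 0; i < N; i++)
    v3[i] = v1[i] + v2[i];
  printf("V3[0]=%8.6f V3[%u]=%8.6f\n", v3[0], N - 1, v3[N - 1]);
  free(v1); free(v2); free(v3);
  return 0;
}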
parallel-inl.h
// Copyright (c) 2018 Doyub Kim // // I am making my contributions/submissions to this project solely in my // personal capacity and am not conveying any rights to any intellectual // property of any third parties. #ifndef INCLUDE_JET_DETAIL_PARALLEL_INL_H_ #define INCLUDE_JET_DETAIL_PARALLEL_INL_H_ #include "array_utils-inl.h" #include "array_utils.h" #include "constants.h" #include "macros.h" #include <algorithm> #include <functional> #include <future> #include <vector> #ifdef JET_TASKING_TBB #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <tbb/parallel_sort.h> #include <tbb/task.h> #elif defined(JET_TASKING_CPP11THREADS) #include <thread> #endif namespace vox { namespace geometry { namespace internal { // NOTE - This abstraction takes a lambda which should take captured // variables by *value* to ensure no captured references race // with the task itself. template <typename TASK_T> inline void schedule(TASK_T &&fcn) { #ifdef JET_TASKING_TBB struct LocalTBBTask : public tbb::task { TASK_T func; tbb::task *execute() override { func(); return nullptr; } LocalTBBTask(TASK_T &&f) : func(std::forward<TASK_T>(f)) {} }; auto *tbb_node = new (tbb::task::allocate_root()) LocalTBBTask(std::forward<TASK_T>(fcn)); tbb::task::enqueue(*tbb_node); #elif defined(JET_TASKING_CPP11THREADS) std::thread thread(fcn); thread.detach(); #else // OpenMP or Serial --> synchronous! fcn(); #endif } template <typename TASK_T> using operator_return_t = typename std::result_of<TASK_T()>::type; // NOTE - see above, same issues associated with schedule() template <typename TASK_T> inline auto async(TASK_T &&fcn) -> std::future<operator_return_t<TASK_T>> { using package_t = std::packaged_task<operator_return_t<TASK_T>()>; auto task = new package_t(std::forward<TASK_T>(fcn)); auto future = task->get_future(); schedule([=]() { (*task)(); delete task; }); return future; } // Adopted from: // Radenski, A. // Shared Memory, Message Passing, and Hybrid Merge Sorts for Standalone and // Clustered SMPs. Proc PDPTA'11, the 2011 International Conference on Parallel // and Distributed Processing Techniques and Applications, CSREA Press // (H. Arabnia, Ed.), 2011, pp. 367 - 373. 
template <typename RandomIterator, typename RandomIterator2, typename CompareFunction> void merge(RandomIterator a, size_t size, RandomIterator2 temp, CompareFunction compareFunction) { size_t i1 = 0; size_t i2 = size / 2; size_t tempi = 0; while (i1 < size / 2 && i2 < size) { if (compareFunction(a[i1], a[i2])) { temp[tempi] = a[i1]; i1++; } else { temp[tempi] = a[i2]; i2++; } tempi++; } while (i1 < size / 2) { temp[tempi] = a[i1]; i1++; tempi++; } while (i2 < size) { temp[tempi] = a[i2]; i2++; tempi++; } // Copy sorted temp array into main array, a parallelFor(kZeroSize, size, [&](size_t i) { a[i] = temp[i]; }); } template <typename RandomIterator, typename RandomIterator2, typename CompareFunction> void parallelMergeSort(RandomIterator a, size_t size, RandomIterator2 temp, unsigned int numThreads, CompareFunction compareFunction) { if (numThreads == 1) { std::sort(a, a + size, compareFunction); } else if (numThreads > 1) { std::vector<std::future<void>> pool; pool.reserve(2); auto launchRange = [compareFunction](RandomIterator begin, size_t k2, RandomIterator2 temp, unsigned int numThreads) { parallelMergeSort(begin, k2, temp, numThreads, compareFunction); }; pool.emplace_back(internal::async([=]() { launchRange(a, size / 2, temp, numThreads / 2); })); pool.emplace_back(internal::async( [=]() { launchRange(a + size / 2, size - size / 2, temp + size / 2, numThreads - numThreads / 2); })); // Wait for jobs to finish for (auto &f : pool) { if (f.valid()) { f.wait(); } } merge(a, size, temp, compareFunction); } } } // namespace internal template <typename RandomIterator, typename T> void parallelFill(const RandomIterator &begin, const RandomIterator &end, const T &value, ExecutionPolicy policy) { auto diff = end - begin; if (diff <= 0) { return; } size_t size = static_cast<size_t>(diff); parallelFor( kZeroSize, size, [begin, value](size_t i) { begin[i] = value; }, policy); } // Adopted from http://ideone.com/Z7zldb template <typename IndexType, typename Function> void parallelFor(IndexType start, IndexType end, const Function &func, ExecutionPolicy policy) { if (start > end) { return; } #ifdef JET_TASKING_TBB if (policy == ExecutionPolicy::kParallel) { tbb::parallel_for(start, end, func); } else { for (auto i = start; i < end; ++i) { func(i); } } #elif JET_TASKING_CPP11THREADS // Estimate number of threads in the pool unsigned int numThreadsHint = maxNumberOfThreads(); const unsigned int numThreads = (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ? 
8u : numThreadsHint) : 1; // Size of a slice for the range functions IndexType n = end - start + 1; IndexType slice = (IndexType)std::round(n / static_cast<double>(numThreads)); slice = std::max(slice, IndexType(1)); // [Helper] Inner loop auto launchRange = [&func](IndexType k1, IndexType k2) { for (IndexType k = k1; k < k2; k++) { func(k); } }; // Create pool and launch jobs std::vector<std::thread> pool; pool.reserve(numThreads); IndexType i1 = start; IndexType i2 = std::min(start + slice, end); for (unsigned int i = 0; i + 1 < numThreads && i1 < end; ++i) { pool.emplace_back(launchRange, i1, i2); i1 = i2; i2 = std::min(i2 + slice, end); } if (i1 < end) { pool.emplace_back(launchRange, i1, end); } // Wait for jobs to finish for (std::thread &t : pool) { if (t.joinable()) { t.join(); } } #else #ifdef JET_TASKING_OPENMP if (policy == ExecutionPolicy::kParallel) { #pragma omp parallel for #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) for (ssize_t i = start; i < ssize_t(end); ++i) { #else // !MSVC || Intel for (auto i = start; i < end; ++i) { #endif // MSVC && !Intel func(i); } } else { for (auto i = start; i < end; ++i) { func(i); } } #else // JET_TASKING_OPENMP for (auto i = start; i < end; ++i) { func(i); } #endif // JET_TASKING_OPENMP #endif } template <typename IndexType, typename Function> void parallelRangeFor(IndexType start, IndexType end, const Function &func, ExecutionPolicy policy) { if (start > end) { return; } #ifdef JET_TASKING_TBB if (policy == ExecutionPolicy::kParallel) { tbb::parallel_for(tbb::blocked_range<IndexType>(start, end), [&func](const tbb::blocked_range<IndexType> &range) { func(range.begin(), range.end()); }); } else { func(start, end); } #else // Estimate number of threads in the pool unsigned int numThreadsHint = maxNumberOfThreads(); const unsigned int numThreads = (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ? 
8u : numThreadsHint) : 1; // Size of a slice for the range functions IndexType n = end - start + 1; IndexType slice = (IndexType)std::round(n / static_cast<double>(numThreads)); slice = std::max(slice, IndexType(1)); // Create pool and launch jobs std::vector<std::future<void>> pool; pool.reserve(numThreads); IndexType i1 = start; IndexType i2 = std::min(start + slice, end); for (unsigned int i = 0; i + 1 < numThreads && i1 < end; ++i) { pool.emplace_back(internal::async([=]() { func(i1, i2); })); i1 = i2; i2 = std::min(i2 + slice, end); } if (i1 < end) { pool.emplace_back(internal::async([=]() { func(i1, end); })); } // Wait for jobs to finish for (auto &f : pool) { if (f.valid()) { f.wait(); } } #endif } template <typename IndexType, typename Function> void parallelFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, const Function &function, ExecutionPolicy policy) { parallelFor( beginIndexY, endIndexY, [&](IndexType j) { for (IndexType i = beginIndexX; i < endIndexX; ++i) { function(i, j); } }, policy); } template <typename IndexType, typename Function> void parallelRangeFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, const Function &function, ExecutionPolicy policy) { parallelRangeFor( beginIndexY, endIndexY, [&](IndexType jBegin, IndexType jEnd) { function(beginIndexX, endIndexX, jBegin, jEnd); }, policy); } template <typename IndexType, typename Function> void parallelFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, IndexType beginIndexZ, IndexType endIndexZ, const Function &function, ExecutionPolicy policy) { parallelFor( beginIndexZ, endIndexZ, [&](IndexType k) { for (IndexType j = beginIndexY; j < endIndexY; ++j) { for (IndexType i = beginIndexX; i < endIndexX; ++i) { function(i, j, k); } } }, policy); } template <typename IndexType, typename Function> void parallelRangeFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, IndexType beginIndexZ, IndexType endIndexZ, const Function &function, ExecutionPolicy policy) { parallelRangeFor( beginIndexZ, endIndexZ, [&](IndexType kBegin, IndexType kEnd) { function(beginIndexX, endIndexX, beginIndexY, endIndexY, kBegin, kEnd); }, policy); } template <typename IndexType, typename Value, typename Function, typename Reduce> Value parallelReduce(IndexType start, IndexType end, const Value &identity, const Function &func, const Reduce &reduce, ExecutionPolicy policy) { if (start > end) { return identity; } #ifdef JET_TASKING_TBB if (policy == ExecutionPolicy::kParallel) { return tbb::parallel_reduce( tbb::blocked_range<IndexType>(start, end), identity, [&func](const tbb::blocked_range<IndexType> &range, const Value &init) { return func(range.begin(), range.end(), init); }, reduce); } else { (void)reduce; return func(start, end, identity); } #else // Estimate number of threads in the pool unsigned int numThreadsHint = maxNumberOfThreads(); const unsigned int numThreads = (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ? 
8u : numThreadsHint) : 1; // Size of a slice for the range functions IndexType n = end - start + 1; IndexType slice = (IndexType)std::round(n / static_cast<double>(numThreads)); slice = std::max(slice, IndexType(1)); // Results std::vector<Value> results(numThreads, identity); // [Helper] Inner loop auto launchRange = [&](IndexType k1, IndexType k2, unsigned int tid) { results[tid] = func(k1, k2, identity); }; // Create pool and launch jobs std::vector<std::future<void>> pool; pool.reserve(numThreads); IndexType i1 = start; IndexType i2 = std::min(start + slice, end); unsigned int tid = 0; for (; tid + 1 < numThreads && i1 < end; ++tid) { pool.emplace_back(internal::async([=]() { launchRange(i1, i2, tid); })); i1 = i2; i2 = std::min(i2 + slice, end); } if (i1 < end) { pool.emplace_back(internal::async([=]() { launchRange(i1, end, tid); })); } // Wait for jobs to finish for (auto &f : pool) { if (f.valid()) { f.wait(); } } // Gather Value finalResult = identity; for (const Value &val : results) { finalResult = reduce(val, finalResult); } return finalResult; #endif } template <typename RandomIterator, typename CompareFunction> void parallelSort(RandomIterator begin, RandomIterator end, CompareFunction compareFunction, ExecutionPolicy policy) { if (end < begin) { return; } #ifdef JET_TASKING_TBB if (policy == ExecutionPolicy::kParallel) { tbb::parallel_sort(begin, end, compareFunction); } else { std::sort(begin, end, compareFunction); } #else size_t size = static_cast<size_t>(end - begin); using value_type = typename std::iterator_traits<RandomIterator>::value_type; std::vector<value_type> temp(size); // Estimate number of threads in the pool unsigned int numThreadsHint = maxNumberOfThreads(); const unsigned int numThreads = (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ? 8u : numThreadsHint) : 1; internal::parallelMergeSort(begin, size, temp.begin(), numThreads, compareFunction); #endif } template <typename RandomIterator> void parallelSort(RandomIterator begin, RandomIterator end, ExecutionPolicy policy) { parallelSort(begin, end, std::less<typename std::iterator_traits<RandomIterator>::value_type>(), policy); } } // namespace vox } // namespace geometry #endif // INCLUDE_JET_DETAIL_PARALLEL_INL_H_
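/* Illustrative aside (not part of the header): the C++11-thread fallback
   above cuts the range into slices of size round((end - start + 1) /
   numThreads), clamped to at least 1, launches up to numThreads - 1 jobs,
   and hands whatever remains to a final job.  The same slicing arithmetic
   in a stand-alone C sketch (compile with -lm for round): */
#include <math.h>
#include <stdio.h>
int main(void) {
  long start = 0, end = 103; /* the half-open range [start, end) */
  unsigned int numThreads = 4;
  long n = end - start + 1;  /* mirrors the header's size computation */
  long slice = (long) round(n / (double) numThreads);
  if (slice < 1) slice = 1;
  long i1 = start, i2 = (start + slice < end) ? start + slice : end;
  for (unsigned int i = 0; i + 1 < numThreads && i1 < end; ++i) {
    printf("job %u: [%ld, %ld)\n", i, i1, i2);
    i1 = i2;
    i2 = (i2 + slice < end) ? i2 + slice : end;
  }
  if (i1 < end) printf("last job: [%ld, %ld)\n", i1, end);
  return 0;
}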
api.c
// RUN: %libomp-compile-and-run
// RUN: %libomp-run | %python %S/check.py -c 'CHECK' %s
// REQUIRES: !abt
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>

#define XSTR(x) #x
#define STR(x) XSTR(x)

#define streqls(s1, s2) (!strcmp(s1, s2))

#define check(condition)                                                     \
  if (!(condition)) {                                                        \
    fprintf(stderr, "error: %s: %d: " STR(condition) "\n", __FILE__,         \
            __LINE__);                                                       \
    exit(1);                                                                 \
  }

#define BUFFER_SIZE 1024

int main(int argc, char** argv) {
  char buf[BUFFER_SIZE];
  size_t needed;

  omp_set_affinity_format("0123456789");

  needed = omp_get_affinity_format(buf, BUFFER_SIZE);
  check(streqls(buf, "0123456789"));
  check(needed == 10);

  // Check that it is truncated properly
  omp_get_affinity_format(buf, 5);
  check(streqls(buf, "0123"));

#pragma omp parallel
  {
    char my_buf[512];
    size_t needed = omp_capture_affinity(my_buf, 512, NULL);
    check(streqls(my_buf, "0123456789"));
    check(needed == 10);

    // Check that it is truncated properly
    omp_capture_affinity(my_buf, 5, NULL);
    check(streqls(my_buf, "0123"));
  }

#pragma omp parallel num_threads(4)
  {
    omp_display_affinity(NULL);
  }

  return 0;
}

// CHECK: num_threads=4 0123456789
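/* Illustrative aside: the test pins the format to a literal string; in
   practice the format carries substitution fields.  A hedged sketch using
   thread_num and thread_affinity, which I take to be OpenMP 5.0
   affinity-format field names; passing NULL to omp_display_affinity makes
   it fall back to the format set here: */
#include <omp.h>
int main(void) {
  omp_set_affinity_format("tid=%{thread_num} aff=%{thread_affinity}");
  #pragma omp parallel num_threads(2)
  omp_display_affinity(NULL); /* NULL -> use the current affinity format */
  return 0;
}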
ImageSharpenUtils.h
#ifndef CAPTURE3_IMAGE_SHARPEN_UTILS_H
#define CAPTURE3_IMAGE_SHARPEN_UTILS_H

#include <cmath>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include "../engine/objects/image/Image.h"

namespace Capture3 {

	static void sharpenImage(Image &image, unsigned int radius, double strength)
	{
		// Get image size
		const unsigned int imageArea = image.getSize().getArea();
		const unsigned int imageWidth = image.getSize().getWidth();
		const unsigned int imageHeight = image.getSize().getHeight();

		// Temp data
		cv::Mat temp(cv::Size(imageWidth, imageHeight), CV_64FC1, cv::Scalar(0));
		cv::Mat blur(cv::Size(imageWidth, imageHeight), CV_64FC1, cv::Scalar(0));

		// Fetch pointers and size
		double *labData = image.getLAB().getData();
		auto *tempData = (double *) temp.data;
		auto *blurData = (double *) blur.data;

		// We only apply sharpening to the lightness channel and mix the result
		// back in to create the RGB output. Sharpening the lightness channel
		// gives a very natural-looking result and avoids heavy color shifts
		// around edges.
		#pragma omp parallel for schedule(static)
		for (unsigned int i = 0; i < imageArea; i++) {
			tempData[i] = labData[i * 3];
		}

		// Apply unsharp masking
		cv::GaussianBlur(temp, blur, cv::Size(0, 0), radius, radius);
		cv::addWeighted(temp, 1.0 + strength, blur, -strength, 0, blur);

		// Copy the result
		#pragma omp parallel for schedule(static)
		for (unsigned int i = 0; i < imageArea; i++) {
			labData[i * 3] = blurData[i];
		}

		// Release images
		blur.release();
		temp.release();

		// Convert LAB to other channels
		image.convertLAB();
	}
}

#endif // CAPTURE3_IMAGE_SHARPEN_UTILS_H
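/* Illustrative aside (not Capture3 code): the unsharp mask above computes
   out = (1 + strength) * in - strength * blur(in).  A 1-D sketch of the
   same formula on a step edge, with a three-tap box blur standing in for
   the Gaussian: */
#include <stdio.h>
#define N 8
int main(void) {
  double in[N] = {0, 0, 0, 1, 1, 1, 1, 1}; /* a step edge */
  double blur[N], out[N];
  double strength = 0.5;
  for (int i = 0; i < N; i++) { /* clamped three-tap box blur */
    double l = in[i > 0 ? i - 1 : 0], r = in[i < N - 1 ? i + 1 : N - 1];
    blur[i] = (l + in[i] + r) / 3.0;
  }
  #pragma omp parallel for schedule(static)
  for (int i = 0; i < N; i++)
    out[i] = (1.0 + strength) * in[i] - strength * blur[i]; /* overshoot at the edge is the sharpening */
  for (int i = 0; i < N; i++) printf("%.3f ", out[i]);
  printf("\n");
  return 0;
}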
GB_unaryop__abs_uint16_uint16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_uint16_uint16 // op(A') function: GB_tran__abs_uint16_uint16 // C type: uint16_t // A type: uint16_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint16_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint16_t z = (uint16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_uint16_uint16 ( uint16_t *restrict Cx, const uint16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_uint16_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
miscentered_delta_sigma.c
#include "miscentered_delta_sigma.h" int calc_miscentered_delta_sigma(double*Rp,double Mass,double concentration, int delta,double Rmis,double*R, double*sigma_r,double*miscentered_sigma_r, int NR,double*miscentered_delta_sigma, double*err,cosmology cosmo){ int i, status=0; #pragma omp parallel shared(R,sigma_r,NR,miscentered_sigma_r,miscentered_delta_sigma,err,status) #pragma omp for for(i = 0; i < NR; i++){ status |= calc_miscentered_delta_sigma_at_r(Rp[i],Mass,concentration,delta, Rmis,R,sigma_r, miscentered_sigma_r, NR,&miscentered_delta_sigma[i], &err[i],cosmo); } }
hybrid_report_mask.c
/* Routine reports hybrid affinity information for MPI processes within an OpenMP region. Within a parallel region-- Rank 0 gathers thread affinities from each rank & reports. a.) Within a master region: Determine maximum length of node name. Gather node names from each rank. Determine if there are multiple compute nodes. Each mpi process (rank) creates static space: for gathering nodes names and length, for collecting affinity masks of all threads within the mpi process (omp_proc_mask). b.) Within the parallel region: Determine the mask for the thread (insert into omp_proc_mask) c.) Within master region: if rank 0 print mask header print masks for rank 0 gather masks from non-rank-0 MPI processes (MPI_Irecv). loop over non-rank-0 processes and print masks (for each thread) if rank != 0 pack omp_proc_mask mask into omp_mask_pac and send to rank 0. Free static spaces Return */ #include <stdio.h> #include <mpi.h> #include <omp.h> #include <sched.h> #include <unistd.h> #include <stdlib.h> #include <ctype.h> // basic mask printer-- prints a single row with ncpus number of elements void print_mask(int hd_prnt, char* name, int multi_node, int rank, int thrd, int ncpus, int nranks, int nthrds, int *proc_mask); int hybrid_report_mask(void){ // General int i,j,ierr; int id, rid,tid; int in_mpi, in_omp; int thrd, nthrds; int ncpus, nel_set; // Mask storage static int ** omp_proc_mask; static int * omp_mask_pac; char *dummy; // MPI specific Variables int rank, nranks; MPI_Request *request; MPI_Status *status; static int multi_node = 0; static char *all_names; static int max_name_len; int name_len; char proc_name[MPI_MAX_PROCESSOR_NAME]; // In MPI and parallel region ? MPI_Initialized(&in_mpi); in_omp = omp_in_parallel(); if(in_mpi == 0 || in_omp == 0){ printf("ERROR: ***** Must call hybrid_report_mask() in an OpenMP parallel region in MPI program. ***** \n"); exit(1); } // Get rank number & no of ranks via MPI MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nranks); thrd = omp_get_thread_num(); // thread id nthrds = omp_get_num_threads(); // Number of Threads // Get number of cpus (this gives no. // of cpu_ids in /proc/cpuinfo) ncpus = (int) sysconf(_SC_NPROCESSORS_ONLN); // Working only with MPI processes (masters) #pragma omp master { // Get a list of nodes from all ranks. MPI_Get_processor_name(proc_name,&name_len); MPI_Allreduce(&name_len, &max_name_len, 1,MPI_INT, MPI_MAX, MPI_COMM_WORLD); all_names = malloc(sizeof(int*)*nranks*(max_name_len+1)); MPI_Gather( proc_name, max_name_len+1 , MPI_CHAR, all_names, max_name_len+1, MPI_CHAR, 0, MPI_COMM_WORLD); // If multiple nodes, make muti_node non-zero. 
   if(rank == 0){
      for(id=0;id<nranks;id++){
         if( strcmp(&all_names[id*(max_name_len+1)],&all_names[0]) ) multi_node++;
      }
   }

   // Create shared storage for masks (only master allocates)
   omp_proc_mask = malloc(sizeof(int*)*nthrds);
   for(i=0;i<nthrds;i++) omp_proc_mask[i] = malloc(sizeof(int)*ncpus);
   for(i=0;i<nthrds;i++)
      for(j=0;j<ncpus;j++) omp_proc_mask[i][j] = 0;
   } // end of master

#pragma omp barrier
#pragma omp critical // (boundto -- may not be thread safe)
   ierr = boundto(&nel_set,omp_proc_mask[thrd]);
#pragma omp barrier

#pragma omp master
   {
   omp_mask_pac = (int *) malloc(sizeof(int)*nranks*nthrds*ncpus); // need packing space for mpi send/recv

   if(rank == 0){
      request = (MPI_Request *) malloc(sizeof(MPI_Request)*nranks);
      status  = (MPI_Status  *) malloc(sizeof(MPI_Status )*nranks);

      // print header (the name argument is unused for the header row)
      print_mask(1, dummy, multi_node, 0, 0, ncpus, nranks, nthrds, omp_proc_mask[0]);
      fflush(stdout);

      // Node names are stored one per rank, so index them by rank (0 here),
      // not by thread.
      for(tid=0;tid<nthrds;tid++){
         print_mask(0, &all_names[0], multi_node, 0, tid, ncpus, nranks, nthrds,
                    omp_proc_mask[tid]);
      }
      fflush(stdout);

      // Receive the other ranks' packed mask arrays
      for(rid=1;rid<nranks;rid++){
         MPI_Irecv(&omp_mask_pac[rid*nthrds*ncpus], nthrds*ncpus, MPI_INT,
                   rid, 99, MPI_COMM_WORLD, &request[rid-1]);
      }
      MPI_Waitall(nranks-1,&request[0],&status[0]);

      // Print for each rank
      for(rid=1;rid<nranks;rid++){
         for(tid=0;tid<nthrds;tid++){
            print_mask(0, &all_names[rid*(max_name_len+1)], multi_node, rid, tid,
                       ncpus, nranks, nthrds,
                       &omp_mask_pac[rid*nthrds*ncpus + tid*ncpus]);
         }
      }
      if(nranks*nthrds > 50) // reprint the header for long listings
         print_mask(1, dummy, multi_node, 0, 0, ncpus, nranks, nthrds, omp_proc_mask[0]);
      fflush(stdout);
   } // end root printing

   else{ //all non-root ranks
      // Pack up the rank's mask arrays (Uh, should have made one array from the beginning!)
      for( tid=0;tid<nthrds;tid++){
         for( id=0; id<ncpus; id++) omp_mask_pac[(tid*ncpus)+id] = omp_proc_mask[tid][id];
      }
      // Send to root
      MPI_Send(omp_mask_pac, nthrds*ncpus, MPI_INT, 0, 99, MPI_COMM_WORLD);
   } // end non-root printing

   // Return allocated space
   for(i=0;i<nthrds;i++) free(omp_proc_mask[i]);
   free(omp_proc_mask);
   free(omp_mask_pac);
   if(rank == 0 ){ free(request); free(status);}
   free(all_names);
   } // end of master

#pragma omp barrier // JIC, so that all threads leave at the same time.

   return 0;
}
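/* Illustrative aside: boundto() is not shown above; on Linux its job --
   filling an int-per-cpu array with the calling thread's affinity mask --
   can be done with sched_getaffinity.  A hedged sketch of that assumption
   (requires _GNU_SOURCE for the CPU_ISSET API): */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>
int main(void) {
  int ncpus = (int) sysconf(_SC_NPROCESSORS_ONLN);
  cpu_set_t set;
  if (sched_getaffinity(0, sizeof(set), &set) != 0) return 1; /* 0 = calling thread */
  for (int id = 0; id < ncpus; id++)
    printf("%d", CPU_ISSET(id, &set) ? 1 : 0); /* 1 where the thread may run */
  printf("\n");
  return 0;
}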
avx512_gemm.h
#pragma once #include "intgemm/intgemm_config.h" #ifdef INTGEMM_COMPILER_SUPPORTS_AVX512BW #include "interleave.h" #include "kernels.h" #include "multiply.h" #include "types.h" #include <cassert> #include <cstddef> #include <cstdint> #include <cstdlib> /* AVX512 implementation. * This uses INTGEMM_AVX512BW, INTGEMM_AVX512DQ, and might use AVX512VL * That means it supports mainstream CPUs with AVX512, starting with Skylake * Xeons. * It does not support any Knights / Xeon Phi processors. * * All memory must be 64-byte aligned. */ namespace intgemm { // AVX512 has combined collapse and store instructions: // _mm512_mask_cvtsepi32_storeu_epi16 // _mm512_mask_cvtsepi32_storeu_epi8 // So conversion in memory uses these, but I also implement a wider version for // rearranging B. namespace avx512bw { // Load from memory, multiply, and convert to int32_t. /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */ INTGEMM_AVX512BW inline __m512i QuantizerGrab(const float *input, const __m512 quant_mult_reg) { return kernels::quantize(loadu_ps<__m512>(input), quant_mult_reg); } /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */ INTGEMM_SELECT_COL_B(INTGEMM_AVX512BW, __m512i) // For PrepareB we want to read 8 columns at a time. When converting 32-bit // floats to 8-bit values, that's 32 bytes of floats. But AVX512 is 64 bytes // wide so it reads off the edge of the tile. We could expand the tile size // but then the memory written to won't be contiguous anyway so we'd be doing a // scatter anyway. Easier to just read the 8 columns we wanted as 256 bits // concatenate. INTGEMM_AVX512DQ inline __m512 Concat(const __m256 first, const __m256 second) { // INTGEMM_AVX512DQ but that goes with INTGEMM_AVX512BW anyway. return _mm512_insertf32x8(_mm512_castps256_ps512(first), second, 1); } // Like QuantizerGrab, but allows 32-byte halves (i.e. 8 columns) to be controlled independently. /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */ INTGEMM_AVX512BW inline __m512i QuantizerGrabHalves(const float *input0, const float *input1, const __m512 quant_mult_reg) { __m512 appended = Concat(loadu_ps<__m256>(input0), loadu_ps<__m256>(input1)); appended = _mm512_mul_ps(appended, quant_mult_reg); return _mm512_cvtps_epi32(appended); } // These are only used for reshaping due to the AVX512 instructions // _mm512_mask_cvtsepi32_storeu_epi16 and _mm512_mask_cvtsepi32_storeu_epi8 // being used for the quantizer. class QuantizeTile16 { public: INTGEMM_AVX512BW static inline Register ConsecutiveWithWrapping(FRegister quant_mult, const float *input, Index cols_left, Index cols, Index row_step) { auto input0 = input; auto input1 = input + 16 + (cols_left <= 16 ? 
cols * (row_step - 1) : 0); auto g0 = QuantizerGrabHalves(input0, input1, quant_mult); auto g1 = QuantizerGrabHalves(input0 + 8, input1 + 8, quant_mult); auto packed = packs_epi32(g0, g1); return _mm512_permutex_epi64(packed, 0xd8 /* 0, 2, 1, 3 */); } INTGEMM_AVX512BW static inline Register ForReshape(FRegister quant_mult, const float *input, Index cols) { __m512i g0 = QuantizerGrabHalves(input, input + 16 * cols, quant_mult); __m512i g1 = QuantizerGrabHalves(input + 8 * cols, input + 24 * cols, quant_mult); __m512i packed = packs_epi32(g0, g1); // Permute within 256-bit lanes, so same as INTGEMM_AVX2 return _mm512_permutex_epi64(packed, 0xd8 /* 0, 2, 1, 3 */); } }; class QuantizeTile8 { public: INTGEMM_AVX512BW static inline Register ConsecutiveWithWrapping(FRegister quant_mult, const float *input, Index cols_left, Index cols, Index row_step) { static const __m512i neg127 = _mm512_set1_epi8(-127); static const __m512i shuffle_param = _mm512_set_epi32(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0); const float* inputs[4]; for (Index i = 0; i < sizeof(inputs) / sizeof(inputs[0]); ++i) { while (cols_left < sizeof(Register) / sizeof(float)) { input += cols * (row_step - 1); cols_left += cols; } inputs[i] = input; input += sizeof(Register) / sizeof(float); cols_left -= sizeof(Register) / sizeof(float); } auto g0 = QuantizerGrab(inputs[0], quant_mult); auto g1 = QuantizerGrab(inputs[1], quant_mult); auto g2 = QuantizerGrab(inputs[2], quant_mult); auto g3 = QuantizerGrab(inputs[3], quant_mult); auto packed0 = packs_epi32(g0, g1); auto packed1 = packs_epi32(g2, g3); auto packed = _mm512_packs_epi16(packed0, packed1); packed = _mm512_max_epi8(packed, neg127); return _mm512_permutexvar_epi32(shuffle_param, packed); } INTGEMM_AVX512BW static inline __m512i ForReshape(FRegister quant_mult, const float *input, Index cols) { // TODO: try alternative: _mm512_cvtsepi32_epi8 ? const __m512i neg127 = _mm512_set1_epi8(-127); // In reverse order: grabbing the first 32-bit values from each 128-bit register, then the second 32-bit values, etc. const __m512i shuffle_param = _mm512_set_epi32(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0); // 32-bit format. __m512i g0 = QuantizerGrabHalves(input, input + 2 * cols, quant_mult); __m512i g1 = QuantizerGrabHalves(input + 16 * cols, input + 18 * cols, quant_mult); __m512i g2 = QuantizerGrabHalves(input + 32 * cols, input + 34 * cols, quant_mult); __m512i g3 = QuantizerGrabHalves(input + 48 * cols, input + 50 * cols, quant_mult); // Pack 32-bit to 16-bit. __m512i packed0 = packs_epi32(g0, g1); __m512i packed1 = packs_epi32(g2, g3); // Pack 16-bit to 8-bit. __m512i packed = _mm512_packs_epi16(packed0, packed1); // Ban -128. packed = _mm512_max_epi8(packed, neg127); // 0 1 2 3 16 17 18 19 32 33 34 35 48 49 50 51 4 5 6 7 20 21 22 23 36 37 38 39 52 53 54 55 8 9 10 11 24 25 26 27 40 41 42 43 56 57 58 59 12 13 14 15 28 29 30 31 44 45 46 47 60 61 62 63 return _mm512_permutexvar_epi32(shuffle_param, packed); } }; struct Kernels16 { typedef int16_t Integer; // Currently A is prepared by quantization but this could theoretically change. // rows * cols must be a multiple of 16. /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */ INTGEMM_AVX512BW static inline void PrepareA(const float *input, int16_t *output, float quant_mult, Index rows, Index cols) { Quantize(input, output, quant_mult, rows * cols); } // Technically output can be unaligned in Quantize. // But then it will need to be aligned for Multiply. 
// size must be a multiple of 16. // Convert to 16-bit signed integers. /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */ INTGEMM_AVX512BW static void Quantize(const float *input, int16_t *output, float quant_mult, Index size) { assert(size % 16 == 0); assert(reinterpret_cast<uintptr_t>(input) % 64 == 0); // Fill with the quantization multiplier. const __m512 quant_mult_reg = _mm512_set1_ps(quant_mult); const float *end = input + size; for (; input != end; input += 16, output += 16) { // There doesn't seem to be an unmasked version. _mm512_mask_cvtsepi32_storeu_epi16(output, 0xffff, QuantizerGrab(input, quant_mult_reg)); } } // Tile size for B; B must be a multiple of this block size. static const Index kBTileRow = 32; static const Index kBTileCol = 8; /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */ INTGEMM_PREPARE_B_16(INTGEMM_AVX512BW, QuantizeTile16) INTGEMM_PREPARE_B_QUANTIZED_TRANSPOSED(INTGEMM_AVX512BW, int16_t) INTGEMM_PREPARE_B_TRANSPOSED(INTGEMM_AVX512BW, QuantizeTile16, int16_t) /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */ INTGEMM_AVX512BW static void SelectColumnsB(const int16_t *input, int16_t *output, Index rows, const Index *cols_begin, const Index *cols_end) { SelectColumnsOfB((const __m512i*)input, (__m512i*)output, rows * 2, cols_begin, cols_end); } /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */ INTGEMM_MULTIPLY16(__m512i, INTGEMM_AVX512BW, CPUType::AVX2) constexpr static const char *const kName = "16-bit AVX512"; static const CPUType kUses = CPUType::AVX512BW; }; struct Kernels8 { typedef int8_t Integer; // Currently A is prepared by quantization but this could theoretically change. /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */ INTGEMM_AVX512BW static inline void PrepareA(const float *input, int8_t *output, float quant_mult, Index rows, Index cols) { Quantize(input, output, quant_mult, rows * cols); } private: /* g++ (Ubuntu 7.4.0-1ubuntu1~18.04.1) 7.4.0 does not carry target attributes * to the hidden function it creates in implementing #pragma omp parallel for. * So intrinstics were not working inside the for loop when compiled with * OMP. Also, passing register types across #pragma omp parallel for * generated an internal compiler error. * The problem does not occur in g++-8 (Ubuntu 8.3.0-6ubuntu1~18.04.1) 8.3.0. * As a workaround, I split into #pragma omp parallel with boring types * passed across the boundary then call this function with target attributes. */ INTGEMM_AVX512BW static void QuantizeThread(const float *input, int8_t *output, float quant_mult, std::size_t count) { const __m512i neg127 = _mm512_set1_epi32(-127); const __m512 quant_mult_reg = _mm512_set1_ps(quant_mult); const std::size_t kBatch = sizeof(__m512i) / sizeof(float); #pragma omp for for (std::size_t i = 0; i < count; i += kBatch) { __m512i asint = QuantizerGrab(input + i, quant_mult_reg); asint = _mm512_max_epi32(asint, neg127); // There doesn't seem to be an unmasked version. _mm512_mask_cvtsepi32_storeu_epi8(output + i, 0xffff, asint); } } public: // Technically output can be unaligned in Quantize. // But then it will need to be aligned for Multiply. // Convert to 8-bit signed integers. 
/* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */ INTGEMM_AVX512BW static void Quantize(const float *input, int8_t *output, float quant_mult, Index size) { assert(reinterpret_cast<uintptr_t>(input) % sizeof(__m512i) == 0); const std::size_t kBatch = sizeof(__m512i) / sizeof(float); std::size_t fast_size = (size & ~(kBatch - 1)); const float *fast_input_end = input + fast_size; int8_t *fast_output_end = output + fast_size; #pragma omp parallel { QuantizeThread(input, output, quant_mult, fast_size); } std::size_t overhang = size & (kBatch - 1); if (!overhang) return; // We needed a branch anyway for the empty case. const __m512i neg127 = _mm512_set1_epi32(-127); const __m512 quant_mult_reg = _mm512_set1_ps(quant_mult); __m512i asint = QuantizerGrab(fast_input_end, quant_mult_reg); asint = _mm512_max_epi32(asint, neg127); _mm512_mask_cvtsepi32_storeu_epi8(fast_output_end, (1 << overhang) - 1, asint); } // Preparing A for the signed/unsigned multiplication. Using add 127 /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */ INTGEMM_AVX512BW static inline void PrepareA(const float *input, uint8_t *output, float quant_mult, Index rows, Index cols) { QuantizeU(input, output, quant_mult, rows * cols); } // Technically output can be unaligned in Quantize. // But then it will need to be aligned for Multiply. // Convert to 8-bit signed integers. /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */ INTGEMM_AVX512BW static void QuantizeU(const float *input, uint8_t *output, float quant_mult, Index size) { assert(size % 16 == 0); assert(reinterpret_cast<uintptr_t>(input) % 64 == 0); const __m512i pos127 = _mm512_set1_epi32(127); const __m512i zero = _mm512_setzero_si512(); const __m512 quant_mult_reg = _mm512_set1_ps(quant_mult); const float *end = input + size; for (; input < end; input += 16, output += 16) { __m512i asint = QuantizerGrab(input, quant_mult_reg); asint = _mm512_min_epi32(asint, pos127); asint = _mm512_add_epi32(asint, pos127); asint = _mm512_max_epi32(asint, zero); _mm512_mask_cvtusepi32_storeu_epi8(output, 0xffff, asint); } } // Tile size for B; B must be a multiple of this block size. static const Index kBTileRow = 64; static const Index kBTileCol = 8; /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */ INTGEMM_PREPARE_B_8(INTGEMM_AVX512BW, QuantizeTile8) INTGEMM_PREPARE_B_QUANTIZED_TRANSPOSED(INTGEMM_AVX512BW, int8_t) INTGEMM_PREPARE_B_TRANSPOSED(INTGEMM_AVX512BW, QuantizeTile8, int8_t) /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */ INTGEMM_AVX512BW static void SelectColumnsB(const int8_t *input, int8_t *output, Index rows, const Index *cols_begin, const Index *cols_end) { SelectColumnsOfB((const __m512i*)input, (__m512i*)output, rows, cols_begin, cols_end); } // Special AVX512 implementation due to having 32 registers (so I don't have to // allocate registers manually) and no sign instruction. template <typename Callback> INTGEMM_AVX512BW static void Multiply(const int8_t *A, const int8_t *B, Index A_rows, Index width, Index B_cols, Callback callback) { // This is copy-paste from Multiply8_SSE2OrAVX2. assert(width % sizeof(Register) == 0); assert(B_cols % 8 == 0); assert(reinterpret_cast<uintptr_t>(A) % sizeof(Register) == 0); assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0); // There's 8 results for INTGEMM_AVX2 to handle. 
auto callback_impl = callbacks::CallbackImpl<CPUType::AVX2, Callback>(callback); const Index simd_width = width / sizeof(Register); // Added for AVX512. Register zeros = setzero_si<Register>(); // Go over 8 columns of B at a time. #pragma omp for for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) { const Register *B0_col = reinterpret_cast<const Register*>(B) + B0_colidx * simd_width; // Process one row of A at a time. Doesn't seem to be faster to do multiple rows of A at once. for (Index A_rowidx = 0; A_rowidx < A_rows; ++A_rowidx) { // Iterate over shared (inner) dimension. const Register *A_live = reinterpret_cast<const Register *>(A + A_rowidx * width); const Register *A_end = A_live + simd_width; const Register *B_live = B0_col; // Do the first iteration to initialize the sums. __m512i a = *A_live; __mmask64 neg_mask = _mm512_test_epi8_mask(a, _mm512_set1_epi8(-128)); __m512i a_positive = _mm512_abs_epi8(a); // These will be packed 16-bit integers containing sums for each column of B multiplied by the row of A. Register sum0 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[0], neg_mask, zeros, B_live[0])); Register sum1 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[1], neg_mask, zeros, B_live[1])); Register sum2 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[2], neg_mask, zeros, B_live[2])); Register sum3 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[3], neg_mask, zeros, B_live[3])); Register sum4 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[4], neg_mask, zeros, B_live[4])); Register sum5 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[5], neg_mask, zeros, B_live[5])); Register sum6 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[6], neg_mask, zeros, B_live[6])); Register sum7 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[7], neg_mask, zeros, B_live[7])); ++A_live; B_live += 8; // Use A as the loop variable so the add can be done where gcc likes it // for branch prediction. for (; A_live != A_end; ++A_live, B_live += 8) { // Unique code here: can we do an inline function? // Retrieve a. We will use this as the unsigned part. a = *A_live; // Retrieve the conveniently consecutive values of B. __m512i b0 = *B_live; __m512i b1 = *(B_live + 1); __m512i b2 = *(B_live + 2); __m512i b3 = *(B_live + 3); __m512i b4 = *(B_live + 4); __m512i b5 = *(B_live + 5); __m512i b6 = *(B_live + 6); __m512i b7 = *(B_live + 7); // Get a mask where a is negative. // Didn't seem to make a difference definining sign bits here vs at top neg_mask = _mm512_test_epi8_mask(a, _mm512_set1_epi8(-128)); a_positive = _mm512_abs_epi8(a); // Negate by subtracting from zero with a mask. b0 = _mm512_mask_sub_epi8(b0, neg_mask, zeros, b0); b1 = _mm512_mask_sub_epi8(b1, neg_mask, zeros, b1); b2 = _mm512_mask_sub_epi8(b2, neg_mask, zeros, b2); b3 = _mm512_mask_sub_epi8(b3, neg_mask, zeros, b3); b4 = _mm512_mask_sub_epi8(b4, neg_mask, zeros, b4); b5 = _mm512_mask_sub_epi8(b5, neg_mask, zeros, b5); b6 = _mm512_mask_sub_epi8(b6, neg_mask, zeros, b6); b7 = _mm512_mask_sub_epi8(b7, neg_mask, zeros, b7); // The magic 8-bit multiply then horizontal sum into 16-bit. 
b0 = _mm512_maddubs_epi16(a_positive, b0); b1 = _mm512_maddubs_epi16(a_positive, b1); b2 = _mm512_maddubs_epi16(a_positive, b2); b3 = _mm512_maddubs_epi16(a_positive, b3); b4 = _mm512_maddubs_epi16(a_positive, b4); b5 = _mm512_maddubs_epi16(a_positive, b5); b6 = _mm512_maddubs_epi16(a_positive, b6); b7 = _mm512_maddubs_epi16(a_positive, b7); // Now we have 16-bit results that are the sum of two multiplies. // Choosing to approximate and do adds. // Perhaps every so often we could accumulate by upcasting. sum0 = _mm512_adds_epi16(sum0, b0); sum1 = _mm512_adds_epi16(sum1, b1); sum2 = _mm512_adds_epi16(sum2, b2); sum3 = _mm512_adds_epi16(sum3, b3); sum4 = _mm512_adds_epi16(sum4, b4); sum5 = _mm512_adds_epi16(sum5, b5); sum6 = _mm512_adds_epi16(sum6, b6); sum7 = _mm512_adds_epi16(sum7, b7); // Unique code ends: can we do an inline function? } // Upcast to 32-bit and horizontally add. Register ones = set1_epi16<Register>(1); sum0 = madd_epi16(sum0, ones); sum1 = madd_epi16(sum1, ones); sum2 = madd_epi16(sum2, ones); sum3 = madd_epi16(sum3, ones); sum4 = madd_epi16(sum4, ones); sum5 = madd_epi16(sum5, ones); sum6 = madd_epi16(sum6, ones); sum7 = madd_epi16(sum7, ones); Register pack0123 = Pack0123(sum0, sum1, sum2, sum3); Register pack4567 = Pack0123(sum4, sum5, sum6, sum7); auto total = PermuteSummer(pack0123, pack4567); callback_impl(total, callbacks::OutputBufferInfo(A_rowidx, B0_colidx, A_rows, B_cols)); } } } INTGEMM_MULTIPLY8SHIFT(__m512i, INTGEMM_AVX512BW, CPUType::AVX2) INTGEMM_PREPAREBIASFOR8(__m512i, INTGEMM_AVX512BW, CPUType::AVX2) constexpr static const char *const kName = "8-bit AVX512BW"; static const CPUType kUses = CPUType::AVX512BW; }; } // namespace avx512bw } // namespace intgemm #endif
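The Multiply kernel above works around the lack of a signed 8-bit multiply: _mm512_maddubs_epi16 takes an UNSIGNED left operand, so the code multiplies |a| by a copy of B whose sign is flipped wherever a was negative. Below is a scalar C model of that identity (function name is mine, not intgemm API). It holds on [-127,127], which is consistent with Quantize clamping at -127 rather than -128 above.

#include <stdio.h>

/* Scalar model of the sign trick in Multiply: maddubs multiplies an
 * unsigned byte by a signed byte, so a*b is rewritten as
 * |a| * (a < 0 ? -b : b). */
static int signed_mul_via_maddubs(int a, int b) {
    int a_positive = a < 0 ? -a : a;   /* models _mm512_abs_epi8              */
    int b_adjusted = a < 0 ? -b : b;   /* models _mm512_mask_sub_epi8 from 0  */
    return a_positive * b_adjusted;
}

int main(void) {
    for (int a = -127; a <= 127; ++a)
        for (int b = -127; b <= 127; ++b)
            if (signed_mul_via_maddubs(a, b) != a * b)
                printf("mismatch at a=%d b=%d\n", a, b);
    printf("identity verified on [-127,127]\n");
    return 0;
}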
stream.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char* argv[]) {
	const int LENGTH = 2000;
	printf("Allocating arrays of size %d elements.\n", LENGTH);

	#pragma 0
	double* a = (double*) malloc(sizeof(double) * LENGTH);
	#pragma 0
	double* b = (double*) malloc(sizeof(double) * LENGTH);
	#pragma 0
	double* fast_c = (double*) malloc(sizeof(double) * LENGTH);
	#pragma default 1
	// mlm_set_pool(1);
	printf("Allocation for fast_c is %p\n", (void*) fast_c);
	double* c = (double*) malloc(sizeof(double) * LENGTH);
	printf("Done allocating arrays.\n");

	int i;
	for(i = 0; i < LENGTH; ++i) {
		a[i] = i;
		b[i] = LENGTH - i;
		c[i] = 0;
	}

	// Issue a memory copy
	memcpy(fast_c, c, sizeof(double) * LENGTH);

	printf("Performing the fast_c compute loop...\n");
	#pragma omp parallel for
	for(i = 0; i < LENGTH; ++i) {
		fast_c[i] = 2.0 * a[i] + 1.5 * b[i];
	}

	// Now copy results back
	memcpy(c, fast_c, sizeof(double) * LENGTH);

	double sum = 0;
	for(i = 0; i < LENGTH; ++i) {
		sum += c[i];
	}

	printf("Sum of arrays is: %f\n", sum);
	printf("Freeing arrays...\n");
	free(a);
	free(b);
	free(c);
	free(fast_c);
	printf("Done.\n");
	return 0;
}
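The final sum printed by stream.c has a closed form, so a run can be checked analytically: c[i] = 2*i + 1.5*(LENGTH - i) = 0.5*i + 1.5*LENGTH, and summing over i = 0..LENGTH-1 gives 0.5*L*(L-1)/2 + 1.5*L*L, which is 6999500 for L = 2000. A standalone sanity check (my helper, not part of the benchmark):

#include <stdio.h>

/* Analytic check for the stream kernel: sum over i of 2*i + 1.5*(L - i)
 * equals 0.5*L*(L-1)/2 + 1.5*L*L. */
int main(void) {
    const double L = 2000.0;
    double expected = 0.5 * L * (L - 1.0) / 2.0 + 1.5 * L * L;
    printf("expected sum: %f\n", expected); /* prints 6999500.000000 */
    return 0;
}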
KDTree.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #ifndef _SPTAG_COMMON_KDTREE_H_ #define _SPTAG_COMMON_KDTREE_H_ #include <iostream> #include <vector> #include <string> #include <shared_mutex> #include "../VectorIndex.h" #include "CommonUtils.h" #include "QueryResultSet.h" #include "WorkSpace.h" #pragma warning(disable:4996) // 'fopen': This function or variable may be unsafe. Consider using fopen_s instead. To disable deprecation, use _CRT_SECURE_NO_WARNINGS. See online help for details. namespace SPTAG { namespace COMMON { // node type for storing KDT struct KDTNode { SizeType left; SizeType right; DimensionType split_dim; float split_value; }; class KDTree { public: KDTree() : m_iTreeNumber(2), m_numTopDimensionKDTSplit(5), m_iSamples(1000), m_lock(new std::shared_timed_mutex) {} KDTree(KDTree& other) : m_iTreeNumber(other.m_iTreeNumber), m_numTopDimensionKDTSplit(other.m_numTopDimensionKDTSplit), m_iSamples(other.m_iSamples), m_lock(new std::shared_timed_mutex) {} ~KDTree() {} inline const KDTNode& operator[](SizeType index) const { return m_pTreeRoots[index]; } inline KDTNode& operator[](SizeType index) { return m_pTreeRoots[index]; } inline SizeType size() const { return (SizeType)m_pTreeRoots.size(); } inline SizeType sizePerTree() const { std::shared_lock<std::shared_timed_mutex> lock(*m_lock); return (SizeType)m_pTreeRoots.size() - m_pTreeStart.back(); } template <typename T> void Rebuild(VectorIndex* p_index) { COMMON::KDTree newTrees(*this); newTrees.BuildTrees<T>(p_index, nullptr, 1); std::unique_lock<std::shared_timed_mutex> lock(*m_lock); m_pTreeRoots.swap(newTrees.m_pTreeRoots); m_pTreeStart.swap(newTrees.m_pTreeStart); } template <typename T> void BuildTrees(VectorIndex* p_index, std::vector<SizeType>* indices = nullptr, int numOfThreads = omp_get_num_threads()) { std::vector<SizeType> localindices; if (indices == nullptr) { localindices.resize(p_index->GetNumSamples()); for (SizeType i = 0; i < localindices.size(); i++) localindices[i] = i; } else { localindices.assign(indices->begin(), indices->end()); } m_pTreeRoots.resize(m_iTreeNumber * localindices.size()); m_pTreeStart.resize(m_iTreeNumber, 0); #pragma omp parallel for num_threads(numOfThreads) for (int i = 0; i < m_iTreeNumber; i++) { Sleep(i * 100); std::srand(clock()); std::vector<SizeType> pindices(localindices.begin(), localindices.end()); std::random_shuffle(pindices.begin(), pindices.end()); m_pTreeStart[i] = i * (SizeType)pindices.size(); std::cout << "Start to build KDTree " << i + 1 << std::endl; SizeType iTreeSize = m_pTreeStart[i]; DivideTree<T>(p_index, pindices, 0, (SizeType)pindices.size() - 1, m_pTreeStart[i], iTreeSize); std::cout << i + 1 << " KDTree built, " << iTreeSize - m_pTreeStart[i] << " " << pindices.size() << std::endl; } } inline std::uint64_t BufferSize() const { return sizeof(int) + sizeof(SizeType) * m_iTreeNumber + sizeof(SizeType) + sizeof(KDTNode) * m_pTreeRoots.size(); } bool SaveTrees(std::ostream& p_outstream) const { std::shared_lock<std::shared_timed_mutex> lock(*m_lock); p_outstream.write((char*)&m_iTreeNumber, sizeof(int)); p_outstream.write((char*)m_pTreeStart.data(), sizeof(SizeType) * m_iTreeNumber); SizeType treeNodeSize = (SizeType)m_pTreeRoots.size(); p_outstream.write((char*)&treeNodeSize, sizeof(SizeType)); p_outstream.write((char*)m_pTreeRoots.data(), sizeof(KDTNode) * treeNodeSize); std::cout << "Save KDT (" << m_iTreeNumber << "," << treeNodeSize << ") Finish!" 
<< std::endl; return true; } bool SaveTrees(std::string sTreeFileName) const { std::cout << "Save KDT to " << sTreeFileName << std::endl; std::ofstream output(sTreeFileName, std::ios::binary); if (!output.is_open()) return false; SaveTrees(output); output.close(); return true; } bool LoadTrees(char* pKDTMemFile) { m_iTreeNumber = *((int*)pKDTMemFile); pKDTMemFile += sizeof(int); m_pTreeStart.resize(m_iTreeNumber); memcpy(m_pTreeStart.data(), pKDTMemFile, sizeof(SizeType) * m_iTreeNumber); pKDTMemFile += sizeof(SizeType)*m_iTreeNumber; SizeType treeNodeSize = *((SizeType*)pKDTMemFile); pKDTMemFile += sizeof(SizeType); m_pTreeRoots.resize(treeNodeSize); memcpy(m_pTreeRoots.data(), pKDTMemFile, sizeof(KDTNode) * treeNodeSize); std::cout << "Load KDT (" << m_iTreeNumber << "," << treeNodeSize << ") Finish!" << std::endl; return true; } bool LoadTrees(std::string sTreeFileName) { std::cout << "Load KDT From " << sTreeFileName << std::endl; std::ifstream input(sTreeFileName, std::ios::binary); if (!input.is_open()) return false; input.read((char*)&m_iTreeNumber, sizeof(int)); m_pTreeStart.resize(m_iTreeNumber); input.read((char*)m_pTreeStart.data(), sizeof(SizeType) * m_iTreeNumber); SizeType treeNodeSize; input.read((char*)&treeNodeSize, sizeof(SizeType)); m_pTreeRoots.resize(treeNodeSize); input.read((char*)m_pTreeRoots.data(), sizeof(KDTNode) * treeNodeSize); input.close(); std::cout << "Load KDT (" << m_iTreeNumber << "," << treeNodeSize << ") Finish!" << std::endl; return true; } template <typename T> void InitSearchTrees(const VectorIndex* p_index, const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space) const { for (int i = 0; i < m_iTreeNumber; i++) { KDTSearch(p_index, p_query, p_space, m_pTreeStart[i], 0); } } template <typename T> void SearchTrees(const VectorIndex* p_index, const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space, const int p_limits) const { while (!p_space.m_SPTQueue.empty() && p_space.m_iNumberOfCheckedLeaves < p_limits) { auto& tcell = p_space.m_SPTQueue.pop(); KDTSearch(p_index, p_query, p_space, tcell.node, tcell.distance); } } private: template <typename T> void KDTSearch(const VectorIndex* p_index, const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace& p_space, const SizeType node, const float distBound) const { if (node < 0) { SizeType index = -node - 1; if (index >= p_index->GetNumSamples()) return; #ifdef PREFETCH const char* data = (const char *)(p_index->GetSample(index)); _mm_prefetch(data, _MM_HINT_T0); _mm_prefetch(data + 64, _MM_HINT_T0); #endif if (p_space.CheckAndSet(index)) return; ++p_space.m_iNumberOfTreeCheckedLeaves; ++p_space.m_iNumberOfCheckedLeaves; p_space.m_NGQueue.insert(COMMON::HeapCell(index, p_index->ComputeDistance((const void*)p_query.GetTarget(), (const void*)data))); return; } auto& tnode = m_pTreeRoots[node]; float diff = (p_query.GetTarget())[tnode.split_dim] - tnode.split_value; float distanceBound = distBound + diff * diff; SizeType otherChild, bestChild; if (diff < 0) { bestChild = tnode.left; otherChild = tnode.right; } else { otherChild = tnode.left; bestChild = tnode.right; } p_space.m_SPTQueue.insert(COMMON::HeapCell(otherChild, distanceBound)); KDTSearch(p_index, p_query, p_space, bestChild, distBound); } template <typename T> void DivideTree(VectorIndex* p_index, std::vector<SizeType>& indices, SizeType first, SizeType last, SizeType index, SizeType &iTreeSize) { ChooseDivision<T>(p_index, m_pTreeRoots[index], indices, first, last); SizeType i = Subdivide<T>(p_index, m_pTreeRoots[index], 
indices, first, last); if (i - 1 <= first) { m_pTreeRoots[index].left = -indices[first] - 1; } else { iTreeSize++; m_pTreeRoots[index].left = iTreeSize; DivideTree<T>(p_index, indices, first, i - 1, iTreeSize, iTreeSize); } if (last == i) { m_pTreeRoots[index].right = -indices[last] - 1; } else { iTreeSize++; m_pTreeRoots[index].right = iTreeSize; DivideTree<T>(p_index, indices, i, last, iTreeSize, iTreeSize); } } template <typename T> void ChooseDivision(VectorIndex* p_index, KDTNode& node, const std::vector<SizeType>& indices, const SizeType first, const SizeType last) { std::vector<float> meanValues(p_index->GetFeatureDim(), 0); std::vector<float> varianceValues(p_index->GetFeatureDim(), 0); SizeType end = min(first + m_iSamples, last); SizeType count = end - first + 1; // calculate the mean of each dimension for (SizeType j = first; j <= end; j++) { const T* v = (const T*)p_index->GetSample(indices[j]); for (DimensionType k = 0; k < p_index->GetFeatureDim(); k++) { meanValues[k] += v[k]; } } for (DimensionType k = 0; k < p_index->GetFeatureDim(); k++) { meanValues[k] /= count; } // calculate the variance of each dimension for (SizeType j = first; j <= end; j++) { const T* v = (const T*)p_index->GetSample(indices[j]); for (DimensionType k = 0; k < p_index->GetFeatureDim(); k++) { float dist = v[k] - meanValues[k]; varianceValues[k] += dist*dist; } } // choose the split dimension as one of the dimension inside TOP_DIM maximum variance node.split_dim = SelectDivisionDimension(varianceValues); // determine the threshold node.split_value = meanValues[node.split_dim]; } DimensionType SelectDivisionDimension(const std::vector<float>& varianceValues) const { // Record the top maximum variances std::vector<DimensionType> topind(m_numTopDimensionKDTSplit); int num = 0; // order the variances for (DimensionType i = 0; i < (DimensionType)varianceValues.size(); i++) { if (num < m_numTopDimensionKDTSplit || varianceValues[i] > varianceValues[topind[num - 1]]) { if (num < m_numTopDimensionKDTSplit) { topind[num++] = i; } else { topind[num - 1] = i; } int j = num - 1; // order the TOP_DIM variances while (j > 0 && varianceValues[topind[j]] > varianceValues[topind[j - 1]]) { std::swap(topind[j], topind[j - 1]); j--; } } } // randomly choose a dimension from TOP_DIM return topind[COMMON::Utils::rand(num)]; } template <typename T> SizeType Subdivide(VectorIndex* p_index, const KDTNode& node, std::vector<SizeType>& indices, const SizeType first, const SizeType last) const { SizeType i = first; SizeType j = last; // decide which child one point belongs while (i <= j) { SizeType ind = indices[i]; const T* v = (const T*)p_index->GetSample(ind); float val = v[node.split_dim]; if (val < node.split_value) { i++; } else { std::swap(indices[i], indices[j]); j--; } } // if all the points in the node are equal,equally split the node into 2 if ((i == first) || (i == last + 1)) { i = (first + last + 1) / 2; } return i; } private: std::vector<SizeType> m_pTreeStart; std::vector<KDTNode> m_pTreeRoots; public: std::unique_ptr<std::shared_timed_mutex> m_lock; int m_iTreeNumber, m_numTopDimensionKDTSplit, m_iSamples; }; } } #endif
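ChooseDivision above picks the split dimension among the highest-variance components and splits at that component's mean. A stripped-down C sketch of the same rule on a flat point array (the helper name and layout are mine; the real code samples at most m_iSamples points and then picks randomly among the top m_numTopDimensionKDTSplit variances rather than always the maximum):

#include <stddef.h>

/* Minimal model of KDTree::ChooseDivision: compute per-dimension mean and
 * variance over the candidate points, then split on the largest-variance
 * dimension at its mean. */
static void choose_division(const float *pts, size_t n, size_t dim,
                            size_t *split_dim, float *split_value) {
    size_t best = 0;
    float best_var = -1.0f;
    *split_value = 0.0f;
    for (size_t d = 0; d < dim; ++d) {
        float mean = 0.0f, var = 0.0f;
        for (size_t i = 0; i < n; ++i)
            mean += pts[i * dim + d];
        mean /= (float) n;
        for (size_t i = 0; i < n; ++i) {
            float diff = pts[i * dim + d] - mean;
            var += diff * diff;
        }
        if (var > best_var) {
            best_var = var;
            best = d;
            *split_value = mean;
        }
    }
    *split_dim = best;
}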
window.c
/********************************************************************[libaroma]* * Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *______________________________________________________________________________ * * Filename : window.c * Description : window * * + This is part of libaroma, an embedded ui toolkit. * + 06/04/15 - Author(s): Ahmad Amarullah * */ #ifndef __libaroma_window_c__ #define __libaroma_window_c__ #include <aroma_internal.h> #include "ui_internal.h" #ifdef __cplusplus extern "C" { #endif /* check wm macro */ #define __CHECK_WM(RETVAL) \ if (libaroma_wm()==NULL){ \ ALOGW("window manager uninitialized"); \ return RETVAL; \ } /* * Variable : _libaroma_window_measurement_dp * Type : byte * Descriptions: default measurement */ static byte _libaroma_window_measurement_dp=1; /* * Function : libaroma_window_usedp * Return Value: byte * Descriptions: use dp for measurement */ byte libaroma_window_usedp(byte isdp){ if (isdp==1){ _libaroma_window_measurement_dp=1; } else if (!isdp){ _libaroma_window_measurement_dp=0; } return _libaroma_window_measurement_dp; } /* End of libaroma_window_usedp */ /* * Function : libaroma_window_measure_point * Return Value: int * Descriptions: mesure point */ int libaroma_window_measure_point(int x){ if (_libaroma_window_measurement_dp){ return libaroma_dp(x); } return x; } /* End of libaroma_window_measure_point */ /* * Function : _libaroma_window_measure_save * Return Value: void * Descriptions: save measurement value */ void _libaroma_window_measure_save(LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl){ if (win!=NULL){ if (_libaroma_window_measurement_dp){ win->left = libaroma_px(win->x); win->top = libaroma_px(win->y); win->width= libaroma_px(win->w); win->height= libaroma_px(win->h); } else{ win->left = win->x; win->top = win->y; win->width= win->w; win->height= win->h; } } if (ctl!=NULL){ if (_libaroma_window_measurement_dp){ ctl->left = libaroma_px(ctl->x); ctl->top = libaroma_px(ctl->y); ctl->width= libaroma_px(ctl->w); ctl->height= libaroma_px(ctl->h); } else{ ctl->left = ctl->x; ctl->top = ctl->y; ctl->width= ctl->w; ctl->height= ctl->h; } } } /* End of _libaroma_window_measure_save */ /* * Function : libaroma_window_measure_calculate * Return Value: int * Descriptions: calculate measurement */ int libaroma_window_measure_calculate( int cv, int pos, int max, int is_size, int x){ if (is_size){ if (pos<=0){ switch (pos){ case LIBAROMA_POS_HALF: return (max / 2)-x; break; case LIBAROMA_POS_1P3: return (max / 3)-x; break; case LIBAROMA_POS_2P3: return (max * 2 / 3)-x; break; case LIBAROMA_POS_1P4: return (max / 4)-x; break; case LIBAROMA_POS_3P4: return (max * 3 / 4)-x; break; case LIBAROMA_SIZE_FULL: return max; break; case LIBAROMA_SIZE_HALF: return max / 2; break; case LIBAROMA_SIZE_THIRD: return max / 3; break; case LIBAROMA_SIZE_QUARTER: return max / 4; break; default: return abs(pos); } } } else{ if (pos<0){ switch (pos){ case LIBAROMA_POS_HALF: return max / 2; break; case LIBAROMA_POS_1P3: 
return max / 3; break; case LIBAROMA_POS_2P3: return max * 2 / 3; break; case LIBAROMA_POS_1P4: return max / 4; break; case LIBAROMA_POS_3P4: return max * 3 / 4; break; default: return abs(pos); } } } return cv; } /* End of libaroma_window_measure_calculate */ /* * Function : libaroma_window_measure_size * Return Value: byte * Descriptions: measure window size */ byte libaroma_window_measure_size(LIBAROMA_WINDOWP win){ if (win){ if (win->parent!=NULL){ ALOGW("window_resize cannot be used for child window"); return 0; } if (_libaroma_window_measurement_dp){ win->x = libaroma_dp(win->rx); win->y = libaroma_dp(win->ry); win->w = libaroma_dp(win->rw); win->h = libaroma_dp(win->rh); } else{ win->x = win->rx; win->y = win->ry; win->w = win->rw; win->h = win->rh; } win->ax=win->x; win->ay=win->y; win->x=libaroma_window_measure_calculate( win->x, win->rx, libaroma_wm()->w, 0, 0 ); win->y=libaroma_window_measure_calculate( win->y, win->ry, libaroma_wm()->h, 0, 0 ); win->w=libaroma_window_measure_calculate( win->w, win->rw, libaroma_wm()->w, 1, win->x ); win->h=libaroma_window_measure_calculate( win->h, win->rh, libaroma_wm()->h, 1, win->y ); if (win->w+win->x>libaroma_wm()->w){ win->w = libaroma_wm()->w-win->x; } if (win->h+win->y>libaroma_wm()->h){ win->h = libaroma_wm()->h-win->y; } _libaroma_window_measure_save(win,NULL); LIBAROMA_MSG _msg; libaroma_window_process_event(win,libaroma_wm_compose( &_msg, LIBAROMA_MSG_WIN_MEASURED, NULL, 0, 0) ); return 1; } return 0; } /* End of libaroma_window_measure */ /* * Function : _libaroma_window_ui_thread * Return Value: byte * Descriptions: window ui thread */ byte _libaroma_window_ui_thread(LIBAROMA_WINDOWP win) { int i; byte need_sync = 0; if (win->active==1){ LIBAROMA_CONTROLP toast_ctl=NULL; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ LIBAROMA_CONTROLP c=win->childs[i]; if (c->handler->thread!=NULL){ if (!libaroma_ctl_is_toast(c)){ //if not a toast, draw now if (c->handler->thread(c)){ if (libaroma_control_draw(c,0)){ libaroma_wm_updatesync( c->x+win->x, c->y+win->y, c->w, c->h, 0 ); need_sync=1; } } } else toast_ctl=c; //else, save it for draw at last } } if (toast_ctl!=NULL){ if (libaroma_control_draw(toast_ctl, 0)){ libaroma_wm_updatesync( toast_ctl->x+win->x, toast_ctl->y+win->y, toast_ctl->w, toast_ctl->h, 0 ); libaroma_png_save(win->dc, "/tmp/dc.png"); if (!need_sync) need_sync=1; } } } return need_sync; } /* End of _libaroma_window_ui_thread */ /* * Function : libaroma_window * Return Value: LIBAROMA_WINDOWP * Descriptions: creates a new window */ LIBAROMA_WINDOWP libaroma_window( char * bg_theme_name, int x, int y, int w, int h ){ __CHECK_WM(NULL); LIBAROMA_WINDOWP win = (LIBAROMA_WINDOWP) calloc(sizeof(LIBAROMA_WINDOW),1); if (!win){ ALOGW("libaroma_window alloc window data failed"); return NULL; } if (bg_theme_name){ snprintf(win->theme_bg,256,"%s",bg_theme_name); } else{ win->theme_bg[0]=0; } win->rx = x; win->ry = y; win->rw = w; win->rh = h; win->onpool=1; win->prev_screen = libaroma_fb_snapshoot_canvas(); win->ui_thread = _libaroma_window_ui_thread; libaroma_window_measure_size(win); return win; } /* End of libaroma_window */ /* * Function : libaroma_window_free * Return Value: byte * Descriptions: free window */ byte libaroma_window_free( LIBAROMA_WINDOWP win ){ __CHECK_WM(0); if (win==NULL){ return 0; } /* inactivate it */ if (win->parent==NULL){ if (libaroma_wm_get_active_window()==win){ /* detach active window from window manager */ libaroma_wm_set_active_window(NULL); } LIBAROMA_MSG 
_msg; libaroma_window_process_event(win, libaroma_wm_compose(&_msg, LIBAROMA_MSG_WIN_INACTIVE, NULL, 0, 0)); } if (win->handler!=NULL){ if (win->handler->prefree!=NULL){ win->handler->prefree(win); } } /* delete childs */ int i; if (win->childn>0){ #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ libaroma_control_free(win->childs[i]); } free(win->childs); } if (win->bg){ libaroma_canvas_free(win->bg); win->bg=NULL; } if (win->dc){ libaroma_canvas_free(win->dc); win->dc=NULL; } if (win->handler!=NULL){ if (win->handler->postfree!=NULL){ win->handler->postfree(win); } } free(win); return 1; } /* End of libaroma_window_free */ /* * Function : _libaroma_window_updatebg * Return Value: byte * Descriptions: update window background */ byte _libaroma_window_updatebg(LIBAROMA_WINDOWP win){ if (win==NULL){ ALOGW("window_recalculate win is NULL"); return 0; } if (win->handler!=NULL){ if (win->handler->updatebg!=NULL){ if (win->handler->updatebg(win)){ if (win->onupdatebg){ win->onupdatebg(win,win->bg); } return 1; } return 0; } } if (win->parent!=NULL){ return 0; } int w = win->w; int h = win->h; /* draw background */ if (win->bg!=NULL){ if ((win->bg->w==w)&&(win->bg->h==h)){ /* not need recreate background */ return 1; } libaroma_canvas_free(win->bg); } win->bg = libaroma_canvas(w,h); /* default canvas color */ libaroma_canvas_setcolor( win->bg, libaroma_colorget(NULL,win)->window_bg, 0xff ); /* from theme canvas */ if (win->theme_bg[0]!=0){ libaroma_wm_draw_theme( win->bg, win->theme_bg, 0, 0, win->bg->w, win->bg->h, NULL ); } /* from updatebg callback */ if (win->onupdatebg!=NULL){ win->onupdatebg(win,win->bg); } return 1; } /* End of _libaroma_window_updatebg */ /* * Function : _libaroma_window_recalculate * Return Value: byte * Descriptions: recalculate client size */ byte _libaroma_window_recalculate(LIBAROMA_WINDOWP win){ if (win==NULL){ ALOGW("window_recalculate win is NULL"); return 0; } if (libaroma_window_isactive(win)){ _libaroma_window_updatebg(win); libaroma_window_invalidate(win, 1); } return 1; } /* End of _libaroma_window_recalculate */ /* * Function : _libaroma_window_ready * Return Value: byte * Descriptions: window is ready */ byte _libaroma_window_ready(LIBAROMA_WINDOWP win){ __CHECK_WM(0); if (win==NULL){ ALOGW("window_resize win is NULL"); return 0; } int x = win->x; int y = win->y; int w = win->w; int h = win->h; if (w==0){ w = libaroma_wm()->w; x = 0; } if (h==0){ h = libaroma_wm()->h; y = 0; } /* set position */ if (win->dc!=NULL){ libaroma_canvas_free(win->dc); win->dc=NULL; } win->dc= libaroma_wm_canvas(x, y, w, h); if (win->dc==NULL){ ALOGW("window_ready cannot allocate workspace drawing canvas"); return 0; }/* if (libaroma_window_isactive(win)){ libaroma_wm_clean_workspace(); }*/ win->x = x; win->y = y; win->w = win->dc->w; win->h = win->dc->h; _libaroma_window_measure_save(win,NULL); _libaroma_window_recalculate(win); return 1; } /* End of _libaroma_window_ready */ /* * Function : libaroma_window_resize * Return Value: byte * Descriptions: resize window */ byte libaroma_window_resize( LIBAROMA_WINDOWP win, int x, int y, int w, int h ){ if (!win){ return 0; } if (win->parent!=NULL){ ALOGW("window_resize cannot be used for child window"); return 0; } win->rx = x; win->ry = y; win->rw = w; win->rh = h; if (libaroma_window_measure_size(win)){ return _libaroma_window_ready(win); } return 0; } /* End of libaroma_window_resize */ /* * Function : libaroma_window_isactive * Return Value: byte * Descriptions: check if window is active */ 
byte libaroma_window_isactive(LIBAROMA_WINDOWP win){ if (win!=NULL){ LIBAROMA_WINDOWP w = win; while(w->parent){ w=w->parent; } return ((w==libaroma_wm_get_active_window())?1:0); } return 0; } /* End of libaroma_window_isactive */ /* * Function : libaroma_window_add * Return Value: byte * Descriptions: add control into window */ byte libaroma_window_add( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl ){ __CHECK_WM(0); if (win==NULL){ ALOGW("window_add win is NULL"); return 0; } if (ctl==NULL){ ALOGW("window_add ctl is NULL"); return 0; } if (ctl->window != NULL){ ALOGW("window_add ctl already have window"); return 0; } libaroma_window_measure(win, ctl); if (win->childn==0){ win->childs = (LIBAROMA_CONTROLP *) malloc(sizeof(LIBAROMA_CONTROLP)); if (!win->childs){ ALOGW("window_add malloc failed"); win->childs=NULL; return 0; } win->childs[0]=ctl; } else{ LIBAROMA_CONTROLP * newchilds = (LIBAROMA_CONTROLP *) realloc(win->childs, sizeof(LIBAROMA_CONTROLP)*(win->childn+1)); if (!newchilds){ ALOGW("window_add realloc failed"); return 0; } win->childs = newchilds; win->childs[win->childn] = ctl; } ctl->window = win; win->childn++; _libaroma_window_recalculate(win); return 1; } /* End of libaroma_window_add */ /* * Function : libaroma_window_del * Return Value: byte * Descriptions: delete control from window */ byte libaroma_window_del( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl ){ __CHECK_WM(0); if (ctl==NULL){ ALOGW("window_del ctl is null"); return 0; } if (win==NULL){ ALOGW("window_del win is null"); return 0; } if (win != ctl->window){ return 0; } if (win->childn<=0){ ALOGW("window_del window data corrupt doesn't have childs??"); return 0; } else if (win->childn==1){ if (win->childs[0]==ctl){ ctl->window = NULL; win->childn=0; free(win->childs); win->childs=NULL; _libaroma_window_recalculate(win); return 1; } else{ ALOGW("window_del ctl not found in window"); return 0; } } LIBAROMA_CONTROLP * newchilds = (LIBAROMA_CONTROLP *) malloc(sizeof(LIBAROMA_CONTROLP)*(win->childn-1)); if (!newchilds){ ALOGW("window_del malloc temp childs failed"); return 0; } int j = 0; int i; for (i=0;i<win->childn;i++){ if (win->childs[i]!=ctl){ newchilds[j++]=win->childs[i]; if (j==win->childn-2){ /* current ctl not found */ free(newchilds); ALOGW("window_del ctl not found in window"); return 0; } } } free(win->childs); win->childs=newchilds; win->childn--; _libaroma_window_recalculate(win); return 1; } /* End of libaroma_window_del */ /* * Function : libaroma_window_measure * Return Value: byte * Descriptions: measure control size */ byte libaroma_window_measure(LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl){ if (win&&ctl){ if (_libaroma_window_measurement_dp){ ctl->x = libaroma_dp(ctl->rx); ctl->y = libaroma_dp(ctl->ry); ctl->w = libaroma_dp(ctl->rw); ctl->h = libaroma_dp(ctl->rh); } else{ ctl->x = ctl->rx; ctl->y = ctl->ry; ctl->w = ctl->rw; ctl->h = ctl->rh; } ctl->x=libaroma_window_measure_calculate( ctl->x, ctl->rx, win->w, 0, 0 ); ctl->y=libaroma_window_measure_calculate( ctl->y, ctl->ry, win->h, 0, 0 ); ctl->w=libaroma_window_measure_calculate( ctl->w,ctl->rw, win->w, 1, ctl->x ); ctl->h=libaroma_window_measure_calculate( ctl->h,ctl->rh, win->h, 1, ctl->y ); if (ctl->w+ctl->x>win->w){ ctl->w = win->w-ctl->x; } if (ctl->h+ctl->y>win->h){ ctl->h = win->h-ctl->y; } if (ctl->w<ctl->minw){ ctl->w=ctl->minw; } if (ctl->h<ctl->minh){ ctl->h=ctl->minh; } _libaroma_window_measure_save(NULL,ctl); if (ctl->handler->message){ LIBAROMA_MSG _msg; ctl->handler->message(ctl, libaroma_wm_compose( &_msg, 
LIBAROMA_MSG_WIN_MEASURED, NULL, 0, 0) ); return 1; } } return 0; } /* End of libaroma_window_measure */ /* * Function : libaroma_window_attach * Return Value: LIBAROMA_CONTROLP * Descriptions: attach control into window */ LIBAROMA_CONTROLP libaroma_window_attach( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl){ /* attach into window */ if (win){ if (libaroma_window_add(win,ctl)){ return ctl; } ALOGW("window_attach cannot attach into window"); libaroma_control_free(ctl); return NULL; } return ctl; } /* End of libaroma_window_attach */ /* * Function : libaroma_window_getid * Return Value: LIBAROMA_CONTROLP * Descriptions: get control by id */ LIBAROMA_CONTROLP libaroma_window_getid( LIBAROMA_WINDOWP win, word id){ __CHECK_WM(NULL); if (win==NULL){ ALOGW("window_control_id win is null"); return NULL; } int i; for (i=0;i<win->childn;i++){ if (win->childs[i]->id==id){ return win->childs[i]; } } return NULL; /* not found */ } /* End of libaroma_window_getid */ /* * Function : libaroma_window_setfocus * Return Value: LIBAROMA_CONTROLP * Descriptions: set control focus */ LIBAROMA_CONTROLP libaroma_window_setfocus( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl){ if (win==NULL){ ALOGW("window_setfocus window is null"); return NULL; } if (ctl!=NULL){ /* set */ if (win!=ctl->window){ ALOGW("window_setfocus control is not window child"); return NULL; } if (ctl->handler->focus!=NULL){ if (win->focused==ctl){ return ctl; } if (ctl->handler->focus(ctl,1)){ if (win->focused){ win->focused->handler->focus(win->focused,0); } win->focused=ctl; return ctl; } } return NULL; } else{ /* find focus */ if (win->focused){ return win->focused; } int i; for (i=0;i<win->childn;i++){ if (win->childs[i]->handler->focus!=NULL){ return libaroma_window_setfocus(win,win->childs[i]); } } } return NULL; } /* End of libaroma_window_setfocus */ /* * Function : libaroma_window_sync * Return Value: byte * Descriptions: sync window canvas */ byte libaroma_window_sync(LIBAROMA_WINDOWP win, int x, int y, int w, int h){ __CHECK_WM(0); if (win==NULL){ ALOGW("libaroma_window_sync win is null"); return 0; } if (win->handler!=NULL){ if (win->handler->sync!=NULL){ return win->handler->sync(win,x,y,w,h); } } if (win->parent!=NULL){ return 0; } if (!win->lock_sync){ if (!libaroma_window_isactive(win)){ ALOGW("libaroma_window_sync win is not active window"); return 0; } if (win->dc==NULL){ ALOGW("window_invalidate dc is null"); return 0; } /* sync workspace */ libaroma_wm_sync(win->x+x,win->y+y,w,h); } return 1; } /* End of libaroma_window_sync */ /* * Function : libaroma_window_invalidate * Return Value: byte * Descriptions: invalidate window drawing */ byte libaroma_window_invalidate(LIBAROMA_WINDOWP win, byte sync){ __CHECK_WM(0); if (win==NULL){ ALOGW("window_invalidate win is null"); return 0; } if (win->handler!=NULL){ if (win->handler->invalidate!=NULL){ return win->handler->invalidate(win,sync); } } if (win->parent!=NULL){ return 0; } if (!libaroma_window_isactive(win)){ ALOGW("window_invalidate win is not active window"); return 0; } if (win->dc==NULL){ ALOGW("window_invalidate dc is null"); return 0; } if ((!win->lock_sync)||(sync==10)){ /* draw bg */ libaroma_draw( win->dc, win->bg, 0, 0, 1); /* draw childs */ int i; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ /* draw no sync */ libaroma_control_draw(win->childs[i], 0); } /* sync */ if (sync){ libaroma_window_sync(win, 0, 0, win->w, win->h); } } return 1; } /* End of libaroma_window_invalidate */ /* * Function : 
libaroma_window_hideshow_animated * Return Value: byte * Descriptions: hide/show window - animated */ byte libaroma_window_hideshow_animated(LIBAROMA_WINDOWP win, byte anim, int duration, byte close){ if ((!anim)||(duration<50)){ if (close) { byte ret=libaroma_wm_set_active_window(NULL); libaroma_window_free(win); return ret; } else return libaroma_wm_set_active_window(win); } /* lock sync */ win->lock_sync = 1; byte is_active; if (close) is_active=1; else is_active=libaroma_wm_set_active_window(win); if (is_active){ if (!close) win->active=2; if (win->prev_screen==NULL) win->prev_screen=libaroma_canvas(win->w, win->h); //TODO: MOVE THIS TO WM CODE //if (!win->prev_screen->alpha && anim==LIBAROMA_WINDOW_SHOW_ANIMATION_CIRCLE) //libaroma_canvas_fillalpha(win->prev_screen, 0, 0, win->w, win->h, 0xFF); //init alpha for prev screen if needed /* draw window into temp canvas */ LIBAROMA_CANVASP wmc = win->dc; //window had a canvas area of wm, let's grab it LIBAROMA_CANVASP tdc = libaroma_canvas(wmc->w,wmc->h); if (close) libaroma_draw(tdc,wmc,0,0,0); win->dc=tdc; /* switch dc to temporary */ //if closing, deactivate window (otherwise ripple animations are played while animate-closing) if (close) libaroma_wm_set_active_window(NULL); else libaroma_window_invalidate(win, 10); //otherwise draw real window image at temp dc long start = libaroma_tick(); int delta = 0; int debug=1; while ((delta=libaroma_tick()-start)<duration){ float state = (((float) delta)/((float) duration)); switch (anim){ case LIBAROMA_WINDOW_SHOW_ANIMATION_PAGE_TOP: { if (state>=1.0){ break; } float swift_out_state = close? libaroma_cubic_bezier_easeout(state): libaroma_cubic_bezier_easein(state); int y; if (close) y = (swift_out_state * win->h); else y = win->h - (swift_out_state * win->h); int h = win->h - y; if (h>0){ libaroma_draw_ex( wmc, win->prev_screen, 0, 0, 0, 0, win->w, win->h-h, 0, 0xFF ); libaroma_draw_ex( wmc, win->dc, 0, y, 0, 0, win->w, h, 0, 0xff ); libaroma_wm_sync(win->x,win->y,win->w, win->h); } } break; case LIBAROMA_WINDOW_SHOW_ANIMATION_PAGE_LEFT: { if (state>=1.0){ break; } float swift_out_state = close? libaroma_cubic_bezier_easeout(state): libaroma_cubic_bezier_easein(state); int x; if (close) x = swift_out_state * win->w; else x = win->w - (swift_out_state * win->w); int w = win->w - x; if (w>0){ libaroma_draw_ex( wmc, win->prev_screen, 0, 0, 0, 0, win->w-w, win->h, 0, 0xFF ); libaroma_draw_ex( wmc, win->dc, x, 0, 0, 0, w, win->h, 0, 0xff ); libaroma_wm_sync(win->x,win->y,win->w, win->h); } } break; case LIBAROMA_WINDOW_SHOW_ANIMATION_PAGE_RIGHT: { if (state>=1.0){ break; } float swift_out_state = close? libaroma_cubic_bezier_easeout(state): libaroma_cubic_bezier_easein(state); int x; if (close) x = swift_out_state * win->w; else x = win->w - (swift_out_state * win->w); int w = win->w - x; //printf("X=%d, W=%d\n", x, w); if (w>0){ //libaroma_canvas_setcolor(wmc, RGB(0), 0xFF); libaroma_draw_ex( wmc, win->prev_screen, w, 0, w, 0, win->w-w, win->h, 0, 0xFF ); libaroma_draw_ex( wmc, win->dc, 0, 0, x, 0, w, win->h, 0, 0xff ); libaroma_wm_sync(win->x,win->y, win->w, win->h); } } break; case LIBAROMA_WINDOW_SHOW_ANIMATION_SWAP_LEFT: { if (state>=1.0){ break; } float swift_out_state = close? 
libaroma_cubic_bezier_easeout(state): libaroma_cubic_bezier_easein(state); int x; if (close) x = swift_out_state * win->w; else x = win->w - (swift_out_state * win->w); int w = win->w - x; //printf("X=%d, W=%d\n", x, w); if (w>0){ libaroma_canvas_setcolor(wmc, RGB(0), 0xFF); libaroma_draw_ex( wmc, win->prev_screen, 0, 0, w, 0, win->w-w, win->h, 0, 0xFF ); libaroma_draw_ex( wmc, win->dc, 0, 0, x, 0, w, win->h, 0, 0xff ); libaroma_wm_sync(win->x,win->y, win->w, win->h); } } break; case LIBAROMA_WINDOW_SHOW_ANIMATION_SWAP_RIGHT: { if (state>=1.0){ break; } float swift_out_state = close? libaroma_cubic_bezier_easeout(state): libaroma_cubic_bezier_easein(state); int x; if (close) x = swift_out_state * win->w; else x = win->w - (swift_out_state * win->w); int w = win->w - x; //printf("X=%d, W=%d\n", x, w); if (w>0){ libaroma_canvas_setcolor(wmc, RGB(0), 0xFF); libaroma_draw_ex( wmc, win->prev_screen, w, 0, 0, 0, win->w-w, win->h, 0, 0xFF ); libaroma_draw_ex( wmc, win->dc, x, 0, 0, 0, w, win->h, 0, 0xff ); libaroma_wm_sync(win->x,win->y, win->w, win->h); } } break; default:{ state = close? libaroma_cubic_bezier_easeout(state): libaroma_cubic_bezier_easein(state); if (close) state = (1.0 - state); if ((!close && state>=1.0) || (close && state <=0.0)){ break; } //ALOGV("Playing %s animation with state %1.2f", close?"close":"open", state); libaroma_art_draw_switch_animation(libaroma_ani_win_to_art(anim), wmc, win->prev_screen, win->dc, //this is needed because snapshots are taken //using fb size, not wm workspace size libaroma_wm()->x, libaroma_wm()->y, win->prev_screen->w, win->prev_screen->h, 0, 0, win->w, win->h, state); libaroma_wm_sync(win->x, win->y, win->w, win->h); } break; } libaroma_sleep(12); } if (!close) libaroma_draw(wmc,win->dc,0,0,0); //copy real window image to original canvas win->dc=wmc; /* switch dc to wm canvas area */ libaroma_canvas_free(tdc); } win->lock_sync = 0; /* sync view now */ if (close){ //libaroma_wm_set_active_window(NULL); libaroma_wm_sync(win->x,win->y,win->w,win->h); libaroma_window_free(win); } else { win->active=1; libaroma_wm_sync(win->x,win->y,win->w,win->h); /* send activate */ LIBAROMA_MSG _msg; libaroma_window_process_event(win,libaroma_wm_compose( &_msg, LIBAROMA_MSG_WIN_ACTIVE, NULL, 10, 0) ); } return 1; } /* * Function : libaroma_window_calculate_pos * Return Value: void * Descriptions: calculate screen position to window/control position */ void libaroma_window_calculate_pos( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl, int * x, int * y ){ if (win!=NULL){ *x-=win->x; *y-=win->y; } else if ((ctl!=NULL)&&(ctl->window!=NULL)){ *x-=ctl->window->x; *y-=ctl->window->y; } if (ctl!=NULL){ *x-=ctl->x; *y-=ctl->y; } /* *x-=libaroma_wm()->x; *y-=libaroma_wm()->y; */ } /* End of libaroma_window_calculate_pos */ /* * Function : libaroma_window_calculate_pos_abs * Return Value: void * Descriptions: calculate absolute screen position to top window position */ void libaroma_window_calculate_pos_abs( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl, int * x, int * y ){ if (ctl!=NULL){ *x-=ctl->x; *y-=ctl->y; win=ctl->window; } while (win!=NULL){ *x-=win->ax; *y-=win->ay; win=win->parent; } } /* End of libaroma_window_calculate_pos_abs */ /* * Function : _libaroma_window_is_inside * Return Value: byte * Descriptions: check position coordinate */ byte _libaroma_window_is_inside(LIBAROMA_CONTROLP ctl, int x, int y) { int wx = ctl->x; int wx2 = wx + ctl->w; int wy = ctl->y; int wy2 = wy + ctl->h; if ((x >= wx) && (x < wx2) && (y >= wy) && (y < wy2)) { return 1; } return 0; } 
/* End of _libaroma_window_is_inside */ /* * Function : libaroma_window_post_command * Return Value: byte * Descriptions: post direct command */ byte libaroma_window_post_command(dword cmd){ return libaroma_msg_post( LIBAROMA_MSG_WIN_DIRECTMSG, 0, 0, (int) cmd, 0, NULL ); } /* End of libaroma_window_post_command */ /* * Function : libaroma_window_post_command_ex * Return Value: byte * Descriptions: post direct command extended */ byte libaroma_window_post_command_ex(dword cmd, byte state, int key, int y, voidp d){ return libaroma_msg_post( LIBAROMA_MSG_WIN_DIRECTMSG, state, key, (int) cmd, y, d ); } /* End of libaroma_window_post_command */ /* * Function : libaroma_window_process_event * Return Value: dword * Descriptions: process message */ dword libaroma_window_process_event(LIBAROMA_WINDOWP win, LIBAROMA_MSGP msg){ __CHECK_WM(0); if (win==NULL){ ALOGW("window_event win is null"); return 0; } if (win->parent!=NULL){ ALOGW("window_event cannot used for child window..."); return 0; } dword ret = 0; if (win->handler){ if (win->handler->message_hooker){ if (win->handler->message_hooker(win,msg,&ret)){ return ret; } } } switch (msg->msg){ case LIBAROMA_MSG_WIN_ACTIVE: { /* set current window size */ win->focused=NULL; win->touched=NULL; if (msg->x!=10){ _libaroma_window_ready(win); } if ((!win->lock_sync)||(msg->x==10)){ if ((!win->active)||(msg->x==10)){ int i; win->active=1; /* signal child */ for (i=0;i<win->childn;i++){ if (win->childs[i]->handler->message){ win->childs[i]->handler->message(win->childs[i], msg); } } } } } break; case LIBAROMA_MSG_WIN_RESIZE: { int i; _libaroma_window_ready(win); for (i=0;i<win->childn;i++){ if (win->childs[i]->handler->message){ win->childs[i]->handler->message(win->childs[i], msg); } } } break; case LIBAROMA_MSG_WIN_INACTIVE: { if (win->active){ /* stop thread manager */ win->active=0; /* send inactive message to child */ int i; for (i=0;i<win->childn;i++){ if (win->childs[i]->handler->message){ win->childs[i]->handler->message(win->childs[i], msg); } } win->focused=NULL; win->touched=NULL; } } break; case LIBAROMA_MSG_WIN_MEASURED: { /* remeasured all childs */ int i; for (i=0;i<win->childn;i++){ libaroma_window_measure(win,win->childs[i]); } } break; case LIBAROMA_MSG_WIN_DIRECTMSG: { return (dword) msg->x; } break; case LIBAROMA_MSG_WIN_INVALIDATE: { libaroma_window_invalidate(win, 1); } break; case LIBAROMA_MSG_TOUCH: { /* touch handler */ if (msg->state==LIBAROMA_HID_EV_STATE_DOWN){ win->touched = NULL; int x = msg->x; int y = msg->y; libaroma_window_calculate_pos(win,NULL,&x,&y); int i; for (i=0;i<win->childn;i++){ if (_libaroma_window_is_inside(win->childs[i],x,y)){ win->touched = win->childs[i]; break; } } if (win->touched!=NULL){ if (win->touched->handler->message){ ret=win->touched->handler->message(win->touched, msg); } } } else if (win->touched!=NULL){ if (msg->state==LIBAROMA_HID_EV_STATE_MOVE){ if (win->touched->handler->message){ ret=win->touched->handler->message(win->touched, msg); } } else if (msg->state==LIBAROMA_HID_EV_STATE_UP){ if (win->touched->handler->message){ ret=win->touched->handler->message(win->touched, msg); } win->touched=NULL; } } } break; } return ret; } /* End of libaroma_window_process_event */ /* * Function : libaroma_window_pool * Return Value: dword * Descriptions: poll window messages */ dword libaroma_window_pool( LIBAROMA_WINDOWP win, LIBAROMA_MSGP msg){ if (!win){ return 0; } if (win->parent!=NULL){ ALOGW("cannot pool child window..."); return 0; } LIBAROMA_MSG _msg; LIBAROMA_MSGP cmsg=(msg!=NULL)?msg:&_msg; 
byte ret = libaroma_wm_getmessage(cmsg); if (ret){ return libaroma_window_process_event(win,cmsg); } return 0; } /* End of libaroma_window_pool */ #undef __CHECK_WM #ifdef __cplusplus } #endif #endif /* __libaroma_window_c__ */
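libaroma_window_measure_calculate above treats non-positive position/size values as symbolic codes for fractions of the parent extent (half, thirds, quarters). A toy C model of that dispatch; the numeric enum values below are placeholders of mine, since the real LIBAROMA_POS_*/LIBAROMA_SIZE_* constants are defined in the libaroma headers:

/* Toy model of libaroma_window_measure_calculate: non-positive values act
 * as symbolic fractions of the parent extent. */
enum { POS_HALF = -101, POS_1P3 = -102, POS_2P3 = -103 }; /* hypothetical codes */

static int measure_pos(int pos, int max) {
    switch (pos) {
        case POS_HALF: return max / 2;
        case POS_1P3:  return max / 3;
        case POS_2P3:  return max * 2 / 3;
        default:       return pos < 0 ? -pos : pos; /* abs(pos), as in the source */
    }
}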
test_critical.c
//===-- test_critical.c - Test the "critical" construct -----------*- C -*-===// // // Part of the LOMP project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <omp.h> #include "tests.h" int main(void) { int num_failed = 0; int in_critical = 0; #pragma omp parallel { #pragma omp critical { int locked; // Determine if critical section is currently held by another thread #pragma omp atomic read locked = in_critical; if (locked) { // This should not happen, so count this as an error #pragma omp atomic num_failed++; } else { // This is the correct behavior, so flag this region as being // executed by the current thread. #pragma omp atomic write in_critical = 1; } // Wait a bit before releasing the critical region again. printf("Thread %d: in critical region\n", omp_get_thread_num()); sleep(SLEEPTIME); #pragma omp atomic write in_critical = 0; } } return num_failed != 0 ? EXIT_FAILURE : EXIT_SUCCESS; }
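The property this test checks, that at most one thread is ever inside the critical construct, can also be expressed with the explicit OpenMP lock API. A minimal sketch of the same exclusion check using omp_lock_t (roughly what an unnamed critical lowers to):

#include <omp.h>
#include <stdio.h>

int main(void) {
    omp_lock_t lock;
    int in_region = 0, failed = 0;
    omp_init_lock(&lock);
    #pragma omp parallel
    {
        omp_set_lock(&lock);
        if (in_region)        /* another thread inside: exclusion broken */
            failed = 1;
        in_region = 1;
        /* ... critical work would go here ... */
        in_region = 0;
        omp_unset_lock(&lock);
    }
    omp_destroy_lock(&lock);
    printf("%s\n", failed ? "FAILED" : "passed");
    return 0;
}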
GB_unaryop__abs_int32_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int32_fp32 // op(A') function: GB_tran__abs_int32_fp32 // C type: int32_t // A type: float // cast: int32_t cij ; GB_CAST_SIGNED(cij,aij,32) // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ float #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int32_t z ; GB_CAST_SIGNED(z,x,32) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int32_fp32 ( int32_t *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
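After macro expansion, the parallel apply loop in this generated kernel reduces to a cast followed by an absolute value per element. Roughly as below; this is a simplification of mine, since the real GB_CAST_SIGNED also defines the NaN and out-of-range behavior in the GraphBLAS headers:

#include <stdint.h>

/* Rough expansion of the Cx = op(cast(Ax)) loop in this file. */
void unop_abs_int32_fp32_sketch(int32_t *Cx, const float *Ax,
                                int64_t anz, int nthreads) {
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0; p < anz; p++) {
        float   aij = Ax[p];           /* GB_GETA                */
        int32_t z   = (int32_t) aij;   /* GB_CASTING, simplified */
        Cx[p] = z < 0 ? -z : z;        /* GB_OP: GB_IABS         */
    }
}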
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); 
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(A);
  free(coef);
  return 0;
}
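Each inner-loop update above reads seven neighbors and seven coefficients, performing 7 multiplies and 6 adds, i.e. 13 flops per interior point per time step, which is handy for sizing runs. A small helper (the sizes below are examples, not the benchmark defaults):

#include <stdio.h>

/* Work estimate for the 7-point variable-coefficient stencil:
 * 7 multiplies + 6 adds = 13 flops per interior point per time step. */
int main(void) {
    long nx = 512, ny = 512, nz = 512, nt = 100;  /* example sizes */
    double pts   = (double)(nx - 2) * (ny - 2) * (nz - 2);
    double flops = 13.0 * pts * nt;
    printf("total flops: %.3e\n", flops);
    return 0;
}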
GB_unaryop__identity_uint32_bool.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_uint32_bool // op(A') function: GB_tran__identity_uint32_bool // C type: uint32_t // A type: bool // cast: uint32_t cij = (uint32_t) aij // unaryop: cij = aij #define GB_ATYPE \ bool #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint32_t z = (uint32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_uint32_bool ( uint32_t *restrict Cx, const bool *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_uint32_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
reduction-1.c
#include <omp.h> #include <stdlib.h> int main (void) { int i = 0, j = 0, k = ~0; double d = 1.0; #pragma omp parallel num_threads(4) reduction(+:i) reduction(*:d) reduction(&:k) { if (i != 0 || d != 1.0 || k != ~0) #pragma omp atomic j |= 1; if (omp_get_num_threads () != 4) #pragma omp atomic j |= 2; i = omp_get_thread_num (); d = i + 1; k = ~(1 << (2 * i)); } if (j & 1) abort (); if ((j & 2) == 0) { if (i != (0 + 1 + 2 + 3)) abort (); if (d != (1.0 * 2.0 * 3.0 * 4.0)) abort (); if (k != (~0 ^ 0x55)) abort (); } return 0; }
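The reduction clauses in this test can be written out by hand, which makes the identity values explicit: 0 for +, 1.0 for *, and ~0 for &. A manual equivalent of the same combination (sketch only; a real reduction clause combines partials without needing a critical section):

#include <omp.h>
#include <stdio.h>

int main(void) {
    int i = 0, k = ~0;
    double d = 1.0;
    #pragma omp parallel num_threads(4)
    {
        int    i_priv = 0;     /* identity for +  */
        double d_priv = 1.0;   /* identity for *  */
        int    k_priv = ~0;    /* identity for &  */
        int tid = omp_get_thread_num();
        i_priv = tid;
        d_priv = tid + 1;
        k_priv = ~(1 << (2 * tid));
        #pragma omp critical
        {
            i += i_priv;       /* combine partials, as reduction() does */
            d *= d_priv;
            k &= k_priv;
        }
    }
    printf("i=%d d=%f k=%x\n", i, d, k);  /* with 4 threads: 6, 24.0, ~0 ^ 0x55 */
    return 0;
}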
otbSampleAugmentation.h
/*
 * Copyright (C) 2005-2019 Centre National d'Etudes Spatiales (CNES)
 *
 * This file is part of Orfeo Toolbox
 *
 * https://www.orfeo-toolbox.org/
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef otbSampleAugmentation_h
#define otbSampleAugmentation_h

#ifdef _OPENMP
#include <omp.h>
#endif

#include <vector>
#include <algorithm>
#include <cmath>
#include <random>
#include <ctime>
#include <cassert>

namespace otb
{
namespace sampleAugmentation
{
using SampleType       = std::vector<double>;
using SampleVectorType = std::vector<SampleType>;

/** Estimate standard deviations of the components in one pass using
Welford's algorithm
*/
SampleType EstimateStds(const SampleVectorType& samples)
{
  const auto nbSamples    = samples.size();
  const long nbComponents = static_cast<long>(samples[0].size());
  SampleType stds(nbComponents, 0.0);
  SampleType means(nbComponents, 0.0);
  for (size_t i = 0; i < nbSamples; ++i)
  {
    auto norm_factor = 1.0 / (i + 1);
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (long j = 0; j < nbComponents; ++j)
    {
      const auto mu    = means[j];
      const auto x     = samples[i][j];
      auto       muNew = mu + (x - mu) * norm_factor;
      stds[j] += (x - mu) * (x - muNew);
      means[j] = muNew;
    }
  }
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (long j = 0; j < nbComponents; ++j)
  {
    stds[j] = std::sqrt(stds[j] / nbSamples);
  }
  return stds;
}

/** Create new samples by replicating input samples. We loop through
* the input samples and add them to the new data set until nbSamples
* are added. The elements of newSamples are removed before proceeding.
*/
void ReplicateSamples(const SampleVectorType& inSamples, const size_t nbSamples, SampleVectorType& newSamples)
{
  newSamples.resize(nbSamples);
  const long long nbSamplesLL = static_cast<long long>(nbSamples);
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (long long i = 0; i < nbSamplesLL; ++i)
  {
    // Index modulo the input size: a shared running counter would race
    // between threads under the parallel for.
    newSamples[i] = inSamples[static_cast<size_t>(i) % inSamples.size()];
  }
}

/** Create new samples by adding noise to existing samples. Gaussian
* noise is added to randomly selected samples. The standard deviation
* of the noise added to each component is the same as the one of the
* input variables divided by stdFactor (defaults to 10). The
* elements of newSamples are removed before proceeding.
*/ void JitterSamples(const SampleVectorType& inSamples, const size_t nbSamples, SampleVectorType& newSamples, float stdFactor = 10, const int seed = std::time(nullptr)) { newSamples.resize(nbSamples); const long nbComponents = static_cast<long>(inSamples[0].size()); std::random_device rd; std::mt19937 gen(rd()); // The input samples are selected randomly with replacement std::srand(seed); // We use one gaussian distribution per component since they may // have different stds auto stds = EstimateStds(inSamples); std::vector<std::normal_distribution<double>> gaussDis(nbComponents); #ifdef _OPENMP #pragma omp parallel for #endif for (long i = 0; i < nbComponents; ++i) gaussDis[i] = std::normal_distribution<double>{0.0, stds[i] / stdFactor}; for (size_t i = 0; i < nbSamples; ++i) { newSamples[i] = inSamples[std::rand() % inSamples.size()]; #ifdef _OPENMP #pragma omp parallel for #endif for (long j = 0; j < nbComponents; ++j) newSamples[i][j] += gaussDis[j](gen); } } struct NeighborType { size_t index; double distance; }; struct NeighborSorter { constexpr bool operator()(const NeighborType& a, const NeighborType& b) const { return b.distance > a.distance; } }; double ComputeSquareDistance(const SampleType& x, const SampleType& y) { assert(x.size() == y.size()); double dist{0}; for (size_t i = 0; i < x.size(); ++i) { dist += (x[i] - y[i]) * (x[i] - y[i]); } return dist / (x.size() * x.size()); } using NNIndicesType = std::vector<NeighborType>; using NNVectorType = std::vector<NNIndicesType>; /** Returns the indices of the nearest neighbors for each input sample */ void FindKNNIndices(const SampleVectorType& inSamples, const size_t nbNeighbors, NNVectorType& nnVector) { const long long nbSamples = static_cast<long long>(inSamples.size()); nnVector.resize(nbSamples); #ifdef _OPENMP #pragma omp parallel for #endif for (long long sampleIdx = 0; sampleIdx < nbSamples; ++sampleIdx) { NNIndicesType nns; for (long long neighborIdx = 0; neighborIdx < nbSamples; ++neighborIdx) { if (sampleIdx != neighborIdx) nns.push_back({static_cast<size_t>(neighborIdx), ComputeSquareDistance(inSamples[sampleIdx], inSamples[neighborIdx])}); } std::partial_sort(nns.begin(), nns.begin() + nbNeighbors, nns.end(), NeighborSorter{}); nns.resize(nbNeighbors); nnVector[sampleIdx] = std::move(nns); } } /** Generate the new sample in the line linking s1 and s2 */ SampleType SmoteCombine(const SampleType& s1, const SampleType& s2, double position) { auto result = s1; for (size_t i = 0; i < s1.size(); ++i) result[i] = s1[i] + (s2[i] - s1[i]) * position; return result; } /** Create new samples using the SMOTE algorithm Chawla, N. V., Bowyer, K. W., Hall, L. O., & Kegelmeyer, W. P., Smote: synthetic minority over-sampling technique, Journal of artificial intelligence research, 16(), 321–357 (2002). 
/** Create new samples using the SMOTE algorithm:
 * Chawla, N. V., Bowyer, K. W., Hall, L. O., & Kegelmeyer, W. P.,
 * SMOTE: synthetic minority over-sampling technique, Journal of
 * Artificial Intelligence Research, 16, 321-357 (2002).
 * http://dx.doi.org/10.1613/jair.953
 */
void Smote(const SampleVectorType& inSamples, const size_t nbSamples, SampleVectorType& newSamples, const int nbNeighbors, const int seed = std::time(nullptr))
{
  newSamples.resize(nbSamples);
  const long long nbSamplesLL = static_cast<long long>(nbSamples);
  NNVectorType nnVector;
  FindKNNIndices(inSamples, nbNeighbors, nnVector);
  // The input samples are selected randomly with replacement.
  // std::rand() keeps hidden global state and is not guaranteed to be
  // thread safe, so this loop is kept sequential.
  std::srand(seed);
  for (long long i = 0; i < nbSamplesLL; ++i)
  {
    const auto sampleIdx   = std::rand() % (inSamples.size());
    const auto sample      = inSamples[sampleIdx];
    const auto neighborIdx = nnVector[sampleIdx][std::rand() % nbNeighbors].index;
    const auto neighbor    = inSamples[neighborIdx];
    newSamples[i]          = SmoteCombine(sample, neighbor, std::rand() / double{RAND_MAX});
  }
}

} // end namespace sampleAugmentation
} // end namespace otb

#endif
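/* Usage sketch (not part of the original header): a minimal, hypothetical
 * driver showing how the three augmentation routines above fit together.
 * The sample values, parameters, and the demo guard macro are made up for
 * illustration. */
#ifdef OTB_SAMPLE_AUGMENTATION_DEMO
#include <iostream>
int main()
{
  using namespace otb::sampleAugmentation;
  SampleVectorType in = {{1.0, 2.0}, {1.5, 1.8}, {0.9, 2.2}, {1.2, 2.1}};
  SampleVectorType out;

  ReplicateSamples(in, 8, out);      // cyclic copies of the 4 inputs
  JitterSamples(in, 8, out, 10, 42); // copies + per-component gaussian noise
  Smote(in, 8, out, 2, 42);          // synthetic points between nearest neighbors

  std::cout << out.size() << " samples generated\n";
  return 0;
}
#endif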
function-call-1.c
#define size 8

#pragma omp declare target
int
identity (int x)
{
  return x;
}

int
expx (int x, int n)
{
  for (int i = 0; i < n - 1; i++)
    x *= x;
  return x;
}

float
init (int x, int y)
{
  int x1 = identity (identity (identity (identity (x))));
  int y1 = identity (identity (identity (identity (y))));
  int x2 = expx (x1, 2); /* expx (x, 2) squares once, so init returns x*x + y*y */
  int y2 = expx (y1, 2);
  return (x2 + y2);
}
#pragma omp end declare target

int
main ()
{
  int i, j;
  int a[size][size];

  /* 'a' is written on the device and checked on the host afterwards, so it
     must be mapped 'from' (a 'to'-only map would leave the host copy stale
     on discrete devices).  */
  #pragma omp target teams map(from: a[:size][:size])
  #pragma omp distribute parallel for default(none) private(i, j) shared(a)
  for (i = 0; i < size; ++i)
    for (j = 0; j < size; ++j)
      a[i][j] = init (i, j);

  for (i = 0; i < size; ++i)
    for (j = 0; j < size; ++j)
      if (i * i + j * j != a[i][j])
	__builtin_abort ();

  return 0;
}
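/* Companion sketch (not from the original test): the same declare-target
 * pattern, but validating on the device with a reduction so that no array
 * has to be mapped back. Function and guard names are illustrative. */
#ifdef FUNCTION_CALL_1_DEMO
#pragma omp declare target
static int square (int x) { return x * x; }
#pragma omp end declare target

int
check_on_device (void)
{
  int bad = 0;
  #pragma omp target teams distribute parallel for reduction(+:bad) map(tofrom: bad)
  for (int i = 0; i < 8; ++i)
    if (square (i) != i * i)
      bad++;
  return bad; /* 0 on success */
}
#endif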
ks_cpp.c
#include <stdio.h>
#include <malloc.h>
#include <assert.h>
#include <memory.h>
#include <math.h>

// C bool
typedef enum { true=1, false=0 } bool;

static inline void update_min(float* p1, float v2) {
    if (v2 < *p1)
        *p1 = v2;
}

// https://stackoverflow.com/questions/28258590/using-openmp-to-get-the-index-of-minimum-element-parallelly
struct Compare { float val; size_t index; };
// The initializer clause seeds each thread-private copy from the shared
// variable (whose val is set to the -1 sentinel before each reduction);
// without it the private copies are not guaranteed to start from the sentinel.
#pragma omp declare reduction(maximum : struct Compare : omp_out = omp_in.val > omp_out.val ? omp_in : omp_out) initializer(omp_priv = omp_orig)

void kennard_stone(float* cdist, size_t* seed, size_t* result, float* v_dist,
                   size_t n_sample, size_t n_seed, size_t n_result) {
    // 00. Assertions and Result Vector Initialization
    struct Compare sup;
    if (n_seed == 2)
        v_dist[0] = cdist[seed[0] * n_sample + seed[1]];
    if (n_seed == 0) {
        // No seed given: start from the pair of most distant samples.
        size_t n_sample_2 = n_sample * n_sample;
        sup.val = -1.;
        sup.index = 0;
        #pragma omp parallel for reduction(maximum:sup)
        for (size_t i = 0; i < n_sample_2; ++i) {
            if (cdist[i] > sup.val) {
                sup.val = cdist[i];
                sup.index = i;
            }
        }
        seed[0] = sup.index / n_sample;
        seed[1] = sup.index % n_sample;
        n_seed = 2;
        v_dist[0] = sup.val;
    }
    n_result = n_result == 0 ? n_sample : n_result;
    assert(n_result <= n_sample);
    assert(n_seed <= n_sample);
    memcpy(result, seed, n_seed * sizeof(size_t));
    memset(result + n_seed, 0, (n_result - n_seed) * sizeof(size_t));

    // 01. Scratch Area Initialization
    bool* selected = (bool*)malloc(n_sample * sizeof(bool));
    memset(selected, false, n_sample * sizeof(bool));
    for (size_t i = 0; i < n_seed; ++i)
        selected[result[i]] = true;

    // 02. Minimum Out-of-Group Initialization
    float* min_vals = (float*)malloc(n_sample * sizeof(float));
    memcpy(min_vals, cdist + n_sample * result[0], n_sample * sizeof(float));
    for (size_t n = 1; n < n_seed; ++n) {
        size_t idx_starting = result[n] * n_sample;
        #pragma omp parallel for
        for (size_t i = 0; i < n_sample; ++i) {
            if (selected[i]) continue;
            update_min(&min_vals[i], cdist[idx_starting + i]);
        }
    }

    // 03. Main Algorithm
    for (size_t n = n_seed; n < n_result; ++n) {
        // Find sup of the minimum
        sup.val = -1.;
        sup.index = 0;
        #pragma omp parallel for reduction(maximum:sup)
        for (size_t i = 0; i < n_sample; ++i) {
            if (selected[i]) continue;
            if (min_vals[i] > sup.val) {
                sup.index = i;
                sup.val = min_vals[i];
            }
        }
        v_dist[n - 1] = sup.val;
        selected[sup.index] = true;
        result[n] = sup.index;
        size_t idx_starting = sup.index * n_sample;
        #pragma omp parallel for
        for (size_t i = 0; i < n_sample; ++i) {
            if (selected[i]) continue;
            update_min(&min_vals[i], cdist[idx_starting + i]);
        }
    }
    free(selected);
    free(min_vals);
}

float euclid_distance_vector(float* x1, float* x2, size_t n_feature) {
    float res = 0.;
    do {
        res += (*x1 - *x2) * (*x1 - *x2);
        ++x1, ++x2;
    } while (--n_feature);
    return sqrtf(res);
}

void kennard_stone_mem(float* X, size_t* seed, size_t* result, float* v_dist,
                       size_t n_sample, size_t n_feature, size_t n_seed, size_t n_result) {
    // 00. Assertions and Result Vector Initialization
    struct Compare sup;
    if (n_seed == 2)
        v_dist[0] = euclid_distance_vector(X + n_feature * seed[0], X + n_feature * seed[1], n_feature);
    assert(n_seed != 0); // Seed should be supplied from outer program.
    assert(n_result <= n_sample);
    assert(n_seed <= n_sample);
    memcpy(result, seed, n_seed * sizeof(size_t));
    memset(result + n_seed, 0, (n_result - n_seed) * sizeof(size_t));

    // 01. Scratch Area Initialization
    bool* selected = (bool*)malloc(n_sample * sizeof(bool));
    memset(selected, false, n_sample * sizeof(bool));
    for (size_t i = 0; i < n_seed; ++i)
        selected[result[i]] = true;
    // 02. Minimum Out-of-Group Initialization
    float* min_vals = (float*)malloc(n_sample * sizeof(float));
    #pragma omp parallel for
    for (size_t i = 0; i < n_sample; ++i) {
        if (selected[i]) continue;
        min_vals[i] = euclid_distance_vector(X + n_feature * result[0], X + n_feature * i, n_feature);
    }
    for (size_t n = 1; n < n_seed; ++n) {
        float* p_starting = X + result[n] * n_feature;
        #pragma omp parallel for
        for (size_t i = 0; i < n_sample; ++i) {
            if (selected[i]) continue;
            update_min(&min_vals[i], euclid_distance_vector(p_starting, X + n_feature * i, n_feature));
        }
    }

    // 03. Main Algorithm
    for (size_t n = n_seed; n < n_result; ++n) {
        // Find sup of the minimum
        sup.val = -1.;
        sup.index = 0;
        #pragma omp parallel for reduction(maximum:sup)
        for (size_t i = 0; i < n_sample; ++i) {
            if (selected[i]) continue;
            if (min_vals[i] > sup.val) {
                sup.index = i;
                sup.val = min_vals[i];
            }
        }
        v_dist[n - 1] = sup.val;
        selected[sup.index] = true;
        result[n] = sup.index;
        float* p_starting = X + sup.index * n_feature;
        #pragma omp parallel for
        for (size_t i = 0; i < n_sample; ++i) {
            if (selected[i]) continue;
            update_min(&min_vals[i], euclid_distance_vector(p_starting, X + n_feature * i, n_feature));
        }
    }
    free(selected);
    free(min_vals);
}
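/* Usage sketch (not part of the original file): a tiny, hypothetical driver
 * for kennard_stone on a 3x3 symmetric distance matrix. With n_seed = 0 the
 * routine picks the two most distant points itself; here it selects samples
 * 0 and 2 (distance 4), then sample 1. The demo guard macro is made up. */
#ifdef KS_CPP_DEMO
int main(void) {
    float cdist[9] = {
        0.f, 1.f, 4.f,
        1.f, 0.f, 2.f,
        4.f, 2.f, 0.f
    };
    size_t seed[2] = {0, 0};
    size_t result[3] = {0};
    float v_dist[3] = {0};
    kennard_stone(cdist, seed, result, v_dist, 3, 0, 3);
    for (size_t i = 0; i < 3; ++i)
        printf("pick %zu: sample %zu\n", i, result[i]);
    return 0;
}
#endif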
atomic-17.c
// { dg-do run }

extern void abort (void);
int x = 6;

int
main ()
{
  int v, l = 2, s = 1;
  #pragma omp atomic seq_cst
  x = -3 + x;
  #pragma omp atomic read seq_cst
  v = x;
  if (v != 3)
    abort ();
  #pragma omp atomic seq_cst update
  x = 3 * 2 * 1 + x;
  #pragma omp atomic read, seq_cst
  v = x;
  if (v != 9)
    abort ();
  #pragma omp atomic seq_cst, capture
  v = x = x | 16;
  if (v != 25)
    abort ();
  #pragma omp atomic capture seq_cst
  v = x = x + 14 * 2 / 4;
  if (v != 32)
    abort ();
  #pragma omp atomic seq_cst capture
  v = x = 5 | x;
  if (v != 37)
    abort ();
  #pragma omp atomic capture, seq_cst
  v = x = 40 + 12 - 2 - 7 - x;
  if (v != 6)
    abort ();
  #pragma omp atomic seq_cst read
  v = x;
  if (v != 6)
    abort ();
  #pragma omp atomic capture seq_cst
  { v = x; x = 3 + x; }
  if (v != 6)
    abort ();
  #pragma omp atomic seq_cst capture
  { v = x; x = -1 * -1 * -1 * -1 - x; }
  if (v != 9)
    abort ();
  #pragma omp atomic read seq_cst
  v = x;
  if (v != -8)
    abort ();
  #pragma omp atomic capture, seq_cst
  { x = 2 * 2 - x; v = x; }
  if (v != 12)
    abort ();
  #pragma omp atomic seq_cst capture
  { x = 7 & x; v = x; }
  if (v != 4)
    abort ();
  #pragma omp atomic capture seq_cst
  { v = x; x = 6; }
  if (v != 4)
    abort ();
  #pragma omp atomic read, seq_cst
  v = x;
  if (v != 6)
    abort ();
  #pragma omp atomic capture seq_cst
  { v = x; x = 7 * 8 + 23; }
  if (v != 6)
    abort ();
  #pragma omp atomic seq_cst, read
  v = x;
  if (v != 79)
    abort ();
  #pragma omp atomic capture , seq_cst
  { v = x; x = 23 + 6 * 4; }
  if (v != 79)
    abort ();
  #pragma omp atomic read seq_cst
  v = x;
  if (v != 47)
    abort ();
  #pragma omp atomic seq_cst capture
  { v = x; x = l ? 17 : 12; }
  if (v != 47)
    abort ();
  #pragma omp atomic capture seq_cst
  { v = x; x = l = s++ + 3; }
  if (v != 17 || l != 4 || s != 2)
    abort ();
  #pragma omp atomic read seq_cst
  v = x;
  if (v != 4)
    abort ();
  return 0;
}
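/* Companion sketch (not from the original test): in the two-statement
 * capture form, the statement order decides whether the captured value is
 * the old or the new one — { v = x; x = expr; } captures the old value,
 * { x = expr; v = x; } the new. A minimal parallel check under that
 * reading; the function and guard names are illustrative. */
#ifdef ATOMIC_17_DEMO
int
atomic_capture_demo (void)
{
  int n = 0, ticket_sum = 0;
  #pragma omp parallel num_threads(4) reduction(+:ticket_sum)
  {
    int ticket;
    #pragma omp atomic capture
    { ticket = n; n = n + 1; } /* ticket gets the pre-increment value */
    ticket_sum += ticket;
  }
  return ticket_sum == 0 + 1 + 2 + 3; /* 1 on success */
}
#endif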
par_rap.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_parcsr_ls.h" #include "_hypre_utilities.h" /*-------------------------------------------------------------------------- * hypre_BoomerAMGBuildCoarseOperator *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildCoarseOperator( hypre_ParCSRMatrix *RT, hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *P, hypre_ParCSRMatrix **RAP_ptr ) { hypre_BoomerAMGBuildCoarseOperatorKT( RT, A, P, 0, RAP_ptr); return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGBuildCoarseOperatorKT( hypre_ParCSRMatrix *RT, hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *P, HYPRE_Int keepTranspose, hypre_ParCSRMatrix **RAP_ptr ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RAP] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *RT_diag = hypre_ParCSRMatrixDiag(RT); hypre_CSRMatrix *RT_offd = hypre_ParCSRMatrixOffd(RT); HYPRE_Int num_cols_diag_RT = hypre_CSRMatrixNumCols(RT_diag); HYPRE_Int num_cols_offd_RT = hypre_CSRMatrixNumCols(RT_offd); HYPRE_Int num_rows_offd_RT = hypre_CSRMatrixNumRows(RT_offd); hypre_ParCSRCommPkg *comm_pkg_RT = hypre_ParCSRMatrixCommPkg(RT); HYPRE_Int num_recvs_RT = 0; HYPRE_Int num_sends_RT = 0; HYPRE_Int *send_map_starts_RT; HYPRE_Int *send_map_elmts_RT; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); hypre_CSRMatrix *P_diag = hypre_ParCSRMatrixDiag(P); HYPRE_Real *P_diag_data = hypre_CSRMatrixData(P_diag); HYPRE_Int *P_diag_i = hypre_CSRMatrixI(P_diag); HYPRE_Int *P_diag_j = hypre_CSRMatrixJ(P_diag); hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P); HYPRE_BigInt *col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P); HYPRE_Real *P_offd_data = hypre_CSRMatrixData(P_offd); HYPRE_Int *P_offd_i = hypre_CSRMatrixI(P_offd); HYPRE_Int *P_offd_j = hypre_CSRMatrixJ(P_offd); HYPRE_BigInt first_col_diag_P = hypre_ParCSRMatrixFirstColDiag(P); HYPRE_BigInt last_col_diag_P; HYPRE_Int num_cols_diag_P = hypre_CSRMatrixNumCols(P_diag); HYPRE_Int num_cols_offd_P = hypre_CSRMatrixNumCols(P_offd); HYPRE_BigInt *coarse_partitioning = hypre_ParCSRMatrixColStarts(P); HYPRE_BigInt *RT_partitioning = hypre_ParCSRMatrixColStarts(RT); hypre_ParCSRMatrix *RAP; HYPRE_BigInt *col_map_offd_RAP = NULL; HYPRE_BigInt *new_col_map_offd_RAP = NULL; hypre_CSRMatrix *RAP_int = NULL; HYPRE_Real *RAP_int_data; HYPRE_Int *RAP_int_i; HYPRE_BigInt *RAP_int_j; hypre_CSRMatrix *RAP_ext; HYPRE_Real *RAP_ext_data = NULL; HYPRE_Int *RAP_ext_i = NULL; HYPRE_BigInt *RAP_ext_j = NULL; hypre_CSRMatrix *RAP_diag; HYPRE_Real *RAP_diag_data; HYPRE_Int *RAP_diag_i; HYPRE_Int *RAP_diag_j; hypre_CSRMatrix *RAP_offd; HYPRE_Real *RAP_offd_data = NULL; HYPRE_Int *RAP_offd_i = NULL; HYPRE_Int *RAP_offd_j = NULL; HYPRE_Int 
RAP_size; HYPRE_Int RAP_ext_size; HYPRE_Int RAP_diag_size; HYPRE_Int RAP_offd_size; HYPRE_Int P_ext_diag_size; HYPRE_Int P_ext_offd_size; HYPRE_BigInt first_col_diag_RAP; HYPRE_BigInt last_col_diag_RAP; HYPRE_Int num_cols_offd_RAP = 0; hypre_CSRMatrix *R_diag; HYPRE_Real *R_diag_data; HYPRE_Int *R_diag_i; HYPRE_Int *R_diag_j; hypre_CSRMatrix *R_offd; HYPRE_Real *R_offd_data; HYPRE_Int *R_offd_i; HYPRE_Int *R_offd_j; HYPRE_Real *RA_diag_data_array = NULL; HYPRE_Int *RA_diag_j_array = NULL; HYPRE_Real *RA_offd_data_array = NULL; HYPRE_Int *RA_offd_j_array = NULL; hypre_CSRMatrix *Ps_ext; HYPRE_Real *Ps_ext_data; HYPRE_Int *Ps_ext_i; HYPRE_BigInt *Ps_ext_j; HYPRE_Real *P_ext_diag_data = NULL; HYPRE_Int *P_ext_diag_i = NULL; HYPRE_Int *P_ext_diag_j = NULL; HYPRE_Real *P_ext_offd_data = NULL; HYPRE_Int *P_ext_offd_i = NULL; HYPRE_Int *P_ext_offd_j = NULL; HYPRE_BigInt *P_big_offd_j = NULL; HYPRE_BigInt *col_map_offd_Pext; HYPRE_Int *map_P_to_Pext = NULL; HYPRE_Int *map_P_to_RAP = NULL; HYPRE_Int *map_Pext_to_RAP = NULL; HYPRE_Int *P_marker; HYPRE_Int **P_mark_array; HYPRE_Int **A_mark_array; HYPRE_Int *A_marker; HYPRE_BigInt *temp; HYPRE_BigInt n_coarse, n_coarse_RT; HYPRE_Int square = 1; HYPRE_Int num_cols_offd_Pext = 0; HYPRE_Int ic, i, j, k; HYPRE_Int i1, i2, i3, ii, ns, ne, size, rest; HYPRE_Int cnt = 0; /*value; */ HYPRE_Int jj1, jj2, jj3, jcol; HYPRE_Int *jj_count, *jj_cnt_diag, *jj_cnt_offd; HYPRE_Int jj_counter, jj_count_diag, jj_count_offd; HYPRE_Int jj_row_begining, jj_row_begin_diag, jj_row_begin_offd; HYPRE_Int start_indexing = 0; /* start indexing for RAP_data at 0 */ HYPRE_Int num_nz_cols_A; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Real r_entry; HYPRE_Real r_a_product; HYPRE_Real r_a_p_product; HYPRE_Real zero = 0.0; HYPRE_Int *prefix_sum_workspace; /*----------------------------------------------------------------------- * Copy ParCSRMatrix RT into CSRMatrix R so that we have row-wise access * to restriction . *-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm,&num_procs); num_threads = hypre_NumThreads(); if (comm_pkg_RT) { num_recvs_RT = hypre_ParCSRCommPkgNumRecvs(comm_pkg_RT); num_sends_RT = hypre_ParCSRCommPkgNumSends(comm_pkg_RT); send_map_starts_RT =hypre_ParCSRCommPkgSendMapStarts(comm_pkg_RT); send_map_elmts_RT = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_RT); } else if (num_procs > 1) { hypre_MatvecCommPkgCreate(RT); comm_pkg_RT = hypre_ParCSRMatrixCommPkg(RT); num_recvs_RT = hypre_ParCSRCommPkgNumRecvs(comm_pkg_RT); num_sends_RT = hypre_ParCSRCommPkgNumSends(comm_pkg_RT); send_map_starts_RT =hypre_ParCSRCommPkgSendMapStarts(comm_pkg_RT); send_map_elmts_RT = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_RT); } hypre_CSRMatrixTranspose(RT_diag,&R_diag,1); if (num_cols_offd_RT) { hypre_CSRMatrixTranspose(RT_offd,&R_offd,1); R_offd_data = hypre_CSRMatrixData(R_offd); R_offd_i = hypre_CSRMatrixI(R_offd); R_offd_j = hypre_CSRMatrixJ(R_offd); } /*----------------------------------------------------------------------- * Access the CSR vectors for R. Also get sizes of fine and * coarse grids. 
*-----------------------------------------------------------------------*/ R_diag_data = hypre_CSRMatrixData(R_diag); R_diag_i = hypre_CSRMatrixI(R_diag); R_diag_j = hypre_CSRMatrixJ(R_diag); n_coarse = hypre_ParCSRMatrixGlobalNumCols(P); num_nz_cols_A = num_cols_diag_A + num_cols_offd_A; n_coarse_RT = hypre_ParCSRMatrixGlobalNumCols(RT); if (n_coarse != n_coarse_RT) square = 0; /*----------------------------------------------------------------------- * Generate Ps_ext, i.e. portion of P that is stored on neighbor procs * and needed locally for triple matrix product *-----------------------------------------------------------------------*/ #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_UnorderedIntMap send_map_elmts_RT_inverse_map; HYPRE_Int *send_map_elmts_starts_RT_aggregated = NULL; HYPRE_Int *send_map_elmts_RT_aggregated = NULL; HYPRE_Int send_map_elmts_RT_inverse_map_initialized = num_sends_RT > 0 && send_map_starts_RT[num_sends_RT] - send_map_starts_RT[0] > 0; if (send_map_elmts_RT_inverse_map_initialized) { hypre_UnorderedIntSet send_map_elmts_set; hypre_UnorderedIntSetCreate(&send_map_elmts_set, 2*(send_map_starts_RT[num_sends_RT] - send_map_starts_RT[0]), 16*hypre_NumThreads()); #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = send_map_starts_RT[0]; i < send_map_starts_RT[num_sends_RT]; i++) { HYPRE_Int key = send_map_elmts_RT[i]; hypre_UnorderedIntSetPut(&send_map_elmts_set, key); } HYPRE_Int send_map_elmts_unique_size; HYPRE_Int *send_map_elmts_unique = hypre_UnorderedIntSetCopyToArray(&send_map_elmts_set, &send_map_elmts_unique_size); hypre_UnorderedIntSetDestroy(&send_map_elmts_set); hypre_UnorderedIntMapCreate(&send_map_elmts_RT_inverse_map, 2*send_map_elmts_unique_size, 16*hypre_NumThreads()); #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = 0; i < send_map_elmts_unique_size; i++) { hypre_UnorderedIntMapPutIfAbsent(&send_map_elmts_RT_inverse_map, send_map_elmts_unique[i], i); } hypre_TFree(send_map_elmts_unique, HYPRE_MEMORY_HOST); send_map_elmts_starts_RT_aggregated = hypre_TAlloc(HYPRE_Int, send_map_elmts_unique_size + 1, HYPRE_MEMORY_HOST); send_map_elmts_RT_aggregated = hypre_TAlloc(HYPRE_Int, send_map_starts_RT[num_sends_RT], HYPRE_MEMORY_HOST); #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = 0; i < send_map_elmts_unique_size; i++) { send_map_elmts_starts_RT_aggregated[i] = 0; } #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = send_map_starts_RT[0]; i < send_map_starts_RT[num_sends_RT]; i++) { HYPRE_Int idx = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, send_map_elmts_RT[i]); #pragma omp atomic send_map_elmts_starts_RT_aggregated[idx]++; } for (i = 0; i < send_map_elmts_unique_size - 1; i++) { send_map_elmts_starts_RT_aggregated[i + 1] += send_map_elmts_starts_RT_aggregated[i]; } send_map_elmts_starts_RT_aggregated[send_map_elmts_unique_size] = send_map_starts_RT[num_sends_RT]; #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = send_map_starts_RT[num_sends_RT] - 1; i >= send_map_starts_RT[0]; i--) { HYPRE_Int idx = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, send_map_elmts_RT[i]); HYPRE_Int offset = hypre_fetch_and_add(send_map_elmts_starts_RT_aggregated + idx, -1) - 1; send_map_elmts_RT_aggregated[offset] = i; } } #endif /* HYPRE_CONCURRENT_HOPSCOTCH */ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] -= hypre_MPI_Wtime(); #endif if (num_procs > 1) { Ps_ext = hypre_ParCSRMatrixExtractBExt(P,A,1); Ps_ext_data = 
hypre_CSRMatrixData(Ps_ext); Ps_ext_i = hypre_CSRMatrixI(Ps_ext); Ps_ext_j = hypre_CSRMatrixBigJ(Ps_ext); } P_ext_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST); P_ext_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST); P_ext_diag_i[0] = 0; P_ext_offd_i[0] = 0; P_ext_diag_size = 0; P_ext_offd_size = 0; last_col_diag_P = first_col_diag_P + (HYPRE_BigInt) num_cols_diag_P - 1; /*HYPRE_Int prefix_sum_workspace[2*(num_threads + 1)];*/ prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(num_threads + 1), HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j) #endif /* This threading causes problem, maybe the prefix_sum in combination with BigInt? */ { HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_A); HYPRE_Int P_ext_diag_size_private = 0; HYPRE_Int P_ext_offd_size_private = 0; for (i = i_begin; i < i_end; i++) { for (j=Ps_ext_i[i]; j < Ps_ext_i[i+1]; j++) if (Ps_ext_j[j] < first_col_diag_P || Ps_ext_j[j] > last_col_diag_P) P_ext_offd_size_private++; else P_ext_diag_size_private++; } hypre_prefix_sum_pair(&P_ext_diag_size_private, &P_ext_diag_size, &P_ext_offd_size_private, &P_ext_offd_size, prefix_sum_workspace); #ifdef HYPRE_USING_OPENMP #pragma omp master #endif { if (P_ext_diag_size) { P_ext_diag_j = hypre_CTAlloc(HYPRE_Int, P_ext_diag_size, HYPRE_MEMORY_HOST); P_ext_diag_data = hypre_CTAlloc(HYPRE_Real, P_ext_diag_size, HYPRE_MEMORY_HOST); } if (P_ext_offd_size) { P_ext_offd_j = hypre_CTAlloc(HYPRE_Int, P_ext_offd_size, HYPRE_MEMORY_HOST); P_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, P_ext_offd_size, HYPRE_MEMORY_HOST); P_ext_offd_data = hypre_CTAlloc(HYPRE_Real, P_ext_offd_size, HYPRE_MEMORY_HOST); //temp = hypre_CTAlloc(HYPRE_BigInt, P_ext_offd_size+num_cols_offd_P, HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i = i_begin; i < i_end; i++) { for (j=Ps_ext_i[i]; j < Ps_ext_i[i+1]; j++) { HYPRE_BigInt value = Ps_ext_j[j]; if (value < first_col_diag_P || value > last_col_diag_P) { //Ps_ext_j[P_ext_offd_size_private] = value; //temp[P_ext_offd_size_private] = value; P_big_offd_j[P_ext_offd_size_private] = value; P_ext_offd_data[P_ext_offd_size_private++] = Ps_ext_data[j]; } else { P_ext_diag_j[P_ext_diag_size_private] = (HYPRE_Int)(Ps_ext_j[j] - first_col_diag_P); P_ext_diag_data[P_ext_diag_size_private++] = Ps_ext_data[j]; } } P_ext_diag_i[i+1] = P_ext_diag_size_private; P_ext_offd_i[i+1] = P_ext_offd_size_private; } } /* omp parallel */ hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_CSRMatrixDestroy(Ps_ext); Ps_ext = NULL; } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (P_ext_offd_size || num_cols_offd_P) { hypre_UnorderedBigIntSet found_set; hypre_UnorderedBigIntSetCreate(&found_set, P_ext_offd_size + num_cols_offd_P, 16*hypre_NumThreads()); #pragma omp parallel private(i) { #pragma omp for HYPRE_SMP_SCHEDULE for (i = 0; i < P_ext_offd_size; i++) { //hypre_UnorderedBigIntSetPut(&found_set, Ps_ext_j[i]); hypre_UnorderedBigIntSetPut(&found_set, P_big_offd_j[i]); } #pragma omp for HYPRE_SMP_SCHEDULE for (i = 0; i < num_cols_offd_P; i++) { hypre_UnorderedBigIntSetPut(&found_set, col_map_offd_P[i]); } } /* omp parallel */ /* Warning on getting temp right !!!!! 
*/ temp = hypre_UnorderedBigIntSetCopyToArray(&found_set, &num_cols_offd_Pext); hypre_UnorderedBigIntSetDestroy(&found_set); hypre_UnorderedBigIntMap col_map_offd_Pext_inverse; hypre_big_sort_and_create_inverse_map(temp, num_cols_offd_Pext, &col_map_offd_Pext, &col_map_offd_Pext_inverse); #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i=0 ; i < P_ext_offd_size; i++) //Ps_ext_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_Pext_inverse, Ps_ext_j[i]); P_ext_offd_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_Pext_inverse, P_big_offd_j[i]); if (num_cols_offd_Pext) hypre_UnorderedBigIntMapDestroy(&col_map_offd_Pext_inverse); } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ if (P_ext_offd_size || num_cols_offd_P) { temp = hypre_CTAlloc(HYPRE_BigInt, P_ext_offd_size+num_cols_offd_P, HYPRE_MEMORY_HOST); for (i=0; i < P_ext_offd_size; i++) //Ps_ext_j[i] = temp[i]; //temp[i] = Ps_ext_j[i]; temp[i] = P_big_offd_j[i]; cnt = P_ext_offd_size; for (i=0; i < num_cols_offd_P; i++) temp[cnt++] = col_map_offd_P[i]; } if (cnt) { hypre_BigQsort0(temp, 0, cnt-1); num_cols_offd_Pext = 1; HYPRE_BigInt value = temp[0]; for (i=1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_Pext++] = value; } } } if (num_cols_offd_Pext) col_map_offd_Pext = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_Pext, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_Pext; i++) col_map_offd_Pext[i] = temp[i]; if (P_ext_offd_size || num_cols_offd_P) hypre_TFree(temp, HYPRE_MEMORY_HOST); /*if (P_ext_offd_size) P_ext_offd_j = hypre_CTAlloc(HYPRE_Int, P_ext_offd_size, HYPRE_MEMORY_HOST);*/ for (i=0 ; i < P_ext_offd_size; i++) P_ext_offd_j[i] = hypre_BigBinarySearch(col_map_offd_Pext, //Ps_ext_j[i], P_big_offd_j[i], num_cols_offd_Pext); #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ if (P_ext_offd_size) hypre_TFree(P_big_offd_j, HYPRE_MEMORY_HOST); /*if (num_procs > 1) { hypre_CSRMatrixDestroy(Ps_ext); Ps_ext = NULL; }*/ if (num_cols_offd_P) { map_P_to_Pext = hypre_CTAlloc(HYPRE_Int, num_cols_offd_P, HYPRE_MEMORY_HOST); cnt = 0; for (i=0; i < num_cols_offd_Pext; i++) if (col_map_offd_Pext[i] == col_map_offd_P[cnt]) { map_P_to_Pext[cnt++] = i; if (cnt == num_cols_offd_P) break; } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] += hypre_MPI_Wtime(); #endif /*----------------------------------------------------------------------- * First Pass: Determine size of RAP_int and set up RAP_int_i if there * are more than one processor and nonzero elements in R_offd *-----------------------------------------------------------------------*/ P_mark_array = hypre_CTAlloc(HYPRE_Int *, num_threads, HYPRE_MEMORY_HOST); A_mark_array = hypre_CTAlloc(HYPRE_Int *, num_threads, HYPRE_MEMORY_HOST); if (num_cols_offd_RT) { jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker) HYPRE_SMP_SCHEDULE #endif for (ii = 0; ii < num_threads; ii++) { size = num_cols_offd_RT/num_threads; rest = num_cols_offd_RT - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } /*----------------------------------------------------------------------- * Allocate marker arrays. 
*-----------------------------------------------------------------------*/ if (num_cols_offd_Pext || num_cols_diag_P) { P_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_cols_diag_P+num_cols_offd_Pext, HYPRE_MEMORY_HOST); P_marker = P_mark_array[ii]; } A_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_nz_cols_A, HYPRE_MEMORY_HOST); A_marker = A_mark_array[ii]; /*----------------------------------------------------------------------- * Initialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; for (ic = 0; ic < num_cols_diag_P+num_cols_offd_Pext; ic++) { P_marker[ic] = -1; } for (i = 0; i < num_nz_cols_A; i++) { A_marker[i] = -1; } /*----------------------------------------------------------------------- * Loop over exterior c-points *-----------------------------------------------------------------------*/ for (ic = ns; ic < ne; ic++) { jj_row_begining = jj_counter; /*-------------------------------------------------------------------- * Loop over entries in row ic of R_offd. *--------------------------------------------------------------------*/ for (jj1 = R_offd_i[ic]; jj1 < R_offd_i[ic+1]; jj1++) { i1 = R_offd_j[jj1]; /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. *-----------------------------------------------------------------*/ for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++) { i2 = A_offd_j[jj2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (A_marker[i2] != ic) { /*----------------------------------------------------------- * Mark i2 as visited. *-----------------------------------------------------------*/ A_marker[i2] = ic; /*----------------------------------------------------------- * Loop over entries in row i2 of P_ext. *-----------------------------------------------------------*/ for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++) { i3 = P_ext_diag_j[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; jj_counter++; } } for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++) { i3 = P_ext_offd_j[jj3] + num_cols_diag_P; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; jj_counter++; } } } } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++) { i2 = A_diag_j[jj2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (A_marker[i2+num_cols_offd_A] != ic) { /*----------------------------------------------------------- * Mark i2 as visited. 
*-----------------------------------------------------------*/ A_marker[i2+num_cols_offd_A] = ic; /*----------------------------------------------------------- * Loop over entries in row i2 of P_diag. *-----------------------------------------------------------*/ for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++) { i3 = P_diag_j[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; jj_counter++; } } /*----------------------------------------------------------- * Loop over entries in row i2 of P_offd. *-----------------------------------------------------------*/ for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++) { i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; jj_counter++; } } } } } } jj_count[ii] = jj_counter; } /*----------------------------------------------------------------------- * Allocate RAP_int_data and RAP_int_j arrays. *-----------------------------------------------------------------------*/ for (i = 0; i < num_threads-1; i++) jj_count[i+1] += jj_count[i]; RAP_size = jj_count[num_threads-1]; RAP_int_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_RT+1, HYPRE_MEMORY_HOST); RAP_int_data = hypre_CTAlloc(HYPRE_Real, RAP_size, HYPRE_MEMORY_HOST); RAP_int_j = hypre_CTAlloc(HYPRE_BigInt, RAP_size, HYPRE_MEMORY_HOST); RAP_int_i[num_cols_offd_RT] = RAP_size; /*----------------------------------------------------------------------- * Second Pass: Fill in RAP_int_data and RAP_int_j. *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE #endif for (ii = 0; ii < num_threads; ii++) { size = num_cols_offd_RT/num_threads; rest = num_cols_offd_RT - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } /*----------------------------------------------------------------------- * Initialize some stuff. *-----------------------------------------------------------------------*/ if (num_cols_offd_Pext || num_cols_diag_P) P_marker = P_mark_array[ii]; A_marker = A_mark_array[ii]; jj_counter = start_indexing; if (ii > 0) jj_counter = jj_count[ii-1]; for (ic = 0; ic < num_cols_diag_P+num_cols_offd_Pext; ic++) { P_marker[ic] = -1; } for (i = 0; i < num_nz_cols_A; i++) { A_marker[i] = -1; } /*----------------------------------------------------------------------- * Loop over exterior c-points. *-----------------------------------------------------------------------*/ for (ic = ns; ic < ne; ic++) { jj_row_begining = jj_counter; RAP_int_i[ic] = jj_counter; /*-------------------------------------------------------------------- * Loop over entries in row ic of R_offd. 
*--------------------------------------------------------------------*/ for (jj1 = R_offd_i[ic]; jj1 < R_offd_i[ic+1]; jj1++) { i1 = R_offd_j[jj1]; r_entry = R_offd_data[jj1]; /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. *-----------------------------------------------------------------*/ for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++) { i2 = A_offd_j[jj2]; r_a_product = r_entry * A_offd_data[jj2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (A_marker[i2] != ic) { /*----------------------------------------------------------- * Mark i2 as visited. *-----------------------------------------------------------*/ A_marker[i2] = ic; /*----------------------------------------------------------- * Loop over entries in row i2 of P_ext. *-----------------------------------------------------------*/ for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++) { i3 = P_ext_diag_j[jj3]; r_a_p_product = r_a_product * P_ext_diag_data[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; RAP_int_data[jj_counter] = r_a_p_product; RAP_int_j[jj_counter] = (HYPRE_BigInt)i3 + first_col_diag_P; jj_counter++; } else { RAP_int_data[P_marker[i3]] += r_a_p_product; } } for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++) { i3 = P_ext_offd_j[jj3] + num_cols_diag_P; r_a_p_product = r_a_product * P_ext_offd_data[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; RAP_int_data[jj_counter] = r_a_p_product; RAP_int_j[jj_counter] = col_map_offd_Pext[i3-num_cols_diag_P]; jj_counter++; } else { RAP_int_data[P_marker[i3]] += r_a_p_product; } } } /*-------------------------------------------------------------- * If i2 is previously visited ( A_marker[12]=ic ) it yields * no new entries in RAP and can just add new contributions. *--------------------------------------------------------------*/ else { for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++) { i3 = P_ext_diag_j[jj3]; r_a_p_product = r_a_product * P_ext_diag_data[jj3]; RAP_int_data[P_marker[i3]] += r_a_p_product; } for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++) { i3 = P_ext_offd_j[jj3] + num_cols_diag_P; r_a_p_product = r_a_product * P_ext_offd_data[jj3]; RAP_int_data[P_marker[i3]] += r_a_p_product; } } } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++) { i2 = A_diag_j[jj2]; r_a_product = r_entry * A_diag_data[jj2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. 
*--------------------------------------------------------------*/ if (A_marker[i2+num_cols_offd_A] != ic) { /*----------------------------------------------------------- * Mark i2 as visited. *-----------------------------------------------------------*/ A_marker[i2+num_cols_offd_A] = ic; /*----------------------------------------------------------- * Loop over entries in row i2 of P_diag. *-----------------------------------------------------------*/ for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++) { i3 = P_diag_j[jj3]; r_a_p_product = r_a_product * P_diag_data[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; RAP_int_data[jj_counter] = r_a_p_product; RAP_int_j[jj_counter] = (HYPRE_BigInt)i3 + first_col_diag_P; jj_counter++; } else { RAP_int_data[P_marker[i3]] += r_a_p_product; } } for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++) { i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P; r_a_p_product = r_a_product * P_offd_data[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; RAP_int_data[jj_counter] = r_a_p_product; RAP_int_j[jj_counter] = col_map_offd_Pext[i3-num_cols_diag_P]; jj_counter++; } else { RAP_int_data[P_marker[i3]] += r_a_p_product; } } } /*-------------------------------------------------------------- * If i2 is previously visited ( A_marker[12]=ic ) it yields * no new entries in RAP and can just add new contributions. 
*--------------------------------------------------------------*/ else { for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++) { i3 = P_diag_j[jj3]; r_a_p_product = r_a_product * P_diag_data[jj3]; RAP_int_data[P_marker[i3]] += r_a_p_product; } for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++) { i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P; r_a_p_product = r_a_product * P_offd_data[jj3]; RAP_int_data[P_marker[i3]] += r_a_p_product; } } } } } if (num_cols_offd_Pext || num_cols_diag_P) hypre_TFree(P_mark_array[ii], HYPRE_MEMORY_HOST); hypre_TFree(A_mark_array[ii], HYPRE_MEMORY_HOST); } RAP_int = hypre_CSRMatrixCreate(num_cols_offd_RT,num_rows_offd_RT,RAP_size); hypre_CSRMatrixMemoryLocation(RAP_int) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI(RAP_int) = RAP_int_i; hypre_CSRMatrixBigJ(RAP_int) = RAP_int_j; hypre_CSRMatrixData(RAP_int) = RAP_int_data; hypre_TFree(jj_count, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] -= hypre_MPI_Wtime(); #endif RAP_ext_size = 0; if (num_sends_RT || num_recvs_RT) { void *request; hypre_ExchangeExternalRowsInit(RAP_int, comm_pkg_RT, &request); RAP_ext = hypre_ExchangeExternalRowsWait(request); RAP_ext_i = hypre_CSRMatrixI(RAP_ext); RAP_ext_j = hypre_CSRMatrixBigJ(RAP_ext); RAP_ext_data = hypre_CSRMatrixData(RAP_ext); RAP_ext_size = RAP_ext_i[hypre_CSRMatrixNumRows(RAP_ext)]; } if (num_cols_offd_RT) { hypre_CSRMatrixDestroy(RAP_int); RAP_int = NULL; } RAP_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_diag_RT+1, HYPRE_MEMORY_DEVICE); RAP_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_diag_RT+1, HYPRE_MEMORY_DEVICE); first_col_diag_RAP = first_col_diag_P; last_col_diag_RAP = first_col_diag_P + num_cols_diag_P - 1; /*----------------------------------------------------------------------- * check for new nonzero columns in RAP_offd generated through RAP_ext *-----------------------------------------------------------------------*/ #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_UnorderedBigIntMap col_map_offd_RAP_inverse; if (RAP_ext_size || num_cols_offd_Pext) { hypre_UnorderedBigIntSet found_set; hypre_UnorderedBigIntSetCreate(&found_set, 2*(RAP_ext_size + num_cols_offd_Pext), 16*hypre_NumThreads()); cnt = 0; #pragma omp parallel private(i) { #pragma omp for HYPRE_SMP_SCHEDULE for (i = 0; i < RAP_ext_size; i++) { if (RAP_ext_j[i] < first_col_diag_RAP || RAP_ext_j[i] > last_col_diag_RAP) hypre_UnorderedBigIntSetPut(&found_set, RAP_ext_j[i]); } #pragma omp for HYPRE_SMP_SCHEDULE for (i = 0; i < num_cols_offd_Pext; i++) { hypre_UnorderedBigIntSetPut(&found_set, col_map_offd_Pext[i]); } } /* omp parallel */ temp = hypre_UnorderedBigIntSetCopyToArray(&found_set, &num_cols_offd_RAP); hypre_UnorderedBigIntSetDestroy(&found_set); hypre_big_sort_and_create_inverse_map(temp, num_cols_offd_RAP, &col_map_offd_RAP, &col_map_offd_RAP_inverse); } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ if (RAP_ext_size || num_cols_offd_Pext) { temp = hypre_CTAlloc(HYPRE_BigInt, RAP_ext_size+num_cols_offd_Pext, HYPRE_MEMORY_HOST); cnt = 0; for (i=0; i < RAP_ext_size; i++) if (RAP_ext_j[i] < first_col_diag_RAP || RAP_ext_j[i] > last_col_diag_RAP) temp[cnt++] = RAP_ext_j[i]; for (i=0; i < num_cols_offd_Pext; i++) temp[cnt++] = col_map_offd_Pext[i]; if (cnt) { hypre_BigQsort0(temp,0,cnt-1); HYPRE_BigInt value = temp[0]; num_cols_offd_RAP = 1; for (i=1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_RAP++] = value; } } } /* now evaluate col_map_offd_RAP */ if 
(num_cols_offd_RAP) col_map_offd_RAP = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_RAP, HYPRE_MEMORY_HOST); for (i=0 ; i < num_cols_offd_RAP; i++) col_map_offd_RAP[i] = temp[i]; hypre_TFree(temp, HYPRE_MEMORY_HOST); } #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ if (num_cols_offd_P) { map_P_to_RAP = hypre_TAlloc(HYPRE_Int, num_cols_offd_P, HYPRE_MEMORY_HOST); cnt = 0; for (i=0; i < num_cols_offd_RAP; i++) if (col_map_offd_RAP[i] == col_map_offd_P[cnt]) { map_P_to_RAP[cnt++] = i; if (cnt == num_cols_offd_P) break; } } if (num_cols_offd_Pext) { map_Pext_to_RAP = hypre_TAlloc(HYPRE_Int, num_cols_offd_Pext, HYPRE_MEMORY_HOST); cnt = 0; for (i=0; i < num_cols_offd_RAP; i++) if (col_map_offd_RAP[i] == col_map_offd_Pext[cnt]) { map_Pext_to_RAP[cnt++] = i; if (cnt == num_cols_offd_Pext) break; } } /*----------------------------------------------------------------------- * Convert RAP_ext column indices *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i=0; i < RAP_ext_size; i++) if (RAP_ext_j[i] < first_col_diag_RAP || RAP_ext_j[i] > last_col_diag_RAP) RAP_ext_j[i] = (HYPRE_BigInt)num_cols_diag_P #ifdef HYPRE_CONCURRENT_HOPSCOTCH +(HYPRE_BigInt)hypre_UnorderedBigIntMapGet(&col_map_offd_RAP_inverse, RAP_ext_j[i]); #else +(HYPRE_BigInt)hypre_BigBinarySearch(col_map_offd_RAP, RAP_ext_j[i],num_cols_offd_RAP); #endif else RAP_ext_j[i] -= first_col_diag_RAP; #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (num_cols_offd_RAP) hypre_UnorderedBigIntMapDestroy(&col_map_offd_RAP_inverse); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] += hypre_MPI_Wtime(); #endif /* need to allocate new P_marker etc. and make further changes */ /*----------------------------------------------------------------------- * Initialize some stuff. *-----------------------------------------------------------------------*/ jj_cnt_diag = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_cnt_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker) HYPRE_SMP_SCHEDULE #endif for (ii = 0; ii < num_threads; ii++) { size = num_cols_diag_RT/num_threads; rest = num_cols_diag_RT - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } P_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_cols_diag_P+num_cols_offd_RAP, HYPRE_MEMORY_HOST); A_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_nz_cols_A, HYPRE_MEMORY_HOST); P_marker = P_mark_array[ii]; A_marker = A_mark_array[ii]; jj_count_diag = start_indexing; jj_count_offd = start_indexing; for (ic = 0; ic < num_cols_diag_P+num_cols_offd_RAP; ic++) { P_marker[ic] = -1; } for (i = 0; i < num_nz_cols_A; i++) { A_marker[i] = -1; } /*----------------------------------------------------------------------- * Loop over interior c-points. *-----------------------------------------------------------------------*/ for (ic = ns; ic < ne; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, RAP_{ic,ic}. 
and for all points * being added to row ic of RAP_diag and RAP_offd through RAP_ext *--------------------------------------------------------------------*/ jj_row_begin_diag = jj_count_diag; jj_row_begin_offd = jj_count_offd; if (square) P_marker[ic] = jj_count_diag++; #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (send_map_elmts_RT_inverse_map_initialized) { HYPRE_Int i = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, ic); if (i != -1) { for (j = send_map_elmts_starts_RT_aggregated[i]; j < send_map_elmts_starts_RT_aggregated[i + 1]; j++) { HYPRE_Int jj = send_map_elmts_RT_aggregated[j]; for (k=RAP_ext_i[jj]; k < RAP_ext_i[jj+1]; k++) { jcol = (HYPRE_Int)RAP_ext_j[k]; if (jcol < num_cols_diag_P) { if (P_marker[jcol] < jj_row_begin_diag) { P_marker[jcol] = jj_count_diag; jj_count_diag++; } } else { if (P_marker[jcol] < jj_row_begin_offd) { P_marker[jcol] = jj_count_offd; jj_count_offd++; } } } } } // if (set) } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ for (i=0; i < num_sends_RT; i++) for (j = send_map_starts_RT[i]; j < send_map_starts_RT[i+1]; j++) if (send_map_elmts_RT[j] == ic) { for (k=RAP_ext_i[j]; k < RAP_ext_i[j+1]; k++) { jcol = (HYPRE_Int) RAP_ext_j[k]; if (jcol < num_cols_diag_P) { if (P_marker[jcol] < jj_row_begin_diag) { P_marker[jcol] = jj_count_diag; jj_count_diag++; } } else { if (P_marker[jcol] < jj_row_begin_offd) { P_marker[jcol] = jj_count_offd; jj_count_offd++; } } } break; } #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ /*-------------------------------------------------------------------- * Loop over entries in row ic of R_diag. *--------------------------------------------------------------------*/ for (jj1 = R_diag_i[ic]; jj1 < R_diag_i[ic+1]; jj1++) { i1 = R_diag_j[jj1]; /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. *-----------------------------------------------------------------*/ if (num_cols_offd_A) { for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++) { i2 = A_offd_j[jj2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (A_marker[i2] != ic) { /*----------------------------------------------------------- * Mark i2 as visited. *-----------------------------------------------------------*/ A_marker[i2] = ic; /*----------------------------------------------------------- * Loop over entries in row i2 of P_ext. *-----------------------------------------------------------*/ for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++) { i3 = P_ext_diag_j[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begin_diag) { P_marker[i3] = jj_count_diag; jj_count_diag++; } } for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++) { i3 = map_Pext_to_RAP[P_ext_offd_j[jj3]]+num_cols_diag_P; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. 
*--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begin_offd) { P_marker[i3] = jj_count_offd; jj_count_offd++; } } } } } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++) { i2 = A_diag_j[jj2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (A_marker[i2+num_cols_offd_A] != ic) { /*----------------------------------------------------------- * Mark i2 as visited. *-----------------------------------------------------------*/ A_marker[i2+num_cols_offd_A] = ic; /*----------------------------------------------------------- * Loop over entries in row i2 of P_diag. *-----------------------------------------------------------*/ for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++) { i3 = P_diag_j[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begin_diag) { P_marker[i3] = jj_count_diag; jj_count_diag++; } } /*----------------------------------------------------------- * Loop over entries in row i2 of P_offd. *-----------------------------------------------------------*/ if (num_cols_offd_P) { for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++) { i3 = map_P_to_RAP[P_offd_j[jj3]] + num_cols_diag_P; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begin_offd) { P_marker[i3] = jj_count_offd; jj_count_offd++; } } } } } } /*-------------------------------------------------------------------- * Set RAP_diag_i and RAP_offd_i for this row. *--------------------------------------------------------------------*/ /* RAP_diag_i[ic] = jj_row_begin_diag; RAP_offd_i[ic] = jj_row_begin_offd; */ } jj_cnt_diag[ii] = jj_count_diag; jj_cnt_offd[ii] = jj_count_offd; } for (i=0; i < num_threads-1; i++) { jj_cnt_diag[i+1] += jj_cnt_diag[i]; jj_cnt_offd[i+1] += jj_cnt_offd[i]; } jj_count_diag = jj_cnt_diag[num_threads-1]; jj_count_offd = jj_cnt_offd[num_threads-1]; RAP_diag_i[num_cols_diag_RT] = jj_count_diag; RAP_offd_i[num_cols_diag_RT] = jj_count_offd; /*----------------------------------------------------------------------- * Allocate RAP_diag_data and RAP_diag_j arrays. * Allocate RAP_offd_data and RAP_offd_j arrays. 
*-----------------------------------------------------------------------*/ RAP_diag_size = jj_count_diag; if (RAP_diag_size) { RAP_diag_data = hypre_CTAlloc(HYPRE_Real, RAP_diag_size, HYPRE_MEMORY_DEVICE); RAP_diag_j = hypre_CTAlloc(HYPRE_Int, RAP_diag_size, HYPRE_MEMORY_DEVICE); } RAP_offd_size = jj_count_offd; if (RAP_offd_size) { RAP_offd_data = hypre_CTAlloc(HYPRE_Real, RAP_offd_size, HYPRE_MEMORY_DEVICE); RAP_offd_j = hypre_CTAlloc(HYPRE_Int, RAP_offd_size, HYPRE_MEMORY_DEVICE); } if (RAP_offd_size == 0 && num_cols_offd_RAP != 0) { num_cols_offd_RAP = 0; hypre_TFree(col_map_offd_RAP, HYPRE_MEMORY_HOST); } RA_diag_data_array = hypre_TAlloc(HYPRE_Real, num_cols_diag_A*num_threads, HYPRE_MEMORY_HOST); RA_diag_j_array = hypre_TAlloc(HYPRE_Int, num_cols_diag_A*num_threads, HYPRE_MEMORY_HOST); if (num_cols_offd_A) { RA_offd_data_array = hypre_TAlloc(HYPRE_Real, num_cols_offd_A*num_threads, HYPRE_MEMORY_HOST); RA_offd_j_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_A*num_threads, HYPRE_MEMORY_HOST); } /*----------------------------------------------------------------------- * Second Pass: Fill in RAP_diag_data and RAP_diag_j. * Second Pass: Fill in RAP_offd_data and RAP_offd_j. *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE #endif for (ii = 0; ii < num_threads; ii++) { size = num_cols_diag_RT/num_threads; rest = num_cols_diag_RT - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } /*----------------------------------------------------------------------- * Initialize some stuff. *-----------------------------------------------------------------------*/ P_marker = P_mark_array[ii]; A_marker = A_mark_array[ii]; for (ic = 0; ic < num_cols_diag_P+num_cols_offd_RAP; ic++) { P_marker[ic] = -1; } for (i = 0; i < num_nz_cols_A ; i++) { A_marker[i] = -1; } jj_count_diag = start_indexing; jj_count_offd = start_indexing; if (ii > 0) { jj_count_diag = jj_cnt_diag[ii-1]; jj_count_offd = jj_cnt_offd[ii-1]; } // temporal matrix RA = R*A // only need to store one row per thread because R*A and (R*A)*P are fused // into one loop. hypre_CSRMatrix RA_diag, RA_offd; RA_diag.data = RA_diag_data_array + num_cols_diag_A*ii; RA_diag.j = RA_diag_j_array + num_cols_diag_A*ii; RA_diag.num_nonzeros = 0; RA_offd.num_nonzeros = 0; if (num_cols_offd_A) { RA_offd.data = RA_offd_data_array + num_cols_offd_A*ii; RA_offd.j = RA_offd_j_array + num_cols_offd_A*ii; } /*----------------------------------------------------------------------- * Loop over interior c-points. 
*-----------------------------------------------------------------------*/ for (ic = ns; ic < ne; ic++) { /*-------------------------------------------------------------------- * Create diagonal entry, RAP_{ic,ic} and add entries of RAP_ext *--------------------------------------------------------------------*/ jj_row_begin_diag = jj_count_diag; jj_row_begin_offd = jj_count_offd; RAP_diag_i[ic] = jj_row_begin_diag; RAP_offd_i[ic] = jj_row_begin_offd; HYPRE_Int ra_row_begin_diag = RA_diag.num_nonzeros; HYPRE_Int ra_row_begin_offd = RA_offd.num_nonzeros; if (square) { P_marker[ic] = jj_count_diag; RAP_diag_data[jj_count_diag] = zero; RAP_diag_j[jj_count_diag] = ic; jj_count_diag++; } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (send_map_elmts_RT_inverse_map_initialized) { HYPRE_Int i = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, ic); if (i != -1) { for (j = send_map_elmts_starts_RT_aggregated[i]; j < send_map_elmts_starts_RT_aggregated[i + 1]; j++) { HYPRE_Int jj = send_map_elmts_RT_aggregated[j]; for (k=RAP_ext_i[jj]; k < RAP_ext_i[jj+1]; k++) { jcol = (HYPRE_Int)RAP_ext_j[k]; if (jcol < num_cols_diag_P) { if (P_marker[jcol] < jj_row_begin_diag) { P_marker[jcol] = jj_count_diag; RAP_diag_data[jj_count_diag] = RAP_ext_data[k]; RAP_diag_j[jj_count_diag] = jcol; jj_count_diag++; } else RAP_diag_data[P_marker[jcol]] += RAP_ext_data[k]; } else { if (P_marker[jcol] < jj_row_begin_offd) { P_marker[jcol] = jj_count_offd; RAP_offd_data[jj_count_offd] = RAP_ext_data[k]; RAP_offd_j[jj_count_offd] = jcol-num_cols_diag_P; jj_count_offd++; } else RAP_offd_data[P_marker[jcol]] += RAP_ext_data[k]; } } } } // if (set) } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ for (i=0; i < num_sends_RT; i++) for (j = send_map_starts_RT[i]; j < send_map_starts_RT[i+1]; j++) if (send_map_elmts_RT[j] == ic) { for (k=RAP_ext_i[j]; k < RAP_ext_i[j+1]; k++) { jcol = (HYPRE_Int)RAP_ext_j[k]; if (jcol < num_cols_diag_P) { if (P_marker[jcol] < jj_row_begin_diag) { P_marker[jcol] = jj_count_diag; RAP_diag_data[jj_count_diag] = RAP_ext_data[k]; RAP_diag_j[jj_count_diag] = jcol; jj_count_diag++; } else RAP_diag_data[P_marker[jcol]] += RAP_ext_data[k]; } else { if (P_marker[jcol] < jj_row_begin_offd) { P_marker[jcol] = jj_count_offd; RAP_offd_data[jj_count_offd] = RAP_ext_data[k]; RAP_offd_j[jj_count_offd] = jcol-num_cols_diag_P; jj_count_offd++; } else RAP_offd_data[P_marker[jcol]] += RAP_ext_data[k]; } } break; } #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ /*-------------------------------------------------------------------- * Loop over entries in row ic of R_diag and compute row ic of RA. *--------------------------------------------------------------------*/ for (jj1 = R_diag_i[ic]; jj1 < R_diag_i[ic+1]; jj1++) { i1 = R_diag_j[jj1]; r_entry = R_diag_data[jj1]; /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. *-----------------------------------------------------------------*/ if (num_cols_offd_A) { for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++) { i2 = A_offd_j[jj2]; HYPRE_Real a_entry = A_offd_data[jj2]; HYPRE_Int marker = A_marker[i2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (marker < ra_row_begin_offd) { /*----------------------------------------------------------- * Mark i2 as visited. 
 *-----------------------------------------------------------*/
                  A_marker[i2] = RA_offd.num_nonzeros;
                  RA_offd.data[RA_offd.num_nonzeros - ra_row_begin_offd] = r_entry * a_entry;
                  RA_offd.j[RA_offd.num_nonzeros - ra_row_begin_offd] = i2;
                  RA_offd.num_nonzeros++;
               }
               /*--------------------------------------------------------------
                *  If i2 was previously visited (A_marker[i2] is already set
                *  for this row), it yields no new entry in RA; we simply add
                *  the new contribution.
                *--------------------------------------------------------------*/
               else
               {
                  RA_offd.data[marker - ra_row_begin_offd] += r_entry * a_entry;
                  // JSP: the compiler is more likely to generate FMA instructions
                  // when we don't eliminate the common subexpression of
                  // r_entry * A_offd_data[jj2] manually.
               }
            } // loop over entries in row i1 of A_offd
         } // num_cols_offd_A

         /*-----------------------------------------------------------------
          *  Loop over entries in row i1 of A_diag.
          *-----------------------------------------------------------------*/
         for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
         {
            i2 = A_diag_j[jj2];
            HYPRE_Real a_entry = A_diag_data[jj2];
            HYPRE_Int marker = A_marker[i2+num_cols_offd_A];

            /*--------------------------------------------------------------
             *  Check A_marker to see if point i2 has been previously
             *  visited. New entries in RAP only occur from unmarked points.
             *--------------------------------------------------------------*/
            if (marker < ra_row_begin_diag)
            {
               /*-----------------------------------------------------------
                *  Mark i2 as visited.
                *-----------------------------------------------------------*/
               A_marker[i2+num_cols_offd_A] = RA_diag.num_nonzeros;
               RA_diag.data[RA_diag.num_nonzeros - ra_row_begin_diag] = r_entry * a_entry;
               RA_diag.j[RA_diag.num_nonzeros - ra_row_begin_diag] = i2;
               RA_diag.num_nonzeros++;
            }
            /*--------------------------------------------------------------
             *  If i2 was previously visited (A_marker[i2] is already set
             *  for this row), it yields no new entry in RA; we simply add
             *  the new contribution.
             *--------------------------------------------------------------*/
            else
            {
               RA_diag.data[marker - ra_row_begin_diag] += r_entry * a_entry;
            }
         } // loop over entries in row i1 of A_diag
      } // loop over entries in row ic of R_diag

      /*--------------------------------------------------------------------
       *  Loop over entries in row ic of RA_offd.
       *--------------------------------------------------------------------*/
      for (jj1 = ra_row_begin_offd; jj1 < RA_offd.num_nonzeros; jj1++)
      {
         i1 = RA_offd.j[jj1 - ra_row_begin_offd];
         r_a_product = RA_offd.data[jj1 - ra_row_begin_offd];

         /*-----------------------------------------------------------
          *  Loop over entries in row i1 of P_ext.
          *-----------------------------------------------------------*/
         for (jj2 = P_ext_diag_i[i1]; jj2 < P_ext_diag_i[i1+1]; jj2++)
         {
            i2 = P_ext_diag_j[jj2];
            HYPRE_Real p_entry = P_ext_diag_data[jj2];
            HYPRE_Int marker = P_marker[i2];

            /*--------------------------------------------------------
             *  Check P_marker to see that RAP_{ic,i2} has not already
             *  been accounted for. If it has not, create a new entry.
             *  If it has, add new contribution.
*--------------------------------------------------------*/ if (marker < jj_row_begin_diag) { P_marker[i2] = jj_count_diag; RAP_diag_data[jj_count_diag] = r_a_product * p_entry; RAP_diag_j[jj_count_diag] = i2; jj_count_diag++; } else RAP_diag_data[marker] += r_a_product * p_entry; } for (jj2 = P_ext_offd_i[i1]; jj2 < P_ext_offd_i[i1+1]; jj2++) { i2 = map_Pext_to_RAP[P_ext_offd_j[jj2]] + num_cols_diag_P; HYPRE_Real p_entry = P_ext_offd_data[jj2]; HYPRE_Int marker = P_marker[i2]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i2} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (marker < jj_row_begin_offd) { P_marker[i2] = jj_count_offd; RAP_offd_data[jj_count_offd] = r_a_product * p_entry; RAP_offd_j[jj_count_offd] = i2 - num_cols_diag_P; jj_count_offd++; } else RAP_offd_data[marker] += r_a_product * p_entry; } } // loop over entries in row ic of RA_offd /*-------------------------------------------------------------------- * Loop over entries in row ic of RA_diag. *--------------------------------------------------------------------*/ for (jj1 = ra_row_begin_diag; jj1 < RA_diag.num_nonzeros; jj1++) { HYPRE_Int i1 = RA_diag.j[jj1 - ra_row_begin_diag]; HYPRE_Real r_a_product = RA_diag.data[jj1 - ra_row_begin_diag]; /*----------------------------------------------------------------- * Loop over entries in row i1 of P_diag. *-----------------------------------------------------------------*/ for (jj2 = P_diag_i[i1]; jj2 < P_diag_i[i1+1]; jj2++) { i2 = P_diag_j[jj2]; HYPRE_Real p_entry = P_diag_data[jj2]; HYPRE_Int marker = P_marker[i2]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i2} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (marker < jj_row_begin_diag) { P_marker[i2] = jj_count_diag; RAP_diag_data[jj_count_diag] = r_a_product * p_entry; RAP_diag_j[jj_count_diag] = i2; jj_count_diag++; } else { RAP_diag_data[marker] += r_a_product * p_entry; } } if (num_cols_offd_P) { for (jj2 = P_offd_i[i1]; jj2 < P_offd_i[i1+1]; jj2++) { i2 = map_P_to_RAP[P_offd_j[jj2]] + num_cols_diag_P; HYPRE_Real p_entry = P_offd_data[jj2]; HYPRE_Int marker = P_marker[i2]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i2} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (marker < jj_row_begin_offd) { P_marker[i2] = jj_count_offd; RAP_offd_data[jj_count_offd] = r_a_product * p_entry; RAP_offd_j[jj_count_offd] = i2 - num_cols_diag_P; jj_count_offd++; } else { RAP_offd_data[marker] += r_a_product * p_entry; } } } // num_cols_offd_P } // loop over entries in row ic of RA_diag. } // Loop over interior c-points. 
      hypre_TFree(P_mark_array[ii], HYPRE_MEMORY_HOST);
      hypre_TFree(A_mark_array[ii], HYPRE_MEMORY_HOST);
   } // omp parallel for

   /* Check whether every off-diagonal column listed in col_map_offd_RAP
      actually occurs in RAP_offd_j, and eliminate the unused ones if
      necessary. */
   P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_RAP, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i=0; i < num_cols_offd_RAP; i++)
      P_marker[i] = -1;

   jj_count_offd = 0;
#ifdef HYPRE_USING_ATOMIC
#pragma omp parallel for private(i3) reduction(+:jj_count_offd) HYPRE_SMP_SCHEDULE
#endif
   for (i=0; i < RAP_offd_size; i++)
   {
      i3 = RAP_offd_j[i];
#ifdef HYPRE_USING_ATOMIC
      if (hypre_compare_and_swap(P_marker + i3, -1, 0) == -1)
      {
         jj_count_offd++;
      }
#else
      if (P_marker[i3])
      {
         P_marker[i3] = 0;
         jj_count_offd++;
      }
#endif
   }

   if (jj_count_offd < num_cols_offd_RAP)
   {
      new_col_map_offd_RAP = hypre_CTAlloc(HYPRE_BigInt, jj_count_offd, HYPRE_MEMORY_HOST);
      jj_counter = 0;
      for (i=0; i < num_cols_offd_RAP; i++)
         if (!P_marker[i])
         {
            P_marker[i] = jj_counter;
            new_col_map_offd_RAP[jj_counter++] = col_map_offd_RAP[i];
         }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i3) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < RAP_offd_size; i++)
      {
         i3 = RAP_offd_j[i];
         RAP_offd_j[i] = P_marker[i3];
      }

      num_cols_offd_RAP = jj_count_offd;
      hypre_TFree(col_map_offd_RAP, HYPRE_MEMORY_HOST);
      col_map_offd_RAP = new_col_map_offd_RAP;
   }
   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);

   RAP = hypre_ParCSRMatrixCreate(comm, n_coarse_RT, n_coarse, RT_partitioning, coarse_partitioning, num_cols_offd_RAP, RAP_diag_size, RAP_offd_size);

   /* Have RAP own coarse_partitioning instead of P */
   hypre_ParCSRMatrixSetColStartsOwner(P,0);
   hypre_ParCSRMatrixSetColStartsOwner(RT,0);

   RAP_diag = hypre_ParCSRMatrixDiag(RAP);
   hypre_CSRMatrixI(RAP_diag) = RAP_diag_i;
   if (RAP_diag_size)
   {
      hypre_CSRMatrixData(RAP_diag) = RAP_diag_data;
      hypre_CSRMatrixJ(RAP_diag) = RAP_diag_j;
   }

   RAP_offd = hypre_ParCSRMatrixOffd(RAP);
   hypre_CSRMatrixI(RAP_offd) = RAP_offd_i;
   if (num_cols_offd_RAP)
   {
      hypre_CSRMatrixData(RAP_offd) = RAP_offd_data;
      hypre_CSRMatrixJ(RAP_offd) = RAP_offd_j;
      hypre_ParCSRMatrixColMapOffd(RAP) = col_map_offd_RAP;
   }

   if (num_procs > 1)
   {
      /* hypre_GenerateRAPCommPkg(RAP, A); */
      hypre_MatvecCommPkgCreate(RAP);
   }

   *RAP_ptr = RAP;

   /*-----------------------------------------------------------------------
    *  Free R, P_ext and marker arrays.
*-----------------------------------------------------------------------*/ if (keepTranspose) { hypre_ParCSRMatrixDiagT(RT) = R_diag; } else { hypre_CSRMatrixDestroy(R_diag); } R_diag = NULL; if (num_cols_offd_RT) { if (keepTranspose) { hypre_ParCSRMatrixOffdT(RT) = R_offd; } else { hypre_CSRMatrixDestroy(R_offd); } R_offd = NULL; } if (num_sends_RT || num_recvs_RT) { hypre_CSRMatrixDestroy(RAP_ext); RAP_ext = NULL; } hypre_TFree(P_mark_array, HYPRE_MEMORY_HOST); hypre_TFree(A_mark_array, HYPRE_MEMORY_HOST); hypre_TFree(P_ext_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(P_ext_offd_i, HYPRE_MEMORY_HOST); hypre_TFree(jj_cnt_diag, HYPRE_MEMORY_HOST); hypre_TFree(jj_cnt_offd, HYPRE_MEMORY_HOST); if (num_cols_offd_P) { hypre_TFree(map_P_to_Pext, HYPRE_MEMORY_HOST); hypre_TFree(map_P_to_RAP, HYPRE_MEMORY_HOST); } if (num_cols_offd_Pext) { hypre_TFree(col_map_offd_Pext, HYPRE_MEMORY_HOST); hypre_TFree(map_Pext_to_RAP, HYPRE_MEMORY_HOST); } if (P_ext_diag_size) { hypre_TFree(P_ext_diag_data, HYPRE_MEMORY_HOST); hypre_TFree(P_ext_diag_j, HYPRE_MEMORY_HOST); } if (P_ext_offd_size) { hypre_TFree(P_ext_offd_data, HYPRE_MEMORY_HOST); hypre_TFree(P_ext_offd_j, HYPRE_MEMORY_HOST); } hypre_TFree(RA_diag_data_array, HYPRE_MEMORY_HOST); hypre_TFree(RA_diag_j_array, HYPRE_MEMORY_HOST); if (num_cols_offd_A) { hypre_TFree(RA_offd_data_array, HYPRE_MEMORY_HOST); hypre_TFree(RA_offd_j_array, HYPRE_MEMORY_HOST); } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (send_map_elmts_RT_inverse_map_initialized) { hypre_UnorderedIntMapDestroy(&send_map_elmts_RT_inverse_map); } hypre_TFree(send_map_elmts_starts_RT_aggregated, HYPRE_MEMORY_HOST); hypre_TFree(send_map_elmts_RT_aggregated, HYPRE_MEMORY_HOST); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RAP] += hypre_MPI_Wtime(); #endif return(0); }
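/* A minimal standalone sketch (illustrative names, not hypre code) of the
 * sparse-accumulator technique used by the second pass above: a marker array
 * records, for each column, where that column's value lives in the current
 * output row, so repeated contributions are summed in place instead of being
 * re-inserted.  The routine computes one sparse row ra = r * A for a CSR
 * matrix A; 'marker' must arrive initialized to -1 and is restored on exit. */
static int sparse_row_times_csr(int r_len, const int *r_j, const double *r_x,
                                const int *A_i, const int *A_j, const double *A_x,
                                int *marker, int *ra_j, double *ra_x)
{
   int k, jj, nnz = 0;
   for (k = 0; k < r_len; k++)
   {
      int i1 = r_j[k];            /* column of r, i.e. the row of A to scan */
      double r_entry = r_x[k];
      for (jj = A_i[i1]; jj < A_i[i1+1]; jj++)
      {
         int i2 = A_j[jj];
         if (marker[i2] < 0)      /* first contribution to column i2 */
         {
            marker[i2] = nnz;
            ra_j[nnz] = i2;
            ra_x[nnz] = r_entry * A_x[jj];
            nnz++;
         }
         else                     /* column already present: accumulate */
         {
            ra_x[marker[i2]] += r_entry * A_x[jj];
         }
      }
   }
   for (k = 0; k < nnz; k++)      /* reset the marker for the next row */
   {
      marker[ra_j[k]] = -1;
   }
   return nnz;
}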
user_basis_core.h
#ifndef _user_basis_core_H #define _user_basis_core_H #include <complex> #include <vector> #include <stdio.h> #include "general_basis_core.h" #include "numpy/ndarraytypes.h" #include "benes_perm.h" #include "openmp.h" namespace basis_general { template<class I> struct op_results { std::complex<double> m; I r; op_results(std::complex<double> _m,I _r): m(_m),r(_r) {} }; template<class I,class P=signed char> class user_basis_core : public general_basis_core<I,P> { typedef I (*map_type)(I,int,P*,I*); typedef I (*next_state_type)(I,I,I,I*); typedef int (*op_func_type)(op_results<I>*,char,int,int,I*); typedef void (*count_particles_type)(I,int*,I*); typedef bool (*check_state_nosymm_type)(I,I,I*); public: map_type * map_funcs; next_state_type next_state_func; op_func_type op_func; count_particles_type count_particles_func; check_state_nosymm_type pre_check_state; const int n_sectors; I *ns_args,*precs_args,*op_args,*count_particles_args; I **maps_args; user_basis_core(const int _N,const int _nt, void * _map_funcs, const int _pers[], const int _qs[], I** _maps_args, const int _n_sectors,size_t _next_state,I *_ns_args,size_t _pre_check_state, I* _precs_args,size_t _count_particles,I *_count_particles_args,size_t _op_func,I *_op_args) : \ general_basis_core<I,P>::general_basis_core(_N,_nt,NULL,_pers,_qs,true), n_sectors(_n_sectors) { map_funcs = (map_type*)_map_funcs; maps_args = _maps_args; next_state_func = (next_state_type)_next_state; count_particles_func = (count_particles_type)_count_particles; op_func = (op_func_type)_op_func; op_args = _op_args; ns_args = _ns_args; pre_check_state = (check_state_nosymm_type)_pre_check_state; precs_args = _precs_args; count_particles_args = _count_particles_args; } ~user_basis_core() {} I map_state(I s,int n_map,P &phase){ if(general_basis_core<I,P>::nt<=0){ return s; } P temp_phase = 1; s = (*map_funcs[n_map])(s, general_basis_core<I,P>::N, &temp_phase, maps_args[n_map]); phase *= temp_phase; return s; } void map_state(I s[],npy_intp M,int n_map,P phase[]){ if(general_basis_core<I,P>::nt<=0){ return; } map_type func = map_funcs[n_map]; I * args = maps_args[n_map]; #pragma omp for schedule(static) for(npy_intp i=0;i<M;i++){ P temp_phase = 1; s[i] = (*func)(s[i], general_basis_core<I,P>::N, &temp_phase, args); phase[i] *= temp_phase; } } std::vector<int> count_particles(const I s){ std::vector<int> v(n_sectors); (*count_particles_func)(s,&v[0],count_particles_args); return v; } I inline next_state_pcon(const I s,const I nns){ return (*next_state_func)(s,nns,(I)general_basis_core<I,P>::N, ns_args); } double check_state(I s){ bool ns_check=true; if(pre_check_state){ ns_check = (*pre_check_state)(s,(I)general_basis_core<I,P>::N, precs_args); } if(ns_check){ return check_state_core_unrolled<I>(this,s,general_basis_core<I,P>::nt); } else{ return std::numeric_limits<double>::quiet_NaN(); } } int op(I &r,std::complex<double> &m,const int n_op,const char opstr[],const int indx[]){ I s = r; op_results<I> res(m,r); for(int j=n_op-1;j>=0;j--){ int err = (*op_func)(&res,opstr[j],indx[j],general_basis_core<I,P>::N,op_args); if(err!=0){ return err; } if(std::abs(res.m)==0){ res.r = s; break; } } m = res.m; r = res.r; return 0; } }; } #endif
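// A hypothetical user map (illustrative, not shipped with the library) that
// matches the map_type signature I(*)(I,int,P*,I*) declared above; a pointer
// to it would be passed through the _map_funcs array in the constructor.  It
// implements a one-site cyclic translation of an N-site spin-1/2 chain stored
// in the bits of s; 'args' is unused and the phase is left untouched because
// this symmetry carries no sign.
#include "numpy/ndarraytypes.h"

static npy_uint32 translation_map(npy_uint32 s, int N, signed char *phase, npy_uint32 *args)
{
	(void)args;   // this particular map needs no auxiliary arguments
	(void)phase;  // phase stays +1 for a plain translation
	const npy_uint32 mask = (N < 32) ? ((npy_uint32(1) << N) - npy_uint32(1))
	                                 : ~npy_uint32(0);
	return ((s << 1) | (s >> (N - 1))) & mask;  // cyclic shift by one site
}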
weightedNorm1Many.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <cmath> extern "C" void FUNC(weightedNorm1Many)(const dlong & Nblocks, const dlong & N, const dlong & Nfields, const dlong & offset, const dfloat * __restrict__ cpu_w, const dfloat * __restrict__ cpu_a, dfloat * __restrict__ cpu_wa){ dfloat wa2 = 0; #ifdef __NEKRS__OMP__ #pragma omp parallel for collapse(2) reduction(+:wa2) #endif for(int fld=0;fld<Nfields;fld++) { for(int i=0;i<N;++i){ const dlong id = i + fld*offset; const dfloat ai = cpu_a[id]; const dfloat wi = cpu_w[i]; wa2 += fabs(ai)*wi; } } cpu_wa[0] = wa2; }
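/* A hedged usage sketch (not nekRS code) for the kernel above, assuming
   dlong = int, dfloat = double, and that the FUNC() name-mangling macro
   expands to the plain name for this illustration.  The kernel reduces
   wa[0] = sum over fields f and nodes i of |a[i + f*offset]| * w[i]. */
#include <cstdio>

typedef int dlong;
typedef double dfloat;
#define FUNC(x) x  /* assumption for this sketch only */

extern "C" void FUNC(weightedNorm1Many)(const dlong&, const dlong&, const dlong&,
                                        const dlong&, const dfloat*, const dfloat*,
                                        dfloat*);

int main(){
  const dlong Nblocks = 0, N = 3, Nfields = 2, offset = 3;
  dfloat w[3] = {1.0, 2.0, 0.5};
  dfloat a[6] = {1.0, -1.0, 4.0,    // field 0
                 -2.0, 3.0, 0.0};   // field 1
  dfloat wa = 0;
  FUNC(weightedNorm1Many)(Nblocks, N, Nfields, offset, w, a, &wa);
  std::printf("%g\n", wa);  // field 0: 1+2+2, field 1: 2+6+0, total 13
  return 0;
}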
FullyDistSpVec.h
/****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.6 -------------------------------------------------*/
/* date: 6/15/2017 ---------------------------------------------*/
/* authors: Ariful Azad, Aydin Buluc --------------------------*/
/****************************************************************/
/*
 Copyright (c) 2010-2017, The Regents of the University of California

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 copies of the Software, and to permit persons to whom the Software is
 furnished to do so, subject to the following conditions:

 The above copyright notice and this permission notice shall be included in
 all copies or substantial portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */

#ifndef _FULLY_DIST_SP_VEC_H_
#define _FULLY_DIST_SP_VEC_H_

#include <iostream>
#include <vector>
#include <utility>
#include "CommGrid.h"
#include "promote.h"
#include "SpParMat.h"
#include "FullyDist.h"
#include "Exception.h"
#include "OptBuf.h"
#include "CombBLAS.h"

namespace combblas {

template <class IT, class NT, class DER>
class SpParMat;

template <class IT>
class DistEdgeList;

template <class IU, class NU>
class FullyDistVec;

template <class IU, class NU>
class SparseVectorLocalIterator;

/**
  * A sparse vector of length n (with nnz <= n of them being nonzeros) is distributed to
  * "all the processors" in a way that "respects ordering" of the nonzero indices
  * Example: x = [5,1,6,2,9] for nnz(x)=5 and length(x)=12
  * we use 4 processors P_00, P_01, P_10, P_11
  * Then P_00 owns [1,2] (in the range [0,...,2]), P_01 owns [5] (in the range [3,...,5]), and so on.
  * In the case of A(v,w) type sparse matrix indexing, this doesn't matter because n = nnz
  * 	After all, A(v,w) will have dimensions length(v) x length(w)
  * 	v and w will be of numerical type (NT) "int" and their indices (IT) will be consecutive integers
  * It is possible that nonzero counts are distributed unevenly
  * Example: x=[1,2,3,4,5] and length(x) = 20, then P_00 would own all the nonzeros and the rest will hold empty vectors
  * Just like in SpParMat case, indices are local to processors (they belong to range [0,...,length-1] on each processor)
  * \warning Always create vectors with the right length, setting elements won't increase its length (similar to operator[] on std::vector)
 **/
template <class IT, class NT>
class FullyDistSpVec: public FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>
{
public:
	FullyDistSpVec ( );
	explicit FullyDistSpVec ( IT glen );
	FullyDistSpVec ( std::shared_ptr<CommGrid> grid);
	FullyDistSpVec ( std::shared_ptr<CommGrid> grid, IT glen);

	template <typename _UnaryOperation>
	FullyDistSpVec (const FullyDistVec<IT,NT> & rhs, _UnaryOperation unop);

	FullyDistSpVec (const FullyDistVec<IT,NT> & rhs); // Conversion copy-constructor

	FullyDistSpVec (IT globalsize, const FullyDistVec<IT,IT> & inds, const FullyDistVec<IT,NT> & vals, bool SumDuplicates = false);
	FullyDistSpVec (std::shared_ptr<CommGrid> grid, IT globallen, const std::vector<IT>& indvec, const std::vector<NT> & numvec, bool SumDuplicates = false, bool sorted=false);

	IT NnzUntil() const;
	FullyDistSpVec<IT,NT> Invert (IT globallen);
	template <typename _BinaryOperationIdx, typename _BinaryOperationVal, typename _BinaryOperationDuplicate>
	FullyDistSpVec<IT,NT> Invert (IT globallen, _BinaryOperationIdx __binopIdx, _BinaryOperationVal __binopVal, _BinaryOperationDuplicate __binopDuplicate);
	template <typename _BinaryOperationIdx, typename _BinaryOperationVal>
	FullyDistSpVec<IT,NT> InvertRMA (IT globallen, _BinaryOperationIdx __binopIdx, _BinaryOperationVal __binopVal);

	template <typename NT1, typename _UnaryOperation>
	void Select (const FullyDistVec<IT,NT1> & denseVec, _UnaryOperation unop);
	template <typename _UnaryOperation>
	void FilterByVal (FullyDistSpVec<IT,IT> Selector, _UnaryOperation __unop, bool filterByIndex);
	template <typename NT1>
	void Setminus (const FullyDistSpVec<IT,NT1> & other);

	//template <typename NT1, typename _UnaryOperation>
	//void Set (FullyDistSpVec<IT,NT1> Selector, _UnaryOperation __unop);

	template <typename NT1, typename _UnaryOperation, typename _BinaryOperation>
	void SelectApply (const FullyDistVec<IT,NT1> & denseVec, _UnaryOperation __unop, _BinaryOperation __binop);

	//! like operator=, but instead of making a deep copy it just steals the contents.
	//! Useful for places where the "victim" will be destroyed immediately after the call.
void stealFrom(FullyDistSpVec<IT,NT> & victim); FullyDistSpVec<IT,NT> & operator=(const FullyDistSpVec< IT,NT > & rhs); FullyDistSpVec<IT,NT> & operator=(const FullyDistVec< IT,NT > & rhs); // convert from dense FullyDistSpVec<IT,NT> & operator=(NT fixedval) // assign fixed value { #ifdef _OPENMP #pragma omp parallel for #endif for(size_t i=0; i < ind.size(); ++i) num[i] = fixedval; return *this; } FullyDistSpVec<IT,NT> & operator+=(const FullyDistSpVec<IT,NT> & rhs); FullyDistSpVec<IT,NT> & operator-=(const FullyDistSpVec<IT,NT> & rhs); class ScalarReadSaveHandler { public: NT getNoNum(IT index) { return static_cast<NT>(1); } template <typename c, typename t> NT read(std::basic_istream<c,t>& is, IT index) { NT v; is >> v; return v; } template <typename c, typename t> void save(std::basic_ostream<c,t>& os, const NT& v, IT index) { os << v; } }; template <class HANDLER> void ParallelWrite(const std::string & filename, bool onebased, HANDLER handler, bool includeindices = true, bool includeheader = false); void ParallelWrite(const std::string & filename, bool onebased, bool includeindices = true) { ParallelWrite(filename, onebased, ScalarReadSaveHandler(), includeindices); }; template <typename _BinaryOperation> void ParallelRead (const std::string & filename, bool onebased, _BinaryOperation BinOp); //! Totally obsolete version that only accepts an ifstream object and ascii files template <class HANDLER> std::ifstream& ReadDistribute (std::ifstream& infile, int master, HANDLER handler); std::ifstream& ReadDistribute (std::ifstream& infile, int master) { return ReadDistribute(infile, master, ScalarReadSaveHandler()); } template <class HANDLER> void SaveGathered(std::ofstream& outfile, int master, HANDLER handler, bool printProcSplits = false); void SaveGathered(std::ofstream& outfile, int master) { SaveGathered(outfile, master, ScalarReadSaveHandler()); } template <typename NNT> operator FullyDistSpVec< IT,NNT > () const //!< Type conversion operator { FullyDistSpVec<IT,NNT> CVT(commGrid); CVT.ind = std::vector<IT>(ind.begin(), ind.end()); CVT.num = std::vector<NNT>(num.begin(), num.end()); CVT.glen = glen; return CVT; } bool operator==(const FullyDistSpVec<IT,NT> & rhs) const { FullyDistVec<IT,NT> v = *this; FullyDistVec<IT,NT> w = rhs; return (v == w); } void PrintInfo(std::string vecname) const; void iota(IT globalsize, NT first); void nziota(NT first); FullyDistVec<IT,NT> operator() (const FullyDistVec<IT,IT> & ri) const; //!< SpRef (expects ri to be 0-based) void SetElement (IT indx, NT numx); // element-wise assignment void DelElement (IT indx); // element-wise deletion NT operator[](IT indx); bool WasFound() const { return wasFound; } //! 
sort the vector itself, return the permutation vector (0-based) FullyDistSpVec<IT, IT> sort(); #if __cplusplus > 199711L template <typename _BinaryOperation = minimum<NT> > FullyDistSpVec<IT, NT> Uniq(_BinaryOperation __binary_op = _BinaryOperation(), MPI_Op mympiop = MPI_MIN); #else template <typename _BinaryOperation > FullyDistSpVec<IT, NT> Uniq(_BinaryOperation __binary_op, MPI_Op mympiop); #endif IT getlocnnz() const { return ind.size(); } IT getnnz() const { IT totnnz = 0; IT locnnz = ind.size(); MPI_Allreduce( &locnnz, &totnnz, 1, MPIType<IT>(), MPI_SUM, commGrid->GetWorld()); return totnnz; } using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::LengthUntil; using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::MyLocLength; using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::MyRowLength; using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::TotalLength; using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::Owner; using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::RowLenUntil; void setNumToInd() { IT offset = LengthUntil(); IT spsize = ind.size(); #ifdef _OPENMP #pragma omp parallel for #endif for(IT i=0; i< spsize; ++i) num[i] = ind[i] + offset; } template <typename _Predicate> IT Count(_Predicate pred) const; //!< Return the number of elements for which pred is true template <typename _UnaryOperation> void Apply(_UnaryOperation __unary_op) { //transform(num.begin(), num.end(), num.begin(), __unary_op); IT spsize = num.size(); #ifdef _OPENMP #pragma omp parallel for #endif for(IT i=0; i < spsize; ++i) num[i] = __unary_op(num[i]); } template <typename _BinaryOperation> void ApplyInd(_BinaryOperation __binary_op) { IT offset = LengthUntil(); IT spsize = ind.size(); #ifdef _OPENMP #pragma omp parallel for #endif for(IT i=0; i < spsize; ++i) num[i] = __binary_op(num[i], ind[i] + offset); } template <typename _BinaryOperation> NT Reduce(_BinaryOperation __binary_op, NT init) const; template <typename OUT, typename _BinaryOperation, typename _UnaryOperation> OUT Reduce(_BinaryOperation __binary_op, OUT default_val, _UnaryOperation __unary_op) const; void DebugPrint(); std::shared_ptr<CommGrid> getcommgrid() const { return commGrid; } void Reset(); NT GetLocalElement(IT indx); void BulkSet(IT inds[], int count); std::vector<IT> GetLocalInd (){std::vector<IT> rind = ind; return rind;}; std::vector<NT> GetLocalNum (){std::vector<NT> rnum = num; return rnum;}; template <typename _Predicate> FullyDistVec<IT,IT> FindInds(_Predicate pred) const; template <typename _Predicate> FullyDistVec<IT,NT> FindVals(_Predicate pred) const; protected: using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::glen; using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::commGrid; private: std::vector< IT > ind; // ind.size() give the number of nonzeros std::vector< NT > num; bool wasFound; // true if the last GetElement operation returned an actual value template <typename _BinaryOperation> void SparseCommon(std::vector< std::vector < std::pair<IT,NT> > > & data, _BinaryOperation BinOp); #if __cplusplus > 199711L template <typename _BinaryOperation = minimum<NT> > FullyDistSpVec<IT, NT> UniqAll2All(_BinaryOperation __binary_op = _BinaryOperation(), MPI_Op 
mympiop = MPI_MIN); #else template <typename _BinaryOperation > FullyDistSpVec<IT, NT> UniqAll2All(_BinaryOperation __binary_op, MPI_Op mympiop); #endif template <class IU, class NU> friend class FullyDistSpVec; template <class IU, class NU> friend class FullyDistVec; template <class IU, class NU, class UDER> friend class SpParMat; template <class IU, class NU> friend class SparseVectorLocalIterator; template <typename SR, typename IU, typename NUM, typename NUV, typename UDER> friend FullyDistSpVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,NUV> & x ); template <typename SR, typename IU, typename NUM, typename UDER> friend FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue); template <typename VT, typename IU, typename UDER> // NoSR version (in BFSFriends.h) friend FullyDistSpVec<IU,VT> SpMV (const SpParMat<IU,bool,UDER> & A, const FullyDistSpVec<IU,VT> & x, OptBuf<int32_t, VT > & optbuf); template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER> friend void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y,bool indexisvalue, OptBuf<int32_t, OVT > & optbuf); template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER> friend void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y,bool indexisvalue, OptBuf<int32_t, OVT > & optbuf, PreAllocatedSPA<OVT> & SPA); template <typename IU, typename NU1, typename NU2> friend FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote> EWiseMult (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero); template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate> friend FullyDistSpVec<IU,RET> EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp); template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate> friend FullyDistSpVec<IU,RET> EWiseApply_threaded (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp); template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate> friend FullyDistSpVec<IU,RET> EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect, const bool useExtendedBinOp); template <typename IU> friend void RandPerm(FullyDistSpVec<IU,IU> & V); // called on an existing object, randomly permutes it template <typename IU> friend void RenameVertices(DistEdgeList<IU> & DEL); //! 
Helper functions for sparse matrix X sparse vector // Ariful: I made this an internal function in ParFriends.h //template <typename SR, typename IU, typename OVT> //friend void MergeContributions(FullyDistSpVec<IU,OVT> & y, int * & recvcnt, int * & rdispls, int32_t * & recvindbuf, OVT * & recvnumbuf, int rowneighs); template <typename IU, typename VT> friend void MergeContributions(FullyDistSpVec<IU,VT> & y, int * & recvcnt, int * & rdispls, int32_t * & recvindbuf, VT * & recvnumbuf, int rowneighs); template<typename IU, typename NV> friend void TransposeVector(MPI_Comm & World, const FullyDistSpVec<IU,NV> & x, int32_t & trxlocnz, IU & lenuntil, int32_t * & trxinds, NV * & trxnums, bool indexisvalue); template <class IU, class NU, class DER, typename _UnaryOperation> friend SpParMat<IU, bool, DER> PermMat1 (const FullyDistSpVec<IU,NU> & ri, const IU ncol, _UnaryOperation __unop); }; } #include "FullyDistSpVec.cpp" #endif
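// An illustrative sketch (not CombBLAS code) of the block distribution the
// FullyDistSpVec class comment above describes: a vector of global length
// glen over p processors is split into contiguous ranges, with the first
// glen%p processors holding one extra element.  CombBLAS's real logic lives
// in FullyDist::Owner()/LengthUntil(); this merely mirrors the scheme of the
// example (glen=12, p=4 gives the ranges [0..2],[3..5],[6..8],[9..11]).
#include <utility>

static std::pair<int, long> owner_and_local_index(long idx, long glen, int p)
{
	const long small_len = glen / p;
	const long nbig      = glen % p;               // processors owning small_len+1
	const long cut       = nbig * (small_len + 1); // first index of a "small" owner
	if (idx < cut)
		return { static_cast<int>(idx / (small_len + 1)), idx % (small_len + 1) };
	return { static_cast<int>(nbig + (idx - cut) / small_len),
	         (idx - cut) % small_len };
}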
c3_fmt.c
/* * Generic crypt(3) support, as well as support for glibc's crypt_r(3) and * Solaris' MT-safe crypt(3C) with OpenMP parallelization. * * This file is part of John the Ripper password cracker, * Copyright (c) 2009-2013 by Solar Designer * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * There's ABSOLUTELY NO WARRANTY, express or implied. */ #if AC_BUILT #include "autoconfig.h" #endif #if HAVE_CRYPT /* if this comes after the #define crap below, there are often * problems with strdup or other things not being defined. We * move this block of includes to above the _XOPEN_* defines */ #if STRING_WITH_STRINGS #include <string.h> #include <strings.h> #elif HAVE_STRING_H #include <string.h> #elif HAVE_STRINGS_H #include <strings.h> #endif #if !AC_BUILT #include <string.h> #ifndef _MSC_VER #include <strings.h> #endif #undef _XOPEN_VERSION #undef _XOPEN_SOURCE #undef _XOPEN_SOURCE_EXTENDED #undef _GNU_SOURCE #define _XOPEN_SOURCE 4 /* for crypt(3) */ #define _XOPEN_SOURCE_EXTENDED 1 /* for OpenBSD */ #define _XOPEN_VERSION 4 #define _XPG4_2 #define _GNU_SOURCE 1 /* for crypt_r(3) */ #include <stdio.h> #ifdef __CYGWIN__ #include <crypt.h> #endif #if defined(_OPENMP) && defined(__GLIBC__) #include <crypt.h> #else #if (!AC_BUILT || HAVE_UNISTD_H) && !_MSC_VER #include <unistd.h> #endif #endif #endif #if HAVE_CRYPT_H #include <crypt.h> #endif #if (!AC_BUILT || HAVE_UNISTD_H) && !_MSC_VER #include <unistd.h> #endif #if defined(_OPENMP) #include <omp.h> /* for omp_get_thread_num() */ #endif #include "options.h" #include "arch.h" #include "misc.h" #include "params.h" #include "memory.h" #include "common.h" #include "formats.h" #include "loader.h" #include "john.h" #ifdef HAVE_MPI #include "john-mpi.h" #endif #include "memdbg.h" #define FORMAT_LABEL "crypt" #define FORMAT_NAME "generic crypt(3)" #define ALGORITHM_NAME "?/" ARCH_BITS_STR #define BENCHMARK_COMMENT " DES" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 72 #define BINARY_SIZE 128 #define BINARY_ALIGN 1 #define SALT_SIZE BINARY_SIZE #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT 96 #define MAX_KEYS_PER_CRYPT 96 static struct fmt_tests tests[] = { {"CCNf8Sbh3HDfQ", "U*U*U*U*"}, {"CCX.K.MFy4Ois", "U*U***U"}, {"CC4rMpbg9AMZ.", "U*U***U*"}, {"XXxzOu6maQKqQ", "*U*U*U*U"}, {"SDbsugeBiC58A", ""}, {NULL} }; static char saved_key[MAX_KEYS_PER_CRYPT][PLAINTEXT_LENGTH + 1]; static char saved_salt[SALT_SIZE]; static char crypt_out[MAX_KEYS_PER_CRYPT][BINARY_SIZE]; #if defined(_OPENMP) && defined(__GLIBC__) #define MAX_THREADS MAX_KEYS_PER_CRYPT /* We assume that this is zero-initialized (all NULL pointers) */ static struct crypt_data *crypt_data[MAX_THREADS]; #endif static void init(struct fmt_main *self) { if (options.subformat) { int i; char *salt = tests[0].ciphertext; #if defined(_OPENMP) && defined(__GLIBC__) struct crypt_data data; data.initialized = 0; #endif /* * Allow * ./john --list=format-tests --format=crypt --subformat=md5crypt * in addition to * ./john --test --format=crypt --subformat=md5crypt * * That's why, don't require FLG_TEST_CHK to be set. 
*/ if (options.flags & FLG_PASSWD) { fprintf(stderr, "\n%s: --subformat option is only for --test or --list=format-tests\n", FORMAT_LABEL); error(); } if (!strcmp(options.subformat, "?")) { fprintf(stderr, "Subformat may either be a verbatim salt, or: descrypt, md5crypt, bcrypt, sha256crypt, sha512crypt, sun-md5\n\n"); error(); } else if (!strcasecmp(options.subformat, "md5crypt") || !strcasecmp(options.subformat, "md5")) { static struct fmt_tests tests[] = { {"$1$12345678$aIccj83HRDBo6ux1bVx7D1", "0123456789ABCDE"}, {"$1$12345678$f8QoJuo0DpBRfQSD0vglc1", "12345678"}, {"$1$$qRPK7m23GJusamGpoGLby/", ""}, {NULL} }; self->params.tests = tests; self->params.benchmark_comment = " MD5"; salt = "$1$dXc3I7Rw$"; } else if (!strcasecmp(options.subformat, "sunmd5") || !strcasecmp(options.subformat, "sun-md5")) { static struct fmt_tests tests[] = { {"$md5$rounds=904$Vc3VgyFx44iS8.Yu$Scf90iLWN6O6mT9TA06NK/", "test"}, {"$md5$rounds=904$ZZZig8GS.S0pRNhc$dw5NMYJoxLlnFq4E.phLy.", "Don41dL33"}, {"$md5$rounds=904$zSuVTn567UJLv14u$q2n2ZBFwKg2tElFBIzUq/0", "J4ck!3Wood"}, {NULL} }; self->params.tests = tests; self->params.benchmark_comment = " SunMD5"; salt = "$md5$rounds=904$Vc3VgyFx44iS8.Yu$dummy"; } else if ((!strcasecmp(options.subformat, "sha256crypt")) || (!strcasecmp(options.subformat, "sha-256")) || (!strcasecmp(options.subformat, "sha256"))) { static struct fmt_tests tests[] = { {"$5$LKO/Ute40T3FNF95$U0prpBQd4PloSGU0pnpM4z9wKn4vZ1.jsrzQfPqxph9", "U*U*U*U*"}, {"$5$LKO/Ute40T3FNF95$fdgfoJEBoMajNxCv3Ru9LyQ0xZgv0OBMQoq80LQ/Qd.", "U*U***U"}, {"$5$LKO/Ute40T3FNF95$8Ry82xGnnPI/6HtFYnvPBTYgOL23sdMXn8C29aO.x/A", "U*U***U*"}, {NULL} }; self->params.tests = tests; self->params.benchmark_comment = " SHA-256 rounds=5000"; salt = "$5$LKO/Ute40T3FNF95$"; } else if ((!strcasecmp(options.subformat, "sha512crypt")) || (!strcasecmp(options.subformat, "sha-512")) || (!strcasecmp(options.subformat, "sha512"))) { static struct fmt_tests tests[] = { {"$6$LKO/Ute40T3FNF95$6S/6T2YuOIHY0N3XpLKABJ3soYcXD9mB7uVbtEZDj/LNscVhZoZ9DEH.sBciDrMsHOWOoASbNLTypH/5X26gN0", "U*U*U*U*"}, {"$6$LKO/Ute40T3FNF95$wK80cNqkiAUzFuVGxW6eFe8J.fSVI65MD5yEm8EjYMaJuDrhwe5XXpHDJpwF/kY.afsUs1LlgQAaOapVNbggZ1", "U*U***U"}, {"$6$LKO/Ute40T3FNF95$YS81pp1uhOHTgKLhSMtQCr2cDiUiN03Ud3gyD4ameviK1Zqz.w3oXsMgO6LrqmIEcG3hiqaUqHi/WEE2zrZqa/", "U*U***U*"}, {NULL} }; self->params.tests = tests; self->params.benchmark_comment = " SHA-512 rounds=5000"; salt = "$6$LKO/Ute40T3FNF95$"; } else if ((!strcasecmp(options.subformat, "bf")) || (!strcasecmp(options.subformat, "blowfish")) || (!strcasecmp(options.subformat, "bcrypt"))) { static struct fmt_tests tests[] = { {"$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW","U*U"}, {"$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK","U*U*"}, {"$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a","U*U*U"}, {NULL} }; self->params.tests = tests; self->params.benchmark_comment = " BF x32"; salt = "$2a$05$AD6y0uWY62Xk2TXZ"; } else if (!strcasecmp(options.subformat, "descrypt") || !strcasecmp(options.subformat, "des")) { salt = "CC"; } else { char *p = mem_alloc_tiny(strlen(options.subformat) + 2, MEM_ALIGN_NONE); strcpy(p, " "); strcat(p, options.subformat); self->params.benchmark_comment = p; salt = options.subformat; /* turn off many salts test, since we are not updating the */ /* params.tests structure data. 
*/ self->params.benchmark_length = -1; } for (i = 0; i < 5; i++) { char *c; #if defined(_OPENMP) && defined(__GLIBC__) c = crypt_r(tests[i].plaintext, salt, &data); #else c = crypt(tests[i].plaintext, salt); #endif if (c && strlen(c) >= 7) tests[i].ciphertext = strdup(c); else { fprintf(stderr, "%s not supported on this system\n", options.subformat); error(); } } if (strlen(tests[0].ciphertext) == 13 && strcasecmp(options.subformat, "descrypt") && strcasecmp(options.subformat, "des")) { fprintf(stderr, "%s not supported on this system\n", options.subformat); error(); } } } static int valid(char *ciphertext, struct fmt_main *self) { int length, count_base64, id, pw_length; char pw[PLAINTEXT_LENGTH + 1], *new_ciphertext; /* We assume that these are zero-initialized */ static char sup_length[BINARY_SIZE], sup_id[0x80]; length = count_base64 = 0; while (ciphertext[length]) { if (atoi64[ARCH_INDEX(ciphertext[length])] != 0x7F && (ciphertext[0] == '_' || length >= 2)) count_base64++; length++; } if (length < 13 || length >= BINARY_SIZE) return 0; id = 0; if (length == 13 && count_base64 == 11) id = 1; else if (length >= 13 && count_base64 >= length - 2 && /* allow for invalid salt */ (length - 2) % 11 == 0) id = 2; else if (length == 20 && count_base64 == 19 && ciphertext[0] == '_') id = 3; else if (ciphertext[0] == '$') { id = (unsigned char)ciphertext[1]; if (id <= 0x20 || id >= 0x80) id = 9; } else if (ciphertext[0] == '*' || ciphertext[0] == '!') /* likely locked */ id = 10; /* Previously detected as supported */ if (sup_length[length] > 0 && sup_id[id] > 0) return 1; /* Previously detected as unsupported */ if (sup_length[length] < 0 && sup_id[id] < 0) return 0; pw_length = ((length - 2) / 11) << 3; if (pw_length >= sizeof(pw)) pw_length = sizeof(pw) - 1; memcpy(pw, ciphertext, pw_length); /* reuse the string, why not? */ pw[pw_length] = 0; #if defined(_OPENMP) && defined(__GLIBC__) /* * Let's use crypt_r(3) just like we will in crypt_all() below. * It is possible that crypt(3) and crypt_r(3) differ in their supported hash * types on a given system. */ { struct crypt_data **data = &crypt_data[0]; if (!*data) { /* * **data is not exactly tiny, but we use mem_alloc_tiny() for its alignment * support and error checking. We do not need to free() this memory anyway. * * The page alignment is to keep different threads' data on different pages. */ *data = mem_alloc_tiny(sizeof(**data), MEM_ALIGN_PAGE); memset(*data, 0, sizeof(**data)); } new_ciphertext = crypt_r(pw, ciphertext, *data); } #else new_ciphertext = crypt(pw, ciphertext); #endif if (new_ciphertext && strlen(new_ciphertext) == length && !strncmp(new_ciphertext, ciphertext, 2)) { sup_length[length] = 1; sup_id[id] = 1; return 1; } if (id != 10 && !ldr_in_pot) if (john_main_process) fprintf(stderr, "Warning: " "hash encoding string length %d, type id %c%c\n" "appears to be unsupported on this system; " "will not load such hashes.\n", length, id > 0x20 ? '$' : '#', id > 0x20 ? 
id : '0' + id); if (!sup_length[length]) sup_length[length] = -1; if (!sup_id[id]) sup_id[id] = -1; return 0; } static void *binary(char *ciphertext) { static char out[BINARY_SIZE]; strncpy(out, ciphertext, sizeof(out)); /* NUL padding is required */ return out; } static void *salt(char *ciphertext) { static char out[SALT_SIZE]; int cut = sizeof(out); #if 1 /* This piece is optional, but matching salts are not detected without it */ int length = strlen(ciphertext); switch (length) { case 13: case 24: cut = 2; break; case 20: if (ciphertext[0] == '_') cut = 9; break; case 35: case 46: case 57: if (ciphertext[0] != '$') cut = 2; /* fall through */ default: if ((length >= 26 && length <= 34 && !strncmp(ciphertext, "$1$", 3)) || (length >= 47 && !strncmp(ciphertext, "$5$", 3)) || (length >= 90 && !strncmp(ciphertext, "$6$", 3))) { char *p = strrchr(ciphertext + 3, '$'); if (p) cut = p - ciphertext; } else if (length == 59 && !strncmp(ciphertext, "$2$", 3)) cut = 28; else if (length == 60 && (!strncmp(ciphertext, "$2a$", 4) || !strncmp(ciphertext, "$2b$", 4) || !strncmp(ciphertext, "$2x$", 4) || !strncmp(ciphertext, "$2y$", 4))) cut = 29; else if (length >= 27 && (!strncmp(ciphertext, "$md5$", 5) || !strncmp(ciphertext, "$md5,", 5))) { char *p = strrchr(ciphertext + 4, '$'); if (p) { /* NUL padding is required */ memset(out, 0, sizeof(out)); memcpy(out, ciphertext, ++p - ciphertext); /* * Workaround what looks like a bug in sunmd5.c: crypt_genhash_impl() where it * takes a different substring as salt depending on whether the optional * existing hash encoding is present after the salt or not. Specifically, the * last '$' delimiter is included into the salt when there's no existing hash * encoding after it, but is omitted from the salt otherwise. */ out[p - ciphertext] = 'x'; return out; } } } #endif /* NUL padding is required */ memset(out, 0, sizeof(out)); memcpy(out, ciphertext, cut); return out; } #define H(s, i) \ ((int)(unsigned char)(atoi64[ARCH_INDEX((s)[(i)])] ^ (s)[(i) - 1])) #define H0(s) \ int i = strlen(s) - 2; \ return i > 0 ? H((s), i) & 0xF : 0 #define H1(s) \ int i = strlen(s) - 2; \ return i > 2 ? (H((s), i) ^ (H((s), i - 2) << 4)) & 0xFF : 0 #define H2(s) \ int i = strlen(s) - 2; \ return i > 2 ? (H((s), i) ^ (H((s), i - 2) << 6)) & 0xFFF : 0 #define H3(s) \ int i = strlen(s) - 2; \ return i > 4 ? (H((s), i) ^ (H((s), i - 2) << 5) ^ \ (H((s), i - 4) << 10)) & 0xFFFF : 0 #define H4(s) \ int i = strlen(s) - 2; \ return i > 6 ? 
(H((s), i) ^ (H((s), i - 2) << 5) ^ \ (H((s), i - 4) << 10) ^ (H((s), i - 6) << 15)) & 0xFFFFF : 0 static int binary_hash_0(void *binary) { H0((char *)binary); } static int binary_hash_1(void *binary) { H1((char *)binary); } static int binary_hash_2(void *binary) { H2((char *)binary); } static int binary_hash_3(void *binary) { H3((char *)binary); } static int binary_hash_4(void *binary) { H4((char *)binary); } static int get_hash_0(int index) { H0(crypt_out[index]); } static int get_hash_1(int index) { H1(crypt_out[index]); } static int get_hash_2(int index) { H2(crypt_out[index]); } static int get_hash_3(int index) { H3(crypt_out[index]); } static int get_hash_4(int index) { H4(crypt_out[index]); } static int salt_hash(void *salt) { int i, h; i = strlen((char *)salt) - 1; if (i > 1) i--; h = (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i])]; h ^= ((unsigned char *)salt)[i - 1]; h <<= 6; h ^= (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i - 1])]; h ^= ((unsigned char *)salt)[i]; return h & (SALT_HASH_SIZE - 1); } static void set_salt(void *salt) { strcpy(saved_salt, salt); } static void set_key(char *key, int index) { strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1); } static char *get_key(int index) { return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { static int warned = 0; int count = *pcount; int index; #if defined(_OPENMP) && defined(__GLIBC__) #pragma omp parallel for default(none) private(index) shared(warned, count, crypt_out, saved_key, saved_salt, crypt_data, stderr) for (index = 0; index < count; index++) { char *hash; int t = omp_get_thread_num(); if (t < MAX_THREADS) { struct crypt_data **data = &crypt_data[t]; if (!*data) { /* Stagger the structs to reduce their competition for the same cache lines */ size_t mask = MEM_ALIGN_PAGE, shift = 0; while (t) { mask >>= 1; if (mask < MEM_ALIGN_CACHE) break; if (t & 1) shift += mask; t >>= 1; } *data = (void *)((char *) mem_alloc_tiny(sizeof(**data) + shift, MEM_ALIGN_PAGE) + shift); memset(*data, 0, sizeof(**data)); } hash = crypt_r(saved_key[index], saved_salt, *data); } else { /* should not happen */ struct crypt_data data; memset(&data, 0, sizeof(data)); hash = crypt_r(saved_key[index], saved_salt, &data); } if (!hash) { #pragma omp critical if (!warned) { fprintf(stderr, "Warning: crypt_r() returned NULL\n"); warned = 1; } hash = ""; } strnzcpy(crypt_out[index], hash, BINARY_SIZE); } #else #if defined(_OPENMP) && defined(__sun) /* * crypt(3C) is MT-safe on Solaris. For traditional DES-based hashes, this is * implemented with locking (hence there's no speedup from the use of multiple * threads, and the per-thread performance is extremely poor anyway). For * modern hash types, the function is actually able to compute multiple hashes * in parallel by different threads (and the performance for some hash types is * reasonable). Overall, this code is reasonable to use for SHA-crypt and * SunMD5 hashes, which are not yet supported by non-jumbo John natively. 
 */
#pragma omp parallel for /* default(none) private(index) shared(warned, count, crypt_out, saved_key, saved_salt, stderr) or __iob */
#endif
	for (index = 0; index < count; index++) {
		char *hash = crypt(saved_key[index], saved_salt);
		if (!hash) {
#if defined(_OPENMP) && defined(__sun)
#pragma omp critical
#endif
			if (!warned) {
				fprintf(stderr,
				    "Warning: crypt() returned NULL\n");
				warned = 1;
			}
			hash = "";
		}
		strnzcpy(crypt_out[index], hash, BINARY_SIZE);
	}
#endif

	return count;
}

static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (!strcmp((char *)binary, crypt_out[index]))
			return 1;

	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !strcmp((char *)binary, crypt_out[index]);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

#if FMT_MAIN_VERSION > 11
/*
 * For generic crypt(3), the algorithm is returned as the first "tunable cost":
 * 0: unknown (shouldn't happen)
 * 1: descrypt
 * 2: md5crypt
 * 3: sunmd5
 * 4: bcrypt
 * 5: sha256crypt
 * 6: sha512crypt
 * New subformats should be added to the end of the list.
 * Otherwise, restored sessions might continue cracking different hashes
 * if the (not yet implemented) option --cost= had been used
 * when starting that session.
 */
static unsigned int c3_subformat_algorithm(void *salt)
{
	char *c3_salt;

	c3_salt = salt;

	if (!c3_salt[0] || !c3_salt[1] )
		return 0;
	if (!c3_salt[2])
		return 1;
	if (c3_salt[0] != '$')
		return 0;
	if (c3_salt[1] == '1')
		return 2;
	if (c3_salt[1] == 'm')
		return 3;
	if (c3_salt[1] == '2' && c3_salt[2] == 'a')
		return 4;
	if (c3_salt[1] == '5')
		return 5;
	if (c3_salt[1] == '6')
		return 6;
	return 0;
}

static unsigned int c3_algorithm_specific_cost1(void *salt)
{
	unsigned int algorithm, rounds;
	char *c3_salt;

	c3_salt = salt;
	algorithm = c3_subformat_algorithm(salt);

	if(algorithm < 3)
		/* no tunable cost parameters */
		return 1;

	switch (algorithm) {
		case 1: // DES
			return 25;
		case 2: // cryptmd5
			return 1000;
		case 3: // sun_md5
			c3_salt = strstr(c3_salt, "rounds=");
			if (!c3_salt) {
				return 904+4096; // default
			}
			sscanf(c3_salt, "rounds=%d", &rounds);
			return rounds+4096;
		case 4: // bf
			c3_salt += 4;
			sscanf(c3_salt, "%d", &rounds);
			return rounds;
		case 5:
		case 6:
			// sha256crypt and sha512crypt handled the same: $x$rounds=xxxx$salt$hash (or $x$salt$hash for 5000 round default);
			c3_salt += 3;
			if (strncmp(c3_salt, "rounds=", 7))
				return 5000; // default
			sscanf(c3_salt, "rounds=%d", &rounds);
			return rounds;
	}
	return 1;
}
#endif

struct fmt_main fmt_crypt = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{
			/*
			 * use algorithm as first tunable cost:
			 * (0: unknown)
			 * descrypt, md5crypt, sunmd5, bcrypt, sha512crypt, sha256crypt
			 */
			"algorithm [1:descrypt 2:md5crypt 3:sunmd5 4:bcrypt 5:sha256crypt 6:sha512crypt]",
			"algorithm specific iterations",
		},
#endif
		tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		binary,
		salt,
#if FMT_MAIN_VERSION > 11
		{
			c3_subformat_algorithm,
#if 1
			c3_algorithm_specific_cost1
#endif
		},
#endif
		fmt_default_source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			binary_hash_4,
			NULL,
			NULL
		},
		salt_hash,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			NULL,
			NULL
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif // HAVE_CRYPT
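/* A standalone illustration (not part of this format) of the staggering loop
 * in crypt_all() above: the bits of the thread index are mapped into the high
 * bits of a within-page byte offset, so each thread's crypt_data struct starts
 * on a different set of cache lines.  The values 4096 and 64 are assumed
 * stand-ins for MEM_ALIGN_PAGE and MEM_ALIGN_CACHE. */
#include <stdio.h>

static size_t stagger_offset(int t)
{
	size_t mask = 4096, shift = 0;
	while (t) {
		mask >>= 1;
		if (mask < 64)
			break;
		if (t & 1)
			shift += mask;
		t >>= 1;
	}
	return shift;
}

int main(void)
{
	int t;
	for (t = 0; t < 8; t++)
		printf("thread %d -> offset %zu\n", t, stagger_offset(t));
	/* prints 0, 2048, 1024, 3072, 512, 2560, 1536, 3584 */
	return 0;
}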
GB_binop__isle_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isle_fp64 // A.*B function (eWiseMult): GB_AemultB__isle_fp64 // A*D function (colscale): GB_AxD__isle_fp64 // D*A function (rowscale): GB_DxB__isle_fp64 // C+=B function (dense accum): GB_Cdense_accumB__isle_fp64 // C+=b function (dense accum): GB_Cdense_accumb__isle_fp64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isle_fp64 // C=scalar+B GB_bind1st__isle_fp64 // C=scalar+B' GB_bind1st_tran__isle_fp64 // C=A+scalar GB_bind2nd__isle_fp64 // C=A'+scalar GB_bind2nd_tran__isle_fp64 // C type: double // A type: double // B,b type: double // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x <= y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_FP64 || GxB_NO_ISLE_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__isle_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__isle_fp64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__isle_fp64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__isle_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__isle_fp64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

GrB_Info GB_AaddB__isle_fp64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__isle_fp64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__isle_fp64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double  x  = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        double bij = Bx [p] ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__isle_fp64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double  y  = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        double aij = Ax [p] ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    double aij = Ax [pA] ;          \
    Cx [pC] = (x <= aij) ;          \
}

GrB_Info GB_bind1st_tran__isle_fp64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    double aij = Ax [pA] ;          \
    Cx [pC] = (aij <= y) ;          \
}

GrB_Info GB_bind2nd_tran__isle_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
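//------------------------------------------------------------------------------
// editorial note: scalar model of the ISLE_FP64 operator (not generated code)
//------------------------------------------------------------------------------

// A minimal sketch, kept under "#if 0" like the disabled kernel above so it
// does not change what this generated file compiles.  It models what the
// bind2nd kernel computes for ISLE_FP64: Cx [p] = (Ax [p] <= y), with the
// boolean result stored as a double (1 or 0).  The function name is
// illustrative only and is not part of the GraphBLAS API.

#if 0
static void isle_fp64_bind2nd_model
(
    double *Cx,             // output array, size anz
    const double *Ax,       // input array, size anz
    const double y,         // the bound scalar (2nd operand of z = (x <= y))
    const int64_t anz       // number of entries
)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        Cx [p] = (double) (Ax [p] <= y) ;   // 1 if true, 0 if false
    }
}
#endif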
semantics.c
/* Perform the semantic phase of parsing, i.e., the process of
   building tree structure, checking semantic consistency, and
   building RTL.  These routines are used both during actual parsing
   and during the instantiation of template functions.

   Copyright (C) 1998-2015 Free Software Foundation, Inc.
   Written by Mark Mitchell ([email protected]) based on code found
   formerly in parse.y and pt.c.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "stmt.h"
#include "varasm.h"
#include "stor-layout.h"
#include "stringpool.h"
#include "cp-tree.h"
#include "c-family/c-common.h"
#include "c-family/c-objc.h"
#include "tree-inline.h"
#include "intl.h"
#include "toplev.h"
#include "flags.h"
#include "timevar.h"
#include "diagnostic.h"
#include "hash-map.h"
#include "is-a.h"
#include "plugin-api.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "ipa-ref.h"
#include "cgraph.h"
#include "tree-iterator.h"
#include "target.h"
#include "hash-table.h"
#include "gimplify.h"
#include "bitmap.h"
#include "omp-low.h"
#include "builtins.h"
#include "convert.h"
#include "gomp-constants.h"

/* These routines provide a modular interface to perform many parsing
   operations.  They may therefore be used during actual parsing, or
   during template instantiation, which may be regarded as a
   degenerate form of parsing.  */

static tree maybe_convert_cond (tree);
static tree finalize_nrv_r (tree *, int *, void *);
static tree capture_decltype (tree);

/* Deferred Access Checking Overview
   ---------------------------------

   Most C++ expressions and declarations require access checking
   to be performed during parsing.  However, in several cases,
   this has to be treated differently.

   For member declarations, access checking has to be deferred
   until more information about the declaration is known.  For
   example:

     class A {
       typedef int X;
     public:
       X f();
     };

     A::X A::f();
     A::X g();

   When we are parsing the function return type `A::X', we don't
   really know if this is allowed until we parse the function name.

   Furthermore, some contexts require that access checking is never
   performed at all.  These include class heads, and template
   instantiations.

   Typical use of access checking functions is described here:

   1. When we enter a context that requires certain access checking
      mode, the function `push_deferring_access_checks' is called with
      DEFERRING argument specifying the desired mode.  Access checking
      may be performed immediately (dk_no_deferred), deferred
      (dk_deferred), or not performed (dk_no_check).

   2. When a declaration such as a type, or a variable, is encountered,
      the function `perform_or_defer_access_check' is called.  It
      maintains a vector of all deferred checks.

   3. The global `current_class_type' or `current_function_decl' is then
      set up by the parser.  `enforce_access' relies on this information
      to check access.

   4. Upon exiting the context mentioned in step 1,
      `perform_deferred_access_checks' is called to check all
      declarations stored in the vector.  `pop_deferring_access_checks'
      is then called to restore the previous access checking mode.

      In case of parsing error, we simply call
      `pop_deferring_access_checks' without
      `perform_deferred_access_checks'.  */

typedef struct GTY(()) deferred_access {
  /* A vector representing name-lookups for which we have deferred
     checking access controls.  We cannot check the accessibility of
     names used in a decl-specifier-seq until we know what is being
     declared because code like:

       class A {
	 class B {};
	 B* f();
       }

       A::B* A::f() { return 0; }

     is valid, even though `A::B' is not generally accessible.  */
  vec<deferred_access_check, va_gc> * GTY(()) deferred_access_checks;

  /* The current mode of access checks.  */
  enum deferring_kind deferring_access_checks_kind;

} deferred_access;

/* Data for deferred access checking.  */
static GTY(()) vec<deferred_access, va_gc> *deferred_access_stack;
static GTY(()) unsigned deferred_access_no_check;

/* Save the current deferred access states and start a new mode of
   access checking as indicated by DEFERRING.  */

void
push_deferring_access_checks (deferring_kind deferring)
{
  /* For context like template instantiation, access checking
     disabling applies to all nested context.  */
  if (deferred_access_no_check || deferring == dk_no_check)
    deferred_access_no_check++;
  else
    {
      deferred_access e = {NULL, deferring};
      vec_safe_push (deferred_access_stack, e);
    }
}

/* Save the current deferred access states and start deferred access
   checking, continuing the set of deferred checks in CHECKS.  */

void
reopen_deferring_access_checks (vec<deferred_access_check, va_gc> * checks)
{
  push_deferring_access_checks (dk_deferred);
  if (!deferred_access_no_check)
    deferred_access_stack->last().deferred_access_checks = checks;
}

/* Resume deferring access checks again after we stopped doing
   this previously.  */

void
resume_deferring_access_checks (void)
{
  if (!deferred_access_no_check)
    deferred_access_stack->last().deferring_access_checks_kind = dk_deferred;
}

/* Stop deferring access checks.  */

void
stop_deferring_access_checks (void)
{
  if (!deferred_access_no_check)
    deferred_access_stack->last().deferring_access_checks_kind = dk_no_deferred;
}

/* Discard the current deferred access checks and restore the
   previous states.  */

void
pop_deferring_access_checks (void)
{
  if (deferred_access_no_check)
    deferred_access_no_check--;
  else
    deferred_access_stack->pop ();
}

/* Returns a TREE_LIST representing the deferred checks.  The TREE_PURPOSE
   of each node is the type through which the access occurred; the
   TREE_VALUE is the declaration named.  */

vec<deferred_access_check, va_gc> *
get_deferred_access_checks (void)
{
  if (deferred_access_no_check)
    return NULL;
  else
    return (deferred_access_stack->last().deferred_access_checks);
}

/* Take current deferred checks and combine with the previous states if
   we also defer checks previously.  Otherwise perform checks now.  */

void
pop_to_parent_deferring_access_checks (void)
{
  if (deferred_access_no_check)
    deferred_access_no_check--;
  else
    {
      vec<deferred_access_check, va_gc> *checks;
      deferred_access *ptr;

      checks = (deferred_access_stack->last ().deferred_access_checks);

      deferred_access_stack->pop ();
      ptr = &deferred_access_stack->last ();
      if (ptr->deferring_access_checks_kind == dk_no_deferred)
	{
	  /* Check access.  */
	  perform_access_checks (checks, tf_warning_or_error);
	}
      else
	{
	  /* Merge with parent.  */
	  int i, j;
	  deferred_access_check *chk, *probe;

	  FOR_EACH_VEC_SAFE_ELT (checks, i, chk)
	    {
	      FOR_EACH_VEC_SAFE_ELT (ptr->deferred_access_checks, j, probe)
		{
		  if (probe->binfo == chk->binfo
		      && probe->decl == chk->decl
		      && probe->diag_decl == chk->diag_decl)
		    goto found;
		}
	      /* Insert into parent's checks.  */
	      vec_safe_push (ptr->deferred_access_checks, *chk);
	    found:;
	    }
	}
    }
}

/* Perform the access checks in CHECKS.  The TREE_PURPOSE of each node
   is the BINFO indicating the qualifying scope used to access the
   DECL node stored in the TREE_VALUE of the node.  If CHECKS is empty
   or we aren't in SFINAE context or all the checks succeed return TRUE,
   otherwise FALSE.  */

bool
perform_access_checks (vec<deferred_access_check, va_gc> *checks,
		       tsubst_flags_t complain)
{
  int i;
  deferred_access_check *chk;
  location_t loc = input_location;
  bool ok = true;

  if (!checks)
    return true;

  FOR_EACH_VEC_SAFE_ELT (checks, i, chk)
    {
      input_location = chk->loc;
      ok &= enforce_access (chk->binfo, chk->decl, chk->diag_decl, complain);
    }

  input_location = loc;
  return (complain & tf_error) ? true : ok;
}

/* Perform the deferred access checks.

   After performing the checks, we still have to keep the list
   `deferred_access_stack->deferred_access_checks' since we may want
   to check access for them again later in a different context.
   For example:

     class A {
       typedef int X;
       static X a;
     };
     A::X A::a, x;	// No error for `A::a', error for `x'

   We have to perform deferred access of `A::X', first with `A::a',
   next with `x'.  Return value like perform_access_checks above.  */

bool
perform_deferred_access_checks (tsubst_flags_t complain)
{
  return perform_access_checks (get_deferred_access_checks (), complain);
}

/* Defer checking the accessibility of DECL, when looked up in
   BINFO.  DIAG_DECL is the declaration to use to print diagnostics.
   Return value like perform_access_checks above.  */

bool
perform_or_defer_access_check (tree binfo, tree decl, tree diag_decl,
			       tsubst_flags_t complain)
{
  int i;
  deferred_access *ptr;
  deferred_access_check *chk;

  /* Exit if we are in a context in which no access checking is
     performed.  */
  if (deferred_access_no_check)
    return true;

  gcc_assert (TREE_CODE (binfo) == TREE_BINFO);

  ptr = &deferred_access_stack->last ();

  /* If we are not supposed to defer access checks, just check now.  */
  if (ptr->deferring_access_checks_kind == dk_no_deferred)
    {
      bool ok = enforce_access (binfo, decl, diag_decl, complain);
      return (complain & tf_error) ? true : ok;
    }

  /* See if we are already going to perform this check.  */
  FOR_EACH_VEC_SAFE_ELT (ptr->deferred_access_checks, i, chk)
    {
      if (chk->decl == decl && chk->binfo == binfo
	  && chk->diag_decl == diag_decl)
	{
	  return true;
	}
    }
  /* If not, record the check.  */
  deferred_access_check new_access = {binfo, decl, diag_decl, input_location};
  vec_safe_push (ptr->deferred_access_checks, new_access);

  return true;
}

/* Returns nonzero if the current statement is a full expression,
   i.e. temporaries created during that statement should be destroyed
   at the end of the statement.  */

int
stmts_are_full_exprs_p (void)
{
  return current_stmt_tree ()->stmts_are_full_exprs_p;
}

/* T is a statement.  Add it to the statement-tree.  This is the C++
   version.  The C/ObjC frontends have a slightly different version of
   this function.
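   A typical call looks like (a usage sketch; compare finish_expr_stmt
   below, which builds exactly this pattern):

     add_stmt (build_stmt (input_location, EXPR_STMT, expr));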
*/ tree add_stmt (tree t) { enum tree_code code = TREE_CODE (t); if (EXPR_P (t) && code != LABEL_EXPR) { if (!EXPR_HAS_LOCATION (t)) SET_EXPR_LOCATION (t, input_location); /* When we expand a statement-tree, we must know whether or not the statements are full-expressions. We record that fact here. */ STMT_IS_FULL_EXPR_P (t) = stmts_are_full_exprs_p (); } if (code == LABEL_EXPR || code == CASE_LABEL_EXPR) STATEMENT_LIST_HAS_LABEL (cur_stmt_list) = 1; /* Add T to the statement-tree. Non-side-effect statements need to be recorded during statement expressions. */ gcc_checking_assert (!stmt_list_stack->is_empty ()); append_to_statement_list_force (t, &cur_stmt_list); return t; } /* Returns the stmt_tree to which statements are currently being added. */ stmt_tree current_stmt_tree (void) { return (cfun ? &cfun->language->base.x_stmt_tree : &scope_chain->x_stmt_tree); } /* If statements are full expressions, wrap STMT in a CLEANUP_POINT_EXPR. */ static tree maybe_cleanup_point_expr (tree expr) { if (!processing_template_decl && stmts_are_full_exprs_p ()) expr = fold_build_cleanup_point_expr (TREE_TYPE (expr), expr); return expr; } /* Like maybe_cleanup_point_expr except have the type of the new expression be void so we don't need to create a temporary variable to hold the inner expression. The reason why we do this is because the original type might be an aggregate and we cannot create a temporary variable for that type. */ tree maybe_cleanup_point_expr_void (tree expr) { if (!processing_template_decl && stmts_are_full_exprs_p ()) expr = fold_build_cleanup_point_expr (void_type_node, expr); return expr; } /* Create a declaration statement for the declaration given by the DECL. */ void add_decl_expr (tree decl) { tree r = build_stmt (input_location, DECL_EXPR, decl); if (DECL_INITIAL (decl) || (DECL_SIZE (decl) && TREE_SIDE_EFFECTS (DECL_SIZE (decl)))) r = maybe_cleanup_point_expr_void (r); add_stmt (r); } /* Finish a scope. */ tree do_poplevel (tree stmt_list) { tree block = NULL; if (stmts_are_full_exprs_p ()) block = poplevel (kept_level_p (), 1, 0); stmt_list = pop_stmt_list (stmt_list); if (!processing_template_decl) { stmt_list = c_build_bind_expr (input_location, block, stmt_list); /* ??? See c_end_compound_stmt re statement expressions. */ } return stmt_list; } /* Begin a new scope. */ static tree do_pushlevel (scope_kind sk) { tree ret = push_stmt_list (); if (stmts_are_full_exprs_p ()) begin_scope (sk, NULL); return ret; } /* Queue a cleanup. CLEANUP is an expression/statement to be executed when the current scope is exited. EH_ONLY is true when this is not meant to apply to normal control flow transfer. */ void push_cleanup (tree decl, tree cleanup, bool eh_only) { tree stmt = build_stmt (input_location, CLEANUP_STMT, NULL, cleanup, decl); CLEANUP_EH_ONLY (stmt) = eh_only; add_stmt (stmt); CLEANUP_BODY (stmt) = push_stmt_list (); } /* Simple infinite loop tracking for -Wreturn-type. We keep a stack of all the current loops, represented by 'NULL_TREE' if we've seen a possible exit, and 'error_mark_node' if not. This is currently used only to suppress the warning about a function with no return statements, and therefore we don't bother noting returns as possible exits. We also don't bother with gotos. */ static void begin_maybe_infinite_loop (tree cond) { /* Only track this while parsing a function, not during instantiation. 
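   For instance (an illustrative example, not from the original
   sources), a function whose body is just `for (;;) ;' pushes a
   maybe-infinite entry here, which later keeps -Wreturn-type quiet
   even though no return statement is ever seen.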
*/ if (!cfun || (DECL_TEMPLATE_INSTANTIATION (current_function_decl) && !processing_template_decl)) return; bool maybe_infinite = true; if (cond) { cond = fold_non_dependent_expr (cond); maybe_infinite = integer_nonzerop (cond); } vec_safe_push (cp_function_chain->infinite_loops, maybe_infinite ? error_mark_node : NULL_TREE); } /* A break is a possible exit for the current loop. */ void break_maybe_infinite_loop (void) { if (!cfun) return; cp_function_chain->infinite_loops->last() = NULL_TREE; } /* If we reach the end of the loop without seeing a possible exit, we have an infinite loop. */ static void end_maybe_infinite_loop (tree cond) { if (!cfun || (DECL_TEMPLATE_INSTANTIATION (current_function_decl) && !processing_template_decl)) return; tree current = cp_function_chain->infinite_loops->pop(); if (current != NULL_TREE) { cond = fold_non_dependent_expr (cond); if (integer_nonzerop (cond)) current_function_infinite_loop = 1; } } /* Begin a conditional that might contain a declaration. When generating normal code, we want the declaration to appear before the statement containing the conditional. When generating template code, we want the conditional to be rendered as the raw DECL_EXPR. */ static void begin_cond (tree *cond_p) { if (processing_template_decl) *cond_p = push_stmt_list (); } /* Finish such a conditional. */ static void finish_cond (tree *cond_p, tree expr) { if (processing_template_decl) { tree cond = pop_stmt_list (*cond_p); if (expr == NULL_TREE) /* Empty condition in 'for'. */ gcc_assert (empty_expr_stmt_p (cond)); else if (check_for_bare_parameter_packs (expr)) expr = error_mark_node; else if (!empty_expr_stmt_p (cond)) expr = build2 (COMPOUND_EXPR, TREE_TYPE (expr), cond, expr); } *cond_p = expr; } /* If *COND_P specifies a conditional with a declaration, transform the loop such that while (A x = 42) { } for (; A x = 42;) { } becomes while (true) { A x = 42; if (!x) break; } for (;;) { A x = 42; if (!x) break; } The statement list for BODY will be empty if the conditional did not declare anything. */ static void simplify_loop_decl_cond (tree *cond_p, tree body) { tree cond, if_stmt; if (!TREE_SIDE_EFFECTS (body)) return; cond = *cond_p; *cond_p = boolean_true_node; if_stmt = begin_if_stmt (); cond = cp_build_unary_op (TRUTH_NOT_EXPR, cond, 0, tf_warning_or_error); finish_if_stmt_cond (cond, if_stmt); finish_break_stmt (); finish_then_clause (if_stmt); finish_if_stmt (if_stmt); } /* Finish a goto-statement. */ tree finish_goto_stmt (tree destination) { if (identifier_p (destination)) destination = lookup_label (destination); /* We warn about unused labels with -Wunused. That means we have to mark the used labels as used. */ if (TREE_CODE (destination) == LABEL_DECL) TREE_USED (destination) = 1; else { if (check_no_cilk (destination, "Cilk array notation cannot be used as a computed goto expression", "%<_Cilk_spawn%> statement cannot be used as a computed goto expression")) destination = error_mark_node; destination = mark_rvalue_use (destination); if (!processing_template_decl) { destination = cp_convert (ptr_type_node, destination, tf_warning_or_error); if (error_operand_p (destination)) return NULL_TREE; destination = fold_build_cleanup_point_expr (TREE_TYPE (destination), destination); } } check_goto (destination); return add_stmt (build_stmt (input_location, GOTO_EXPR, destination)); } /* COND is the condition-expression for an if, while, etc., statement. Convert it to a boolean value, if appropriate. In addition, verify sequence points if -Wsequence-point is enabled. 
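   For example (illustrative): `if (x = 0)' is converted to a boolean
   here and gets the -Wparentheses suggestion emitted below, while a
   doubly-parenthesized `if ((x = 0))' does not, because
   finish_parenthesized_expr has already set TREE_NO_WARNING on it.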
*/ static tree maybe_convert_cond (tree cond) { /* Empty conditions remain empty. */ if (!cond) return NULL_TREE; /* Wait until we instantiate templates before doing conversion. */ if (processing_template_decl) return cond; if (warn_sequence_point) verify_sequence_points (cond); /* Do the conversion. */ cond = convert_from_reference (cond); if (TREE_CODE (cond) == MODIFY_EXPR && !TREE_NO_WARNING (cond) && warn_parentheses) { warning (OPT_Wparentheses, "suggest parentheses around assignment used as truth value"); TREE_NO_WARNING (cond) = 1; } return condition_conversion (cond); } /* Finish an expression-statement, whose EXPRESSION is as indicated. */ tree finish_expr_stmt (tree expr) { tree r = NULL_TREE; if (expr != NULL_TREE) { if (!processing_template_decl) { if (warn_sequence_point) verify_sequence_points (expr); expr = convert_to_void (expr, ICV_STATEMENT, tf_warning_or_error); } else if (!type_dependent_expression_p (expr)) convert_to_void (build_non_dependent_expr (expr), ICV_STATEMENT, tf_warning_or_error); if (check_for_bare_parameter_packs (expr)) expr = error_mark_node; /* Simplification of inner statement expressions, compound exprs, etc can result in us already having an EXPR_STMT. */ if (TREE_CODE (expr) != CLEANUP_POINT_EXPR) { if (TREE_CODE (expr) != EXPR_STMT) expr = build_stmt (input_location, EXPR_STMT, expr); expr = maybe_cleanup_point_expr_void (expr); } r = add_stmt (expr); } return r; } /* Begin an if-statement. Returns a newly created IF_STMT if appropriate. */ tree begin_if_stmt (void) { tree r, scope; scope = do_pushlevel (sk_cond); r = build_stmt (input_location, IF_STMT, NULL_TREE, NULL_TREE, NULL_TREE, scope); begin_cond (&IF_COND (r)); return r; } /* Process the COND of an if-statement, which may be given by IF_STMT. */ void finish_if_stmt_cond (tree cond, tree if_stmt) { finish_cond (&IF_COND (if_stmt), maybe_convert_cond (cond)); add_stmt (if_stmt); THEN_CLAUSE (if_stmt) = push_stmt_list (); } /* Finish the then-clause of an if-statement, which may be given by IF_STMT. */ tree finish_then_clause (tree if_stmt) { THEN_CLAUSE (if_stmt) = pop_stmt_list (THEN_CLAUSE (if_stmt)); return if_stmt; } /* Begin the else-clause of an if-statement. */ void begin_else_clause (tree if_stmt) { ELSE_CLAUSE (if_stmt) = push_stmt_list (); } /* Finish the else-clause of an if-statement, which may be given by IF_STMT. */ void finish_else_clause (tree if_stmt) { ELSE_CLAUSE (if_stmt) = pop_stmt_list (ELSE_CLAUSE (if_stmt)); } /* Finish an if-statement. */ void finish_if_stmt (tree if_stmt) { tree scope = IF_SCOPE (if_stmt); IF_SCOPE (if_stmt) = NULL; add_stmt (do_poplevel (scope)); } /* Begin a while-statement. Returns a newly created WHILE_STMT if appropriate. */ tree begin_while_stmt (void) { tree r; r = build_stmt (input_location, WHILE_STMT, NULL_TREE, NULL_TREE); add_stmt (r); WHILE_BODY (r) = do_pushlevel (sk_block); begin_cond (&WHILE_COND (r)); return r; } /* Process the COND of a while-statement, which may be given by WHILE_STMT. 
*/ void finish_while_stmt_cond (tree cond, tree while_stmt, bool ivdep) { if (check_no_cilk (cond, "Cilk array notation cannot be used as a condition for while statement", "%<_Cilk_spawn%> statement cannot be used as a condition for while statement")) cond = error_mark_node; cond = maybe_convert_cond (cond); finish_cond (&WHILE_COND (while_stmt), cond); begin_maybe_infinite_loop (cond); if (ivdep && cond != error_mark_node) WHILE_COND (while_stmt) = build2 (ANNOTATE_EXPR, TREE_TYPE (WHILE_COND (while_stmt)), WHILE_COND (while_stmt), build_int_cst (integer_type_node, annot_expr_ivdep_kind)); simplify_loop_decl_cond (&WHILE_COND (while_stmt), WHILE_BODY (while_stmt)); } /* Finish a while-statement, which may be given by WHILE_STMT. */ void finish_while_stmt (tree while_stmt) { end_maybe_infinite_loop (boolean_true_node); WHILE_BODY (while_stmt) = do_poplevel (WHILE_BODY (while_stmt)); } /* Begin a do-statement. Returns a newly created DO_STMT if appropriate. */ tree begin_do_stmt (void) { tree r = build_stmt (input_location, DO_STMT, NULL_TREE, NULL_TREE); begin_maybe_infinite_loop (boolean_true_node); add_stmt (r); DO_BODY (r) = push_stmt_list (); return r; } /* Finish the body of a do-statement, which may be given by DO_STMT. */ void finish_do_body (tree do_stmt) { tree body = DO_BODY (do_stmt) = pop_stmt_list (DO_BODY (do_stmt)); if (TREE_CODE (body) == STATEMENT_LIST && STATEMENT_LIST_TAIL (body)) body = STATEMENT_LIST_TAIL (body)->stmt; if (IS_EMPTY_STMT (body)) warning (OPT_Wempty_body, "suggest explicit braces around empty body in %<do%> statement"); } /* Finish a do-statement, which may be given by DO_STMT, and whose COND is as indicated. */ void finish_do_stmt (tree cond, tree do_stmt, bool ivdep) { if (check_no_cilk (cond, "Cilk array notation cannot be used as a condition for a do-while statement", "%<_Cilk_spawn%> statement cannot be used as a condition for a do-while statement")) cond = error_mark_node; cond = maybe_convert_cond (cond); end_maybe_infinite_loop (cond); if (ivdep && cond != error_mark_node) cond = build2 (ANNOTATE_EXPR, TREE_TYPE (cond), cond, build_int_cst (integer_type_node, annot_expr_ivdep_kind)); DO_COND (do_stmt) = cond; } /* Finish a return-statement. The EXPRESSION returned, if any, is as indicated. */ tree finish_return_stmt (tree expr) { tree r; bool no_warning; expr = check_return_expr (expr, &no_warning); if (error_operand_p (expr) || (flag_openmp && !check_omp_return ())) { /* Suppress -Wreturn-type for this function. */ if (warn_return_type) TREE_NO_WARNING (current_function_decl) = true; return error_mark_node; } if (!processing_template_decl) { if (warn_sequence_point) verify_sequence_points (expr); if (DECL_DESTRUCTOR_P (current_function_decl) || (DECL_CONSTRUCTOR_P (current_function_decl) && targetm.cxx.cdtor_returns_this ())) { /* Similarly, all destructors must run destructors for base-classes before returning. So, all returns in a destructor get sent to the DTOR_LABEL; finish_function emits code to return a value there. */ return finish_goto_stmt (cdtor_label); } } r = build_stmt (input_location, RETURN_EXPR, expr); TREE_NO_WARNING (r) |= no_warning; r = maybe_cleanup_point_expr_void (r); r = add_stmt (r); return r; } /* Begin the scope of a for-statement or a range-for-statement. Both the returned trees are to be used in a call to begin_for_stmt or begin_range_for_stmt. 
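   A usage sketch (parser-side, illustrative):

     tree init;
     tree scope = begin_for_scope (&init);
     tree stmt = begin_for_stmt (scope, init);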
*/ tree begin_for_scope (tree *init) { tree scope = NULL_TREE; if (flag_new_for_scope > 0) scope = do_pushlevel (sk_for); if (processing_template_decl) *init = push_stmt_list (); else *init = NULL_TREE; return scope; } /* Begin a for-statement. Returns a new FOR_STMT. SCOPE and INIT should be the return of begin_for_scope, or both NULL_TREE */ tree begin_for_stmt (tree scope, tree init) { tree r; r = build_stmt (input_location, FOR_STMT, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE); if (scope == NULL_TREE) { gcc_assert (!init || !(flag_new_for_scope > 0)); if (!init) scope = begin_for_scope (&init); } FOR_INIT_STMT (r) = init; FOR_SCOPE (r) = scope; return r; } /* Finish the for-init-statement of a for-statement, which may be given by FOR_STMT. */ void finish_for_init_stmt (tree for_stmt) { if (processing_template_decl) FOR_INIT_STMT (for_stmt) = pop_stmt_list (FOR_INIT_STMT (for_stmt)); add_stmt (for_stmt); FOR_BODY (for_stmt) = do_pushlevel (sk_block); begin_cond (&FOR_COND (for_stmt)); } /* Finish the COND of a for-statement, which may be given by FOR_STMT. */ void finish_for_cond (tree cond, tree for_stmt, bool ivdep) { if (check_no_cilk (cond, "Cilk array notation cannot be used in a condition for a for-loop", "%<_Cilk_spawn%> statement cannot be used in a condition for a for-loop")) cond = error_mark_node; cond = maybe_convert_cond (cond); finish_cond (&FOR_COND (for_stmt), cond); begin_maybe_infinite_loop (cond); if (ivdep && cond != error_mark_node) FOR_COND (for_stmt) = build2 (ANNOTATE_EXPR, TREE_TYPE (FOR_COND (for_stmt)), FOR_COND (for_stmt), build_int_cst (integer_type_node, annot_expr_ivdep_kind)); simplify_loop_decl_cond (&FOR_COND (for_stmt), FOR_BODY (for_stmt)); } /* Finish the increment-EXPRESSION in a for-statement, which may be given by FOR_STMT. */ void finish_for_expr (tree expr, tree for_stmt) { if (!expr) return; /* If EXPR is an overloaded function, issue an error; there is no context available to use to perform overload resolution. */ if (type_unknown_p (expr)) { cxx_incomplete_type_error (expr, TREE_TYPE (expr)); expr = error_mark_node; } if (!processing_template_decl) { if (warn_sequence_point) verify_sequence_points (expr); expr = convert_to_void (expr, ICV_THIRD_IN_FOR, tf_warning_or_error); } else if (!type_dependent_expression_p (expr)) convert_to_void (build_non_dependent_expr (expr), ICV_THIRD_IN_FOR, tf_warning_or_error); expr = maybe_cleanup_point_expr_void (expr); if (check_for_bare_parameter_packs (expr)) expr = error_mark_node; FOR_EXPR (for_stmt) = expr; } /* Finish the body of a for-statement, which may be given by FOR_STMT. The increment-EXPR for the loop must be provided. It can also finish RANGE_FOR_STMT. */ void finish_for_stmt (tree for_stmt) { end_maybe_infinite_loop (boolean_true_node); if (TREE_CODE (for_stmt) == RANGE_FOR_STMT) RANGE_FOR_BODY (for_stmt) = do_poplevel (RANGE_FOR_BODY (for_stmt)); else FOR_BODY (for_stmt) = do_poplevel (FOR_BODY (for_stmt)); /* Pop the scope for the body of the loop. */ if (flag_new_for_scope > 0) { tree scope; tree *scope_ptr = (TREE_CODE (for_stmt) == RANGE_FOR_STMT ? &RANGE_FOR_SCOPE (for_stmt) : &FOR_SCOPE (for_stmt)); scope = *scope_ptr; *scope_ptr = NULL; add_stmt (do_poplevel (scope)); } } /* Begin a range-for-statement. Returns a new RANGE_FOR_STMT. SCOPE and INIT should be the return of begin_for_scope, or both NULL_TREE . To finish it call finish_for_stmt(). 
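   The overall sequence for `for (decl : expr)' is roughly (a sketch):

     stmt = begin_range_for_stmt (scope, init);
     finish_range_for_decl (stmt, decl, expr);
     ... parse the body ...
     finish_for_stmt (stmt);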
*/ tree begin_range_for_stmt (tree scope, tree init) { tree r; begin_maybe_infinite_loop (boolean_false_node); r = build_stmt (input_location, RANGE_FOR_STMT, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE); if (scope == NULL_TREE) { gcc_assert (!init || !(flag_new_for_scope > 0)); if (!init) scope = begin_for_scope (&init); } /* RANGE_FOR_STMTs do not use nor save the init tree, so we pop it now. */ if (init) pop_stmt_list (init); RANGE_FOR_SCOPE (r) = scope; return r; } /* Finish the head of a range-based for statement, which may be given by RANGE_FOR_STMT. DECL must be the declaration and EXPR must be the loop expression. */ void finish_range_for_decl (tree range_for_stmt, tree decl, tree expr) { RANGE_FOR_DECL (range_for_stmt) = decl; RANGE_FOR_EXPR (range_for_stmt) = expr; add_stmt (range_for_stmt); RANGE_FOR_BODY (range_for_stmt) = do_pushlevel (sk_block); } /* Finish a break-statement. */ tree finish_break_stmt (void) { /* In switch statements break is sometimes stylistically used after a return statement. This can lead to spurious warnings about control reaching the end of a non-void function when it is inlined. Note that we are calling block_may_fallthru with language specific tree nodes; this works because block_may_fallthru returns true when given something it does not understand. */ if (!block_may_fallthru (cur_stmt_list)) return void_node; return add_stmt (build_stmt (input_location, BREAK_STMT)); } /* Finish a continue-statement. */ tree finish_continue_stmt (void) { return add_stmt (build_stmt (input_location, CONTINUE_STMT)); } /* Begin a switch-statement. Returns a new SWITCH_STMT if appropriate. */ tree begin_switch_stmt (void) { tree r, scope; scope = do_pushlevel (sk_cond); r = build_stmt (input_location, SWITCH_STMT, NULL_TREE, NULL_TREE, NULL_TREE, scope); begin_cond (&SWITCH_STMT_COND (r)); return r; } /* Finish the cond of a switch-statement. */ void finish_switch_cond (tree cond, tree switch_stmt) { tree orig_type = NULL; if (check_no_cilk (cond, "Cilk array notation cannot be used as a condition for switch statement", "%<_Cilk_spawn%> statement cannot be used as a condition for switch statement")) cond = error_mark_node; if (!processing_template_decl) { /* Convert the condition to an integer or enumeration type. */ cond = build_expr_type_conversion (WANT_INT | WANT_ENUM, cond, true); if (cond == NULL_TREE) { error ("switch quantity not an integer"); cond = error_mark_node; } /* We want unlowered type here to handle enum bit-fields. */ orig_type = unlowered_expr_type (cond); if (TREE_CODE (orig_type) != ENUMERAL_TYPE) orig_type = TREE_TYPE (cond); if (cond != error_mark_node) { /* Warn if the condition has boolean value. */ if (TREE_CODE (orig_type) == BOOLEAN_TYPE) warning_at (input_location, OPT_Wswitch_bool, "switch condition has type bool"); /* [stmt.switch] Integral promotions are performed. */ cond = perform_integral_promotions (cond); cond = maybe_cleanup_point_expr (cond); } } if (check_for_bare_parameter_packs (cond)) cond = error_mark_node; else if (!processing_template_decl && warn_sequence_point) verify_sequence_points (cond); finish_cond (&SWITCH_STMT_COND (switch_stmt), cond); SWITCH_STMT_TYPE (switch_stmt) = orig_type; add_stmt (switch_stmt); push_switch (switch_stmt); SWITCH_STMT_BODY (switch_stmt) = push_stmt_list (); } /* Finish the body of a switch-statement, which may be given by SWITCH_STMT. The COND to switch on is indicated. 
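   The matching call sequence is begin_switch_stmt, finish_switch_cond,
   then this function (a sketch; see those functions above).  Note that
   finish_switch_cond warns under -Wswitch-bool for conditions of
   boolean type, e.g. `switch (flag)' where `flag' is a bool.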
*/

void
finish_switch_stmt (tree switch_stmt)
{
  tree scope;

  SWITCH_STMT_BODY (switch_stmt) =
    pop_stmt_list (SWITCH_STMT_BODY (switch_stmt));
  pop_switch ();

  scope = SWITCH_STMT_SCOPE (switch_stmt);
  SWITCH_STMT_SCOPE (switch_stmt) = NULL;
  add_stmt (do_poplevel (scope));
}

/* Begin a try-block.  Returns a newly-created TRY_BLOCK if
   appropriate.  */

tree
begin_try_block (void)
{
  tree r = build_stmt (input_location, TRY_BLOCK, NULL_TREE, NULL_TREE);
  add_stmt (r);
  TRY_STMTS (r) = push_stmt_list ();
  return r;
}

/* Likewise, for a function-try-block.  The block returned in
   *COMPOUND_STMT is an artificial outer scope, containing the
   function-try-block.  */

tree
begin_function_try_block (tree *compound_stmt)
{
  tree r;
  /* This outer scope does not exist in the C++ standard, but we need
     a place to put __FUNCTION__ and similar variables.  */
  *compound_stmt = begin_compound_stmt (0);
  r = begin_try_block ();
  FN_TRY_BLOCK_P (r) = 1;
  return r;
}

/* Finish a try-block, which may be given by TRY_BLOCK.  */

void
finish_try_block (tree try_block)
{
  TRY_STMTS (try_block) = pop_stmt_list (TRY_STMTS (try_block));
  TRY_HANDLERS (try_block) = push_stmt_list ();
}

/* Finish the body of a cleanup try-block, which may be given by
   TRY_BLOCK.  */

void
finish_cleanup_try_block (tree try_block)
{
  TRY_STMTS (try_block) = pop_stmt_list (TRY_STMTS (try_block));
}

/* Finish an implicitly generated try-block, whose cleanup is given by
   CLEANUP.  */

void
finish_cleanup (tree cleanup, tree try_block)
{
  TRY_HANDLERS (try_block) = cleanup;
  CLEANUP_P (try_block) = 1;
}

/* Likewise, for a function-try-block.  */

void
finish_function_try_block (tree try_block)
{
  finish_try_block (try_block);
  /* FIXME: something queer about CTOR_INITIALIZER somehow following
     the try block, but moving it inside.  */
  in_function_try_handler = 1;
}

/* Finish a handler-sequence for a try-block, which may be given by
   TRY_BLOCK.  */

void
finish_handler_sequence (tree try_block)
{
  TRY_HANDLERS (try_block) = pop_stmt_list (TRY_HANDLERS (try_block));
  check_handlers (TRY_HANDLERS (try_block));
}

/* Finish the handler-seq for a function-try-block, given by
   TRY_BLOCK.  COMPOUND_STMT is the outer block created by
   begin_function_try_block.  */

void
finish_function_handler_sequence (tree try_block, tree compound_stmt)
{
  in_function_try_handler = 0;
  finish_handler_sequence (try_block);
  finish_compound_stmt (compound_stmt);
}

/* Begin a handler.  Returns a HANDLER if appropriate.  */

tree
begin_handler (void)
{
  tree r;

  r = build_stmt (input_location, HANDLER, NULL_TREE, NULL_TREE);
  add_stmt (r);

  /* Create a binding level for the eh_info and the exception object
     cleanup.  */
  HANDLER_BODY (r) = do_pushlevel (sk_catch);

  return r;
}

/* Finish the handler-parameters for a handler, which may be given by
   HANDLER.  DECL is the declaration for the catch parameter, or NULL
   if this is a `catch (...)' clause.  */

void
finish_handler_parms (tree decl, tree handler)
{
  tree type = NULL_TREE;
  if (processing_template_decl)
    {
      if (decl)
	{
	  decl = pushdecl (decl);
	  decl = push_template_decl (decl);
	  HANDLER_PARMS (handler) = decl;
	  type = TREE_TYPE (decl);
	}
    }
  else
    type = expand_start_catch_block (decl);
  HANDLER_TYPE (handler) = type;
}

/* Finish a handler, which may be given by HANDLER.  The BLOCKs are
   the return value from the matching call to finish_handler_parms.  */

void
finish_handler (tree handler)
{
  if (!processing_template_decl)
    expand_end_catch_block ();
  HANDLER_BODY (handler) = do_poplevel (HANDLER_BODY (handler));
}

/* Begin a compound statement.
FLAGS contains some bits that control the behavior and context. If BCS_NO_SCOPE is set, the compound statement does not define a scope. If BCS_FN_BODY is set, this is the outermost block of a function. If BCS_TRY_BLOCK is set, this is the block created on behalf of a TRY statement. Returns a token to be passed to finish_compound_stmt. */ tree begin_compound_stmt (unsigned int flags) { tree r; if (flags & BCS_NO_SCOPE) { r = push_stmt_list (); STATEMENT_LIST_NO_SCOPE (r) = 1; /* Normally, we try hard to keep the BLOCK for a statement-expression. But, if it's a statement-expression with a scopeless block, there's nothing to keep, and we don't want to accidentally keep a block *inside* the scopeless block. */ keep_next_level (false); } else r = do_pushlevel (flags & BCS_TRY_BLOCK ? sk_try : sk_block); /* When processing a template, we need to remember where the braces were, so that we can set up identical scopes when instantiating the template later. BIND_EXPR is a handy candidate for this. Note that do_poplevel won't create a BIND_EXPR itself here (and thus result in nested BIND_EXPRs), since we don't build BLOCK nodes when processing templates. */ if (processing_template_decl) { r = build3 (BIND_EXPR, NULL, NULL, r, NULL); BIND_EXPR_TRY_BLOCK (r) = (flags & BCS_TRY_BLOCK) != 0; BIND_EXPR_BODY_BLOCK (r) = (flags & BCS_FN_BODY) != 0; TREE_SIDE_EFFECTS (r) = 1; } return r; } /* Finish a compound-statement, which is given by STMT. */ void finish_compound_stmt (tree stmt) { if (TREE_CODE (stmt) == BIND_EXPR) { tree body = do_poplevel (BIND_EXPR_BODY (stmt)); /* If the STATEMENT_LIST is empty and this BIND_EXPR isn't special, discard the BIND_EXPR so it can be merged with the containing STATEMENT_LIST. */ if (TREE_CODE (body) == STATEMENT_LIST && STATEMENT_LIST_HEAD (body) == NULL && !BIND_EXPR_BODY_BLOCK (stmt) && !BIND_EXPR_TRY_BLOCK (stmt)) stmt = body; else BIND_EXPR_BODY (stmt) = body; } else if (STATEMENT_LIST_NO_SCOPE (stmt)) stmt = pop_stmt_list (stmt); else { /* Destroy any ObjC "super" receivers that may have been created. */ objc_clear_super_receiver (); stmt = do_poplevel (stmt); } /* ??? See c_end_compound_stmt wrt statement expressions. */ add_stmt (stmt); } /* Finish an asm-statement, whose components are a STRING, some OUTPUT_OPERANDS, some INPUT_OPERANDS, some CLOBBERS and some LABELS. Also note whether the asm-statement should be considered volatile. */ tree finish_asm_stmt (int volatile_p, tree string, tree output_operands, tree input_operands, tree clobbers, tree labels) { tree r; tree t; int ninputs = list_length (input_operands); int noutputs = list_length (output_operands); if (!processing_template_decl) { const char *constraint; const char **oconstraints; bool allows_mem, allows_reg, is_inout; tree operand; int i; oconstraints = XALLOCAVEC (const char *, noutputs); string = resolve_asm_operand_names (string, output_operands, input_operands, labels); for (i = 0, t = output_operands; t; t = TREE_CHAIN (t), ++i) { operand = TREE_VALUE (t); /* ??? Really, this should not be here. Users should be using a proper lvalue, dammit. But there's a long history of using casts in the output operands. In cases like longlong.h, this becomes a primitive form of typechecking -- if the cast can be removed, then the output operand had a type of the proper width; otherwise we'll get an error. Gross, but ... 
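   An illustrative fragment in the style of longlong.h (USItype is a
   32-bit typedef there; the operand names are made up):

     asm ("addl %2,%0" : "=r" ((USItype) (sh))
	  : "0" ((USItype) (ah)), "g" ((USItype) (bh)));

   The cast around the output operand `sh' is stripped below.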
*/ STRIP_NOPS (operand); operand = mark_lvalue_use (operand); if (!lvalue_or_else (operand, lv_asm, tf_warning_or_error)) operand = error_mark_node; if (operand != error_mark_node && (TREE_READONLY (operand) || CP_TYPE_CONST_P (TREE_TYPE (operand)) /* Functions are not modifiable, even though they are lvalues. */ || TREE_CODE (TREE_TYPE (operand)) == FUNCTION_TYPE || TREE_CODE (TREE_TYPE (operand)) == METHOD_TYPE /* If it's an aggregate and any field is const, then it is effectively const. */ || (CLASS_TYPE_P (TREE_TYPE (operand)) && C_TYPE_FIELDS_READONLY (TREE_TYPE (operand))))) cxx_readonly_error (operand, lv_asm); constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t))); oconstraints[i] = constraint; if (parse_output_constraint (&constraint, i, ninputs, noutputs, &allows_mem, &allows_reg, &is_inout)) { /* If the operand is going to end up in memory, mark it addressable. */ if (!allows_reg && !cxx_mark_addressable (operand)) operand = error_mark_node; } else operand = error_mark_node; TREE_VALUE (t) = operand; } for (i = 0, t = input_operands; t; ++i, t = TREE_CHAIN (t)) { constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t))); bool constraint_parsed = parse_input_constraint (&constraint, i, ninputs, noutputs, 0, oconstraints, &allows_mem, &allows_reg); /* If the operand is going to end up in memory, don't call decay_conversion. */ if (constraint_parsed && !allows_reg && allows_mem) operand = mark_lvalue_use (TREE_VALUE (t)); else operand = decay_conversion (TREE_VALUE (t), tf_warning_or_error); /* If the type of the operand hasn't been determined (e.g., because it involves an overloaded function), then issue an error message. There's no context available to resolve the overloading. */ if (TREE_TYPE (operand) == unknown_type_node) { error ("type of asm operand %qE could not be determined", TREE_VALUE (t)); operand = error_mark_node; } if (constraint_parsed) { /* If the operand is going to end up in memory, mark it addressable. */ if (!allows_reg && allows_mem) { /* Strip the nops as we allow this case. FIXME, this really should be rejected or made deprecated. */ STRIP_NOPS (operand); if (!cxx_mark_addressable (operand)) operand = error_mark_node; } else if (!allows_reg && !allows_mem) { /* If constraint allows neither register nor memory, try harder to get a constant. */ tree constop = maybe_constant_value (operand); if (TREE_CONSTANT (constop)) operand = constop; } } else operand = error_mark_node; TREE_VALUE (t) = operand; } } r = build_stmt (input_location, ASM_EXPR, string, output_operands, input_operands, clobbers, labels); ASM_VOLATILE_P (r) = volatile_p || noutputs == 0; r = maybe_cleanup_point_expr_void (r); return add_stmt (r); } /* Finish a label with the indicated NAME. Returns the new label. */ tree finish_label_stmt (tree name) { tree decl = define_label (input_location, name); if (decl == error_mark_node) return error_mark_node; add_stmt (build_stmt (input_location, LABEL_EXPR, decl)); return decl; } /* Finish a series of declarations for local labels. G++ allows users to declare "local" labels, i.e., labels with scope. This extension is useful when writing code involving statement-expressions. */ void finish_label_decl (tree name) { if (!at_function_scope_p ()) { error ("__label__ declarations are only allowed in function scopes"); return; } add_decl_expr (declare_local_label (name)); } /* When DECL goes out of scope, make sure that CLEANUP is executed. 
*/ void finish_decl_cleanup (tree decl, tree cleanup) { push_cleanup (decl, cleanup, false); } /* If the current scope exits with an exception, run CLEANUP. */ void finish_eh_cleanup (tree cleanup) { push_cleanup (NULL, cleanup, true); } /* The MEM_INITS is a list of mem-initializers, in reverse of the order they were written by the user. Each node is as for emit_mem_initializers. */ void finish_mem_initializers (tree mem_inits) { /* Reorder the MEM_INITS so that they are in the order they appeared in the source program. */ mem_inits = nreverse (mem_inits); if (processing_template_decl) { tree mem; for (mem = mem_inits; mem; mem = TREE_CHAIN (mem)) { /* If the TREE_PURPOSE is a TYPE_PACK_EXPANSION, skip the check for bare parameter packs in the TREE_VALUE, because any parameter packs in the TREE_VALUE have already been bound as part of the TREE_PURPOSE. See make_pack_expansion for more information. */ if (TREE_CODE (TREE_PURPOSE (mem)) != TYPE_PACK_EXPANSION && check_for_bare_parameter_packs (TREE_VALUE (mem))) TREE_VALUE (mem) = error_mark_node; } add_stmt (build_min_nt_loc (UNKNOWN_LOCATION, CTOR_INITIALIZER, mem_inits)); } else emit_mem_initializers (mem_inits); } /* Obfuscate EXPR if it looks like an id-expression or member access so that the call to finish_decltype in do_auto_deduction will give the right result. */ tree force_paren_expr (tree expr) { /* This is only needed for decltype(auto) in C++14. */ if (cxx_dialect < cxx14) return expr; /* If we're in unevaluated context, we can't be deducing a return/initializer type, so we don't need to mess with this. */ if (cp_unevaluated_operand) return expr; if (!DECL_P (expr) && TREE_CODE (expr) != COMPONENT_REF && TREE_CODE (expr) != SCOPE_REF) return expr; if (TREE_CODE (expr) == COMPONENT_REF) REF_PARENTHESIZED_P (expr) = true; else if (type_dependent_expression_p (expr)) expr = build1 (PAREN_EXPR, TREE_TYPE (expr), expr); else { cp_lvalue_kind kind = lvalue_kind (expr); if ((kind & ~clk_class) != clk_none) { tree type = unlowered_expr_type (expr); bool rval = !!(kind & clk_rvalueref); type = cp_build_reference_type (type, rval); /* This inhibits warnings in, eg, cxx_mark_addressable (c++/60955). */ warning_sentinel s (extra_warnings); expr = build_static_cast (type, expr, tf_error); if (expr != error_mark_node) REF_PARENTHESIZED_P (expr) = true; } } return expr; } /* Finish a parenthesized expression EXPR. */ tree finish_parenthesized_expr (tree expr) { if (EXPR_P (expr)) /* This inhibits warnings in c_common_truthvalue_conversion. */ TREE_NO_WARNING (expr) = 1; if (TREE_CODE (expr) == OFFSET_REF || TREE_CODE (expr) == SCOPE_REF) /* [expr.unary.op]/3 The qualified id of a pointer-to-member must not be enclosed in parentheses. */ PTRMEM_OK_P (expr) = 0; if (TREE_CODE (expr) == STRING_CST) PAREN_STRING_LITERAL_P (expr) = 1; expr = force_paren_expr (expr); return expr; } /* Finish a reference to a non-static data member (DECL) that is not preceded by `.' or `->'. */ tree finish_non_static_data_member (tree decl, tree object, tree qualifying_scope) { gcc_assert (TREE_CODE (decl) == FIELD_DECL); if (!object) { tree scope = qualifying_scope; if (scope == NULL_TREE) scope = context_for_name_lookup (decl); object = maybe_dummy_object (scope, NULL); } object = maybe_resolve_dummy (object, true); if (object == error_mark_node) return error_mark_node; /* DR 613/850: Can use non-static data members without an associated object in sizeof/decltype/alignof. 
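   For example (illustrative), this is valid:

     struct A { int i; };
     size_t n = sizeof (A::i);   // OK: unevaluated operand, DR 613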
*/
  if (is_dummy_object (object) && cp_unevaluated_operand == 0
      && (!processing_template_decl || !current_class_ref))
    {
      if (current_function_decl
	  && DECL_STATIC_FUNCTION_P (current_function_decl))
	error ("invalid use of member %qD in static member function", decl);
      else
	error ("invalid use of non-static data member %qD", decl);
      inform (DECL_SOURCE_LOCATION (decl), "declared here");

      return error_mark_node;
    }

  if (current_class_ptr)
    TREE_USED (current_class_ptr) = 1;
  if (processing_template_decl && !qualifying_scope)
    {
      tree type = TREE_TYPE (decl);

      if (TREE_CODE (type) == REFERENCE_TYPE)
	/* Quals on the object don't matter.  */;
      else if (PACK_EXPANSION_P (type))
	/* Don't bother trying to represent this.  */
	type = NULL_TREE;
      else
	{
	  /* Set the cv qualifiers.  */
	  int quals = cp_type_quals (TREE_TYPE (object));

	  if (DECL_MUTABLE_P (decl))
	    quals &= ~TYPE_QUAL_CONST;

	  quals |= cp_type_quals (TREE_TYPE (decl));
	  type = cp_build_qualified_type (type, quals);
	}

      return (convert_from_reference
	      (build_min (COMPONENT_REF, type, object, decl, NULL_TREE)));
    }
  /* If PROCESSING_TEMPLATE_DECL is nonzero here, then
     QUALIFYING_SCOPE is also non-null.  Wrap this in a SCOPE_REF
     for now.  */
  else if (processing_template_decl)
    return build_qualified_name (TREE_TYPE (decl),
				 qualifying_scope,
				 decl,
				 /*template_p=*/false);
  else
    {
      tree access_type = TREE_TYPE (object);

      perform_or_defer_access_check (TYPE_BINFO (access_type), decl,
				     decl, tf_warning_or_error);

      /* If the data member was named `C::M', convert `*this' to `C'
	 first.  */
      if (qualifying_scope)
	{
	  tree binfo = NULL_TREE;
	  object = build_scoped_ref (object, qualifying_scope,
				     &binfo);
	}

      return build_class_member_access_expr (object, decl,
					     /*access_path=*/NULL_TREE,
					     /*preserve_reference=*/false,
					     tf_warning_or_error);
    }
}

/* If we are currently parsing a template and we encountered a typedef
   TYPEDEF_DECL that is being accessed through CONTEXT, this function
   adds the typedef to a list tied to the current template.
   At template instantiation time, that list is walked and access check
   performed for each typedef.
   LOCATION is the location of the usage point of TYPEDEF_DECL.  */

void
add_typedef_to_current_template_for_access_check (tree typedef_decl,
						  tree context,
						  location_t location)
{
  tree template_info = NULL;
  tree cs = current_scope ();

  if (!is_typedef_decl (typedef_decl)
      || !context
      || !CLASS_TYPE_P (context)
      || !cs)
    return;

  if (CLASS_TYPE_P (cs) || TREE_CODE (cs) == FUNCTION_DECL)
    template_info = get_template_info (cs);

  if (template_info
      && TI_TEMPLATE (template_info)
      && !currently_open_class (context))
    append_type_to_template_for_access_check (cs, typedef_decl,
					      context, location);
}

/* DECL was the declaration to which a qualified-id resolved.  Issue
   an error message if it is not accessible.  If OBJECT_TYPE is
   non-NULL, we have just seen `x->' or `x.' and OBJECT_TYPE is the
   type of `*x', or `x', respectively.  If the DECL was named as
   `A::B' then NESTED_NAME_SPECIFIER is `A'.  */

void
check_accessibility_of_qualified_id (tree decl,
				     tree object_type,
				     tree nested_name_specifier)
{
  tree scope;
  tree qualifying_type = NULL_TREE;

  /* If we are parsing a template declaration and if decl is a typedef,
     add it to a list tied to the template.
     At template instantiation time, that list will be walked and
     access check performed.  */
  add_typedef_to_current_template_for_access_check (decl,
						    nested_name_specifier
						    ? nested_name_specifier
						    : DECL_CONTEXT (decl),
						    input_location);

  /* If we're not checking, return immediately.  */
  if (deferred_access_no_check)
    return;

  /* Determine the SCOPE of DECL.  */
  scope = context_for_name_lookup (decl);

  /* If the SCOPE is not a type, then DECL is not a member.  */
  if (!TYPE_P (scope))
    return;

  /* Compute the scope through which DECL is being accessed.  */
  if (object_type
      /* OBJECT_TYPE might not be a class type; consider:

	   class A { typedef int I; };
	   I *p;
	   p->A::I::~I();

	 In this case, we will have "A::I" as the DECL, but "I" as the
	 OBJECT_TYPE.  */
      && CLASS_TYPE_P (object_type)
      && DERIVED_FROM_P (scope, object_type))
    /* If we are processing a `->' or `.' expression, use the type of the
       left-hand side.  */
    qualifying_type = object_type;
  else if (nested_name_specifier)
    {
      /* If the reference is to a non-static member of the
	 current class, treat it as if it were referenced through
	 `this'.  */
      tree ct;
      if (DECL_NONSTATIC_MEMBER_P (decl)
	  && current_class_ptr
	  && DERIVED_FROM_P (scope, ct = current_nonlambda_class_type ()))
	qualifying_type = ct;
      /* Otherwise, use the type indicated by the
	 nested-name-specifier.  */
      else
	qualifying_type = nested_name_specifier;
    }
  else
    /* Otherwise, the name must be from the current class or one of
       its bases.  */
    qualifying_type = currently_open_derived_class (scope);

  if (qualifying_type
      /* It is possible for qualifying type to be a TEMPLATE_TYPE_PARM
	 or similar in a default argument value.  */
      && CLASS_TYPE_P (qualifying_type)
      && !dependent_type_p (qualifying_type))
    perform_or_defer_access_check (TYPE_BINFO (qualifying_type), decl,
				   decl, tf_warning_or_error);
}

/* EXPR is the result of a qualified-id.  The QUALIFYING_CLASS was the
   class named to the left of the "::" operator.  DONE is true if this
   expression is a complete postfix-expression; it is false if this
   expression is followed by '->', '[', '(', etc.  ADDRESS_P is true
   iff this expression is the operand of '&'.  TEMPLATE_P is true iff
   the qualified-id was of the form "A::template B".  TEMPLATE_ARG_P
   is true iff this qualified name appears as a template argument.  */

tree
finish_qualified_id_expr (tree qualifying_class,
			  tree expr,
			  bool done,
			  bool address_p,
			  bool template_p,
			  bool template_arg_p,
			  tsubst_flags_t complain)
{
  gcc_assert (TYPE_P (qualifying_class));

  if (error_operand_p (expr))
    return error_mark_node;

  if ((DECL_P (expr) || BASELINK_P (expr))
      && !mark_used (expr, complain))
    return error_mark_node;

  if (template_p)
    check_template_keyword (expr);

  /* If EXPR occurs as the operand of '&', use special handling that
     permits a pointer-to-member.  */
  if (address_p && done)
    {
      if (TREE_CODE (expr) == SCOPE_REF)
	expr = TREE_OPERAND (expr, 1);
      expr = build_offset_ref (qualifying_class, expr,
			       /*address_p=*/true, complain);
      return expr;
    }

  /* No need to check access within an enum.  */
  if (TREE_CODE (qualifying_class) == ENUMERAL_TYPE)
    return expr;

  /* Within the scope of a class, turn references to non-static
     members into expression of the form "this->...".  */
  if (template_arg_p)
    /* But, within a template argument, we do not want to make the
       transformation, as there is no "this" pointer.  */
    ;
  else if (TREE_CODE (expr) == FIELD_DECL)
    {
      push_deferring_access_checks (dk_no_check);
      expr = finish_non_static_data_member (expr, NULL_TREE,
					    qualifying_class);
      pop_deferring_access_checks ();
    }
  else if (BASELINK_P (expr) && !processing_template_decl)
    {
      /* See if any of the functions are non-static members.  */
      /* If so, the expression may be relative to 'this'.
*/ if (!shared_member_p (expr) && current_class_ptr && DERIVED_FROM_P (qualifying_class, current_nonlambda_class_type ())) expr = (build_class_member_access_expr (maybe_dummy_object (qualifying_class, NULL), expr, BASELINK_ACCESS_BINFO (expr), /*preserve_reference=*/false, complain)); else if (done) /* The expression is a qualified name whose address is not being taken. */ expr = build_offset_ref (qualifying_class, expr, /*address_p=*/false, complain); } else if (BASELINK_P (expr)) ; else { /* In a template, return a SCOPE_REF for most qualified-ids so that we can check access at instantiation time. But if we're looking at a member of the current instantiation, we know we have access and building up the SCOPE_REF confuses non-type template argument handling. */ if (processing_template_decl && !currently_open_class (qualifying_class)) expr = build_qualified_name (TREE_TYPE (expr), qualifying_class, expr, template_p); expr = convert_from_reference (expr); } return expr; } /* Begin a statement-expression. The value returned must be passed to finish_stmt_expr. */ tree begin_stmt_expr (void) { return push_stmt_list (); } /* Process the final expression of a statement expression. EXPR can be NULL, if the final expression is empty. Return a STATEMENT_LIST containing all the statements in the statement-expression, or ERROR_MARK_NODE if there was an error. */ tree finish_stmt_expr_expr (tree expr, tree stmt_expr) { if (error_operand_p (expr)) { /* The type of the statement-expression is the type of the last expression. */ TREE_TYPE (stmt_expr) = error_mark_node; return error_mark_node; } /* If the last statement does not have "void" type, then the value of the last statement is the value of the entire expression. */ if (expr) { tree type = TREE_TYPE (expr); if (processing_template_decl) { expr = build_stmt (input_location, EXPR_STMT, expr); expr = add_stmt (expr); /* Mark the last statement so that we can recognize it as such at template-instantiation time. */ EXPR_STMT_STMT_EXPR_RESULT (expr) = 1; } else if (VOID_TYPE_P (type)) { /* Just treat this like an ordinary statement. */ expr = finish_expr_stmt (expr); } else { /* It actually has a value we need to deal with. First, force it to be an rvalue so that we won't need to build up a copy constructor call later when we try to assign it to something. */ expr = force_rvalue (expr, tf_warning_or_error); if (error_operand_p (expr)) return error_mark_node; /* Update for array-to-pointer decay. */ type = TREE_TYPE (expr); /* Wrap it in a CLEANUP_POINT_EXPR and add it to the list like a normal statement, but don't convert to void or actually add the EXPR_STMT. */ if (TREE_CODE (expr) != CLEANUP_POINT_EXPR) expr = maybe_cleanup_point_expr (expr); add_stmt (expr); } /* The type of the statement-expression is the type of the last expression. */ TREE_TYPE (stmt_expr) = type; } return stmt_expr; } /* Finish a statement-expression. EXPR should be the value returned by the previous begin_stmt_expr. Returns an expression representing the statement-expression. 
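   For example (illustrative), given the GNU statement-expression

     int i = ({ int j = f (); j + 1; });

   the value of the last statement, `j + 1', becomes the value of the
   whole expression, as handled by finish_stmt_expr_expr above.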
*/ tree finish_stmt_expr (tree stmt_expr, bool has_no_scope) { tree type; tree result; if (error_operand_p (stmt_expr)) { pop_stmt_list (stmt_expr); return error_mark_node; } gcc_assert (TREE_CODE (stmt_expr) == STATEMENT_LIST); type = TREE_TYPE (stmt_expr); result = pop_stmt_list (stmt_expr); TREE_TYPE (result) = type; if (processing_template_decl) { result = build_min (STMT_EXPR, type, result); TREE_SIDE_EFFECTS (result) = 1; STMT_EXPR_NO_SCOPE (result) = has_no_scope; } else if (CLASS_TYPE_P (type)) { /* Wrap the statement-expression in a TARGET_EXPR so that the temporary object created by the final expression is destroyed at the end of the full-expression containing the statement-expression. */ result = force_target_expr (type, result, tf_warning_or_error); } return result; } /* Returns the expression which provides the value of STMT_EXPR. */ tree stmt_expr_value_expr (tree stmt_expr) { tree t = STMT_EXPR_STMT (stmt_expr); if (TREE_CODE (t) == BIND_EXPR) t = BIND_EXPR_BODY (t); if (TREE_CODE (t) == STATEMENT_LIST && STATEMENT_LIST_TAIL (t)) t = STATEMENT_LIST_TAIL (t)->stmt; if (TREE_CODE (t) == EXPR_STMT) t = EXPR_STMT_EXPR (t); return t; } /* Return TRUE iff EXPR_STMT is an empty list of expression statements. */ bool empty_expr_stmt_p (tree expr_stmt) { tree body = NULL_TREE; if (expr_stmt == void_node) return true; if (expr_stmt) { if (TREE_CODE (expr_stmt) == EXPR_STMT) body = EXPR_STMT_EXPR (expr_stmt); else if (TREE_CODE (expr_stmt) == STATEMENT_LIST) body = expr_stmt; } if (body) { if (TREE_CODE (body) == STATEMENT_LIST) return tsi_end_p (tsi_start (body)); else return empty_expr_stmt_p (body); } return false; } /* Perform Koenig lookup. FN is the postfix-expression representing the function (or functions) to call; ARGS are the arguments to the call. Returns the functions to be considered by overload resolution. */ tree perform_koenig_lookup (tree fn, vec<tree, va_gc> *args, tsubst_flags_t complain) { tree identifier = NULL_TREE; tree functions = NULL_TREE; tree tmpl_args = NULL_TREE; bool template_id = false; if (TREE_CODE (fn) == TEMPLATE_ID_EXPR) { /* Use a separate flag to handle null args. */ template_id = true; tmpl_args = TREE_OPERAND (fn, 1); fn = TREE_OPERAND (fn, 0); } /* Find the name of the overloaded function. */ if (identifier_p (fn)) identifier = fn; else if (is_overloaded_fn (fn)) { functions = fn; identifier = DECL_NAME (get_first_fn (functions)); } else if (DECL_P (fn)) { functions = fn; identifier = DECL_NAME (fn); } /* A call to a namespace-scope function using an unqualified name. Do Koenig lookup -- unless any of the arguments are type-dependent. */ if (!any_type_dependent_arguments_p (args) && !any_dependent_template_arguments_p (tmpl_args)) { fn = lookup_arg_dependent (identifier, functions, args); if (!fn) { /* The unqualified name could not be resolved. */ if (complain) fn = unqualified_fn_lookup_error (identifier); else fn = identifier; } } if (fn && template_id) fn = build2 (TEMPLATE_ID_EXPR, unknown_type_node, fn, tmpl_args); return fn; } /* Generate an expression for `FN (ARGS)'. This may change the contents of ARGS. If DISALLOW_VIRTUAL is true, the call to FN will be not generated as a virtual call, even if FN is virtual. (This flag is set when encountering an expression where the function name is explicitly qualified. For example a call to `X::f' never generates a virtual call.) Returns code for the call. 
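   An illustrative case of DISALLOW_VIRTUAL:

     struct B { virtual void f (); };
     struct D : B { void f (); };

     void g (D &d) { d.B::f (); }   // calls B::f directly, non-virtually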
*/ tree finish_call_expr (tree fn, vec<tree, va_gc> **args, bool disallow_virtual, bool koenig_p, tsubst_flags_t complain) { tree result; tree orig_fn; vec<tree, va_gc> *orig_args = NULL; if (fn == error_mark_node) return error_mark_node; gcc_assert (!TYPE_P (fn)); orig_fn = fn; if (processing_template_decl) { /* If the call expression is dependent, build a CALL_EXPR node with no type; type_dependent_expression_p recognizes expressions with no type as being dependent. */ if (type_dependent_expression_p (fn) || any_type_dependent_arguments_p (*args) /* For a non-static member function that doesn't have an explicit object argument, we need to specifically test the type dependency of the "this" pointer because it is not included in *ARGS even though it is considered to be part of the list of arguments. Note that this is related to CWG issues 515 and 1005. */ || (TREE_CODE (fn) != COMPONENT_REF && non_static_member_function_p (fn) && current_class_ref && type_dependent_expression_p (current_class_ref))) { result = build_nt_call_vec (fn, *args); SET_EXPR_LOCATION (result, EXPR_LOC_OR_LOC (fn, input_location)); KOENIG_LOOKUP_P (result) = koenig_p; if (cfun) { do { tree fndecl = OVL_CURRENT (fn); if (TREE_CODE (fndecl) != FUNCTION_DECL || !TREE_THIS_VOLATILE (fndecl)) break; fn = OVL_NEXT (fn); } while (fn); if (!fn) current_function_returns_abnormally = 1; } return result; } orig_args = make_tree_vector_copy (*args); if (!BASELINK_P (fn) && TREE_CODE (fn) != PSEUDO_DTOR_EXPR && TREE_TYPE (fn) != unknown_type_node) fn = build_non_dependent_expr (fn); make_args_non_dependent (*args); } if (TREE_CODE (fn) == COMPONENT_REF) { tree member = TREE_OPERAND (fn, 1); if (BASELINK_P (member)) { tree object = TREE_OPERAND (fn, 0); return build_new_method_call (object, member, args, NULL_TREE, (disallow_virtual ? LOOKUP_NORMAL | LOOKUP_NONVIRTUAL : LOOKUP_NORMAL), /*fn_p=*/NULL, complain); } } /* Per 13.3.1.1, '(&f)(...)' is the same as '(f)(...)'. */ if (TREE_CODE (fn) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (fn, 0)) == OVERLOAD) fn = TREE_OPERAND (fn, 0); if (is_overloaded_fn (fn)) fn = baselink_for_fns (fn); result = NULL_TREE; if (BASELINK_P (fn)) { tree object; /* A call to a member function. From [over.call.func]: If the keyword this is in scope and refers to the class of that member function, or a derived class thereof, then the function call is transformed into a qualified function call using (*this) as the postfix-expression to the left of the . operator.... [Otherwise] a contrived object of type T becomes the implied object argument. In this situation: struct A { void f(); }; struct B : public A {}; struct C : public A { void g() { B::f(); }}; "the class of that member function" refers to `A'. But 11.2 [class.access.base] says that we need to convert 'this' to B* as part of the access, so we pass 'B' to maybe_dummy_object. */ object = maybe_dummy_object (BINFO_TYPE (BASELINK_ACCESS_BINFO (fn)), NULL); if (processing_template_decl) { if (type_dependent_expression_p (object)) { tree ret = build_nt_call_vec (orig_fn, orig_args); release_tree_vector (orig_args); return ret; } object = build_non_dependent_expr (object); } result = build_new_method_call (object, fn, args, NULL_TREE, (disallow_virtual ? LOOKUP_NORMAL|LOOKUP_NONVIRTUAL : LOOKUP_NORMAL), /*fn_p=*/NULL, complain); } else if (is_overloaded_fn (fn)) { /* If the function is an overloaded builtin, resolve it. 
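   For example (illustrative), the type-generic atomic builtins such as
   __atomic_load_n are declared once but accept operands of several widths;
   resolve_overloaded_builtin maps such a call to the size-specific builtin
   matching the actual argument type.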
*/ if (TREE_CODE (fn) == FUNCTION_DECL && (DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL || DECL_BUILT_IN_CLASS (fn) == BUILT_IN_MD)) result = resolve_overloaded_builtin (input_location, fn, *args); if (!result) { if (warn_sizeof_pointer_memaccess && !vec_safe_is_empty (*args) && !processing_template_decl) { location_t sizeof_arg_loc[3]; tree sizeof_arg[3]; unsigned int i; for (i = 0; i < 3; i++) { tree t; sizeof_arg_loc[i] = UNKNOWN_LOCATION; sizeof_arg[i] = NULL_TREE; if (i >= (*args)->length ()) continue; t = (**args)[i]; if (TREE_CODE (t) != SIZEOF_EXPR) continue; if (SIZEOF_EXPR_TYPE_P (t)) sizeof_arg[i] = TREE_TYPE (TREE_OPERAND (t, 0)); else sizeof_arg[i] = TREE_OPERAND (t, 0); sizeof_arg_loc[i] = EXPR_LOCATION (t); } sizeof_pointer_memaccess_warning (sizeof_arg_loc, fn, *args, sizeof_arg, same_type_ignoring_top_level_qualifiers_p); } /* A call to a namespace-scope function. */ result = build_new_function_call (fn, args, koenig_p, complain); } } else if (TREE_CODE (fn) == PSEUDO_DTOR_EXPR) { if (!vec_safe_is_empty (*args)) error ("arguments to destructor are not allowed"); /* Mark the pseudo-destructor call as having side-effects so that we do not issue warnings about its use. */ result = build1 (NOP_EXPR, void_type_node, TREE_OPERAND (fn, 0)); TREE_SIDE_EFFECTS (result) = 1; } else if (CLASS_TYPE_P (TREE_TYPE (fn))) /* If the "function" is really an object of class type, it might have an overloaded `operator ()'. */ result = build_op_call (fn, args, complain); if (!result) /* A call where the function is unknown. */ result = cp_build_function_call_vec (fn, args, complain); if (processing_template_decl && result != error_mark_node) { if (INDIRECT_REF_P (result)) result = TREE_OPERAND (result, 0); result = build_call_vec (TREE_TYPE (result), orig_fn, orig_args); SET_EXPR_LOCATION (result, input_location); KOENIG_LOOKUP_P (result) = koenig_p; release_tree_vector (orig_args); result = convert_from_reference (result); } if (koenig_p) { /* Free garbage OVERLOADs from arg-dependent lookup. */ tree next = NULL_TREE; for (fn = orig_fn; fn && TREE_CODE (fn) == OVERLOAD && OVL_ARG_DEPENDENT (fn); fn = next) { if (processing_template_decl) /* In a template, we'll re-use them at instantiation time. */ OVL_ARG_DEPENDENT (fn) = false; else { next = OVL_CHAIN (fn); ggc_free (fn); } } } return result; } /* Finish a call to a postfix increment or decrement or EXPR. (Which is indicated by CODE, which should be POSTINCREMENT_EXPR or POSTDECREMENT_EXPR.) */ tree finish_increment_expr (tree expr, enum tree_code code) { return build_x_unary_op (input_location, code, expr, tf_warning_or_error); } /* Finish a use of `this'. Returns an expression for `this'. */ tree finish_this_expr (void) { tree result = NULL_TREE; if (current_class_ptr) { tree type = TREE_TYPE (current_class_ref); /* In a lambda expression, 'this' refers to the captured 'this'. */ if (LAMBDA_TYPE_P (type)) result = lambda_expr_this_capture (CLASSTYPE_LAMBDA_EXPR (type), true); else result = current_class_ptr; } if (result) /* The keyword 'this' is a prvalue expression. */ return rvalue (result); tree fn = current_nonlambda_function (); if (fn && DECL_STATIC_FUNCTION_P (fn)) error ("%<this%> is unavailable for static member functions"); else if (fn) error ("invalid use of %<this%> in non-member function"); else error ("invalid use of %<this%> at top level"); return error_mark_node; } /* Finish a pseudo-destructor expression. If SCOPE is NULL, the expression was of the form `OBJECT.~DESTRUCTOR' where DESTRUCTOR is the TYPE for the type given. 
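   For example (illustrative):

     typedef int T;
     void f (int *p) { p->~T (); }

   Here SCOPE is NULL and DESTRUCTOR is the type T.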
If SCOPE is non-NULL, the expression was of the form `OBJECT.SCOPE::~DESTRUCTOR'. */ tree finish_pseudo_destructor_expr (tree object, tree scope, tree destructor, location_t loc) { if (object == error_mark_node || destructor == error_mark_node) return error_mark_node; gcc_assert (TYPE_P (destructor)); if (!processing_template_decl) { if (scope == error_mark_node) { error_at (loc, "invalid qualifying scope in pseudo-destructor name"); return error_mark_node; } if (is_auto (destructor)) destructor = TREE_TYPE (object); if (scope && TYPE_P (scope) && !check_dtor_name (scope, destructor)) { error_at (loc, "qualified type %qT does not match destructor name ~%qT", scope, destructor); return error_mark_node; } /* [expr.pseudo] says both: The type designated by the pseudo-destructor-name shall be the same as the object type. and: The cv-unqualified versions of the object type and of the type designated by the pseudo-destructor-name shall be the same type. We implement the more generous second sentence, since that is what most other compilers do. */ if (!same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (object), destructor)) { error_at (loc, "%qE is not of type %qT", object, destructor); return error_mark_node; } } return build3_loc (loc, PSEUDO_DTOR_EXPR, void_type_node, object, scope, destructor); } /* Finish an expression of the form CODE EXPR. */ tree finish_unary_op_expr (location_t loc, enum tree_code code, tree expr, tsubst_flags_t complain) { tree result = build_x_unary_op (loc, code, expr, complain); if ((complain & tf_warning) && TREE_OVERFLOW_P (result) && !TREE_OVERFLOW_P (expr)) overflow_warning (input_location, result); return result; } /* Finish a compound-literal expression. TYPE is the type to which the CONSTRUCTOR in COMPOUND_LITERAL is being cast. */ tree finish_compound_literal (tree type, tree compound_literal, tsubst_flags_t complain) { if (type == error_mark_node) return error_mark_node; if (TREE_CODE (type) == REFERENCE_TYPE) { compound_literal = finish_compound_literal (TREE_TYPE (type), compound_literal, complain); return cp_build_c_cast (type, compound_literal, complain); } if (!TYPE_OBJ_P (type)) { if (complain & tf_error) error ("compound literal of non-object type %qT", type); return error_mark_node; } if (processing_template_decl) { TREE_TYPE (compound_literal) = type; /* Mark the expression as a compound literal. */ TREE_HAS_CONSTRUCTOR (compound_literal) = 1; return compound_literal; } type = complete_type (type); if (TYPE_NON_AGGREGATE_CLASS (type)) { /* Trying to deal with a CONSTRUCTOR instead of a TREE_LIST everywhere that deals with function arguments would be a pain, so just wrap it in a TREE_LIST. The parser set a flag so we know that it came from T{} rather than T({}). 
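   For example (illustrative), `A{1, 2}' is direct-list-initialization of
   an A, whereas `A({1, 2})' is a functional cast whose single argument is
   a braced-init-list; the flag lets later processing distinguish the two
   forms.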
*/ CONSTRUCTOR_IS_DIRECT_INIT (compound_literal) = 1; compound_literal = build_tree_list (NULL_TREE, compound_literal); return build_functional_cast (type, compound_literal, complain); } if (TREE_CODE (type) == ARRAY_TYPE && check_array_initializer (NULL_TREE, type, compound_literal)) return error_mark_node; compound_literal = reshape_init (type, compound_literal, complain); if (SCALAR_TYPE_P (type) && !BRACE_ENCLOSED_INITIALIZER_P (compound_literal) && !check_narrowing (type, compound_literal, complain)) return error_mark_node; if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == NULL_TREE) { cp_complete_array_type_or_error (&type, compound_literal, false, complain); if (type == error_mark_node) return error_mark_node; } compound_literal = digest_init (type, compound_literal, complain); if (TREE_CODE (compound_literal) == CONSTRUCTOR) TREE_HAS_CONSTRUCTOR (compound_literal) = true; /* Put static/constant array temporaries in static variables, but always represent class temporaries with TARGET_EXPR so we elide copies. */ if ((!at_function_scope_p () || CP_TYPE_CONST_P (type)) && TREE_CODE (type) == ARRAY_TYPE && !TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type) && initializer_constant_valid_p (compound_literal, type)) { tree decl = create_temporary_var (type); DECL_INITIAL (decl) = compound_literal; TREE_STATIC (decl) = 1; if (literal_type_p (type) && CP_TYPE_CONST_NON_VOLATILE_P (type)) { /* 5.19 says that a constant expression can include an lvalue-rvalue conversion applied to "a glvalue of literal type that refers to a non-volatile temporary object initialized with a constant expression". Rather than try to communicate that this VAR_DECL is a temporary, just mark it constexpr. */ DECL_DECLARED_CONSTEXPR_P (decl) = true; DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl) = true; TREE_CONSTANT (decl) = true; } cp_apply_type_quals_to_decl (cp_type_quals (type), decl); decl = pushdecl_top_level (decl); DECL_NAME (decl) = make_anon_name (); SET_DECL_ASSEMBLER_NAME (decl, DECL_NAME (decl)); /* Make sure the destructor is callable. */ tree clean = cxx_maybe_build_cleanup (decl, complain); if (clean == error_mark_node) return error_mark_node; return decl; } else return get_target_expr_sfinae (compound_literal, complain); } /* Return the declaration for the function-name variable indicated by ID. */ tree finish_fname (tree id) { tree decl; decl = fname_decl (input_location, C_RID_CODE (id), id); if (processing_template_decl && current_function_decl && decl != error_mark_node) decl = DECL_NAME (decl); return decl; } /* Finish a translation unit. */ void finish_translation_unit (void) { /* In case there were missing closebraces, get us back to the global binding level. */ pop_everything (); while (current_namespace != global_namespace) pop_namespace (); /* Do file scope __FUNCTION__ et al. */ finish_fname_decls (); } /* Finish a template type parameter, specified as AGGR IDENTIFIER. Returns the parameter. */ tree finish_template_type_parm (tree aggr, tree identifier) { if (aggr != class_type_node) { permerror (input_location, "template type parameters must use the keyword %<class%> or %<typename%>"); aggr = class_type_node; } return build_tree_list (aggr, identifier); } /* Finish a template template parameter, specified as AGGR IDENTIFIER. Returns the parameter. 
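   For example (illustrative), in

     template <template <typename> class TT> struct S { TT<int> t; };

   TT is a template template parameter: AGGR is `class' and IDENTIFIER is
   `TT'.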
*/ tree finish_template_template_parm (tree aggr, tree identifier) { tree decl = build_decl (input_location, TYPE_DECL, identifier, NULL_TREE); tree tmpl = build_lang_decl (TEMPLATE_DECL, identifier, NULL_TREE); DECL_TEMPLATE_PARMS (tmpl) = current_template_parms; DECL_TEMPLATE_RESULT (tmpl) = decl; DECL_ARTIFICIAL (decl) = 1; end_template_decl (); gcc_assert (DECL_TEMPLATE_PARMS (tmpl)); check_default_tmpl_args (decl, DECL_TEMPLATE_PARMS (tmpl), /*is_primary=*/true, /*is_partial=*/false, /*is_friend=*/0); return finish_template_type_parm (aggr, tmpl); } /* ARGUMENT is the default-argument value for a template template parameter. If ARGUMENT is invalid, issue error messages and return the ERROR_MARK_NODE. Otherwise, ARGUMENT itself is returned. */ tree check_template_template_default_arg (tree argument) { if (TREE_CODE (argument) != TEMPLATE_DECL && TREE_CODE (argument) != TEMPLATE_TEMPLATE_PARM && TREE_CODE (argument) != UNBOUND_CLASS_TEMPLATE) { if (TREE_CODE (argument) == TYPE_DECL) error ("invalid use of type %qT as a default value for a template " "template-parameter", TREE_TYPE (argument)); else error ("invalid default argument for a template template parameter"); return error_mark_node; } return argument; } /* Begin a class definition, as indicated by T. */ tree begin_class_definition (tree t) { if (error_operand_p (t) || error_operand_p (TYPE_MAIN_DECL (t))) return error_mark_node; if (processing_template_parmlist) { error ("definition of %q#T inside template parameter list", t); return error_mark_node; } /* According to the C++ ABI, decimal classes defined in ISO/IEC TR 24733 are passed the same as decimal scalar types. */ if (TREE_CODE (t) == RECORD_TYPE && !processing_template_decl) { tree ns = TYPE_CONTEXT (t); if (ns && TREE_CODE (ns) == NAMESPACE_DECL && DECL_CONTEXT (ns) == std_node && DECL_NAME (ns) && !strcmp (IDENTIFIER_POINTER (DECL_NAME (ns)), "decimal")) { const char *n = TYPE_NAME_STRING (t); if ((strcmp (n, "decimal32") == 0) || (strcmp (n, "decimal64") == 0) || (strcmp (n, "decimal128") == 0)) TYPE_TRANSPARENT_AGGR (t) = 1; } } /* A non-implicit typename comes from code like: template <typename T> struct A { template <typename U> struct A<T>::B ... This is erroneous. */ else if (TREE_CODE (t) == TYPENAME_TYPE) { error ("invalid definition of qualified type %qT", t); t = error_mark_node; } if (t == error_mark_node || ! MAYBE_CLASS_TYPE_P (t)) { t = make_class_type (RECORD_TYPE); pushtag (make_anon_name (), t, /*tag_scope=*/ts_current); } if (TYPE_BEING_DEFINED (t)) { t = make_class_type (TREE_CODE (t)); pushtag (TYPE_IDENTIFIER (t), t, /*tag_scope=*/ts_current); } maybe_process_partial_specialization (t); pushclass (t); TYPE_BEING_DEFINED (t) = 1; class_binding_level->defining_class_p = 1; if (flag_pack_struct) { tree v; TYPE_PACKED (t) = 1; /* Even though the type is being defined for the first time here, there might have been a forward declaration, so there might be cv-qualified variants of T. */ for (v = TYPE_NEXT_VARIANT (t); v; v = TYPE_NEXT_VARIANT (v)) TYPE_PACKED (v) = 1; } /* Reset the interface data, at the earliest possible moment, as it might have been set via a class foo; before. */ if (! TYPE_ANONYMOUS_P (t)) { struct c_fileinfo *finfo = \ get_fileinfo (LOCATION_FILE (input_location)); CLASSTYPE_INTERFACE_ONLY (t) = finfo->interface_only; SET_CLASSTYPE_INTERFACE_UNKNOWN_X (t, finfo->interface_unknown); } reset_specialization(); /* Make a declaration for this class in its own scope. 
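   (This is the injected-class-name: within the definition of a class C,
   the name C is visible and refers to the class itself.)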
*/ build_self_reference (); return t; } /* Finish the member declaration given by DECL. */ void finish_member_declaration (tree decl) { if (decl == error_mark_node || decl == NULL_TREE) return; if (decl == void_type_node) /* The COMPONENT was a friend, not a member, and so there's nothing for us to do. */ return; /* We should see only one DECL at a time. */ gcc_assert (DECL_CHAIN (decl) == NULL_TREE); /* Set up access control for DECL. */ TREE_PRIVATE (decl) = (current_access_specifier == access_private_node); TREE_PROTECTED (decl) = (current_access_specifier == access_protected_node); if (TREE_CODE (decl) == TEMPLATE_DECL) { TREE_PRIVATE (DECL_TEMPLATE_RESULT (decl)) = TREE_PRIVATE (decl); TREE_PROTECTED (DECL_TEMPLATE_RESULT (decl)) = TREE_PROTECTED (decl); } /* Mark the DECL as a member of the current class, unless it's a member of an enumeration. */ if (TREE_CODE (decl) != CONST_DECL) DECL_CONTEXT (decl) = current_class_type; /* Check for bare parameter packs in the member variable declaration. */ if (TREE_CODE (decl) == FIELD_DECL) { if (check_for_bare_parameter_packs (TREE_TYPE (decl))) TREE_TYPE (decl) = error_mark_node; if (check_for_bare_parameter_packs (DECL_ATTRIBUTES (decl))) DECL_ATTRIBUTES (decl) = NULL_TREE; } /* [dcl.link] A C language linkage is ignored for the names of class members and the member function type of class member functions. */ if (DECL_LANG_SPECIFIC (decl) && DECL_LANGUAGE (decl) == lang_c) SET_DECL_LANGUAGE (decl, lang_cplusplus); /* Put functions on the TYPE_METHODS list and everything else on the TYPE_FIELDS list. Note that these are built up in reverse order. We reverse them (to obtain declaration order) in finish_struct. */ if (DECL_DECLARES_FUNCTION_P (decl)) { /* We also need to add this function to the CLASSTYPE_METHOD_VEC. */ if (add_method (current_class_type, decl, NULL_TREE)) { DECL_CHAIN (decl) = TYPE_METHODS (current_class_type); TYPE_METHODS (current_class_type) = decl; maybe_add_class_template_decl_list (current_class_type, decl, /*friend_p=*/0); } } /* Enter the DECL into the scope of the class, if the class isn't a closure (whose fields are supposed to be unnamed). */ else if (CLASSTYPE_LAMBDA_EXPR (current_class_type) || pushdecl_class_level (decl)) { if (TREE_CODE (decl) == USING_DECL) { /* For now, ignore class-scope USING_DECLS, so that debugging backends do not see them. */ DECL_IGNORED_P (decl) = 1; } /* All TYPE_DECLs go at the end of TYPE_FIELDS. Ordinary fields go at the beginning. The reason is that lookup_field_1 searches the list in order, and we want a field name to override a type name so that the "struct stat hack" will work. In particular: struct S { enum E { }; int E } s; s.E = 3; is valid. In addition, the FIELD_DECLs must be maintained in declaration order so that class layout works as expected. However, we don't need that order until class layout, so we save a little time by putting FIELD_DECLs on in reverse order here, and then reversing them in finish_struct_1. (We could also keep a pointer to the correct insertion points in the list.) */ if (TREE_CODE (decl) == TYPE_DECL) TYPE_FIELDS (current_class_type) = chainon (TYPE_FIELDS (current_class_type), decl); else { DECL_CHAIN (decl) = TYPE_FIELDS (current_class_type); TYPE_FIELDS (current_class_type) = decl; } maybe_add_class_template_decl_list (current_class_type, decl, /*friend_p=*/0); } if (pch_file) note_decl_for_pch (decl); } /* DECL has been declared while we are building a PCH file. 
Perform actions that we might normally undertake lazily, but which can be performed now so that they do not have to be performed in translation units which include the PCH file. */ void note_decl_for_pch (tree decl) { gcc_assert (pch_file); /* There's a good chance that we'll have to mangle names at some point, even if only for emission in debugging information. */ if (VAR_OR_FUNCTION_DECL_P (decl) && !processing_template_decl) mangle_decl (decl); } /* Finish processing a complete template declaration. The PARMS are the template parameters. */ void finish_template_decl (tree parms) { if (parms) end_template_decl (); else end_specialization (); } /* Finish processing a template-id (which names a type) of the form NAME < ARGS >. Return the TYPE_DECL for the type named by the template-id. If ENTERING_SCOPE is nonzero we are about to enter the scope of template-id indicated. */ tree finish_template_type (tree name, tree args, int entering_scope) { tree type; type = lookup_template_class (name, args, NULL_TREE, NULL_TREE, entering_scope, tf_warning_or_error | tf_user); if (type == error_mark_node) return type; else if (CLASS_TYPE_P (type) && !alias_type_or_template_p (type)) return TYPE_STUB_DECL (type); else return TYPE_NAME (type); } /* Finish processing a BASE_CLASS with the indicated ACCESS_SPECIFIER. Return a TREE_LIST containing the ACCESS_SPECIFIER and the BASE_CLASS, or NULL_TREE if an error occurred. The ACCESS_SPECIFIER is one of access_{default,public,protected_private}_node. For a virtual base we set TREE_TYPE. */ tree finish_base_specifier (tree base, tree access, bool virtual_p) { tree result; if (base == error_mark_node) { error ("invalid base-class specification"); result = NULL_TREE; } else if (! MAYBE_CLASS_TYPE_P (base)) { error ("%qT is not a class type", base); result = NULL_TREE; } else { if (cp_type_quals (base) != 0) { /* DR 484: Can a base-specifier name a cv-qualified class type? */ base = TYPE_MAIN_VARIANT (base); } result = build_tree_list (access, base); if (virtual_p) TREE_TYPE (result) = integer_type_node; } return result; } /* If FNS is a member function, a set of member functions, or a template-id referring to one or more member functions, return a BASELINK for FNS, incorporating the current access context. Otherwise, return FNS unchanged. */ tree baselink_for_fns (tree fns) { tree scope; tree cl; if (BASELINK_P (fns) || error_operand_p (fns)) return fns; scope = ovl_scope (fns); if (!CLASS_TYPE_P (scope)) return fns; cl = currently_open_derived_class (scope); if (!cl) cl = scope; cl = TYPE_BINFO (cl); return build_baselink (cl, cl, fns, /*optype=*/NULL_TREE); } /* Returns true iff DECL is a variable from a function outside the current one. */ static bool outer_var_p (tree decl) { return ((VAR_P (decl) || TREE_CODE (decl) == PARM_DECL) && DECL_FUNCTION_SCOPE_P (decl) && (DECL_CONTEXT (decl) != current_function_decl || parsing_nsdmi ())); } /* As above, but also checks that DECL is automatic. */ bool outer_automatic_var_p (tree decl) { return (outer_var_p (decl) && !TREE_STATIC (decl)); } /* DECL satisfies outer_automatic_var_p. Possibly complain about it or rewrite it for lambda capture. */ tree process_outer_var_ref (tree decl, tsubst_flags_t complain) { if (cp_unevaluated_operand) /* It's not a use (3.2) if we're in an unevaluated context. 
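   For example (illustrative), in `[] { return sizeof (v); }' an outer
   local `v' appears only as the operand of sizeof, which is unevaluated;
   v is not odr-used, so no capture is required.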
*/ return decl; tree context = DECL_CONTEXT (decl); tree containing_function = current_function_decl; tree lambda_stack = NULL_TREE; tree lambda_expr = NULL_TREE; tree initializer = convert_from_reference (decl); /* Mark it as used now even if the use is ill-formed. */ mark_used (decl); /* Core issue 696: "[At the July 2009 meeting] the CWG expressed support for an approach in which a reference to a local [constant] automatic variable in a nested class or lambda body would enter the expression as an rvalue, which would reduce the complexity of the problem" FIXME update for final resolution of core issue 696. */ if (decl_maybe_constant_var_p (decl)) { if (processing_template_decl) /* In a template, the constant value may not be in a usable form, so wait until instantiation time. */ return decl; else if (decl_constant_var_p (decl)) return scalar_constant_value (decl); } if (parsing_nsdmi ()) containing_function = NULL_TREE; else /* If we are in a lambda function, we can move out until we hit 1. the context, 2. a non-lambda function, or 3. a non-default capturing lambda function. */ while (context != containing_function && LAMBDA_FUNCTION_P (containing_function)) { tree closure = DECL_CONTEXT (containing_function); lambda_expr = CLASSTYPE_LAMBDA_EXPR (closure); if (TYPE_CLASS_SCOPE_P (closure)) /* A lambda in an NSDMI (c++/64496). */ break; if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_NONE) break; lambda_stack = tree_cons (NULL_TREE, lambda_expr, lambda_stack); containing_function = decl_function_context (containing_function); } if (lambda_expr && TREE_CODE (decl) == VAR_DECL && DECL_ANON_UNION_VAR_P (decl)) { if (complain & tf_error) error ("cannot capture member %qD of anonymous union", decl); return error_mark_node; } if (context == containing_function) { decl = add_default_capture (lambda_stack, /*id=*/DECL_NAME (decl), initializer); } else if (lambda_expr) { if (complain & tf_error) { error ("%qD is not captured", decl); tree closure = LAMBDA_EXPR_CLOSURE (lambda_expr); if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_NONE) inform (location_of (closure), "the lambda has no capture-default"); else if (TYPE_CLASS_SCOPE_P (closure)) inform (0, "lambda in local class %q+T cannot " "capture variables from the enclosing context", TYPE_CONTEXT (closure)); inform (input_location, "%q+#D declared here", decl); } return error_mark_node; } else { if (complain & tf_error) error (VAR_P (decl) ? G_("use of local variable with automatic storage from containing function") : G_("use of parameter from containing function")); inform (input_location, "%q+#D declared here", decl); return error_mark_node; } return decl; } /* ID_EXPRESSION is a representation of parsed, but unprocessed, id-expression. (See cp_parser_id_expression for details.) SCOPE, if non-NULL, is the type or namespace used to explicitly qualify ID_EXPRESSION. DECL is the entity to which that name has been resolved. *CONSTANT_EXPRESSION_P is true if we are presently parsing a constant-expression. In that case, *NON_CONSTANT_EXPRESSION_P will be set to true if this expression isn't permitted in a constant-expression, but it is otherwise not set by this function. *ALLOW_NON_CONSTANT_EXPRESSION_P is true if we are parsing a constant-expression, but a non-constant expression is also permissible. DONE is true if this expression is a complete postfix-expression; it is false if this expression is followed by '->', '[', '(', etc. ADDRESS_P is true iff this expression is the operand of '&'. 
TEMPLATE_P is true iff the qualified-id was of the form "A::template B". TEMPLATE_ARG_P is true iff this qualified name appears as a template argument. If an error occurs, and it is the kind of error that might cause the parser to abort a tentative parse, *ERROR_MSG is filled in. It is the caller's responsibility to issue the message. *ERROR_MSG will be a string with static storage duration, so the caller need not "free" it. Return an expression for the entity, after issuing appropriate diagnostics. This function is also responsible for transforming a reference to a non-static member into a COMPONENT_REF that makes the use of "this" explicit. Upon return, *IDK will be filled in appropriately. */ tree finish_id_expression (tree id_expression, tree decl, tree scope, cp_id_kind *idk, bool integral_constant_expression_p, bool allow_non_integral_constant_expression_p, bool *non_integral_constant_expression_p, bool template_p, bool done, bool address_p, bool template_arg_p, const char **error_msg, location_t location) { decl = strip_using_decl (decl); /* Initialize the output parameters. */ *idk = CP_ID_KIND_NONE; *error_msg = NULL; if (id_expression == error_mark_node) return error_mark_node; /* If we have a template-id, then no further lookup is required. If the template-id was for a template-class, we will sometimes have a TYPE_DECL at this point. */ else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR || TREE_CODE (decl) == TYPE_DECL) ; /* Look up the name. */ else { if (decl == error_mark_node) { /* Name lookup failed. */ if (scope && (!TYPE_P (scope) || (!dependent_type_p (scope) && !(identifier_p (id_expression) && IDENTIFIER_TYPENAME_P (id_expression) && dependent_type_p (TREE_TYPE (id_expression)))))) { /* If the qualifying type is non-dependent (and the name does not name a conversion operator to a dependent type), issue an error. */ qualified_name_lookup_error (scope, id_expression, decl, location); return error_mark_node; } else if (!scope) { /* It may be resolved via Koenig lookup. */ *idk = CP_ID_KIND_UNQUALIFIED; return id_expression; } else decl = id_expression; } /* If DECL is a variable that would be out of scope under ANSI/ISO rules, but in scope in the ARM, name lookup will succeed. Issue a diagnostic here. */ else decl = check_for_out_of_scope_variable (decl); /* Remember that the name was used in the definition of the current class so that we can check later to see if the meaning would have been different after the class was entirely defined. */ if (!scope && decl != error_mark_node && identifier_p (id_expression)) maybe_note_name_used_in_class (id_expression, decl); /* Disallow uses of local variables from containing functions, except within lambda-expressions. */ if (outer_automatic_var_p (decl)) { decl = process_outer_var_ref (decl, tf_warning_or_error); if (decl == error_mark_node) return error_mark_node; } /* Also disallow uses of function parameters outside the function body, except inside an unevaluated context (i.e. decltype). */ if (TREE_CODE (decl) == PARM_DECL && DECL_CONTEXT (decl) == NULL_TREE && !cp_unevaluated_operand) { *error_msg = "use of parameter outside function body"; return error_mark_node; } } /* If we didn't find anything, or what we found was a type, then this wasn't really an id-expression. 
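   For example (illustrative), naming a class template such as `vector'
   with no template argument list, or naming a bare type, where an
   expression is required.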
*/ if (TREE_CODE (decl) == TEMPLATE_DECL && !DECL_FUNCTION_TEMPLATE_P (decl)) { *error_msg = "missing template arguments"; return error_mark_node; } else if (TREE_CODE (decl) == TYPE_DECL || TREE_CODE (decl) == NAMESPACE_DECL) { *error_msg = "expected primary-expression"; return error_mark_node; } /* If the name resolved to a template parameter, there is no need to look it up again later. */ if ((TREE_CODE (decl) == CONST_DECL && DECL_TEMPLATE_PARM_P (decl)) || TREE_CODE (decl) == TEMPLATE_PARM_INDEX) { tree r; *idk = CP_ID_KIND_NONE; if (TREE_CODE (decl) == TEMPLATE_PARM_INDEX) decl = TEMPLATE_PARM_DECL (decl); r = convert_from_reference (DECL_INITIAL (decl)); if (integral_constant_expression_p && !dependent_type_p (TREE_TYPE (decl)) && !(INTEGRAL_OR_ENUMERATION_TYPE_P (TREE_TYPE (r)))) { if (!allow_non_integral_constant_expression_p) error ("template parameter %qD of type %qT is not allowed in " "an integral constant expression because it is not of " "integral or enumeration type", decl, TREE_TYPE (decl)); *non_integral_constant_expression_p = true; } return r; } else { bool dependent_p; /* If the declaration was explicitly qualified indicate that. The semantics of `A::f(3)' are different than `f(3)' if `f' is virtual. */ *idk = (scope ? CP_ID_KIND_QUALIFIED : (TREE_CODE (decl) == TEMPLATE_ID_EXPR ? CP_ID_KIND_TEMPLATE_ID : CP_ID_KIND_UNQUALIFIED)); /* [temp.dep.expr] An id-expression is type-dependent if it contains an identifier that was declared with a dependent type. The standard is not very specific about an id-expression that names a set of overloaded functions. What if some of them have dependent types and some of them do not? Presumably, such a name should be treated as a dependent name. */ /* Assume the name is not dependent. */ dependent_p = false; if (!processing_template_decl) /* No names are dependent outside a template. */ ; else if (TREE_CODE (decl) == CONST_DECL) /* We don't want to treat enumerators as dependent. */ ; /* A template-id where the name of the template was not resolved is definitely dependent. */ else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR && (identifier_p (TREE_OPERAND (decl, 0)))) dependent_p = true; /* For anything except an overloaded function, just check its type. */ else if (!is_overloaded_fn (decl)) dependent_p = dependent_type_p (TREE_TYPE (decl)); /* For a set of overloaded functions, check each of the functions. */ else { tree fns = decl; if (BASELINK_P (fns)) fns = BASELINK_FUNCTIONS (fns); /* For a template-id, check to see if the template arguments are dependent. */ if (TREE_CODE (fns) == TEMPLATE_ID_EXPR) { tree args = TREE_OPERAND (fns, 1); dependent_p = any_dependent_template_arguments_p (args); /* The functions are those referred to by the template-id. */ fns = TREE_OPERAND (fns, 0); } /* If there are no dependent template arguments, go through the overloaded functions. */ while (fns && !dependent_p) { tree fn = OVL_CURRENT (fns); /* Member functions of dependent classes are dependent. */ if (TREE_CODE (fn) == FUNCTION_DECL && type_dependent_expression_p (fn)) dependent_p = true; else if (TREE_CODE (fn) == TEMPLATE_DECL && dependent_template_p (fn)) dependent_p = true; fns = OVL_NEXT (fns); } } /* If the name was dependent on a template parameter, we will resolve the name at instantiation time. */ if (dependent_p) { /* Create a SCOPE_REF for qualified names, if the scope is dependent. 
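   For example (illustrative), `T::value' where T is a template parameter:
   the name cannot be looked up until T is known, so we build a SCOPE_REF
   to be resolved at instantiation time.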
*/ if (scope) { if (TYPE_P (scope)) { if (address_p && done) decl = finish_qualified_id_expr (scope, decl, done, address_p, template_p, template_arg_p, tf_warning_or_error); else { tree type = NULL_TREE; if (DECL_P (decl) && !dependent_scope_p (scope)) type = TREE_TYPE (decl); decl = build_qualified_name (type, scope, id_expression, template_p); } } if (TREE_TYPE (decl)) decl = convert_from_reference (decl); return decl; } /* A TEMPLATE_ID already contains all the information we need. */ if (TREE_CODE (id_expression) == TEMPLATE_ID_EXPR) return id_expression; *idk = CP_ID_KIND_UNQUALIFIED_DEPENDENT; /* If we found a variable, then name lookup during the instantiation will always resolve to the same VAR_DECL (or an instantiation thereof). */ if (VAR_P (decl) || TREE_CODE (decl) == PARM_DECL) { mark_used (decl); return convert_from_reference (decl); } /* The same is true for FIELD_DECL, but we also need to make sure that the syntax is correct. */ else if (TREE_CODE (decl) == FIELD_DECL) { /* Since SCOPE is NULL here, this is an unqualified name. Access checking has been performed during name lookup already. Turn off checking to avoid duplicate errors. */ push_deferring_access_checks (dk_no_check); decl = finish_non_static_data_member (decl, NULL_TREE, /*qualifying_scope=*/NULL_TREE); pop_deferring_access_checks (); return decl; } return id_expression; } if (TREE_CODE (decl) == NAMESPACE_DECL) { error ("use of namespace %qD as expression", decl); return error_mark_node; } else if (DECL_CLASS_TEMPLATE_P (decl)) { error ("use of class template %qT as expression", decl); return error_mark_node; } else if (TREE_CODE (decl) == TREE_LIST) { /* Ambiguous reference to base members. */ error ("request for member %qD is ambiguous in " "multiple inheritance lattice", id_expression); print_candidates (decl); return error_mark_node; } /* Mark variable-like entities as used. Functions are similarly marked either below or after overload resolution. */ if ((VAR_P (decl) || TREE_CODE (decl) == PARM_DECL || TREE_CODE (decl) == CONST_DECL || TREE_CODE (decl) == RESULT_DECL) && !mark_used (decl)) return error_mark_node; /* Only certain kinds of names are allowed in constant expression. Template parameters have already been handled above. */ if (! error_operand_p (decl) && integral_constant_expression_p && ! decl_constant_var_p (decl) && TREE_CODE (decl) != CONST_DECL && ! builtin_valid_in_constant_expr_p (decl)) { if (!allow_non_integral_constant_expression_p) { error ("%qD cannot appear in a constant-expression", decl); return error_mark_node; } *non_integral_constant_expression_p = true; } tree wrap; if (VAR_P (decl) && !cp_unevaluated_operand && !processing_template_decl && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)) && DECL_THREAD_LOCAL_P (decl) && (wrap = get_tls_wrapper_fn (decl))) { /* Replace an evaluated use of the thread_local variable with a call to its wrapper. 
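   Under the Itanium C++ ABI, a thread_local variable that may need
   dynamic initialization is accessed through a per-variable wrapper
   function that performs the initialization on first use; the call built
   below invokes that wrapper rather than reading the variable directly.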
*/ decl = build_cxx_call (wrap, 0, NULL, tf_warning_or_error); } else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR && variable_template_p (TREE_OPERAND (decl, 0))) { decl = finish_template_variable (decl); mark_used (decl); } else if (scope) { decl = (adjust_result_of_qualified_name_lookup (decl, scope, current_nonlambda_class_type())); if (TREE_CODE (decl) == FUNCTION_DECL) mark_used (decl); if (TYPE_P (scope)) decl = finish_qualified_id_expr (scope, decl, done, address_p, template_p, template_arg_p, tf_warning_or_error); else decl = convert_from_reference (decl); } else if (TREE_CODE (decl) == FIELD_DECL) { /* Since SCOPE is NULL here, this is an unqualified name. Access checking has been performed during name lookup already. Turn off checking to avoid duplicate errors. */ push_deferring_access_checks (dk_no_check); decl = finish_non_static_data_member (decl, NULL_TREE, /*qualifying_scope=*/NULL_TREE); pop_deferring_access_checks (); } else if (is_overloaded_fn (decl)) { tree first_fn; first_fn = get_first_fn (decl); if (TREE_CODE (first_fn) == TEMPLATE_DECL) first_fn = DECL_TEMPLATE_RESULT (first_fn); if (!really_overloaded_fn (decl) && !mark_used (first_fn)) return error_mark_node; if (!template_arg_p && TREE_CODE (first_fn) == FUNCTION_DECL && DECL_FUNCTION_MEMBER_P (first_fn) && !shared_member_p (decl)) { /* A set of member functions. */ decl = maybe_dummy_object (DECL_CONTEXT (first_fn), 0); return finish_class_member_access_expr (decl, id_expression, /*template_p=*/false, tf_warning_or_error); } decl = baselink_for_fns (decl); } else { if (DECL_P (decl) && DECL_NONLOCAL (decl) && DECL_CLASS_SCOPE_P (decl)) { tree context = context_for_name_lookup (decl); if (context != current_class_type) { tree path = currently_open_derived_class (context); perform_or_defer_access_check (TYPE_BINFO (path), decl, decl, tf_warning_or_error); } } decl = convert_from_reference (decl); } } /* Handle references (c++/56130). */ tree t = REFERENCE_REF_P (decl) ? TREE_OPERAND (decl, 0) : decl; if (TREE_DEPRECATED (t)) warn_deprecated_use (t, NULL_TREE); return decl; } /* Implement the __typeof keyword: Return the type of EXPR, suitable for use as a type-specifier. */ tree finish_typeof (tree expr) { tree type; if (type_dependent_expression_p (expr)) { type = cxx_make_type (TYPEOF_TYPE); TYPEOF_TYPE_EXPR (type) = expr; SET_TYPE_STRUCTURAL_EQUALITY (type); return type; } expr = mark_type_use (expr); type = unlowered_expr_type (expr); if (!type || type == unknown_type_node) { error ("type of %qE is unknown", expr); return error_mark_node; } return type; } /* Implement the __underlying_type keyword: Return the underlying type of TYPE, suitable for use as a type-specifier. */ tree finish_underlying_type (tree type) { tree underlying_type; if (processing_template_decl) { underlying_type = cxx_make_type (UNDERLYING_TYPE); UNDERLYING_TYPE_TYPE (underlying_type) = type; SET_TYPE_STRUCTURAL_EQUALITY (underlying_type); return underlying_type; } complete_type (type); if (TREE_CODE (type) != ENUMERAL_TYPE) { error ("%qT is not an enumeration type", type); return error_mark_node; } underlying_type = ENUM_UNDERLYING_TYPE (type); /* Fixup necessary in this case because ENUM_UNDERLYING_TYPE includes TYPE_MIN_VALUE and TYPE_MAX_VALUE information. See finish_enum_value_list for details. 
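   For example (illustrative), for `enum E { a, b }' with no fixed
   underlying type, __underlying_type (E) should be the full integer type
   selected for E (say, unsigned int), not an internal variant whose range
   has been narrowed to [0, 1].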
*/ if (!ENUM_FIXED_UNDERLYING_TYPE_P (type)) underlying_type = c_common_type_for_mode (TYPE_MODE (underlying_type), TYPE_UNSIGNED (underlying_type)); return underlying_type; } /* Implement the __direct_bases keyword: Return the direct base classes of type */ tree calculate_direct_bases (tree type) { vec<tree, va_gc> *vector = make_tree_vector(); tree bases_vec = NULL_TREE; vec<tree, va_gc> *base_binfos; tree binfo; unsigned i; complete_type (type); if (!NON_UNION_CLASS_TYPE_P (type)) return make_tree_vec (0); base_binfos = BINFO_BASE_BINFOS (TYPE_BINFO (type)); /* Virtual bases are initialized first */ for (i = 0; base_binfos->iterate (i, &binfo); i++) { if (BINFO_VIRTUAL_P (binfo)) { vec_safe_push (vector, binfo); } } /* Now non-virtuals */ for (i = 0; base_binfos->iterate (i, &binfo); i++) { if (!BINFO_VIRTUAL_P (binfo)) { vec_safe_push (vector, binfo); } } bases_vec = make_tree_vec (vector->length ()); for (i = 0; i < vector->length (); ++i) { TREE_VEC_ELT (bases_vec, i) = BINFO_TYPE ((*vector)[i]); } return bases_vec; } /* Implement the __bases keyword: Return the base classes of type */ /* Find morally non-virtual base classes by walking binfo hierarchy */ /* Virtual base classes are handled separately in finish_bases */ static tree dfs_calculate_bases_pre (tree binfo, void * /*data_*/) { /* Don't walk bases of virtual bases */ return BINFO_VIRTUAL_P (binfo) ? dfs_skip_bases : NULL_TREE; } static tree dfs_calculate_bases_post (tree binfo, void *data_) { vec<tree, va_gc> **data = ((vec<tree, va_gc> **) data_); if (!BINFO_VIRTUAL_P (binfo)) { vec_safe_push (*data, BINFO_TYPE (binfo)); } return NULL_TREE; } /* Calculates the morally non-virtual base classes of a class */ static vec<tree, va_gc> * calculate_bases_helper (tree type) { vec<tree, va_gc> *vector = make_tree_vector(); /* Now add non-virtual base classes in order of construction */ dfs_walk_all (TYPE_BINFO (type), dfs_calculate_bases_pre, dfs_calculate_bases_post, &vector); return vector; } tree calculate_bases (tree type) { vec<tree, va_gc> *vector = make_tree_vector(); tree bases_vec = NULL_TREE; unsigned i; vec<tree, va_gc> *vbases; vec<tree, va_gc> *nonvbases; tree binfo; complete_type (type); if (!NON_UNION_CLASS_TYPE_P (type)) return make_tree_vec (0); /* First go through virtual base classes */ for (vbases = CLASSTYPE_VBASECLASSES (type), i = 0; vec_safe_iterate (vbases, i, &binfo); i++) { vec<tree, va_gc> *vbase_bases; vbase_bases = calculate_bases_helper (BINFO_TYPE (binfo)); vec_safe_splice (vector, vbase_bases); release_tree_vector (vbase_bases); } /* Now for the non-virtual bases */ nonvbases = calculate_bases_helper (type); vec_safe_splice (vector, nonvbases); release_tree_vector (nonvbases); /* Last element is entire class, so don't copy */ bases_vec = make_tree_vec (vector->length () - 1); for (i = 0; i < vector->length () - 1; ++i) { TREE_VEC_ELT (bases_vec, i) = (*vector)[i]; } release_tree_vector (vector); return bases_vec; } tree finish_bases (tree type, bool direct) { tree bases = NULL_TREE; if (!processing_template_decl) { /* Parameter packs can only be used in templates */ error ("Parameter pack __bases only valid in template declaration"); return error_mark_node; } bases = cxx_make_type (BASES); BASES_TYPE (bases) = type; BASES_DIRECT (bases) = direct; SET_TYPE_STRUCTURAL_EQUALITY (bases); return bases; } /* Perform C++-specific checks for __builtin_offsetof before calling fold_offsetof. 
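   For example (illustrative), applying `offsetof' to a member function,
   or to a member of a non-standard-layout class (which draws
   -Winvalid-offsetof below), is diagnosed here rather than in the
   language-independent fold_offsetof.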
*/ tree finish_offsetof (tree expr, location_t loc) { /* If we're processing a template, we can't finish the semantics yet. Otherwise we can fold the entire expression now. */ if (processing_template_decl) { expr = build1 (OFFSETOF_EXPR, size_type_node, expr); SET_EXPR_LOCATION (expr, loc); return expr; } if (TREE_CODE (expr) == PSEUDO_DTOR_EXPR) { error ("cannot apply %<offsetof%> to destructor %<~%T%>", TREE_OPERAND (expr, 2)); return error_mark_node; } if (TREE_CODE (TREE_TYPE (expr)) == FUNCTION_TYPE || TREE_CODE (TREE_TYPE (expr)) == METHOD_TYPE || TREE_TYPE (expr) == unknown_type_node) { if (INDIRECT_REF_P (expr)) error ("second operand of %<offsetof%> is neither a single " "identifier nor a sequence of member accesses and " "array references"); else { if (TREE_CODE (expr) == COMPONENT_REF || TREE_CODE (expr) == COMPOUND_EXPR) expr = TREE_OPERAND (expr, 1); error ("cannot apply %<offsetof%> to member function %qD", expr); } return error_mark_node; } if (REFERENCE_REF_P (expr)) expr = TREE_OPERAND (expr, 0); if (TREE_CODE (expr) == COMPONENT_REF) { tree object = TREE_OPERAND (expr, 0); if (!complete_type_or_else (TREE_TYPE (object), object)) return error_mark_node; if (warn_invalid_offsetof && CLASS_TYPE_P (TREE_TYPE (object)) && CLASSTYPE_NON_STD_LAYOUT (TREE_TYPE (object)) && cp_unevaluated_operand == 0) pedwarn (loc, OPT_Winvalid_offsetof, "offsetof within non-standard-layout type %qT is undefined", TREE_TYPE (object)); } return fold_offsetof (expr); } /* Replace the AGGR_INIT_EXPR at *TP with an equivalent CALL_EXPR. This function is broken out from the above for the benefit of the tree-ssa project. */ void simplify_aggr_init_expr (tree *tp) { tree aggr_init_expr = *tp; /* Form an appropriate CALL_EXPR. */ tree fn = AGGR_INIT_EXPR_FN (aggr_init_expr); tree slot = AGGR_INIT_EXPR_SLOT (aggr_init_expr); tree type = TREE_TYPE (slot); tree call_expr; enum style_t { ctor, arg, pcc } style; if (AGGR_INIT_VIA_CTOR_P (aggr_init_expr)) style = ctor; #ifdef PCC_STATIC_STRUCT_RETURN else if (1) style = pcc; #endif else { gcc_assert (TREE_ADDRESSABLE (type)); style = arg; } call_expr = build_call_array_loc (input_location, TREE_TYPE (TREE_TYPE (TREE_TYPE (fn))), fn, aggr_init_expr_nargs (aggr_init_expr), AGGR_INIT_EXPR_ARGP (aggr_init_expr)); TREE_NOTHROW (call_expr) = TREE_NOTHROW (aggr_init_expr); CALL_EXPR_LIST_INIT_P (call_expr) = CALL_EXPR_LIST_INIT_P (aggr_init_expr); if (style == ctor) { /* Replace the first argument to the ctor with the address of the slot. */ cxx_mark_addressable (slot); CALL_EXPR_ARG (call_expr, 0) = build1 (ADDR_EXPR, build_pointer_type (type), slot); } else if (style == arg) { /* Just mark it addressable here, and leave the rest to expand_call{,_inline}. */ cxx_mark_addressable (slot); CALL_EXPR_RETURN_SLOT_OPT (call_expr) = true; call_expr = build2 (INIT_EXPR, TREE_TYPE (call_expr), slot, call_expr); } else if (style == pcc) { /* If we're using the non-reentrant PCC calling convention, then we need to copy the returned value out of the static buffer into the SLOT. 
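   (The buffer is static rather than per-call, so a subsequent call would
   clobber this result; hence the explicit copy into SLOT.)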
*/ push_deferring_access_checks (dk_no_check); call_expr = build_aggr_init (slot, call_expr, DIRECT_BIND | LOOKUP_ONLYCONVERTING, tf_warning_or_error); pop_deferring_access_checks (); call_expr = build2 (COMPOUND_EXPR, TREE_TYPE (slot), call_expr, slot); } if (AGGR_INIT_ZERO_FIRST (aggr_init_expr)) { tree init = build_zero_init (type, NULL_TREE, /*static_storage_p=*/false); init = build2 (INIT_EXPR, void_type_node, slot, init); call_expr = build2 (COMPOUND_EXPR, TREE_TYPE (call_expr), init, call_expr); } *tp = call_expr; } /* Emit all thunks to FN that should be emitted when FN is emitted. */ void emit_associated_thunks (tree fn) { /* When we use vcall offsets, we emit thunks with the virtual functions to which they thunk. The whole point of vcall offsets is so that you can know statically the entire set of thunks that will ever be needed for a given virtual function, thereby enabling you to output all the thunks with the function itself. */ if (DECL_VIRTUAL_P (fn) /* Do not emit thunks for extern template instantiations. */ && ! DECL_REALLY_EXTERN (fn)) { tree thunk; for (thunk = DECL_THUNKS (fn); thunk; thunk = DECL_CHAIN (thunk)) { if (!THUNK_ALIAS (thunk)) { use_thunk (thunk, /*emit_p=*/1); if (DECL_RESULT_THUNK_P (thunk)) { tree probe; for (probe = DECL_THUNKS (thunk); probe; probe = DECL_CHAIN (probe)) use_thunk (probe, /*emit_p=*/1); } } else gcc_assert (!DECL_THUNKS (thunk)); } } } /* Generate RTL for FN. */ bool expand_or_defer_fn_1 (tree fn) { /* When the parser calls us after finishing the body of a template function, we don't really want to expand the body. */ if (processing_template_decl) { /* Normally, collection only occurs in rest_of_compilation. So, if we don't collect here, we never collect junk generated during the processing of templates until we hit a non-template function. It's not safe to do this inside a nested class, though, as the parser may have local state that is not a GC root. */ if (!function_depth) ggc_collect (); return false; } gcc_assert (DECL_SAVED_TREE (fn)); /* We make a decision about linkage for these functions at the end of the compilation. Until that point, we do not want the back end to output them -- but we do want it to see the bodies of these functions so that it can inline them as appropriate. */ if (DECL_DECLARED_INLINE_P (fn) || DECL_IMPLICIT_INSTANTIATION (fn)) { if (DECL_INTERFACE_KNOWN (fn)) /* We've already made a decision as to how this function will be handled. */; else if (!at_eof) tentative_decl_linkage (fn); else import_export_decl (fn); /* If the user wants us to keep all inline functions, then mark this function as needed so that finish_file will make sure to output it later. Similarly, all dllexport'd functions must be emitted; there may be callers in other DLLs. */ if (DECL_DECLARED_INLINE_P (fn) && !DECL_REALLY_EXTERN (fn) && (flag_keep_inline_functions || (flag_keep_inline_dllexport && lookup_attribute ("dllexport", DECL_ATTRIBUTES (fn))))) { mark_needed (fn); DECL_EXTERNAL (fn) = 0; } } /* If this is a constructor or destructor body, we have to clone it. */ if (maybe_clone_body (fn)) { /* We don't want to process FN again, so pretend we've written it out, even though we haven't. */ TREE_ASM_WRITTEN (fn) = 1; /* If this is an instantiation of a constexpr function, keep DECL_SAVED_TREE for explain_invalid_constexpr_fn. */ if (!is_instantiation_of_constexpr (fn)) DECL_SAVED_TREE (fn) = NULL_TREE; return false; } /* There's no reason to do any of the work here if we're only doing semantic analysis; this code just generates RTL. 
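   (flag_syntax_only corresponds to the -fsyntax-only option.)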
*/ if (flag_syntax_only) return false; return true; } void expand_or_defer_fn (tree fn) { if (expand_or_defer_fn_1 (fn)) { function_depth++; /* Expand or defer, at the whim of the compilation unit manager. */ cgraph_node::finalize_function (fn, function_depth > 1); emit_associated_thunks (fn); function_depth--; } } struct nrv_data { nrv_data () : visited (37) {} tree var; tree result; hash_table<pointer_hash <tree_node> > visited; }; /* Helper function for walk_tree, used by finalize_nrv below. */ static tree finalize_nrv_r (tree* tp, int* walk_subtrees, void* data) { struct nrv_data *dp = (struct nrv_data *)data; tree_node **slot; /* No need to walk into types. There wouldn't be any need to walk into non-statements, except that we have to consider STMT_EXPRs. */ if (TYPE_P (*tp)) *walk_subtrees = 0; /* Change all returns to just refer to the RESULT_DECL; this is a nop, but differs from using NULL_TREE in that it indicates that we care about the value of the RESULT_DECL. */ else if (TREE_CODE (*tp) == RETURN_EXPR) TREE_OPERAND (*tp, 0) = dp->result; /* Change all cleanups for the NRV to only run when an exception is thrown. */ else if (TREE_CODE (*tp) == CLEANUP_STMT && CLEANUP_DECL (*tp) == dp->var) CLEANUP_EH_ONLY (*tp) = 1; /* Replace the DECL_EXPR for the NRV with an initialization of the RESULT_DECL, if needed. */ else if (TREE_CODE (*tp) == DECL_EXPR && DECL_EXPR_DECL (*tp) == dp->var) { tree init; if (DECL_INITIAL (dp->var) && DECL_INITIAL (dp->var) != error_mark_node) init = build2 (INIT_EXPR, void_type_node, dp->result, DECL_INITIAL (dp->var)); else init = build_empty_stmt (EXPR_LOCATION (*tp)); DECL_INITIAL (dp->var) = NULL_TREE; SET_EXPR_LOCATION (init, EXPR_LOCATION (*tp)); *tp = init; } /* And replace all uses of the NRV with the RESULT_DECL. */ else if (*tp == dp->var) *tp = dp->result; /* Avoid walking into the same tree more than once. Unfortunately, we can't just use walk_tree_without_duplicates because it would only call us for the first occurrence of dp->var in the function body. */ slot = dp->visited.find_slot (*tp, INSERT); if (*slot) *walk_subtrees = 0; else *slot = *tp; /* Keep iterating. */ return NULL_TREE; } /* Called from finish_function to implement the named return value optimization by overriding all the RETURN_EXPRs and pertinent CLEANUP_STMTs and replacing all occurrences of VAR with RESULT, the RESULT_DECL for the function. */ void finalize_nrv (tree *tp, tree var, tree result) { struct nrv_data data; /* Copy name from VAR to RESULT. */ DECL_NAME (result) = DECL_NAME (var); /* Don't forget that we take its address. */ TREE_ADDRESSABLE (result) = TREE_ADDRESSABLE (var); /* Finally set DECL_VALUE_EXPR so that no stack slot is assigned to the original var at -O0, and so that debug info uses RESULT's location for VAR. */ SET_DECL_VALUE_EXPR (var, result); DECL_HAS_VALUE_EXPR_P (var) = 1; data.var = var; data.result = result; cp_walk_tree (tp, finalize_nrv_r, &data, 0); } /* Create CP_OMP_CLAUSE_INFO for clause C. Returns true if it is invalid. */ bool cxx_omp_create_clause_info (tree c, tree type, bool need_default_ctor, bool need_copy_ctor, bool need_copy_assignment, bool need_dtor) { int save_errorcount = errorcount; tree info, t; /* Always allocate 3 elements for simplicity. These are the function decls for the ctor, dtor, and assignment op. This layout is known to the three lang hooks, cxx_omp_clause_default_init, cxx_omp_clause_copy_init, and cxx_omp_clause_assign_op.
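   Concretely: element 0 holds the (default or copy) constructor, element
   1 the destructor, and element 2 the copy-assignment operator; trivial
   operations are left as NULL_TREE.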
*/ info = make_tree_vec (3); CP_OMP_CLAUSE_INFO (c) = info; if (need_default_ctor || need_copy_ctor) { if (need_default_ctor) t = get_default_ctor (type); else t = get_copy_ctor (type, tf_warning_or_error); if (t && !trivial_fn_p (t)) TREE_VEC_ELT (info, 0) = t; } if (need_dtor && TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type)) TREE_VEC_ELT (info, 1) = get_dtor (type, tf_warning_or_error); if (need_copy_assignment) { t = get_copy_assign (type); if (t && !trivial_fn_p (t)) TREE_VEC_ELT (info, 2) = t; } return errorcount != save_errorcount; } /* Helper function for handle_omp_array_sections. Called recursively to handle multiple array-section-subscripts. C is the clause, T current expression (initially OMP_CLAUSE_DECL), which is either a TREE_LIST for array-section-subscript (TREE_PURPOSE is low-bound expression if specified, TREE_VALUE length expression if specified, TREE_CHAIN is what it has been specified after), or some decl. TYPES vector is populated with array section types, MAYBE_ZERO_LEN set to true if any of the array-section-subscript could have length of zero (explicit or implicit), FIRST_NON_ONE is the index of the first array-section-subscript which is known not to have length of one. Given say: map(a[:b][2:1][:c][:2][:d][e:f][2:5]) FIRST_NON_ONE will be 3, array-section-subscript [:b], [2:1] and [:c] all are or may have length of 1, array-section-subscript [:2] is the first one known not to have length 1. For array-section-subscript <= FIRST_NON_ONE we diagnose non-contiguous arrays if low bound isn't 0 or length isn't the array domain max + 1, for > FIRST_NON_ONE we can do so only if MAYBE_ZERO_LEN is false. MAYBE_ZERO_LEN will be true in the above case though, as some lengths could be zero. */ static tree handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types, bool &maybe_zero_len, unsigned int &first_non_one) { tree ret, low_bound, length, type; if (TREE_CODE (t) != TREE_LIST) { if (error_operand_p (t)) return error_mark_node; if (type_dependent_expression_p (t)) return NULL_TREE; if (TREE_CODE (t) != VAR_DECL && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) return NULL_TREE; if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } else if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND && TREE_CODE (t) == VAR_DECL && DECL_THREAD_LOCAL_P (t)) { error_at (OMP_CLAUSE_LOCATION (c), "%qD is threadprivate variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } t = convert_from_reference (t); return t; } ret = handle_omp_array_sections_1 (c, TREE_CHAIN (t), types, maybe_zero_len, first_non_one); if (ret == error_mark_node || ret == NULL_TREE) return ret; type = TREE_TYPE (ret); low_bound = TREE_PURPOSE (t); length = TREE_VALUE (t); if ((low_bound && type_dependent_expression_p (low_bound)) || (length && type_dependent_expression_p (length))) return NULL_TREE; if (low_bound == error_mark_node || length == error_mark_node) return error_mark_node; if (low_bound && !INTEGRAL_TYPE_P (TREE_TYPE (low_bound))) { error_at (OMP_CLAUSE_LOCATION (c), "low bound %qE of array section does not have integral type", low_bound); return error_mark_node; } if (length && !INTEGRAL_TYPE_P (TREE_TYPE (length))) { error_at (OMP_CLAUSE_LOCATION (c), "length %qE of array section does not have integral type", length); return error_mark_node; }
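  /* Normalize the bounds: mark them as used, fold constant bounds wider
     than sizetype down to sizetype, and treat a missing low bound as
     zero.  */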
if (low_bound) low_bound = mark_rvalue_use (low_bound); if (length) length = mark_rvalue_use (length); if (low_bound && TREE_CODE (low_bound) == INTEGER_CST && TYPE_PRECISION (TREE_TYPE (low_bound)) > TYPE_PRECISION (sizetype)) low_bound = fold_convert (sizetype, low_bound); if (length && TREE_CODE (length) == INTEGER_CST && TYPE_PRECISION (TREE_TYPE (length)) > TYPE_PRECISION (sizetype)) length = fold_convert (sizetype, length); if (low_bound == NULL_TREE) low_bound = integer_zero_node; if (length != NULL_TREE) { if (!integer_nonzerop (length)) maybe_zero_len = true; if (first_non_one == types.length () && (TREE_CODE (length) != INTEGER_CST || integer_onep (length))) first_non_one++; } if (TREE_CODE (type) == ARRAY_TYPE) { if (length == NULL_TREE && (TYPE_DOMAIN (type) == NULL_TREE || TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE)) { error_at (OMP_CLAUSE_LOCATION (c), "for unknown bound array type length expression must " "be specified"); return error_mark_node; } if (TREE_CODE (low_bound) == INTEGER_CST && tree_int_cst_sgn (low_bound) == -1) { error_at (OMP_CLAUSE_LOCATION (c), "negative low bound in array section in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } if (length != NULL_TREE && TREE_CODE (length) == INTEGER_CST && tree_int_cst_sgn (length) == -1) { error_at (OMP_CLAUSE_LOCATION (c), "negative length in array section in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } if (TYPE_DOMAIN (type) && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) == INTEGER_CST) { tree size = size_binop (PLUS_EXPR, TYPE_MAX_VALUE (TYPE_DOMAIN (type)), size_one_node); if (TREE_CODE (low_bound) == INTEGER_CST) { if (tree_int_cst_lt (size, low_bound)) { error_at (OMP_CLAUSE_LOCATION (c), "low bound %qE above array section size " "in %qs clause", low_bound, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } if (tree_int_cst_equal (size, low_bound)) maybe_zero_len = true; else if (length == NULL_TREE && first_non_one == types.length () && tree_int_cst_equal (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), low_bound)) first_non_one++; } else if (length == NULL_TREE) { maybe_zero_len = true; if (first_non_one == types.length ()) first_non_one++; } if (length && TREE_CODE (length) == INTEGER_CST) { if (tree_int_cst_lt (size, length)) { error_at (OMP_CLAUSE_LOCATION (c), "length %qE above array section size " "in %qs clause", length, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } if (TREE_CODE (low_bound) == INTEGER_CST) { tree lbpluslen = size_binop (PLUS_EXPR, fold_convert (sizetype, low_bound), fold_convert (sizetype, length)); if (TREE_CODE (lbpluslen) == INTEGER_CST && tree_int_cst_lt (size, lbpluslen)) { error_at (OMP_CLAUSE_LOCATION (c), "high bound %qE above array section size " "in %qs clause", lbpluslen, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } } } } else if (length == NULL_TREE) { maybe_zero_len = true; if (first_non_one == types.length ()) first_non_one++; } /* For [lb:] we will need to evaluate lb more than once. 
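   For example (illustrative), in `a[f ():]' the low bound is needed again
   below (e.g. to form the implied length), so cp_save_expr wraps it in a
   SAVE_EXPR to ensure the side effects of f () happen only once.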
*/ if (length == NULL_TREE && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND) { tree lb = cp_save_expr (low_bound); if (lb != low_bound) { TREE_PURPOSE (t) = lb; low_bound = lb; } } } else if (TREE_CODE (type) == POINTER_TYPE) { if (length == NULL_TREE) { error_at (OMP_CLAUSE_LOCATION (c), "for pointer type length expression must be specified"); return error_mark_node; } /* If there is a pointer type anywhere but in the very first array-section-subscript, the array section can't be contiguous. */ if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND && TREE_CODE (TREE_CHAIN (t)) == TREE_LIST) { error_at (OMP_CLAUSE_LOCATION (c), "array section is not contiguous in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } } else { error_at (OMP_CLAUSE_LOCATION (c), "%qE does not have pointer or array type", ret); return error_mark_node; } if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND) types.safe_push (TREE_TYPE (ret)); /* We will need to evaluate lb more than once. */ tree lb = cp_save_expr (low_bound); if (lb != low_bound) { TREE_PURPOSE (t) = lb; low_bound = lb; } ret = grok_array_decl (OMP_CLAUSE_LOCATION (c), ret, low_bound, false); return ret; } /* Handle array sections for clause C. */ static bool handle_omp_array_sections (tree c) { bool maybe_zero_len = false; unsigned int first_non_one = 0; auto_vec<tree> types; tree first = handle_omp_array_sections_1 (c, OMP_CLAUSE_DECL (c), types, maybe_zero_len, first_non_one); if (first == error_mark_node) return true; if (first == NULL_TREE) return false; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND) { tree t = OMP_CLAUSE_DECL (c); tree tem = NULL_TREE; if (processing_template_decl) return false; /* Need to evaluate side effects in the length expressions if any. */ while (TREE_CODE (t) == TREE_LIST) { if (TREE_VALUE (t) && TREE_SIDE_EFFECTS (TREE_VALUE (t))) { if (tem == NULL_TREE) tem = TREE_VALUE (t); else tem = build2 (COMPOUND_EXPR, TREE_TYPE (tem), TREE_VALUE (t), tem); } t = TREE_CHAIN (t); } if (tem) first = build2 (COMPOUND_EXPR, TREE_TYPE (first), tem, first); OMP_CLAUSE_DECL (c) = first; } else { unsigned int num = types.length (), i; tree t, side_effects = NULL_TREE, size = NULL_TREE; tree condition = NULL_TREE; if (int_size_in_bytes (TREE_TYPE (first)) <= 0) maybe_zero_len = true; if (processing_template_decl && maybe_zero_len) return false; for (i = num, t = OMP_CLAUSE_DECL (c); i > 0; t = TREE_CHAIN (t)) { tree low_bound = TREE_PURPOSE (t); tree length = TREE_VALUE (t); i--; if (low_bound && TREE_CODE (low_bound) == INTEGER_CST && TYPE_PRECISION (TREE_TYPE (low_bound)) > TYPE_PRECISION (sizetype)) low_bound = fold_convert (sizetype, low_bound); if (length && TREE_CODE (length) == INTEGER_CST && TYPE_PRECISION (TREE_TYPE (length)) > TYPE_PRECISION (sizetype)) length = fold_convert (sizetype, length); if (low_bound == NULL_TREE) low_bound = integer_zero_node; if (!maybe_zero_len && i > first_non_one) { if (integer_nonzerop (low_bound)) goto do_warn_noncontiguous; if (length != NULL_TREE && TREE_CODE (length) == INTEGER_CST && TYPE_DOMAIN (types[i]) && TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])) && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (types[i]))) == INTEGER_CST) { tree size; size = size_binop (PLUS_EXPR, TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])), size_one_node); if (!tree_int_cst_equal (length, size)) { do_warn_noncontiguous: error_at (OMP_CLAUSE_LOCATION (c), "array section is not contiguous in %qs " "clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return true; } } if (!processing_template_decl && length != NULL_TREE && 
TREE_SIDE_EFFECTS (length)) { if (side_effects == NULL_TREE) side_effects = length; else side_effects = build2 (COMPOUND_EXPR, TREE_TYPE (side_effects), length, side_effects); } } else if (processing_template_decl) continue; else { tree l; if (i > first_non_one && length && integer_nonzerop (length)) continue; if (length) l = fold_convert (sizetype, length); else { l = size_binop (PLUS_EXPR, TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])), size_one_node); l = size_binop (MINUS_EXPR, l, fold_convert (sizetype, low_bound)); } if (i > first_non_one) { l = fold_build2 (NE_EXPR, boolean_type_node, l, size_zero_node); if (condition == NULL_TREE) condition = l; else condition = fold_build2 (BIT_AND_EXPR, boolean_type_node, l, condition); } else if (size == NULL_TREE) { size = size_in_bytes (TREE_TYPE (types[i])); size = size_binop (MULT_EXPR, size, l); if (condition) size = fold_build3 (COND_EXPR, sizetype, condition, size, size_zero_node); } else size = size_binop (MULT_EXPR, size, l); } } if (!processing_template_decl) { if (side_effects) size = build2 (COMPOUND_EXPR, sizetype, side_effects, size); OMP_CLAUSE_DECL (c) = first; OMP_CLAUSE_SIZE (c) = size; if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP) return false; tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP); OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_POINTER); if (!cxx_mark_addressable (t)) return false; OMP_CLAUSE_DECL (c2) = t; t = build_fold_addr_expr (first); t = fold_convert_loc (OMP_CLAUSE_LOCATION (c), ptrdiff_type_node, t); tree ptr = OMP_CLAUSE_DECL (c2); ptr = convert_from_reference (ptr); if (!POINTER_TYPE_P (TREE_TYPE (ptr))) ptr = build_fold_addr_expr (ptr); t = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR, ptrdiff_type_node, t, fold_convert_loc (OMP_CLAUSE_LOCATION (c), ptrdiff_type_node, ptr)); OMP_CLAUSE_SIZE (c2) = t; OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (c); OMP_CLAUSE_CHAIN (c) = c2; ptr = OMP_CLAUSE_DECL (c2); if (TREE_CODE (TREE_TYPE (ptr)) == REFERENCE_TYPE && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (ptr)))) { tree c3 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP); OMP_CLAUSE_SET_MAP_KIND (c3, GOMP_MAP_POINTER); OMP_CLAUSE_DECL (c3) = ptr; OMP_CLAUSE_DECL (c2) = convert_from_reference (ptr); OMP_CLAUSE_SIZE (c3) = size_zero_node; OMP_CLAUSE_CHAIN (c3) = OMP_CLAUSE_CHAIN (c2); OMP_CLAUSE_CHAIN (c2) = c3; } } } return false; } /* Return identifier to look up for omp declare reduction. */ tree omp_reduction_id (enum tree_code reduction_code, tree reduction_id, tree type) { const char *p = NULL; const char *m = NULL; switch (reduction_code) { case PLUS_EXPR: case MULT_EXPR: case MINUS_EXPR: case BIT_AND_EXPR: case BIT_XOR_EXPR: case BIT_IOR_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: reduction_id = ansi_opname (reduction_code); break; case MIN_EXPR: p = "min"; break; case MAX_EXPR: p = "max"; break; default: break; } if (p == NULL) { if (TREE_CODE (reduction_id) != IDENTIFIER_NODE) return error_mark_node; p = IDENTIFIER_POINTER (reduction_id); } if (type != NULL_TREE) m = mangle_type_string (TYPE_MAIN_VARIANT (type)); const char prefix[] = "omp declare reduction "; size_t lenp = sizeof (prefix); if (strncmp (p, prefix, lenp - 1) == 0) lenp = 1; size_t len = strlen (p); size_t lenm = m ? 
strlen (m) + 1 : 0; char *name = XALLOCAVEC (char, lenp + len + lenm); if (lenp > 1) memcpy (name, prefix, lenp - 1); memcpy (name + lenp - 1, p, len + 1); if (m) { name[lenp + len - 1] = '~'; memcpy (name + lenp + len, m, lenm); } return get_identifier (name); } /* Lookup OpenMP UDR ID for TYPE, return the corresponding artificial FUNCTION_DECL or NULL_TREE if not found. */ static tree omp_reduction_lookup (location_t loc, tree id, tree type, tree *baselinkp, vec<tree> *ambiguousp) { tree orig_id = id; tree baselink = NULL_TREE; if (identifier_p (id)) { cp_id_kind idk; bool nonint_cst_expression_p; const char *error_msg; id = omp_reduction_id (ERROR_MARK, id, type); tree decl = lookup_name (id); if (decl == NULL_TREE) decl = error_mark_node; id = finish_id_expression (id, decl, NULL_TREE, &idk, false, true, &nonint_cst_expression_p, false, true, false, false, &error_msg, loc); if (idk == CP_ID_KIND_UNQUALIFIED && identifier_p (id)) { vec<tree, va_gc> *args = NULL; vec_safe_push (args, build_reference_type (type)); id = perform_koenig_lookup (id, args, tf_none); } } else if (TREE_CODE (id) == SCOPE_REF) id = lookup_qualified_name (TREE_OPERAND (id, 0), omp_reduction_id (ERROR_MARK, TREE_OPERAND (id, 1), type), false, false); tree fns = id; if (id && is_overloaded_fn (id)) id = get_fns (id); for (; id; id = OVL_NEXT (id)) { tree fndecl = OVL_CURRENT (id); if (TREE_CODE (fndecl) == FUNCTION_DECL) { tree argtype = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl))); if (same_type_p (TREE_TYPE (argtype), type)) break; } } if (id && BASELINK_P (fns)) { if (baselinkp) *baselinkp = fns; else baselink = fns; } if (id == NULL_TREE && CLASS_TYPE_P (type) && TYPE_BINFO (type)) { vec<tree> ambiguous = vNULL; tree binfo = TYPE_BINFO (type), base_binfo, ret = NULL_TREE; unsigned int ix; if (ambiguousp == NULL) ambiguousp = &ambiguous; for (ix = 0; BINFO_BASE_ITERATE (binfo, ix, base_binfo); ix++) { id = omp_reduction_lookup (loc, orig_id, BINFO_TYPE (base_binfo), baselinkp ? baselinkp : &baselink, ambiguousp); if (id == NULL_TREE) continue; if (!ambiguousp->is_empty ()) ambiguousp->safe_push (id); else if (ret != NULL_TREE) { ambiguousp->safe_push (ret); ambiguousp->safe_push (id); ret = NULL_TREE; } else ret = id; } if (ambiguousp != &ambiguous) return ret; if (!ambiguous.is_empty ()) { const char *str = _("candidates are:"); unsigned int idx; tree udr; error_at (loc, "user defined reduction lookup is ambiguous"); FOR_EACH_VEC_ELT (ambiguous, idx, udr) { inform (DECL_SOURCE_LOCATION (udr), "%s %#D", str, udr); if (idx == 0) str = get_spaces (str); } ambiguous.release (); ret = error_mark_node; baselink = NULL_TREE; } id = ret; } if (id && baselink) perform_or_defer_access_check (BASELINK_BINFO (baselink), id, id, tf_warning_or_error); return id; } /* Helper function for cp_parser_omp_declare_reduction_exprs and tsubst_omp_udr. Remove CLEANUP_STMT for data (omp_priv variable). Also append INIT_EXPR for DECL_INITIAL of omp_priv after its DECL_EXPR. 
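   For illustration, given a hypothetical user-defined reduction such as

     #pragma omp declare reduction (merge : T : omp_out.merge (omp_in)) initializer (omp_priv = T ())

   the initializer clause is parsed as a declaration of omp_priv, and this
   walker both drops the cleanup emitted for that declaration and splits
   its DECL_INITIAL into a separate INIT_EXPR statement.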
*/ tree cp_remove_omp_priv_cleanup_stmt (tree *tp, int *walk_subtrees, void *data) { if (TYPE_P (*tp)) *walk_subtrees = 0; else if (TREE_CODE (*tp) == CLEANUP_STMT && CLEANUP_DECL (*tp) == (tree) data) *tp = CLEANUP_BODY (*tp); else if (TREE_CODE (*tp) == DECL_EXPR) { tree decl = DECL_EXPR_DECL (*tp); if (!processing_template_decl && decl == (tree) data && DECL_INITIAL (decl) && DECL_INITIAL (decl) != error_mark_node) { tree list = NULL_TREE; append_to_statement_list_force (*tp, &list); tree init_expr = build2 (INIT_EXPR, void_type_node, decl, DECL_INITIAL (decl)); DECL_INITIAL (decl) = NULL_TREE; append_to_statement_list_force (init_expr, &list); *tp = list; } } return NULL_TREE; } /* Data passed from cp_check_omp_declare_reduction to cp_check_omp_declare_reduction_r. */ struct cp_check_omp_declare_reduction_data { location_t loc; tree stmts[7]; bool combiner_p; }; /* Helper function for cp_check_omp_declare_reduction, called via cp_walk_tree. */ static tree cp_check_omp_declare_reduction_r (tree *tp, int *, void *data) { struct cp_check_omp_declare_reduction_data *udr_data = (struct cp_check_omp_declare_reduction_data *) data; if (SSA_VAR_P (*tp) && !DECL_ARTIFICIAL (*tp) && *tp != DECL_EXPR_DECL (udr_data->stmts[udr_data->combiner_p ? 0 : 3]) && *tp != DECL_EXPR_DECL (udr_data->stmts[udr_data->combiner_p ? 1 : 4])) { location_t loc = udr_data->loc; if (udr_data->combiner_p) error_at (loc, "%<#pragma omp declare reduction%> combiner refers to " "variable %qD which is not %<omp_out%> nor %<omp_in%>", *tp); else error_at (loc, "%<#pragma omp declare reduction%> initializer refers " "to variable %qD which is not %<omp_priv%> nor " "%<omp_orig%>", *tp); return *tp; } return NULL_TREE; } /* Diagnose violation of OpenMP #pragma omp declare reduction restrictions. 
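   For instance, hypothetical declarations such as

     #pragma omp declare reduction (+ : int : omp_out += omp_in)
     #pragma omp declare reduction (foo : int& : omp_out += omp_in)

   are both rejected below: the first redeclares a reduction that is
   predefined for arithmetic types, the second names a reference type.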
*/

void
cp_check_omp_declare_reduction (tree udr)
{
  tree type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (udr)));
  gcc_assert (TREE_CODE (type) == REFERENCE_TYPE);
  type = TREE_TYPE (type);
  int i;
  location_t loc = DECL_SOURCE_LOCATION (udr);

  if (type == error_mark_node)
    return;
  if (ARITHMETIC_TYPE_P (type))
    {
      static enum tree_code predef_codes[]
	= { PLUS_EXPR, MULT_EXPR, MINUS_EXPR, BIT_AND_EXPR, BIT_XOR_EXPR,
	    BIT_IOR_EXPR, TRUTH_ANDIF_EXPR, TRUTH_ORIF_EXPR };
      for (i = 0; i < 8; i++)
	{
	  tree id = omp_reduction_id (predef_codes[i], NULL_TREE, NULL_TREE);
	  const char *n1 = IDENTIFIER_POINTER (DECL_NAME (udr));
	  const char *n2 = IDENTIFIER_POINTER (id);
	  if (strncmp (n1, n2, IDENTIFIER_LENGTH (id)) == 0
	      && (n1[IDENTIFIER_LENGTH (id)] == '~'
		  || n1[IDENTIFIER_LENGTH (id)] == '\0'))
	    break;
	}

      /* min and max are predeclared only for non-complex arithmetic
	 types, so check for them separately.  */
      if (i == 8
	  && TREE_CODE (type) != COMPLEX_TYPE)
	{
	  const char prefix_minmax[] = "omp declare reduction m";
	  size_t prefix_size = sizeof (prefix_minmax) - 1;
	  const char *n = IDENTIFIER_POINTER (DECL_NAME (udr));
	  if (strncmp (IDENTIFIER_POINTER (DECL_NAME (udr)),
		       prefix_minmax, prefix_size) == 0
	      && ((n[prefix_size] == 'i' && n[prefix_size + 1] == 'n')
		  || (n[prefix_size] == 'a' && n[prefix_size + 1] == 'x'))
	      && (n[prefix_size + 2] == '~' || n[prefix_size + 2] == '\0'))
	    i = 0;
	}
      if (i < 8)
	{
	  error_at (loc, "predeclared arithmetic type %qT in "
			 "%<#pragma omp declare reduction%>", type);
	  return;
	}
    }
  else if (TREE_CODE (type) == FUNCTION_TYPE
	   || TREE_CODE (type) == METHOD_TYPE
	   || TREE_CODE (type) == ARRAY_TYPE)
    {
      error_at (loc, "function or array type %qT in "
		     "%<#pragma omp declare reduction%>", type);
      return;
    }
  else if (TREE_CODE (type) == REFERENCE_TYPE)
    {
      error_at (loc, "reference type %qT in %<#pragma omp declare reduction%>",
		type);
      return;
    }
  else if (TYPE_QUALS_NO_ADDR_SPACE (type))
    {
      error_at (loc, "const, volatile or __restrict qualified type %qT in "
		     "%<#pragma omp declare reduction%>", type);
      return;
    }

  tree body = DECL_SAVED_TREE (udr);
  if (body == NULL_TREE || TREE_CODE (body) != STATEMENT_LIST)
    return;

  tree_stmt_iterator tsi;
  struct cp_check_omp_declare_reduction_data data;
  memset (data.stmts, 0, sizeof data.stmts);
  for (i = 0, tsi = tsi_start (body);
       i < 7 && !tsi_end_p (tsi);
       i++, tsi_next (&tsi))
    data.stmts[i] = tsi_stmt (tsi);
  data.loc = loc;
  gcc_assert (tsi_end_p (tsi));
  if (i >= 3)
    {
      gcc_assert (TREE_CODE (data.stmts[0]) == DECL_EXPR
		  && TREE_CODE (data.stmts[1]) == DECL_EXPR);
      if (TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0])))
	return;
      data.combiner_p = true;
      if (cp_walk_tree (&data.stmts[2], cp_check_omp_declare_reduction_r,
			&data, NULL))
	TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0])) = 1;
    }
  if (i >= 6)
    {
      gcc_assert (TREE_CODE (data.stmts[3]) == DECL_EXPR
		  && TREE_CODE (data.stmts[4]) == DECL_EXPR);
      data.combiner_p = false;
      if (cp_walk_tree (&data.stmts[5], cp_check_omp_declare_reduction_r,
			&data, NULL)
	  || cp_walk_tree (&DECL_INITIAL (DECL_EXPR_DECL (data.stmts[3])),
			   cp_check_omp_declare_reduction_r, &data, NULL))
	TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0])) = 1;
      if (i == 7)
	gcc_assert (TREE_CODE (data.stmts[6]) == DECL_EXPR);
    }
}

/* Helper function of finish_omp_clauses.  Clone STMT as if we were making
   an inline call.  But, remap the OMP_DECL1 VAR_DECL (omp_out resp.
   omp_orig) to PLACEHOLDER and OMP_DECL2 VAR_DECL (omp_in resp. omp_priv)
   to DECL.
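   E.g. for a hypothetical combiner omp_out.merge (omp_in), the stored
   body is re-cloned for every reduction clause using the UDR, with
   omp_out remapped to the per-clause placeholder VAR_DECL and omp_in
   remapped to the actual reduction variable.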
*/ static tree clone_omp_udr (tree stmt, tree omp_decl1, tree omp_decl2, tree decl, tree placeholder) { copy_body_data id; hash_map<tree, tree> decl_map; decl_map.put (omp_decl1, placeholder); decl_map.put (omp_decl2, decl); memset (&id, 0, sizeof (id)); id.src_fn = DECL_CONTEXT (omp_decl1); id.dst_fn = current_function_decl; id.src_cfun = DECL_STRUCT_FUNCTION (id.src_fn); id.decl_map = &decl_map; id.copy_decl = copy_decl_no_change; id.transform_call_graph_edges = CB_CGE_DUPLICATE; id.transform_new_cfg = true; id.transform_return_to_modify = false; id.transform_lang_insert_block = NULL; id.eh_lp_nr = 0; walk_tree (&stmt, copy_tree_body_r, &id, NULL); return stmt; } /* Helper function of finish_omp_clauses, called via cp_walk_tree. Find OMP_CLAUSE_PLACEHOLDER (passed in DATA) in *TP. */ static tree find_omp_placeholder_r (tree *tp, int *, void *data) { if (*tp == (tree) data) return *tp; return NULL_TREE; } /* Helper function of finish_omp_clauses. Handle OMP_CLAUSE_REDUCTION C. Return true if there is some error and the clause should be removed. */ static bool finish_omp_reduction_clause (tree c, bool *need_default_ctor, bool *need_dtor) { tree t = OMP_CLAUSE_DECL (c); bool predefined = false; tree type = TREE_TYPE (t); if (TREE_CODE (type) == REFERENCE_TYPE) type = TREE_TYPE (type); if (type == error_mark_node) return true; else if (ARITHMETIC_TYPE_P (type)) switch (OMP_CLAUSE_REDUCTION_CODE (c)) { case PLUS_EXPR: case MULT_EXPR: case MINUS_EXPR: predefined = true; break; case MIN_EXPR: case MAX_EXPR: if (TREE_CODE (type) == COMPLEX_TYPE) break; predefined = true; break; case BIT_AND_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: if (FLOAT_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE) break; predefined = true; break; case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: if (FLOAT_TYPE_P (type)) break; predefined = true; break; default: break; } else if (TREE_CODE (type) == ARRAY_TYPE || TYPE_READONLY (type)) { error ("%qE has invalid type for %<reduction%>", t); return true; } else if (!processing_template_decl) { t = require_complete_type (t); if (t == error_mark_node) return true; OMP_CLAUSE_DECL (c) = t; } if (predefined) { OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL_TREE; return false; } else if (processing_template_decl) return false; tree id = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c); type = TYPE_MAIN_VARIANT (TREE_TYPE (t)); if (TREE_CODE (type) == REFERENCE_TYPE) type = TREE_TYPE (type); OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL_TREE; if (id == NULL_TREE) id = omp_reduction_id (OMP_CLAUSE_REDUCTION_CODE (c), NULL_TREE, NULL_TREE); id = omp_reduction_lookup (OMP_CLAUSE_LOCATION (c), id, type, NULL, NULL); if (id) { if (id == error_mark_node) return true; id = OVL_CURRENT (id); mark_used (id); tree body = DECL_SAVED_TREE (id); if (!body) return true; if (TREE_CODE (body) == STATEMENT_LIST) { tree_stmt_iterator tsi; tree placeholder = NULL_TREE; int i; tree stmts[7]; tree atype = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (id))); atype = TREE_TYPE (atype); bool need_static_cast = !same_type_p (type, atype); memset (stmts, 0, sizeof stmts); for (i = 0, tsi = tsi_start (body); i < 7 && !tsi_end_p (tsi); i++, tsi_next (&tsi)) stmts[i] = tsi_stmt (tsi); gcc_assert (tsi_end_p (tsi)); if (i >= 3) { gcc_assert (TREE_CODE (stmts[0]) == DECL_EXPR && TREE_CODE (stmts[1]) == DECL_EXPR); placeholder = build_lang_decl (VAR_DECL, NULL_TREE, type); DECL_ARTIFICIAL (placeholder) = 1; DECL_IGNORED_P (placeholder) = 1; OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = placeholder; if (TREE_ADDRESSABLE (DECL_EXPR_DECL 
(stmts[0]))) cxx_mark_addressable (placeholder); if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[1])) && TREE_CODE (TREE_TYPE (OMP_CLAUSE_DECL (c))) != REFERENCE_TYPE) cxx_mark_addressable (OMP_CLAUSE_DECL (c)); tree omp_out = placeholder; tree omp_in = convert_from_reference (OMP_CLAUSE_DECL (c)); if (need_static_cast) { tree rtype = build_reference_type (atype); omp_out = build_static_cast (rtype, omp_out, tf_warning_or_error); omp_in = build_static_cast (rtype, omp_in, tf_warning_or_error); if (omp_out == error_mark_node || omp_in == error_mark_node) return true; omp_out = convert_from_reference (omp_out); omp_in = convert_from_reference (omp_in); } OMP_CLAUSE_REDUCTION_MERGE (c) = clone_omp_udr (stmts[2], DECL_EXPR_DECL (stmts[0]), DECL_EXPR_DECL (stmts[1]), omp_in, omp_out); } if (i >= 6) { gcc_assert (TREE_CODE (stmts[3]) == DECL_EXPR && TREE_CODE (stmts[4]) == DECL_EXPR); if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[3]))) cxx_mark_addressable (OMP_CLAUSE_DECL (c)); if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[4]))) cxx_mark_addressable (placeholder); tree omp_priv = convert_from_reference (OMP_CLAUSE_DECL (c)); tree omp_orig = placeholder; if (need_static_cast) { if (i == 7) { error_at (OMP_CLAUSE_LOCATION (c), "user defined reduction with constructor " "initializer for base class %qT", atype); return true; } tree rtype = build_reference_type (atype); omp_priv = build_static_cast (rtype, omp_priv, tf_warning_or_error); omp_orig = build_static_cast (rtype, omp_orig, tf_warning_or_error); if (omp_priv == error_mark_node || omp_orig == error_mark_node) return true; omp_priv = convert_from_reference (omp_priv); omp_orig = convert_from_reference (omp_orig); } if (i == 6) *need_default_ctor = true; OMP_CLAUSE_REDUCTION_INIT (c) = clone_omp_udr (stmts[5], DECL_EXPR_DECL (stmts[4]), DECL_EXPR_DECL (stmts[3]), omp_priv, omp_orig); if (cp_walk_tree (&OMP_CLAUSE_REDUCTION_INIT (c), find_omp_placeholder_r, placeholder, NULL)) OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c) = 1; } else if (i >= 3) { if (CLASS_TYPE_P (type) && !pod_type_p (type)) *need_default_ctor = true; else { tree init; tree v = convert_from_reference (t); if (AGGREGATE_TYPE_P (TREE_TYPE (v))) init = build_constructor (TREE_TYPE (v), NULL); else init = fold_convert (TREE_TYPE (v), integer_zero_node); OMP_CLAUSE_REDUCTION_INIT (c) = build2 (INIT_EXPR, TREE_TYPE (v), v, init); } } } } if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) *need_dtor = true; else { error ("user defined reduction not found for %qD", t); return true; } return false; } /* For all elements of CLAUSES, validate them vs OpenMP constraints. Remove any elements from the list that are invalid. 
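   Validation happens in two passes over the clause chain: the first pass
   checks each clause mostly in isolation (duplicate decls, integral
   step/length/alignment expressions and the like); the second pass
   handles checks that depend on the clause kind, e.g. which special
   member functions a data-sharing clause requires, recording them via
   cxx_omp_create_clause_info while we are still in a context where those
   lookups can be performed.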
*/ tree finish_omp_clauses (tree clauses) { bitmap_head generic_head, firstprivate_head, lastprivate_head; bitmap_head aligned_head; tree c, t, *pc; bool branch_seen = false; bool copyprivate_seen = false; bitmap_obstack_initialize (NULL); bitmap_initialize (&generic_head, &bitmap_default_obstack); bitmap_initialize (&firstprivate_head, &bitmap_default_obstack); bitmap_initialize (&lastprivate_head, &bitmap_default_obstack); bitmap_initialize (&aligned_head, &bitmap_default_obstack); for (pc = &clauses, c = clauses; c ; c = *pc) { bool remove = false; switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_SHARED: goto check_dup_generic; case OMP_CLAUSE_PRIVATE: goto check_dup_generic; case OMP_CLAUSE_REDUCTION: goto check_dup_generic; case OMP_CLAUSE_COPYPRIVATE: copyprivate_seen = true; goto check_dup_generic; case OMP_CLAUSE_COPYIN: goto check_dup_generic; case OMP_CLAUSE_LINEAR: t = OMP_CLAUSE_DECL (c); if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t)) && TREE_CODE (TREE_TYPE (t)) != POINTER_TYPE) { error ("linear clause applied to non-integral non-pointer " "variable with %qT type", TREE_TYPE (t)); remove = true; break; } t = OMP_CLAUSE_LINEAR_STEP (c); if (t == NULL_TREE) t = integer_one_node; if (t == error_mark_node) { remove = true; break; } else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("linear step expression must be integral"); remove = true; break; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { if (TREE_CODE (OMP_CLAUSE_DECL (c)) == PARM_DECL) t = maybe_constant_value (t); t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); if (TREE_CODE (TREE_TYPE (OMP_CLAUSE_DECL (c))) == POINTER_TYPE) { t = pointer_int_sum (OMP_CLAUSE_LOCATION (c), PLUS_EXPR, OMP_CLAUSE_DECL (c), t); t = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR, sizetype, t, OMP_CLAUSE_DECL (c)); if (t == error_mark_node) { remove = true; break; } } else t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (c)), t); } OMP_CLAUSE_LINEAR_STEP (c) = t; } goto check_dup_generic; check_dup_generic: t = OMP_CLAUSE_DECL (c); if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error ("%qD is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error ("%qE is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t)) || bitmap_bit_p (&lastprivate_head, DECL_UID (t))) { error ("%qD appears more than once in data clauses", t); remove = true; } else bitmap_set_bit (&generic_head, DECL_UID (t)); break; case OMP_CLAUSE_FIRSTPRIVATE: t = OMP_CLAUSE_DECL (c); if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error ("%qD is not a variable in clause %<firstprivate%>", t); else error ("%qE is not a variable in clause %<firstprivate%>", t); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { error ("%qD appears more than once in data clauses", t); remove = true; } else bitmap_set_bit (&firstprivate_head, DECL_UID (t)); break; case OMP_CLAUSE_LASTPRIVATE: t = OMP_CLAUSE_DECL (c); if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error ("%qD is not a variable in clause %<lastprivate%>", t); else error ("%qE is not a variable in clause %<lastprivate%>", t); remove = true; } else if 
(bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&lastprivate_head, DECL_UID (t))) { error ("%qD appears more than once in data clauses", t); remove = true; } else bitmap_set_bit (&lastprivate_head, DECL_UID (t)); break; case OMP_CLAUSE_IF: t = OMP_CLAUSE_IF_EXPR (c); t = maybe_convert_cond (t); if (t == error_mark_node) remove = true; else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_IF_EXPR (c) = t; break; case OMP_CLAUSE_FINAL: t = OMP_CLAUSE_FINAL_EXPR (c); t = maybe_convert_cond (t); if (t == error_mark_node) remove = true; else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_FINAL_EXPR (c) = t; break; case OMP_CLAUSE_NUM_THREADS: t = OMP_CLAUSE_NUM_THREADS_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("num_threads expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_NUM_THREADS_EXPR (c) = t; } break; case OMP_CLAUSE_SCHEDULE: t = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c); if (t == NULL) ; else if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && (OMP_CLAUSE_SCHEDULE_KIND (c) != OMP_CLAUSE_SCHEDULE_CILKFOR) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("schedule chunk size expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_CILKFOR) { t = convert_to_integer (long_integer_type_node, t); if (t == error_mark_node) { remove = true; break; } } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t; } break; case OMP_CLAUSE_SIMDLEN: case OMP_CLAUSE_SAFELEN: t = OMP_CLAUSE_OPERAND (c, 0); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%qs length expression must be integral", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else { t = mark_rvalue_use (t); t = maybe_constant_value (t); if (!processing_template_decl) { if (TREE_CODE (t) != INTEGER_CST || tree_int_cst_sgn (t) != 1) { error ("%qs length expression must be positive constant" " integer expression", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } } OMP_CLAUSE_OPERAND (c, 0) = t; } break; case OMP_CLAUSE_NUM_TEAMS: t = OMP_CLAUSE_NUM_TEAMS_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<num_teams%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_NUM_TEAMS_EXPR (c) = t; } break; case OMP_CLAUSE_ASYNC: t = OMP_CLAUSE_ASYNC_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<async%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_ASYNC_EXPR (c) = t; } break; case OMP_CLAUSE_VECTOR_LENGTH: t = OMP_CLAUSE_VECTOR_LENGTH_EXPR (c); t = maybe_convert_cond (t); if (t == error_mark_node) remove = true; else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); 
OMP_CLAUSE_VECTOR_LENGTH_EXPR (c) = t; break; case OMP_CLAUSE_WAIT: t = OMP_CLAUSE_WAIT_EXPR (c); if (t == error_mark_node) remove = true; else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_WAIT_EXPR (c) = t; break; case OMP_CLAUSE_THREAD_LIMIT: t = OMP_CLAUSE_THREAD_LIMIT_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<thread_limit%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_THREAD_LIMIT_EXPR (c) = t; } break; case OMP_CLAUSE_DEVICE: t = OMP_CLAUSE_DEVICE_ID (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<device%> id must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_DEVICE_ID (c) = t; } break; case OMP_CLAUSE_DIST_SCHEDULE: t = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c); if (t == NULL) ; else if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<dist_schedule%> chunk size expression must be " "integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c) = t; } break; case OMP_CLAUSE_ALIGNED: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) != VAR_DECL && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error ("%qD is not a variable in %<aligned%> clause", t); else error ("%qE is not a variable in %<aligned%> clause", t); remove = true; } else if (!type_dependent_expression_p (t) && TREE_CODE (TREE_TYPE (t)) != POINTER_TYPE && TREE_CODE (TREE_TYPE (t)) != ARRAY_TYPE && (TREE_CODE (TREE_TYPE (t)) != REFERENCE_TYPE || (!POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (t))) && (TREE_CODE (TREE_TYPE (TREE_TYPE (t))) != ARRAY_TYPE)))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE in %<aligned%> clause is neither a pointer nor " "an array nor a reference to pointer or array", t); remove = true; } else if (bitmap_bit_p (&aligned_head, DECL_UID (t))) { error ("%qD appears more than once in %<aligned%> clauses", t); remove = true; } else bitmap_set_bit (&aligned_head, DECL_UID (t)); t = OMP_CLAUSE_ALIGNED_ALIGNMENT (c); if (t == error_mark_node) remove = true; else if (t == NULL_TREE) break; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<aligned%> clause alignment expression must " "be integral"); remove = true; } else { t = mark_rvalue_use (t); t = maybe_constant_value (t); if (!processing_template_decl) { if (TREE_CODE (t) != INTEGER_CST || tree_int_cst_sgn (t) != 1) { error ("%<aligned%> clause alignment expression must be " "positive constant integer expression"); remove = true; } } OMP_CLAUSE_ALIGNED_ALIGNMENT (c) = t; } break; case OMP_CLAUSE_DEPEND: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) == TREE_LIST) { if (handle_omp_array_sections (c)) remove = true; break; } if (t == error_mark_node) remove = true; else if (TREE_CODE (t) != VAR_DECL && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error ("%qD is not a variable in %<depend%> clause", t); else error ("%qE is not a variable in %<depend%> clause", t); remove = true; } else if 
(!processing_template_decl && !cxx_mark_addressable (t)) remove = true; break; case OMP_CLAUSE_MAP: case OMP_CLAUSE_TO: case OMP_CLAUSE_FROM: case OMP_CLAUSE__CACHE_: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) == TREE_LIST) { if (handle_omp_array_sections (c)) remove = true; else { t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) != TREE_LIST && !type_dependent_expression_p (t) && !cp_omp_mappable_type (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "array section does not have mappable type " "in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } } break; } if (t == error_mark_node) remove = true; else if (TREE_CODE (t) != VAR_DECL && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER) break; if (DECL_P (t)) error ("%qD is not a variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error ("%qE is not a variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (TREE_CODE (t) == VAR_DECL && DECL_THREAD_LOCAL_P (t)) { error ("%qD is threadprivate variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (!processing_template_decl && TREE_CODE (TREE_TYPE (t)) != REFERENCE_TYPE && !cxx_mark_addressable (t)) remove = true; else if (!(OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER) && !type_dependent_expression_p (t) && !cp_omp_mappable_type ((TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE) ? TREE_TYPE (TREE_TYPE (t)) : TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD does not have a mappable type in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t))) { if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP) error ("%qD appears more than once in motion clauses", t); else error ("%qD appears more than once in map clauses", t); remove = true; } else bitmap_set_bit (&generic_head, DECL_UID (t)); break; case OMP_CLAUSE_UNIFORM: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error ("%qD is not an argument in %<uniform%> clause", t); else error ("%qE is not an argument in %<uniform%> clause", t); remove = true; break; } goto check_dup_generic; case OMP_CLAUSE_NOWAIT: case OMP_CLAUSE_ORDERED: case OMP_CLAUSE_DEFAULT: case OMP_CLAUSE_UNTIED: case OMP_CLAUSE_COLLAPSE: case OMP_CLAUSE_MERGEABLE: case OMP_CLAUSE_PARALLEL: case OMP_CLAUSE_FOR: case OMP_CLAUSE_SECTIONS: case OMP_CLAUSE_TASKGROUP: case OMP_CLAUSE_PROC_BIND: case OMP_CLAUSE__CILK_FOR_COUNT_: break; case OMP_CLAUSE_INBRANCH: case OMP_CLAUSE_NOTINBRANCH: if (branch_seen) { error ("%<inbranch%> clause is incompatible with " "%<notinbranch%>"); remove = true; } branch_seen = true; break; default: gcc_unreachable (); } if (remove) *pc = OMP_CLAUSE_CHAIN (c); else pc = &OMP_CLAUSE_CHAIN (c); } for (pc = &clauses, c = clauses; c ; c = *pc) { enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c); bool remove = false; bool need_complete_non_reference = false; bool need_default_ctor = false; bool need_copy_ctor = false; bool need_copy_assignment = false; bool need_implicitly_determined = false; bool need_dtor = false; tree type, inner_type; switch (c_kind) { case OMP_CLAUSE_SHARED: need_implicitly_determined = true; break; case OMP_CLAUSE_PRIVATE: need_complete_non_reference = true; need_default_ctor = true; need_dtor = true; need_implicitly_determined = true; 
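          /* A private copy of a class-type variable has to be
             default-constructed on entry to the region and destroyed on
             exit, in each thread, hence the need_default_ctor and
             need_dtor set just above.  */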
break; case OMP_CLAUSE_FIRSTPRIVATE: need_complete_non_reference = true; need_copy_ctor = true; need_dtor = true; need_implicitly_determined = true; break; case OMP_CLAUSE_LASTPRIVATE: need_complete_non_reference = true; need_copy_assignment = true; need_implicitly_determined = true; break; case OMP_CLAUSE_REDUCTION: need_implicitly_determined = true; break; case OMP_CLAUSE_COPYPRIVATE: need_copy_assignment = true; break; case OMP_CLAUSE_COPYIN: need_copy_assignment = true; break; case OMP_CLAUSE_NOWAIT: if (copyprivate_seen) { error_at (OMP_CLAUSE_LOCATION (c), "%<nowait%> clause must not be used together " "with %<copyprivate%>"); *pc = OMP_CLAUSE_CHAIN (c); continue; } /* FALLTHRU */ default: pc = &OMP_CLAUSE_CHAIN (c); continue; } t = OMP_CLAUSE_DECL (c); if (processing_template_decl && !VAR_P (t) && TREE_CODE (t) != PARM_DECL) { pc = &OMP_CLAUSE_CHAIN (c); continue; } switch (c_kind) { case OMP_CLAUSE_LASTPRIVATE: if (!bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { need_default_ctor = true; need_dtor = true; } break; case OMP_CLAUSE_REDUCTION: if (finish_omp_reduction_clause (c, &need_default_ctor, &need_dtor)) remove = true; else t = OMP_CLAUSE_DECL (c); break; case OMP_CLAUSE_COPYIN: if (!VAR_P (t) || !DECL_THREAD_LOCAL_P (t)) { error ("%qE must be %<threadprivate%> for %<copyin%>", t); remove = true; } break; default: break; } if (need_complete_non_reference || need_copy_assignment) { t = require_complete_type (t); if (t == error_mark_node) remove = true; else if (TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE && need_complete_non_reference) { error ("%qE has reference type for %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } } if (need_implicitly_determined) { const char *share_name = NULL; if (VAR_P (t) && DECL_THREAD_LOCAL_P (t)) share_name = "threadprivate"; else switch (cxx_omp_predetermined_sharing (t)) { case OMP_CLAUSE_DEFAULT_UNSPECIFIED: break; case OMP_CLAUSE_DEFAULT_SHARED: /* const vars may be specified in firstprivate clause. */ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE && cxx_omp_const_qual_no_mutable (t)) break; share_name = "shared"; break; case OMP_CLAUSE_DEFAULT_PRIVATE: share_name = "private"; break; default: gcc_unreachable (); } if (share_name) { error ("%qE is predetermined %qs for %qs", t, share_name, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } } /* We're interested in the base element, not arrays. */ inner_type = type = TREE_TYPE (t); while (TREE_CODE (inner_type) == ARRAY_TYPE) inner_type = TREE_TYPE (inner_type); if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION && TREE_CODE (inner_type) == REFERENCE_TYPE) inner_type = TREE_TYPE (inner_type); /* Check for special function availability by building a call to one. Save the results, because later we won't be in the right context for making these queries. */ if (CLASS_TYPE_P (inner_type) && COMPLETE_TYPE_P (inner_type) && (need_default_ctor || need_copy_ctor || need_copy_assignment || need_dtor) && !type_dependent_expression_p (t) && cxx_omp_create_clause_info (c, inner_type, need_default_ctor, need_copy_ctor, need_copy_assignment, need_dtor)) remove = true; if (remove) *pc = OMP_CLAUSE_CHAIN (c); else pc = &OMP_CLAUSE_CHAIN (c); } bitmap_obstack_release (NULL); return clauses; } /* For all variables in the tree_list VARS, mark them as thread local. */ void finish_omp_threadprivate (tree vars) { tree t; /* Mark every variable in VARS to be assigned thread local storage. 
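   For example (hypothetical user code), after

     static int counter;
     #pragma omp threadprivate (counter)

   every thread references its own TLS copy of counter; the checks below
   reject automatic variables, incomplete types and variables that were
   already used before the directive.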
*/ for (t = vars; t; t = TREE_CHAIN (t)) { tree v = TREE_PURPOSE (t); if (error_operand_p (v)) ; else if (!VAR_P (v)) error ("%<threadprivate%> %qD is not file, namespace " "or block scope variable", v); /* If V had already been marked threadprivate, it doesn't matter whether it had been used prior to this point. */ else if (TREE_USED (v) && (DECL_LANG_SPECIFIC (v) == NULL || !CP_DECL_THREADPRIVATE_P (v))) error ("%qE declared %<threadprivate%> after first use", v); else if (! TREE_STATIC (v) && ! DECL_EXTERNAL (v)) error ("automatic variable %qE cannot be %<threadprivate%>", v); else if (! COMPLETE_TYPE_P (complete_type (TREE_TYPE (v)))) error ("%<threadprivate%> %qE has incomplete type", v); else if (TREE_STATIC (v) && TYPE_P (CP_DECL_CONTEXT (v)) && CP_DECL_CONTEXT (v) != current_class_type) error ("%<threadprivate%> %qE directive not " "in %qT definition", v, CP_DECL_CONTEXT (v)); else { /* Allocate a LANG_SPECIFIC structure for V, if needed. */ if (DECL_LANG_SPECIFIC (v) == NULL) { retrofit_lang_decl (v); /* Make sure that DECL_DISCRIMINATOR_P continues to be true after the allocation of the lang_decl structure. */ if (DECL_DISCRIMINATOR_P (v)) DECL_LANG_SPECIFIC (v)->u.base.u2sel = 1; } if (! DECL_THREAD_LOCAL_P (v)) { set_decl_tls_model (v, decl_default_tls_model (v)); /* If rtl has been already set for this var, call make_decl_rtl once again, so that encode_section_info has a chance to look at the new decl flags. */ if (DECL_RTL_SET_P (v)) make_decl_rtl (v); } CP_DECL_THREADPRIVATE_P (v) = 1; } } } /* Build an OpenMP structured block. */ tree begin_omp_structured_block (void) { return do_pushlevel (sk_omp); } tree finish_omp_structured_block (tree block) { return do_poplevel (block); } /* Generate OACC_DATA, with CLAUSES and BLOCK as its compound statement. LOC is the location of the OACC_DATA. */ tree finish_oacc_data (tree clauses, tree block) { tree stmt; block = finish_omp_structured_block (block); stmt = make_node (OACC_DATA); TREE_TYPE (stmt) = void_type_node; OACC_DATA_CLAUSES (stmt) = clauses; OACC_DATA_BODY (stmt) = block; return add_stmt (stmt); } /* Generate OACC_KERNELS, with CLAUSES and BLOCK as its compound statement. LOC is the location of the OACC_KERNELS. */ tree finish_oacc_kernels (tree clauses, tree block) { tree stmt; block = finish_omp_structured_block (block); stmt = make_node (OACC_KERNELS); TREE_TYPE (stmt) = void_type_node; OACC_KERNELS_CLAUSES (stmt) = clauses; OACC_KERNELS_BODY (stmt) = block; return add_stmt (stmt); } /* Generate OACC_PARALLEL, with CLAUSES and BLOCK as its compound statement. LOC is the location of the OACC_PARALLEL. */ tree finish_oacc_parallel (tree clauses, tree block) { tree stmt; block = finish_omp_structured_block (block); stmt = make_node (OACC_PARALLEL); TREE_TYPE (stmt) = void_type_node; OACC_PARALLEL_CLAUSES (stmt) = clauses; OACC_PARALLEL_BODY (stmt) = block; return add_stmt (stmt); } /* Similarly, except force the retention of the BLOCK. 
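   (via keep_next_level), so that the BIND_EXPR built for the body still
   carries its BLOCK when the parallel or task region is outlined into a
   separate function later on.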
*/ tree begin_omp_parallel (void) { keep_next_level (true); return begin_omp_structured_block (); } tree finish_omp_parallel (tree clauses, tree body) { tree stmt; body = finish_omp_structured_block (body); stmt = make_node (OMP_PARALLEL); TREE_TYPE (stmt) = void_type_node; OMP_PARALLEL_CLAUSES (stmt) = clauses; OMP_PARALLEL_BODY (stmt) = body; return add_stmt (stmt); } tree begin_omp_task (void) { keep_next_level (true); return begin_omp_structured_block (); } tree finish_omp_task (tree clauses, tree body) { tree stmt; body = finish_omp_structured_block (body); stmt = make_node (OMP_TASK); TREE_TYPE (stmt) = void_type_node; OMP_TASK_CLAUSES (stmt) = clauses; OMP_TASK_BODY (stmt) = body; return add_stmt (stmt); } /* Helper function for finish_omp_for. Convert Ith random access iterator into integral iterator. Return FALSE if successful. */ static bool handle_omp_for_class_iterator (int i, location_t locus, tree declv, tree initv, tree condv, tree incrv, tree *body, tree *pre_body, tree clauses, tree *lastp) { tree diff, iter_init, iter_incr = NULL, last; tree incr_var = NULL, orig_pre_body, orig_body, c; tree decl = TREE_VEC_ELT (declv, i); tree init = TREE_VEC_ELT (initv, i); tree cond = TREE_VEC_ELT (condv, i); tree incr = TREE_VEC_ELT (incrv, i); tree iter = decl; location_t elocus = locus; if (init && EXPR_HAS_LOCATION (init)) elocus = EXPR_LOCATION (init); switch (TREE_CODE (cond)) { case GT_EXPR: case GE_EXPR: case LT_EXPR: case LE_EXPR: case NE_EXPR: if (TREE_OPERAND (cond, 1) == iter) cond = build2 (swap_tree_comparison (TREE_CODE (cond)), TREE_TYPE (cond), iter, TREE_OPERAND (cond, 0)); if (TREE_OPERAND (cond, 0) != iter) cond = error_mark_node; else { tree tem = build_x_binary_op (EXPR_LOCATION (cond), TREE_CODE (cond), iter, ERROR_MARK, TREE_OPERAND (cond, 1), ERROR_MARK, NULL, tf_warning_or_error); if (error_operand_p (tem)) return true; } break; default: cond = error_mark_node; break; } if (cond == error_mark_node) { error_at (elocus, "invalid controlling predicate"); return true; } diff = build_x_binary_op (elocus, MINUS_EXPR, TREE_OPERAND (cond, 1), ERROR_MARK, iter, ERROR_MARK, NULL, tf_warning_or_error); if (error_operand_p (diff)) return true; if (TREE_CODE (TREE_TYPE (diff)) != INTEGER_TYPE) { error_at (elocus, "difference between %qE and %qD does not have integer type", TREE_OPERAND (cond, 1), iter); return true; } switch (TREE_CODE (incr)) { case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: if (TREE_OPERAND (incr, 0) != iter) { incr = error_mark_node; break; } iter_incr = build_x_unary_op (EXPR_LOCATION (incr), TREE_CODE (incr), iter, tf_warning_or_error); if (error_operand_p (iter_incr)) return true; else if (TREE_CODE (incr) == PREINCREMENT_EXPR || TREE_CODE (incr) == POSTINCREMENT_EXPR) incr = integer_one_node; else incr = integer_minus_one_node; break; case MODIFY_EXPR: if (TREE_OPERAND (incr, 0) != iter) incr = error_mark_node; else if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR) { tree rhs = TREE_OPERAND (incr, 1); if (TREE_OPERAND (rhs, 0) == iter) { if (TREE_CODE (TREE_TYPE (TREE_OPERAND (rhs, 1))) != INTEGER_TYPE) incr = error_mark_node; else { iter_incr = build_x_modify_expr (EXPR_LOCATION (rhs), iter, TREE_CODE (rhs), TREE_OPERAND (rhs, 1), tf_warning_or_error); if (error_operand_p (iter_incr)) return true; incr = TREE_OPERAND (rhs, 1); incr = cp_convert (TREE_TYPE (diff), incr, tf_warning_or_error); if (TREE_CODE (rhs) == MINUS_EXPR) { incr = build1 
(NEGATE_EXPR, TREE_TYPE (diff), incr); incr = fold_if_not_in_template (incr); } if (TREE_CODE (incr) != INTEGER_CST && (TREE_CODE (incr) != NOP_EXPR || (TREE_CODE (TREE_OPERAND (incr, 0)) != INTEGER_CST))) iter_incr = NULL; } } else if (TREE_OPERAND (rhs, 1) == iter) { if (TREE_CODE (TREE_TYPE (TREE_OPERAND (rhs, 0))) != INTEGER_TYPE || TREE_CODE (rhs) != PLUS_EXPR) incr = error_mark_node; else { iter_incr = build_x_binary_op (EXPR_LOCATION (rhs), PLUS_EXPR, TREE_OPERAND (rhs, 0), ERROR_MARK, iter, ERROR_MARK, NULL, tf_warning_or_error); if (error_operand_p (iter_incr)) return true; iter_incr = build_x_modify_expr (EXPR_LOCATION (rhs), iter, NOP_EXPR, iter_incr, tf_warning_or_error); if (error_operand_p (iter_incr)) return true; incr = TREE_OPERAND (rhs, 0); iter_incr = NULL; } } else incr = error_mark_node; } else incr = error_mark_node; break; default: incr = error_mark_node; break; } if (incr == error_mark_node) { error_at (elocus, "invalid increment expression"); return true; } incr = cp_convert (TREE_TYPE (diff), incr, tf_warning_or_error); for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE && OMP_CLAUSE_DECL (c) == iter) break; decl = create_temporary_var (TREE_TYPE (diff)); pushdecl (decl); add_decl_expr (decl); last = create_temporary_var (TREE_TYPE (diff)); pushdecl (last); add_decl_expr (last); if (c && iter_incr == NULL) { incr_var = create_temporary_var (TREE_TYPE (diff)); pushdecl (incr_var); add_decl_expr (incr_var); } gcc_assert (stmts_are_full_exprs_p ()); orig_pre_body = *pre_body; *pre_body = push_stmt_list (); if (orig_pre_body) add_stmt (orig_pre_body); if (init != NULL) finish_expr_stmt (build_x_modify_expr (elocus, iter, NOP_EXPR, init, tf_warning_or_error)); init = build_int_cst (TREE_TYPE (diff), 0); if (c && iter_incr == NULL) { finish_expr_stmt (build_x_modify_expr (elocus, incr_var, NOP_EXPR, incr, tf_warning_or_error)); incr = incr_var; iter_incr = build_x_modify_expr (elocus, iter, PLUS_EXPR, incr, tf_warning_or_error); } finish_expr_stmt (build_x_modify_expr (elocus, last, NOP_EXPR, init, tf_warning_or_error)); *pre_body = pop_stmt_list (*pre_body); cond = cp_build_binary_op (elocus, TREE_CODE (cond), decl, diff, tf_warning_or_error); incr = build_modify_expr (elocus, decl, NULL_TREE, PLUS_EXPR, elocus, incr, NULL_TREE); orig_body = *body; *body = push_stmt_list (); iter_init = build2 (MINUS_EXPR, TREE_TYPE (diff), decl, last); iter_init = build_x_modify_expr (elocus, iter, PLUS_EXPR, iter_init, tf_warning_or_error); iter_init = build1 (NOP_EXPR, void_type_node, iter_init); finish_expr_stmt (iter_init); finish_expr_stmt (build_x_modify_expr (elocus, last, NOP_EXPR, decl, tf_warning_or_error)); add_stmt (orig_body); *body = pop_stmt_list (*body); if (c) { OMP_CLAUSE_LASTPRIVATE_STMT (c) = push_stmt_list (); finish_expr_stmt (iter_incr); OMP_CLAUSE_LASTPRIVATE_STMT (c) = pop_stmt_list (OMP_CLAUSE_LASTPRIVATE_STMT (c)); } TREE_VEC_ELT (declv, i) = decl; TREE_VEC_ELT (initv, i) = init; TREE_VEC_ELT (condv, i) = cond; TREE_VEC_ELT (incrv, i) = incr; *lastp = last; return false; } /* Build and validate an OMP_FOR statement. CLAUSES, BODY, COND, INCR are directly for their associated operands in the statement. DECL and INIT are a combo; if DECL is NULL then INIT ought to be a MODIFY_EXPR, and the DECL should be extracted. PRE_BODY are optional statements that need to go before the loop into its sk_omp scope. 
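   As an illustration (hypothetical input), for

     #pragma omp parallel for
     for (i = 0; i < n; i++) ...

   DECLV holds i, INITV the initialization i = 0 (or a MODIFY_EXPR to be
   decomposed), CONDV the comparison i < n and INCRV the increment i++,
   with one TREE_VEC slot per collapsed loop nest level.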
*/ tree finish_omp_for (location_t locus, enum tree_code code, tree declv, tree initv, tree condv, tree incrv, tree body, tree pre_body, tree clauses) { tree omp_for = NULL, orig_incr = NULL; tree decl = NULL, init, cond, incr, orig_decl = NULL_TREE, block = NULL_TREE; tree last = NULL_TREE; location_t elocus; int i; gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv)); gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv)); gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv)); for (i = 0; i < TREE_VEC_LENGTH (declv); i++) { decl = TREE_VEC_ELT (declv, i); init = TREE_VEC_ELT (initv, i); cond = TREE_VEC_ELT (condv, i); incr = TREE_VEC_ELT (incrv, i); elocus = locus; if (decl == NULL) { if (init != NULL) switch (TREE_CODE (init)) { case MODIFY_EXPR: decl = TREE_OPERAND (init, 0); init = TREE_OPERAND (init, 1); break; case MODOP_EXPR: if (TREE_CODE (TREE_OPERAND (init, 1)) == NOP_EXPR) { decl = TREE_OPERAND (init, 0); init = TREE_OPERAND (init, 2); } break; default: break; } if (decl == NULL) { error_at (locus, "expected iteration declaration or initialization"); return NULL; } } if (init && EXPR_HAS_LOCATION (init)) elocus = EXPR_LOCATION (init); if (cond == NULL) { error_at (elocus, "missing controlling predicate"); return NULL; } if (incr == NULL) { error_at (elocus, "missing increment expression"); return NULL; } TREE_VEC_ELT (declv, i) = decl; TREE_VEC_ELT (initv, i) = init; } if (dependent_omp_for_p (declv, initv, condv, incrv)) { tree stmt; stmt = make_node (code); for (i = 0; i < TREE_VEC_LENGTH (declv); i++) { /* This is really just a place-holder. We'll be decomposing this again and going through the cp_build_modify_expr path below when we instantiate the thing. */ TREE_VEC_ELT (initv, i) = build2 (MODIFY_EXPR, void_type_node, TREE_VEC_ELT (declv, i), TREE_VEC_ELT (initv, i)); } TREE_TYPE (stmt) = void_type_node; OMP_FOR_INIT (stmt) = initv; OMP_FOR_COND (stmt) = condv; OMP_FOR_INCR (stmt) = incrv; OMP_FOR_BODY (stmt) = body; OMP_FOR_PRE_BODY (stmt) = pre_body; OMP_FOR_CLAUSES (stmt) = clauses; SET_EXPR_LOCATION (stmt, locus); return add_stmt (stmt); } if (processing_template_decl) orig_incr = make_tree_vec (TREE_VEC_LENGTH (incrv)); for (i = 0; i < TREE_VEC_LENGTH (declv); ) { decl = TREE_VEC_ELT (declv, i); init = TREE_VEC_ELT (initv, i); cond = TREE_VEC_ELT (condv, i); incr = TREE_VEC_ELT (incrv, i); if (orig_incr) TREE_VEC_ELT (orig_incr, i) = incr; elocus = locus; if (init && EXPR_HAS_LOCATION (init)) elocus = EXPR_LOCATION (init); if (!DECL_P (decl)) { error_at (elocus, "expected iteration declaration or initialization"); return NULL; } if (incr && TREE_CODE (incr) == MODOP_EXPR) { if (orig_incr) TREE_VEC_ELT (orig_incr, i) = incr; incr = cp_build_modify_expr (TREE_OPERAND (incr, 0), TREE_CODE (TREE_OPERAND (incr, 1)), TREE_OPERAND (incr, 2), tf_warning_or_error); } if (CLASS_TYPE_P (TREE_TYPE (decl))) { if (code == OMP_SIMD) { error_at (elocus, "%<#pragma omp simd%> used with class " "iteration variable %qE", decl); return NULL; } if (code == CILK_FOR && i == 0) orig_decl = decl; if (handle_omp_for_class_iterator (i, locus, declv, initv, condv, incrv, &body, &pre_body, clauses, &last)) return NULL; continue; } if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)) && !TYPE_PTR_P (TREE_TYPE (decl))) { error_at (elocus, "invalid type for iteration variable %qE", decl); return NULL; } if (!processing_template_decl) { init = fold_build_cleanup_point_expr (TREE_TYPE (init), init); init = cp_build_modify_expr (decl, NOP_EXPR, init, tf_warning_or_error); } else init 
= build2 (MODIFY_EXPR, void_type_node, decl, init); if (cond && TREE_SIDE_EFFECTS (cond) && COMPARISON_CLASS_P (cond) && !processing_template_decl) { tree t = TREE_OPERAND (cond, 0); if (TREE_SIDE_EFFECTS (t) && t != decl && (TREE_CODE (t) != NOP_EXPR || TREE_OPERAND (t, 0) != decl)) TREE_OPERAND (cond, 0) = fold_build_cleanup_point_expr (TREE_TYPE (t), t); t = TREE_OPERAND (cond, 1); if (TREE_SIDE_EFFECTS (t) && t != decl && (TREE_CODE (t) != NOP_EXPR || TREE_OPERAND (t, 0) != decl)) TREE_OPERAND (cond, 1) = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } if (decl == error_mark_node || init == error_mark_node) return NULL; TREE_VEC_ELT (declv, i) = decl; TREE_VEC_ELT (initv, i) = init; TREE_VEC_ELT (condv, i) = cond; TREE_VEC_ELT (incrv, i) = incr; i++; } if (IS_EMPTY_STMT (pre_body)) pre_body = NULL; if (code == CILK_FOR && !processing_template_decl) block = push_stmt_list (); omp_for = c_finish_omp_for (locus, code, declv, initv, condv, incrv, body, pre_body); if (omp_for == NULL) { if (block) pop_stmt_list (block); return NULL; } for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INCR (omp_for)); i++) { decl = TREE_OPERAND (TREE_VEC_ELT (OMP_FOR_INIT (omp_for), i), 0); incr = TREE_VEC_ELT (OMP_FOR_INCR (omp_for), i); if (TREE_CODE (incr) != MODIFY_EXPR) continue; if (TREE_SIDE_EFFECTS (TREE_OPERAND (incr, 1)) && BINARY_CLASS_P (TREE_OPERAND (incr, 1)) && !processing_template_decl) { tree t = TREE_OPERAND (TREE_OPERAND (incr, 1), 0); if (TREE_SIDE_EFFECTS (t) && t != decl && (TREE_CODE (t) != NOP_EXPR || TREE_OPERAND (t, 0) != decl)) TREE_OPERAND (TREE_OPERAND (incr, 1), 0) = fold_build_cleanup_point_expr (TREE_TYPE (t), t); t = TREE_OPERAND (TREE_OPERAND (incr, 1), 1); if (TREE_SIDE_EFFECTS (t) && t != decl && (TREE_CODE (t) != NOP_EXPR || TREE_OPERAND (t, 0) != decl)) TREE_OPERAND (TREE_OPERAND (incr, 1), 1) = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } if (orig_incr) TREE_VEC_ELT (OMP_FOR_INCR (omp_for), i) = TREE_VEC_ELT (orig_incr, i); } OMP_FOR_CLAUSES (omp_for) = clauses; if (block) { tree omp_par = make_node (OMP_PARALLEL); TREE_TYPE (omp_par) = void_type_node; OMP_PARALLEL_CLAUSES (omp_par) = NULL_TREE; tree bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL); TREE_SIDE_EFFECTS (bind) = 1; BIND_EXPR_BODY (bind) = pop_stmt_list (block); OMP_PARALLEL_BODY (omp_par) = bind; if (OMP_FOR_PRE_BODY (omp_for)) { add_stmt (OMP_FOR_PRE_BODY (omp_for)); OMP_FOR_PRE_BODY (omp_for) = NULL_TREE; } init = TREE_VEC_ELT (OMP_FOR_INIT (omp_for), 0); decl = TREE_OPERAND (init, 0); cond = TREE_VEC_ELT (OMP_FOR_COND (omp_for), 0); incr = TREE_VEC_ELT (OMP_FOR_INCR (omp_for), 0); tree t = TREE_OPERAND (cond, 1), c, clauses, *pc; clauses = OMP_FOR_CLAUSES (omp_for); OMP_FOR_CLAUSES (omp_for) = NULL_TREE; for (pc = &clauses; *pc; ) if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_SCHEDULE) { gcc_assert (OMP_FOR_CLAUSES (omp_for) == NULL_TREE); OMP_FOR_CLAUSES (omp_for) = *pc; *pc = OMP_CLAUSE_CHAIN (*pc); OMP_CLAUSE_CHAIN (OMP_FOR_CLAUSES (omp_for)) = NULL_TREE; } else { gcc_assert (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_FIRSTPRIVATE); pc = &OMP_CLAUSE_CHAIN (*pc); } if (TREE_CODE (t) != INTEGER_CST) { TREE_OPERAND (cond, 1) = get_temp_regvar (TREE_TYPE (t), t); c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = TREE_OPERAND (cond, 1); OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } if (TREE_CODE (incr) == MODIFY_EXPR) { t = TREE_OPERAND (TREE_OPERAND (incr, 1), 1); if (TREE_CODE (t) != INTEGER_CST) { TREE_OPERAND (TREE_OPERAND (incr, 1), 1) = get_temp_regvar (TREE_TYPE 
(t), t); c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = TREE_OPERAND (TREE_OPERAND (incr, 1), 1); OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } } t = TREE_OPERAND (init, 1); if (TREE_CODE (t) != INTEGER_CST) { TREE_OPERAND (init, 1) = get_temp_regvar (TREE_TYPE (t), t); c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = TREE_OPERAND (init, 1); OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } if (orig_decl && orig_decl != decl) { c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = orig_decl; OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } if (last) { c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = last; OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } c = build_omp_clause (input_location, OMP_CLAUSE_PRIVATE); OMP_CLAUSE_DECL (c) = decl; OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; c = build_omp_clause (input_location, OMP_CLAUSE__CILK_FOR_COUNT_); OMP_CLAUSE_OPERAND (c, 0) = cilk_for_number_of_iterations (omp_for); OMP_CLAUSE_CHAIN (c) = clauses; OMP_PARALLEL_CLAUSES (omp_par) = finish_omp_clauses (c); add_stmt (omp_par); return omp_par; } else if (code == CILK_FOR && processing_template_decl) { tree c, clauses = OMP_FOR_CLAUSES (omp_for); if (orig_decl && orig_decl != decl) { c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = orig_decl; OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } if (last) { c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = last; OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } OMP_FOR_CLAUSES (omp_for) = clauses; } return omp_for; } void finish_omp_atomic (enum tree_code code, enum tree_code opcode, tree lhs, tree rhs, tree v, tree lhs1, tree rhs1, bool seq_cst) { tree orig_lhs; tree orig_rhs; tree orig_v; tree orig_lhs1; tree orig_rhs1; bool dependent_p; tree stmt; orig_lhs = lhs; orig_rhs = rhs; orig_v = v; orig_lhs1 = lhs1; orig_rhs1 = rhs1; dependent_p = false; stmt = NULL_TREE; /* Even in a template, we can detect invalid uses of the atomic pragma if neither LHS nor RHS is type-dependent. 
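   As a reminder of the operand roles (hypothetical example), for

     #pragma omp atomic capture
     { v = x; x = x + expr; }

   LHS is x, RHS is expr, V is v, and LHS1/RHS1 are the other occurrences
   of x, which the checks below require to name the same memory location.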
void
finish_omp_atomic (enum tree_code code, enum tree_code opcode, tree lhs,
                   tree rhs, tree v, tree lhs1, tree rhs1, bool seq_cst)
{
  tree orig_lhs;
  tree orig_rhs;
  tree orig_v;
  tree orig_lhs1;
  tree orig_rhs1;
  bool dependent_p;
  tree stmt;

  orig_lhs = lhs;
  orig_rhs = rhs;
  orig_v = v;
  orig_lhs1 = lhs1;
  orig_rhs1 = rhs1;
  dependent_p = false;
  stmt = NULL_TREE;

  /* Even in a template, we can detect invalid uses of the atomic pragma
     if neither LHS nor RHS is type-dependent.  */
  if (processing_template_decl)
    {
      dependent_p = (type_dependent_expression_p (lhs)
                     || (rhs && type_dependent_expression_p (rhs))
                     || (v && type_dependent_expression_p (v))
                     || (lhs1 && type_dependent_expression_p (lhs1))
                     || (rhs1 && type_dependent_expression_p (rhs1)));
      if (!dependent_p)
        {
          lhs = build_non_dependent_expr (lhs);
          if (rhs)
            rhs = build_non_dependent_expr (rhs);
          if (v)
            v = build_non_dependent_expr (v);
          if (lhs1)
            lhs1 = build_non_dependent_expr (lhs1);
          if (rhs1)
            rhs1 = build_non_dependent_expr (rhs1);
        }
    }
  if (!dependent_p)
    {
      bool swapped = false;
      if (rhs1 && cp_tree_equal (lhs, rhs))
        {
          tree tem = rhs;
          rhs = rhs1;
          rhs1 = tem;
          swapped = !commutative_tree_code (opcode);
        }
      if (rhs1 && !cp_tree_equal (lhs, rhs1))
        {
          if (code == OMP_ATOMIC)
            error ("%<#pragma omp atomic update%> uses two different "
                   "expressions for memory");
          else
            error ("%<#pragma omp atomic capture%> uses two different "
                   "expressions for memory");
          return;
        }
      if (lhs1 && !cp_tree_equal (lhs, lhs1))
        {
          if (code == OMP_ATOMIC)
            error ("%<#pragma omp atomic update%> uses two different "
                   "expressions for memory");
          else
            error ("%<#pragma omp atomic capture%> uses two different "
                   "expressions for memory");
          return;
        }
      stmt = c_finish_omp_atomic (input_location, code, opcode, lhs, rhs,
                                  v, lhs1, rhs1, swapped, seq_cst);
      if (stmt == error_mark_node)
        return;
    }
  if (processing_template_decl)
    {
      if (code == OMP_ATOMIC_READ)
        {
          stmt = build_min_nt_loc (EXPR_LOCATION (orig_lhs),
                                   OMP_ATOMIC_READ, orig_lhs);
          OMP_ATOMIC_SEQ_CST (stmt) = seq_cst;
          stmt = build2 (MODIFY_EXPR, void_type_node, orig_v, stmt);
        }
      else
        {
          if (opcode == NOP_EXPR)
            stmt = build2 (MODIFY_EXPR, void_type_node, orig_lhs, orig_rhs);
          else
            stmt = build2 (opcode, void_type_node, orig_lhs, orig_rhs);
          if (orig_rhs1)
            stmt = build_min_nt_loc (EXPR_LOCATION (orig_rhs1),
                                     COMPOUND_EXPR, orig_rhs1, stmt);
          if (code != OMP_ATOMIC)
            {
              stmt = build_min_nt_loc (EXPR_LOCATION (orig_lhs1),
                                       code, orig_lhs1, stmt);
              OMP_ATOMIC_SEQ_CST (stmt) = seq_cst;
              stmt = build2 (MODIFY_EXPR, void_type_node, orig_v, stmt);
            }
        }
      stmt = build2 (OMP_ATOMIC, void_type_node, integer_zero_node, stmt);
      OMP_ATOMIC_SEQ_CST (stmt) = seq_cst;
    }
  finish_expr_stmt (stmt);
}

void
finish_omp_barrier (void)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
  vec<tree, va_gc> *vec = make_tree_vector ();
  tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
  release_tree_vector (vec);
  finish_expr_stmt (stmt);
}

void
finish_omp_flush (void)
{
  tree fn = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
  vec<tree, va_gc> *vec = make_tree_vector ();
  tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
  release_tree_vector (vec);
  finish_expr_stmt (stmt);
}

void
finish_omp_taskwait (void)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
  vec<tree, va_gc> *vec = make_tree_vector ();
  tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
  release_tree_vector (vec);
  finish_expr_stmt (stmt);
}

void
finish_omp_taskyield (void)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
  vec<tree, va_gc> *vec = make_tree_vector ();
  tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
  release_tree_vector (vec);
  finish_expr_stmt (stmt);
}
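/* The stand-alone directives above lower to plain calls into the runtime
   library; for instance, a source-level

     #pragma omp taskwait

   simply becomes a call to GOMP_taskwait ().  (Illustrative note: the
   mapping from BUILT_IN_GOMP_* builtins to library entry points is
   defined elsewhere, in omp-builtins.def.)  */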
void
finish_omp_cancel (tree clauses)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
  int mask = 0;
  if (find_omp_clause (clauses, OMP_CLAUSE_PARALLEL))
    mask = 1;
  else if (find_omp_clause (clauses, OMP_CLAUSE_FOR))
    mask = 2;
  else if (find_omp_clause (clauses, OMP_CLAUSE_SECTIONS))
    mask = 4;
  else if (find_omp_clause (clauses, OMP_CLAUSE_TASKGROUP))
    mask = 8;
  else
    {
      error ("%<#pragma omp cancel%> must specify one of "
             "%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> clauses");
      return;
    }
  vec<tree, va_gc> *vec = make_tree_vector ();
  tree ifc = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (ifc != NULL_TREE)
    {
      tree type = TREE_TYPE (OMP_CLAUSE_IF_EXPR (ifc));
      ifc = fold_build2_loc (OMP_CLAUSE_LOCATION (ifc), NE_EXPR,
                             boolean_type_node, OMP_CLAUSE_IF_EXPR (ifc),
                             build_zero_cst (type));
    }
  else
    ifc = boolean_true_node;
  vec->quick_push (build_int_cst (integer_type_node, mask));
  vec->quick_push (ifc);
  tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
  release_tree_vector (vec);
  finish_expr_stmt (stmt);
}

void
finish_omp_cancellation_point (tree clauses)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCELLATION_POINT);
  int mask = 0;
  if (find_omp_clause (clauses, OMP_CLAUSE_PARALLEL))
    mask = 1;
  else if (find_omp_clause (clauses, OMP_CLAUSE_FOR))
    mask = 2;
  else if (find_omp_clause (clauses, OMP_CLAUSE_SECTIONS))
    mask = 4;
  else if (find_omp_clause (clauses, OMP_CLAUSE_TASKGROUP))
    mask = 8;
  else
    {
      error ("%<#pragma omp cancellation point%> must specify one of "
             "%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> clauses");
      return;
    }
  vec<tree, va_gc> *vec
    = make_tree_vector_single (build_int_cst (integer_type_node, mask));
  tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
  release_tree_vector (vec);
  finish_expr_stmt (stmt);
}

/* Begin a __transaction_atomic or __transaction_relaxed statement.
   If PCOMPOUND is non-null, this is for a function-transaction-block, and we
   should create an extra compound stmt.  */

tree
begin_transaction_stmt (location_t loc, tree *pcompound, int flags)
{
  tree r;

  if (pcompound)
    *pcompound = begin_compound_stmt (0);

  r = build_stmt (loc, TRANSACTION_EXPR, NULL_TREE);

  /* Only add the statement to the function if support enabled.  */
  if (flag_tm)
    add_stmt (r);
  else
    error_at (loc, ((flags & TM_STMT_ATTR_RELAXED) != 0
                    ? G_("%<__transaction_relaxed%> without "
                         "transactional memory support enabled")
                    : G_("%<__transaction_atomic%> without "
                         "transactional memory support enabled")));

  TRANSACTION_EXPR_BODY (r) = push_stmt_list ();
  TREE_SIDE_EFFECTS (r) = 1;
  return r;
}

/* End a __transaction_atomic or __transaction_relaxed statement.
   If COMPOUND_STMT is non-null, this is for a function-transaction-block,
   and we should end the compound.  If NOEX is non-NULL, we wrap the body in
   a MUST_NOT_THROW_EXPR with NOEX as condition.  */

void
finish_transaction_stmt (tree stmt, tree compound_stmt, int flags, tree noex)
{
  TRANSACTION_EXPR_BODY (stmt) = pop_stmt_list (TRANSACTION_EXPR_BODY (stmt));
  TRANSACTION_EXPR_OUTER (stmt) = (flags & TM_STMT_ATTR_OUTER) != 0;
  TRANSACTION_EXPR_RELAXED (stmt) = (flags & TM_STMT_ATTR_RELAXED) != 0;
  TRANSACTION_EXPR_IS_STMT (stmt) = 1;

  /* noexcept specifications are not allowed for function transactions.  */
  gcc_assert (!(noex && compound_stmt));
  if (noex)
    {
      tree body = build_must_not_throw_expr (TRANSACTION_EXPR_BODY (stmt),
                                             noex);
      /* This may not be true when the STATEMENT_LIST is empty.  */
      if (EXPR_P (body))
        SET_EXPR_LOCATION (body,
                           EXPR_LOCATION (TRANSACTION_EXPR_BODY (stmt)));
      TREE_SIDE_EFFECTS (body) = 1;
      TRANSACTION_EXPR_BODY (stmt) = body;
    }

  if (compound_stmt)
    finish_compound_stmt (compound_stmt);
}
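/* For reference, illustrative examples of the three forms handled by the
   transactional-memory routines here: a function-transaction-block

     int f () __transaction_atomic { return x++; }

   a transaction statement, and a transaction expression

     __transaction_atomic { x++; }
     y = __transaction_atomic (x + 1);

   The expression form is built by build_transaction_expr below.  */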
/* Build a __transaction_atomic or __transaction_relaxed expression.  If
   NOEX is non-NULL, we wrap the body in a MUST_NOT_THROW_EXPR with NOEX as
   condition.  */

tree
build_transaction_expr (location_t loc, tree expr, int flags, tree noex)
{
  tree ret;
  if (noex)
    {
      expr = build_must_not_throw_expr (expr, noex);
      if (EXPR_P (expr))
        SET_EXPR_LOCATION (expr, loc);
      TREE_SIDE_EFFECTS (expr) = 1;
    }
  ret = build1 (TRANSACTION_EXPR, TREE_TYPE (expr), expr);
  if (flags & TM_STMT_ATTR_RELAXED)
    TRANSACTION_EXPR_RELAXED (ret) = 1;
  TREE_SIDE_EFFECTS (ret) = 1;
  SET_EXPR_LOCATION (ret, loc);
  return ret;
}

void
init_cp_semantics (void)
{
}

/* Build a STATIC_ASSERT for a static assertion with the condition
   CONDITION and the message text MESSAGE.  LOCATION is the location
   of the static assertion in the source code.  When MEMBER_P, this
   static assertion is a member of a class.  */

void
finish_static_assert (tree condition, tree message, location_t location,
                      bool member_p)
{
  if (message == NULL_TREE
      || message == error_mark_node
      || condition == NULL_TREE
      || condition == error_mark_node)
    return;

  if (check_for_bare_parameter_packs (condition))
    condition = error_mark_node;

  if (type_dependent_expression_p (condition)
      || value_dependent_expression_p (condition))
    {
      /* We're in a template; build a STATIC_ASSERT and put it in
         the right place.  */
      tree assertion;

      assertion = make_node (STATIC_ASSERT);
      STATIC_ASSERT_CONDITION (assertion) = condition;
      STATIC_ASSERT_MESSAGE (assertion) = message;
      STATIC_ASSERT_SOURCE_LOCATION (assertion) = location;

      if (member_p)
        maybe_add_class_template_decl_list (current_class_type,
                                            assertion,
                                            /*friend_p=*/0);
      else
        add_stmt (assertion);

      return;
    }

  /* Fold the expression and convert it to a boolean value.  */
  condition = instantiate_non_dependent_expr (condition);
  condition = cp_convert (boolean_type_node, condition, tf_warning_or_error);
  condition = maybe_constant_value (condition);

  if (TREE_CODE (condition) == INTEGER_CST && !integer_zerop (condition))
    /* Do nothing; the condition is satisfied.  */
    ;
  else
    {
      location_t saved_loc = input_location;

      input_location = location;
      if (TREE_CODE (condition) == INTEGER_CST
          && integer_zerop (condition))
        /* Report the error.  */
        error ("static assertion failed: %s", TREE_STRING_POINTER (message));
      else if (condition && condition != error_mark_node)
        {
          error ("non-constant condition for static assertion");
          if (require_potential_rvalue_constant_expression (condition))
            cxx_constant_value (condition);
        }
      input_location = saved_loc;
    }
}
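/* For instance (illustrative example), in

     template <typename T>
     struct S { static_assert (sizeof (T) > 1, "too small"); };

   the condition is value-dependent, so finish_static_assert above defers
   the check by building a STATIC_ASSERT node that is re-examined when the
   template is instantiated.  */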
/* Implements the C++0x decltype keyword.  Returns the type of EXPR,
   suitable for use as a type-specifier.

   ID_EXPRESSION_OR_MEMBER_ACCESS_P is true when EXPR was parsed as an
   id-expression or a class member access, FALSE when it was parsed as
   a full expression.  */

tree
finish_decltype_type (tree expr, bool id_expression_or_member_access_p,
                      tsubst_flags_t complain)
{
  tree type = NULL_TREE;

  if (!expr || error_operand_p (expr))
    return error_mark_node;

  if (TYPE_P (expr)
      || TREE_CODE (expr) == TYPE_DECL
      || (TREE_CODE (expr) == BIT_NOT_EXPR
          && TYPE_P (TREE_OPERAND (expr, 0))))
    {
      if (complain & tf_error)
        error ("argument to decltype must be an expression");
      return error_mark_node;
    }

  /* Depending on the resolution of DR 1172, we may later need to
     distinguish instantiation-dependent but not type-dependent
     expressions so that, say, A<decltype(sizeof(T))>::U doesn't
     require 'typename'.  */
  if (instantiation_dependent_expression_p (expr))
    {
      type = cxx_make_type (DECLTYPE_TYPE);
      DECLTYPE_TYPE_EXPR (type) = expr;
      DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P (type)
        = id_expression_or_member_access_p;
      SET_TYPE_STRUCTURAL_EQUALITY (type);

      return type;
    }

  /* The type denoted by decltype(e) is defined as follows:  */

  expr = resolve_nondeduced_context (expr);

  if (invalid_nonstatic_memfn_p (expr, complain))
    return error_mark_node;

  if (type_unknown_p (expr))
    {
      if (complain & tf_error)
        error ("decltype cannot resolve address of overloaded function");
      return error_mark_node;
    }

  /* To get the size of a static data member declared as an array of
     unknown bound, we need to instantiate it.  */
  if (VAR_P (expr)
      && VAR_HAD_UNKNOWN_BOUND (expr)
      && DECL_TEMPLATE_INSTANTIATION (expr))
    instantiate_decl (expr, /*defer_ok*/true, /*expl_inst_mem*/false);

  if (id_expression_or_member_access_p)
    {
      /* If e is an id-expression or a class member access (5.2.5
         [expr.ref]), decltype(e) is defined as the type of the entity
         named by e.  If there is no such entity, or e names a set of
         overloaded functions, the program is ill-formed.  */
      if (identifier_p (expr))
        expr = lookup_name (expr);

      if (INDIRECT_REF_P (expr))
        /* This can happen when the expression is, e.g., "a.b".  Just
           look at the underlying operand.  */
        expr = TREE_OPERAND (expr, 0);

      if (TREE_CODE (expr) == OFFSET_REF
          || TREE_CODE (expr) == MEMBER_REF
          || TREE_CODE (expr) == SCOPE_REF)
        /* We're only interested in the field itself.  If it is a
           BASELINK, we will need to see through it in the next
           step.  */
        expr = TREE_OPERAND (expr, 1);

      if (BASELINK_P (expr))
        /* See through BASELINK nodes to the underlying function.  */
        expr = BASELINK_FUNCTIONS (expr);

      switch (TREE_CODE (expr))
        {
        case FIELD_DECL:
          if (DECL_BIT_FIELD_TYPE (expr))
            {
              type = DECL_BIT_FIELD_TYPE (expr);
              break;
            }
          /* Fall through for fields that aren't bitfields.  */

        case FUNCTION_DECL:
        case VAR_DECL:
        case CONST_DECL:
        case PARM_DECL:
        case RESULT_DECL:
        case TEMPLATE_PARM_INDEX:
          expr = mark_type_use (expr);
          type = TREE_TYPE (expr);
          break;

        case ERROR_MARK:
          type = error_mark_node;
          break;

        case COMPONENT_REF:
        case COMPOUND_EXPR:
          mark_type_use (expr);
          type = is_bitfield_expr_with_lowered_type (expr);
          if (!type)
            type = TREE_TYPE (TREE_OPERAND (expr, 1));
          break;

        case BIT_FIELD_REF:
          gcc_unreachable ();

        case INTEGER_CST:
        case PTRMEM_CST:
          /* We can get here when the id-expression refers to an
             enumerator or non-type template parameter.  */
          type = TREE_TYPE (expr);
          break;

        default:
          /* Handle instantiated template non-type arguments.  */
          type = TREE_TYPE (expr);
          break;
        }
    }
  else
    {
      /* Within a lambda-expression:

         Every occurrence of decltype((x)) where x is a possibly
         parenthesized id-expression that names an entity of automatic
         storage duration is treated as if x were transformed into an
         access to a corresponding data member of the closure type that
         would have been declared if x were a use of the denoted
         entity.  */
      if (outer_automatic_var_p (expr)
          && current_function_decl
          && LAMBDA_FUNCTION_P (current_function_decl))
        type = capture_decltype (expr);
      else if (error_operand_p (expr))
        type = error_mark_node;
      else if (expr == current_class_ptr)
        /* If the expression is just "this", we want the
           cv-unqualified pointer for the "this" type.  */
        type = TYPE_MAIN_VARIANT (TREE_TYPE (expr));
      else
        {
          /* Otherwise, where T is the type of e, if e is an lvalue,
             decltype(e) is defined as T&; if an xvalue, T&&;
             otherwise, T.  */
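          /* For example (illustrative): given "int i;", decltype(i) is
             plain int, since i is an id-expression handled above, whereas
             decltype((i)) reaches this point and yields int&, because the
             parenthesized expression is an lvalue.  */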
          cp_lvalue_kind clk = lvalue_kind (expr);
          type = unlowered_expr_type (expr);
          gcc_assert (TREE_CODE (type) != REFERENCE_TYPE);

          /* For vector types, pick a non-opaque variant.  */
          if (TREE_CODE (type) == VECTOR_TYPE)
            type = strip_typedefs (type);

          if (clk != clk_none && !(clk & clk_class))
            type = cp_build_reference_type (type, (clk & clk_rvalueref));
        }
    }

  return type;
}

/* Called from trait_expr_value to evaluate either __has_nothrow_assign or
   __has_nothrow_copy, depending on assign_p.  */

static bool
classtype_has_nothrow_assign_or_copy_p (tree type, bool assign_p)
{
  tree fns;

  if (assign_p)
    {
      int ix;
      ix = lookup_fnfields_1 (type, ansi_assopname (NOP_EXPR));
      if (ix < 0)
        return false;
      fns = (*CLASSTYPE_METHOD_VEC (type))[ix];
    }
  else if (TYPE_HAS_COPY_CTOR (type))
    {
      /* If construction of the copy constructor was postponed, create
         it now.  */
      if (CLASSTYPE_LAZY_COPY_CTOR (type))
        lazily_declare_fn (sfk_copy_constructor, type);
      if (CLASSTYPE_LAZY_MOVE_CTOR (type))
        lazily_declare_fn (sfk_move_constructor, type);
      fns = CLASSTYPE_CONSTRUCTORS (type);
    }
  else
    return false;

  for (; fns; fns = OVL_NEXT (fns))
    {
      tree fn = OVL_CURRENT (fns);

      if (assign_p)
        {
          if (copy_fn_p (fn) == 0)
            continue;
        }
      else if (copy_fn_p (fn) <= 0)
        continue;

      maybe_instantiate_noexcept (fn);
      if (!TYPE_NOTHROW_P (TREE_TYPE (fn)))
        return false;
    }

  return true;
}

/* Actually evaluates the trait.  */

static bool
trait_expr_value (cp_trait_kind kind, tree type1, tree type2)
{
  enum tree_code type_code1;
  tree t;

  type_code1 = TREE_CODE (type1);

  switch (kind)
    {
    case CPTK_HAS_NOTHROW_ASSIGN:
      type1 = strip_array_types (type1);
      return (!CP_TYPE_CONST_P (type1) && type_code1 != REFERENCE_TYPE
              && (trait_expr_value (CPTK_HAS_TRIVIAL_ASSIGN, type1, type2)
                  || (CLASS_TYPE_P (type1)
                      && classtype_has_nothrow_assign_or_copy_p (type1,
                                                                 true))));

    case CPTK_HAS_TRIVIAL_ASSIGN:
      /* ??? The standard seems to be missing the "or array of such a class
         type" wording for this trait.  */
      type1 = strip_array_types (type1);
      return (!CP_TYPE_CONST_P (type1) && type_code1 != REFERENCE_TYPE
              && (trivial_type_p (type1)
                  || (CLASS_TYPE_P (type1)
                      && TYPE_HAS_TRIVIAL_COPY_ASSIGN (type1))));

    case CPTK_HAS_NOTHROW_CONSTRUCTOR:
      type1 = strip_array_types (type1);
      return (trait_expr_value (CPTK_HAS_TRIVIAL_CONSTRUCTOR, type1, type2)
              || (CLASS_TYPE_P (type1)
                  && (t = locate_ctor (type1))
                  && (maybe_instantiate_noexcept (t),
                      TYPE_NOTHROW_P (TREE_TYPE (t)))));

    case CPTK_HAS_TRIVIAL_CONSTRUCTOR:
      type1 = strip_array_types (type1);
      return (trivial_type_p (type1)
              || (CLASS_TYPE_P (type1) && TYPE_HAS_TRIVIAL_DFLT (type1)));

    case CPTK_HAS_NOTHROW_COPY:
      type1 = strip_array_types (type1);
      return (trait_expr_value (CPTK_HAS_TRIVIAL_COPY, type1, type2)
              || (CLASS_TYPE_P (type1)
                  && classtype_has_nothrow_assign_or_copy_p (type1, false)));

    case CPTK_HAS_TRIVIAL_COPY:
      /* ??? The standard seems to be missing the "or array of such a class
         type" wording for this trait.  */
      type1 = strip_array_types (type1);
      return (trivial_type_p (type1) || type_code1 == REFERENCE_TYPE
              || (CLASS_TYPE_P (type1)
                  && TYPE_HAS_TRIVIAL_COPY_CTOR (type1)));

    case CPTK_HAS_TRIVIAL_DESTRUCTOR:
      type1 = strip_array_types (type1);
      return (trivial_type_p (type1) || type_code1 == REFERENCE_TYPE
              || (CLASS_TYPE_P (type1)
                  && TYPE_HAS_TRIVIAL_DESTRUCTOR (type1)));

    case CPTK_HAS_VIRTUAL_DESTRUCTOR:
      return type_has_virtual_destructor (type1);

    case CPTK_IS_ABSTRACT:
      return (ABSTRACT_CLASS_TYPE_P (type1));

    case CPTK_IS_BASE_OF:
      return (NON_UNION_CLASS_TYPE_P (type1) && NON_UNION_CLASS_TYPE_P (type2)
              && (same_type_ignoring_top_level_qualifiers_p (type1, type2)
                  || DERIVED_FROM_P (type1, type2)));

    case CPTK_IS_CLASS:
      return (NON_UNION_CLASS_TYPE_P (type1));

    case CPTK_IS_EMPTY:
      return (NON_UNION_CLASS_TYPE_P (type1) && CLASSTYPE_EMPTY_P (type1));

    case CPTK_IS_ENUM:
      return (type_code1 == ENUMERAL_TYPE);

    case CPTK_IS_FINAL:
      return (CLASS_TYPE_P (type1) && CLASSTYPE_FINAL (type1));

    case CPTK_IS_LITERAL_TYPE:
      return (literal_type_p (type1));

    case CPTK_IS_POD:
      return (pod_type_p (type1));

    case CPTK_IS_POLYMORPHIC:
      return (CLASS_TYPE_P (type1) && TYPE_POLYMORPHIC_P (type1));

    case CPTK_IS_STD_LAYOUT:
      return (std_layout_type_p (type1));

    case CPTK_IS_TRIVIAL:
      return (trivial_type_p (type1));

    case CPTK_IS_TRIVIALLY_ASSIGNABLE:
      return is_trivially_xible (MODIFY_EXPR, type1, type2);

    case CPTK_IS_TRIVIALLY_CONSTRUCTIBLE:
      return is_trivially_xible (INIT_EXPR, type1, type2);

    case CPTK_IS_TRIVIALLY_COPYABLE:
      return (trivially_copyable_p (type1));

    case CPTK_IS_UNION:
      return (type_code1 == UNION_TYPE);

    default:
      gcc_unreachable ();
      return false;
    }
}

/* If TYPE is an array of unknown bound, or (possibly cv-qualified) void,
   or a complete type, returns true, otherwise false.  */

static bool
check_trait_type (tree type)
{
  if (type == NULL_TREE)
    return true;

  if (TREE_CODE (type) == TREE_LIST)
    return (check_trait_type (TREE_VALUE (type))
            && check_trait_type (TREE_CHAIN (type)));

  if (TREE_CODE (type) == ARRAY_TYPE && !TYPE_DOMAIN (type)
      && COMPLETE_TYPE_P (TREE_TYPE (type)))
    return true;

  if (VOID_TYPE_P (type))
    return true;

  return !!complete_type_or_else (strip_array_types (type), NULL_TREE);
}
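/* Trait expressions reach finish_trait_expr from the compiler's built-in
   trait syntax, e.g. (illustrative uses)

     static_assert (__is_class (S), "");
     static_assert (__is_base_of (B, D), "");

   which is how libstdc++ implements parts of <type_traits>.  */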
/* Process a trait expression.  */

tree
finish_trait_expr (cp_trait_kind kind, tree type1, tree type2)
{
  if (type1 == error_mark_node
      || type2 == error_mark_node)
    return error_mark_node;

  if (processing_template_decl)
    {
      tree trait_expr = make_node (TRAIT_EXPR);
      TREE_TYPE (trait_expr) = boolean_type_node;
      TRAIT_EXPR_TYPE1 (trait_expr) = type1;
      TRAIT_EXPR_TYPE2 (trait_expr) = type2;
      TRAIT_EXPR_KIND (trait_expr) = kind;
      return trait_expr;
    }

  switch (kind)
    {
    case CPTK_HAS_NOTHROW_ASSIGN:
    case CPTK_HAS_TRIVIAL_ASSIGN:
    case CPTK_HAS_NOTHROW_CONSTRUCTOR:
    case CPTK_HAS_TRIVIAL_CONSTRUCTOR:
    case CPTK_HAS_NOTHROW_COPY:
    case CPTK_HAS_TRIVIAL_COPY:
    case CPTK_HAS_TRIVIAL_DESTRUCTOR:
    case CPTK_HAS_VIRTUAL_DESTRUCTOR:
    case CPTK_IS_ABSTRACT:
    case CPTK_IS_EMPTY:
    case CPTK_IS_FINAL:
    case CPTK_IS_LITERAL_TYPE:
    case CPTK_IS_POD:
    case CPTK_IS_POLYMORPHIC:
    case CPTK_IS_STD_LAYOUT:
    case CPTK_IS_TRIVIAL:
    case CPTK_IS_TRIVIALLY_COPYABLE:
      if (!check_trait_type (type1))
        return error_mark_node;
      break;

    case CPTK_IS_TRIVIALLY_ASSIGNABLE:
    case CPTK_IS_TRIVIALLY_CONSTRUCTIBLE:
      if (!check_trait_type (type1)
          || !check_trait_type (type2))
        return error_mark_node;
      break;

    case CPTK_IS_BASE_OF:
      if (NON_UNION_CLASS_TYPE_P (type1) && NON_UNION_CLASS_TYPE_P (type2)
          && !same_type_ignoring_top_level_qualifiers_p (type1, type2)
          && !complete_type_or_else (type2, NULL_TREE))
        /* We already issued an error.  */
        return error_mark_node;
      break;

    case CPTK_IS_CLASS:
    case CPTK_IS_ENUM:
    case CPTK_IS_UNION:
      break;

    default:
      gcc_unreachable ();
    }

  return (trait_expr_value (kind, type1, type2)
          ? boolean_true_node : boolean_false_node);
}

/* Do-nothing variants of functions to handle pragma FLOAT_CONST_DECIMAL64,
   which is ignored for C++.  */

void
set_float_const_decimal64 (void)
{
}

void
clear_float_const_decimal64 (void)
{
}

bool
float_const_decimal64_p (void)
{
  return 0;
}

/* Return true if T designates the implied `this' parameter.  */

bool
is_this_parameter (tree t)
{
  if (!DECL_P (t) || DECL_NAME (t) != this_identifier)
    return false;
  gcc_assert (TREE_CODE (t) == PARM_DECL || is_capture_proxy (t));
  return true;
}

/* Insert the deduced return type for an auto function.  */

void
apply_deduced_return_type (tree fco, tree return_type)
{
  tree result;

  if (return_type == error_mark_node)
    return;

  if (LAMBDA_FUNCTION_P (fco))
    {
      tree lambda = CLASSTYPE_LAMBDA_EXPR (current_class_type);
      LAMBDA_EXPR_RETURN_TYPE (lambda) = return_type;
    }

  if (DECL_CONV_FN_P (fco))
    DECL_NAME (fco) = mangle_conv_op_name_for_type (return_type);

  TREE_TYPE (fco) = change_return_type (return_type, TREE_TYPE (fco));

  result = DECL_RESULT (fco);
  if (result == NULL_TREE)
    return;
  if (TREE_TYPE (result) == return_type)
    return;

  /* We already have a DECL_RESULT from start_preparsed_function.
     Now we need to redo the work it and allocate_struct_function did to
     reflect the new type.  */
  gcc_assert (current_function_decl == fco);
  result = build_decl (input_location, RESULT_DECL, NULL_TREE,
                       TYPE_MAIN_VARIANT (return_type));
  DECL_ARTIFICIAL (result) = 1;
  DECL_IGNORED_P (result) = 1;
  cp_apply_type_quals_to_decl (cp_type_quals (return_type),
                               result);

  DECL_RESULT (fco) = result;

  if (!processing_template_decl)
    {
      if (!VOID_TYPE_P (TREE_TYPE (result)))
        complete_type_or_else (TREE_TYPE (result), NULL_TREE);
      bool aggr = aggregate_value_p (result, fco);
#ifdef PCC_STATIC_STRUCT_RETURN
      cfun->returns_pcc_struct = aggr;
#endif
      cfun->returns_struct = aggr;
    }
}
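/* As an illustrative example of the rules implemented below: in

     int i = 0;
     [=] () { decltype((i)) j = i; }          // j is const int&
     [=] () mutable { decltype((i)) j = i; }  // j is int&

   decltype((i)) denotes a reference to the (possibly const-qualified)
   member of the closure type, even though i has not otherwise been
   captured at that point.  */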
/* DECL is a local variable or parameter from the surrounding scope of a
   lambda-expression.  Returns the decltype for a use of the capture field
   for DECL even if it hasn't been captured yet.  */

static tree
capture_decltype (tree decl)
{
  tree lam = CLASSTYPE_LAMBDA_EXPR (DECL_CONTEXT (current_function_decl));
  /* FIXME do lookup instead of list walk? */
  tree cap = value_member (decl, LAMBDA_EXPR_CAPTURE_LIST (lam));
  tree type;

  if (cap)
    type = TREE_TYPE (TREE_PURPOSE (cap));
  else
    switch (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lam))
      {
      case CPLD_NONE:
        error ("%qD is not captured", decl);
        return error_mark_node;

      case CPLD_COPY:
        type = TREE_TYPE (decl);
        if (TREE_CODE (type) == REFERENCE_TYPE
            && TREE_CODE (TREE_TYPE (type)) != FUNCTION_TYPE)
          type = TREE_TYPE (type);
        break;

      case CPLD_REFERENCE:
        type = TREE_TYPE (decl);
        if (TREE_CODE (type) != REFERENCE_TYPE)
          type = build_reference_type (TREE_TYPE (decl));
        break;

      default:
        gcc_unreachable ();
      }

  if (TREE_CODE (type) != REFERENCE_TYPE)
    {
      if (!LAMBDA_EXPR_MUTABLE_P (lam))
        type = cp_build_qualified_type (type, (cp_type_quals (type)
                                               | TYPE_QUAL_CONST));
      type = build_reference_type (type);
    }
  return type;
}

#include "gt-cp-semantics.h"